diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml new file mode 100644 index 000000000..d2dfa4136 --- /dev/null +++ b/.github/workflows/ansible-lint.yml @@ -0,0 +1,24 @@ +--- +name: ansible-lint +# on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main +on: + - pull_request + +jobs: + build: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.x (latest) + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Run ansible-lint + uses: ansible/ansible-lint@main diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index 7e213caaa..44be1e10d 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -1,12 +1,14 @@ --- name: Black +# on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main on: - push: - branches: - - main - pull_request: - branches: - - main + - pull_request jobs: black: diff --git a/.github/workflows/flake8.yml b/.github/workflows/flake8.yml index 1f68cbff5..bc62cbb3e 100644 --- a/.github/workflows/flake8.yml +++ b/.github/workflows/flake8.yml @@ -1,12 +1,14 @@ --- name: Flake8 +# on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main on: - push: - branches: - - main - pull_request: - branches: - - main + - pull_request jobs: flake8: diff --git a/.github/workflows/isort.yml b/.github/workflows/isort.yml index 676cb7244..8802e97b4 100644 --- a/.github/workflows/isort.yml +++ b/.github/workflows/isort.yml @@ -1,12 +1,14 @@ --- name: Isort +# on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main on: - push: - branches: - - main - pull_request: - branches: - - main + - pull_request jobs: isort: diff --git a/.github/workflows/unit_testing.yml b/.github/workflows/unit_testing.yml index d05b487c7..9844e9162 100644 --- a/.github/workflows/unit_testing.yml +++ b/.github/workflows/unit_testing.yml @@ -1,13 +1,15 @@ --- name: Run compile 
and tests +# on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main on: - push: - branches: - - main - pull_request: - branches: - - main + - pull_request jobs: sanity1: name: Sanity tests with ansible-core==2.15.0 diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 4d6506826..453693154 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -3,457 +3,469 @@ releases: 1.0.0: changes: bugfixes: - - Creating a VM based on a disk_image without specifying the size_gb - - icmp "any" code value in module PBR + - Creating a VM based on a disk_image without specifying the size_gb + - icmp "any" code value in module PBR minor_changes: - - Add meta file for collection - - Allow environment variables for nutanix connection parameters - release_date: '2022-03-02' + - Add meta file for collection + - Allow environment variables for nutanix connection parameters + release_date: "2022-03-02" 1.0.0-beta.1: changes: bugfixes: - - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) - - Fix error messages for get_uuid() reponse [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) - - black fixes [\#30](https://github.com/nutanix/nutanix.ansible/pull/30) - - black fixes [\#32](https://github.com/nutanix/nutanix.ansible/pull/32) - - clear unused files and argument [\#29](https://github.com/nutanix/nutanix.ansible/pull/29) - - device index calculation fixes, updates for get by name functionality[\#254](https://github.com/nutanix/nutanix.ansible/pull/42) - - fixes to get spec from collection [\#17](https://github.com/nutanix/nutanix.ansible/pull/17) - - solve python 2.7 issues [\#41](https://github.com/nutanix/nutanix.ansible/pull/41) - - updates for guest customization spec [\#20](https://github.com/nutanix/nutanix.ansible/pull/20) + - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) + - Fix error messages for get_uuid() reponse 
[\#47](https://github.com/nutanix/nutanix.ansible/pull/47) + - black fixes [\#30](https://github.com/nutanix/nutanix.ansible/pull/30) + - black fixes [\#32](https://github.com/nutanix/nutanix.ansible/pull/32) + - clear unused files and argument [\#29](https://github.com/nutanix/nutanix.ansible/pull/29) + - device index calculation fixes, updates for get by name functionality[\#254](https://github.com/nutanix/nutanix.ansible/pull/42) + - fixes to get spec from collection [\#17](https://github.com/nutanix/nutanix.ansible/pull/17) + - solve python 2.7 issues [\#41](https://github.com/nutanix/nutanix.ansible/pull/41) + - updates for guest customization spec [\#20](https://github.com/nutanix/nutanix.ansible/pull/20) major_changes: - - CICD pipeline using GitHub actions + - CICD pipeline using GitHub actions modules: - - description: Nutanix module for vms - name: ntnx_vms - namespace: '' - release_date: '2022-01-28' + - description: Nutanix module for vms + name: ntnx_vms + namespace: "" + release_date: "2022-01-28" 1.0.0-beta.2: changes: bugfixes: - - Bug/cluster UUID issue68 [\#72](https://github.com/nutanix/nutanix.ansible/pull/72) - - Fix/integ [\#96](https://github.com/nutanix/nutanix.ansible/pull/96) - - Sanity and python fix [\#46](https://github.com/nutanix/nutanix.ansible/pull/46) - - Task/fix failing sanity [\#117](https://github.com/nutanix/nutanix.ansible/pull/117) - - clean up pbrs.py [\#113](https://github.com/nutanix/nutanix.ansible/pull/113) - - code cleanup - fix github issue#59 [\#60](https://github.com/nutanix/nutanix.ansible/pull/60) - - fix project name [\#107](https://github.com/nutanix/nutanix.ansible/pull/107) - - fixed variables names issue74 [\#77](https://github.com/nutanix/nutanix.ansible/pull/77) + - Bug/cluster UUID issue68 [\#72](https://github.com/nutanix/nutanix.ansible/pull/72) + - Fix/integ [\#96](https://github.com/nutanix/nutanix.ansible/pull/96) + - Sanity and python fix [\#46](https://github.com/nutanix/nutanix.ansible/pull/46) + - 
Task/fix failing sanity [\#117](https://github.com/nutanix/nutanix.ansible/pull/117) + - clean up pbrs.py [\#113](https://github.com/nutanix/nutanix.ansible/pull/113) + - code cleanup - fix github issue#59 [\#60](https://github.com/nutanix/nutanix.ansible/pull/60) + - fix project name [\#107](https://github.com/nutanix/nutanix.ansible/pull/107) + - fixed variables names issue74 [\#77](https://github.com/nutanix/nutanix.ansible/pull/77) minor_changes: - - Codegen - Ansible code generator - - Imprv cluster uuid [\#75](https://github.com/nutanix/nutanix.ansible/pull/75) - - Imprv/code coverage [\#97](https://github.com/nutanix/nutanix.ansible/pull/97) - - Imprv/vpcs network prefix [\#81](https://github.com/nutanix/nutanix.ansible/pull/81) + - Codegen - Ansible code generator + - Imprv cluster uuid [\#75](https://github.com/nutanix/nutanix.ansible/pull/75) + - Imprv/code coverage [\#97](https://github.com/nutanix/nutanix.ansible/pull/97) + - Imprv/vpcs network prefix [\#81](https://github.com/nutanix/nutanix.ansible/pull/81) modules: - - description: Nutanix module for floating Ips - name: ntnx_floating_ips - namespace: '' - - description: Nutanix module for policy based routing - name: ntnx_pbrs - namespace: '' - - description: Nutanix module for subnets - name: ntnx_subnets - namespace: '' - - description: Nutanix module for vpcs - name: ntnx_vpcs - namespace: '' - release_date: '2022-02-22' + - description: Nutanix module for floating Ips + name: ntnx_floating_ips + namespace: "" + - description: Nutanix module for policy based routing + name: ntnx_pbrs + namespace: "" + - description: Nutanix module for subnets + name: ntnx_subnets + namespace: "" + - description: Nutanix module for vpcs + name: ntnx_vpcs + namespace: "" + release_date: "2022-02-22" 1.1.0: changes: minor_changes: - - Added integration tests for foundation and foundation central + - Added integration tests for foundation and foundation central 1.1.0-beta.1: modules: - - description: Nutanix module 
to image nodes and optionally create clusters - name: ntnx_foundation - namespace: '' - - description: Nutanix module which configures IPMI IP address on BMC of nodes. - name: ntnx_foundation_bmc_ipmi_config - namespace: '' - - description: Nutanix module which returns nodes discovered by Foundation - name: ntnx_foundation_discover_nodes_info - namespace: '' - - description: Nutanix module which returns the hypervisor images uploaded to - Foundation - name: ntnx_foundation_hypervisor_images_info - namespace: '' - - description: Nutanix module which uploads hypervisor or AOS image to foundation - vm. - name: ntnx_foundation_image_upload - namespace: '' - - description: Nutanix module which returns node network information discovered - by Foundation - name: ntnx_foundation_node_network_info - namespace: '' - release_date: '2022-04-11' + - description: Nutanix module to image nodes and optionally create clusters + name: ntnx_foundation + namespace: "" + - description: Nutanix module which configures IPMI IP address on BMC of nodes. + name: ntnx_foundation_bmc_ipmi_config + namespace: "" + - description: Nutanix module which returns nodes discovered by Foundation + name: ntnx_foundation_discover_nodes_info + namespace: "" + - description: + Nutanix module which returns the hypervisor images uploaded to + Foundation + name: ntnx_foundation_hypervisor_images_info + namespace: "" + - description: + Nutanix module which uploads hypervisor or AOS image to foundation + vm. 
+ name: ntnx_foundation_image_upload + namespace: "" + - description: + Nutanix module which returns node network information discovered + by Foundation + name: ntnx_foundation_node_network_info + namespace: "" + release_date: "2022-04-11" 1.1.0-beta.2: modules: - - description: Nutanix module to imaged Nodes and optionally create cluster - name: ntnx_foundation_central - namespace: '' - - description: Nutanix module which creates api key for foundation central - name: ntnx_foundation_central_api_keys - namespace: '' - - description: Nutanix module which returns the api key - name: ntnx_foundation_central_api_keys_info - namespace: '' - - description: Nutanix module which returns the imaged clusters within the Foudation - Central - name: ntnx_foundation_central_imaged_clusters_info - namespace: '' - - description: Nutanix module which returns the imaged nodes within the Foudation - Central - name: ntnx_foundation_central_imaged_nodes_info - namespace: '' - release_date: '2022-04-28' + - description: Nutanix module to imaged Nodes and optionally create cluster + name: ntnx_foundation_central + namespace: "" + - description: Nutanix module which creates api key for foundation central + name: ntnx_foundation_central_api_keys + namespace: "" + - description: Nutanix module which returns the api key + name: ntnx_foundation_central_api_keys_info + namespace: "" + - description: + Nutanix module which returns the imaged clusters within the Foudation + Central + name: ntnx_foundation_central_imaged_clusters_info + namespace: "" + - description: + Nutanix module which returns the imaged nodes within the Foudation + Central + name: ntnx_foundation_central_imaged_nodes_info + namespace: "" + release_date: "2022-04-28" 1.2.0: changes: minor_changes: - - VM's update functionality + - VM's update functionality modules: - - description: Nutanix info module for floating Ips - name: ntnx_floating_ips_info - namespace: '' - - description: Nutanix info module for policy based routing 
- name: ntnx_pbrs_info - namespace: '' - - description: Nutanix info module for subnets - name: ntnx_subnets_info - namespace: '' - - description: VM module which supports VM clone operations - name: ntnx_vms_clone - namespace: '' - - description: Nutanix info module for vms - name: ntnx_vms_info - namespace: '' - - description: VM module which supports ova creation - name: ntnx_vms_ova - namespace: '' - - description: Nutanix info module for vpcs - name: ntnx_vpcs_info - namespace: '' - release_date: '2022-06-03' + - description: Nutanix info module for floating Ips + name: ntnx_floating_ips_info + namespace: "" + - description: Nutanix info module for policy based routing + name: ntnx_pbrs_info + namespace: "" + - description: Nutanix info module for subnets + name: ntnx_subnets_info + namespace: "" + - description: VM module which supports VM clone operations + name: ntnx_vms_clone + namespace: "" + - description: Nutanix info module for vms + name: ntnx_vms_info + namespace: "" + - description: VM module which supports ova creation + name: ntnx_vms_ova + namespace: "" + - description: Nutanix info module for vpcs + name: ntnx_vpcs_info + namespace: "" + release_date: "2022-06-03" 1.3.0: modules: - - description: image placement policies info module - name: ntnx_image_placement_policies_info - namespace: '' - - description: image placement policy module which supports Create, update and - delete operations - name: ntnx_image_placement_policy - namespace: '' - - description: images module which supports pc images management CRUD operations - name: ntnx_images - namespace: '' - - description: images info module - name: ntnx_images_info - namespace: '' - - description: security_rule module which suports security_rule CRUD operations - name: ntnx_security_rules - namespace: '' - - description: security_rule info module - name: ntnx_security_rules_info - namespace: '' - - description: vpc static routes - name: ntnx_static_routes - namespace: '' - - description: vpc 
static routes info module - name: ntnx_static_routes_info - namespace: '' - release_date: '2022-07-04' + - description: image placement policies info module + name: ntnx_image_placement_policies_info + namespace: "" + - description: + image placement policy module which supports Create, update and + delete operations + name: ntnx_image_placement_policy + namespace: "" + - description: images module which supports pc images management CRUD operations + name: ntnx_images + namespace: "" + - description: images info module + name: ntnx_images_info + namespace: "" + - description: security_rule module which suports security_rule CRUD operations + name: ntnx_security_rules + namespace: "" + - description: security_rule info module + name: ntnx_security_rules_info + namespace: "" + - description: vpc static routes + name: ntnx_static_routes + namespace: "" + - description: vpc static routes info module + name: ntnx_static_routes_info + namespace: "" + release_date: "2022-07-04" 1.4.0: changes: bugfixes: - - Fix examples of info modules [\#226](https://github.com/nutanix/nutanix.ansible/issues/226) + - Fix examples of info modules [\#226](https://github.com/nutanix/nutanix.ansible/issues/226) modules: - - description: acp module which suports acp Create, update and delete operations - name: ntnx_acps - namespace: '' - - description: acp info module - name: ntnx_acps_info - namespace: '' - - description: module which supports address groups CRUD operations - name: ntnx_address_groups - namespace: '' - - description: address groups info module - name: ntnx_address_groups_info - namespace: '' - - description: category module which supports pc category management CRUD operations - name: ntnx_categories - namespace: '' - - description: categories info module - name: ntnx_categories_info - namespace: '' - - description: cluster info module - name: ntnx_clusters_info - namespace: '' - - description: host info module - name: ntnx_hosts_info - namespace: '' - - description: 
permissions info module - name: ntnx_permissions_info - namespace: '' - - description: module for create, update and delete pc projects - name: ntnx_projects - namespace: '' - - description: projects info module - name: ntnx_projects_info - namespace: '' - - description: module which supports role CRUD operations - name: ntnx_roles - namespace: '' - - description: role info module - name: ntnx_roles_info - namespace: '' - - description: service_groups module which suports service_groups CRUD operations - name: ntnx_service_groups - namespace: '' - - description: service_group info module - name: ntnx_service_groups_info - namespace: '' - - description: user_groups module which supports pc user_groups management create - delete operations - name: ntnx_user_groups - namespace: '' - - description: User Groups info module - name: ntnx_user_groups_info - namespace: '' - - description: users module which supports pc users management create delete operations - name: ntnx_users - namespace: '' - - description: users info module - name: ntnx_users_info - namespace: '' - release_date: '2022-07-28' + - description: acp module which suports acp Create, update and delete operations + name: ntnx_acps + namespace: "" + - description: acp info module + name: ntnx_acps_info + namespace: "" + - description: module which supports address groups CRUD operations + name: ntnx_address_groups + namespace: "" + - description: address groups info module + name: ntnx_address_groups_info + namespace: "" + - description: category module which supports pc category management CRUD operations + name: ntnx_categories + namespace: "" + - description: categories info module + name: ntnx_categories_info + namespace: "" + - description: cluster info module + name: ntnx_clusters_info + namespace: "" + - description: host info module + name: ntnx_hosts_info + namespace: "" + - description: permissions info module + name: ntnx_permissions_info + namespace: "" + - description: module for create, update 
and delete pc projects + name: ntnx_projects + namespace: "" + - description: projects info module + name: ntnx_projects_info + namespace: "" + - description: module which supports role CRUD operations + name: ntnx_roles + namespace: "" + - description: role info module + name: ntnx_roles_info + namespace: "" + - description: service_groups module which suports service_groups CRUD operations + name: ntnx_service_groups + namespace: "" + - description: service_group info module + name: ntnx_service_groups_info + namespace: "" + - description: + user_groups module which supports pc user_groups management create + delete operations + name: ntnx_user_groups + namespace: "" + - description: User Groups info module + name: ntnx_user_groups_info + namespace: "" + - description: users module which supports pc users management create delete operations + name: ntnx_users + namespace: "" + - description: users info module + name: ntnx_users_info + namespace: "" + release_date: "2022-07-28" 1.5.0: modules: - - description: Nutanix module for protection rules - name: ntnx_protection_rules - namespace: '' - - description: Nutanix info module for protection rules - name: ntnx_protection_rules_info - namespace: '' - - description: Nutanix module for recovery plan jobs - name: ntnx_recovery_plan_jobs - namespace: '' - - description: Nutanix info module for protection - name: ntnx_recovery_plan_jobs_info - namespace: '' - - description: Nutanix module for recovery plan - name: ntnx_recovery_plans - namespace: '' - - description: Nutanix info module for recovery plan - name: ntnx_recovery_plans_info - namespace: '' + - description: Nutanix module for protection rules + name: ntnx_protection_rules + namespace: "" + - description: Nutanix info module for protection rules + name: ntnx_protection_rules_info + namespace: "" + - description: Nutanix module for recovery plan jobs + name: ntnx_recovery_plan_jobs + namespace: "" + - description: Nutanix info module for protection + name: 
ntnx_recovery_plan_jobs_info + namespace: "" + - description: Nutanix module for recovery plan + name: ntnx_recovery_plans + namespace: "" + - description: Nutanix info module for recovery plan + name: ntnx_recovery_plans_info + namespace: "" 1.6.0: modules: - - description: Nutanix module for karbon clusters - name: ntnx_karbon_clusters - namespace: '' - - description: Nutanix info module for karbon clusters with kubeconifg and ssh - config - name: ntnx_karbon_clusters_info - namespace: '' - - description: Nutanix module for karbon private registry - name: ntnx_karbon_registries - namespace: '' - - description: Nutanix info module for karbon private registry - name: ntnx_karbon_registries_info - namespace: '' - release_date: '2022-09-09' + - description: Nutanix module for karbon clusters + name: ntnx_karbon_clusters + namespace: "" + - description: + Nutanix info module for karbon clusters with kubeconifg and ssh + config + name: ntnx_karbon_clusters_info + namespace: "" + - description: Nutanix module for karbon private registry + name: ntnx_karbon_registries + namespace: "" + - description: Nutanix info module for karbon private registry + name: ntnx_karbon_registries_info + namespace: "" + release_date: "2022-09-09" 1.7.0: changes: bugfixes: - - ntnx_projects - [Bug] Clusters and subnets configured in project are not visible - in new projects UI [\#283](https://github.com/nutanix/nutanix.ansible/issues/283) - - ntnx_vms - Subnet Name --> UUID Lookup should be PE Cluster Aware [\#260](https://github.com/nutanix/nutanix.ansible/issues/260) - - nutanix.ncp.ntnx_prism_vm_inventory - [Bug] Inventory does not fetch more - than 500 Entities [[\#228](https://github.com/nutanix/nutanix.ansible/issues/228)] + - ntnx_projects - [Bug] Clusters and subnets configured in project are not visible + in new projects UI [\#283](https://github.com/nutanix/nutanix.ansible/issues/283) + - ntnx_vms - Subnet Name --> UUID Lookup should be PE Cluster Aware 
[\#260](https://github.com/nutanix/nutanix.ansible/issues/260) + - nutanix.ncp.ntnx_prism_vm_inventory - [Bug] Inventory does not fetch more + than 500 Entities [[\#228](https://github.com/nutanix/nutanix.ansible/issues/228)] minor_changes: - - examples - [Imprv] Add version related notes to examples [\#279](https://github.com/nutanix/nutanix.ansible/issues/279) - - examples - [Imprv] Fix IaaS example [\#250](https://github.com/nutanix/nutanix.ansible/issues/250) - - examples - [Imprv] add examples of Images and Static Routes Module [\#256](https://github.com/nutanix/nutanix.ansible/issues/256) - - ntnx_projects - [Feat] Add capability to configure role mappings with collaboration - on/off in ntnx_projects [\#252](https://github.com/nutanix/nutanix.ansible/issues/252) - - ntnx_projects - [Imprv] add vpcs and overlay subnets configure capability - to module ntnx_projects [\#289](https://github.com/nutanix/nutanix.ansible/issues/289) - - ntnx_vms - [Imprv] add functionality to set network mac_address to module - ntnx_vms [\#201](https://github.com/nutanix/nutanix.ansible/issues/201) - - nutanix.ncp.ntnx_prism_vm_inventory - [Imprv] add functionality constructed - to module inventory [\#235](https://github.com/nutanix/nutanix.ansible/issues/235) - release_date: '2022-09-30' + - examples - [Imprv] Add version related notes to examples [\#279](https://github.com/nutanix/nutanix.ansible/issues/279) + - examples - [Imprv] Fix IaaS example [\#250](https://github.com/nutanix/nutanix.ansible/issues/250) + - examples - [Imprv] add examples of Images and Static Routes Module [\#256](https://github.com/nutanix/nutanix.ansible/issues/256) + - ntnx_projects - [Feat] Add capability to configure role mappings with collaboration + on/off in ntnx_projects [\#252](https://github.com/nutanix/nutanix.ansible/issues/252) + - ntnx_projects - [Imprv] add vpcs and overlay subnets configure capability + to module ntnx_projects [\#289](https://github.com/nutanix/nutanix.ansible/issues/289) + 
- ntnx_vms - [Imprv] add functionality to set network mac_address to module + ntnx_vms [\#201](https://github.com/nutanix/nutanix.ansible/issues/201) + - nutanix.ncp.ntnx_prism_vm_inventory - [Imprv] add functionality constructed + to module inventory [\#235](https://github.com/nutanix/nutanix.ansible/issues/235) + release_date: "2022-09-30" 1.8.0: modules: - - description: module for authorizing db server vm - name: ntnx_ndb_authorize_db_server_vms - namespace: '' - - description: Create, Update and Delete NDB clusters - name: ntnx_ndb_clusters - namespace: '' - - description: module for database clone refresh. - name: ntnx_ndb_database_clone_refresh - namespace: '' - - description: module for create, update and delete of ndb database clones - name: ntnx_ndb_database_clones - namespace: '' - - description: module for performing log catchups action - name: ntnx_ndb_database_log_catchup - namespace: '' - - description: module for restoring database instance - name: ntnx_ndb_database_restore - namespace: '' - - description: module for scaling database instance - name: ntnx_ndb_database_scale - namespace: '' - - description: module for creating, updating and deleting database snapshots - name: ntnx_ndb_database_snapshots - namespace: '' - - description: module for create, delete and update of database server vms - name: ntnx_ndb_db_server_vms - namespace: '' - - description: module to manage linked databases of a database instance - name: ntnx_ndb_linked_databases - namespace: '' - - description: module to add and remove maintenance related tasks - name: ntnx_ndb_maintenance_tasks - namespace: '' - - description: module to create, update and delete mainetance window - name: ntnx_ndb_maintenance_window - namespace: '' - - description: module for fetching maintenance windows info - name: ntnx_ndb_maintenance_windows_info - namespace: '' - - description: module for create, update and delete of profiles - name: ntnx_ndb_profiles - namespace: '' - - description: module for 
database instance registration - name: ntnx_ndb_register_database - namespace: '' - - description: module for registration of database server vm - name: ntnx_ndb_register_db_server_vm - namespace: '' - - description: module for replicating database snapshots across clusters of time - machine - name: ntnx_ndb_replicate_database_snapshots - namespace: '' - - description: moudle for creating, updating and deleting slas - name: ntnx_ndb_slas - namespace: '' - - description: info module for ndb snapshots info - name: ntnx_ndb_snapshots_info - namespace: '' - - description: Module for create, update and delete of stretched vlan. - name: ntnx_ndb_stretched_vlans - namespace: '' - - description: module for create, update and delete of tags - name: ntnx_ndb_tags - namespace: '' - - description: Module for create, update and delete for data access management - in time machines. - name: ntnx_ndb_time_machine_clusters - namespace: '' - - description: Module for create, update and delete of ndb vlan. - name: ntnx_ndb_vlans - namespace: '' - - description: info module for ndb vlans - name: ntnx_ndb_vlans_info - namespace: '' - release_date: '2023-02-28' + - description: module for authorizing db server vm + name: ntnx_ndb_authorize_db_server_vms + namespace: "" + - description: Create, Update and Delete NDB clusters + name: ntnx_ndb_clusters + namespace: "" + - description: module for database clone refresh. 
+ name: ntnx_ndb_database_clone_refresh + namespace: "" + - description: module for create, update and delete of ndb database clones + name: ntnx_ndb_database_clones + namespace: "" + - description: module for performing log catchups action + name: ntnx_ndb_database_log_catchup + namespace: "" + - description: module for restoring database instance + name: ntnx_ndb_database_restore + namespace: "" + - description: module for scaling database instance + name: ntnx_ndb_database_scale + namespace: "" + - description: module for creating, updating and deleting database snapshots + name: ntnx_ndb_database_snapshots + namespace: "" + - description: module for create, delete and update of database server vms + name: ntnx_ndb_db_server_vms + namespace: "" + - description: module to manage linked databases of a database instance + name: ntnx_ndb_linked_databases + namespace: "" + - description: module to add and remove maintenance related tasks + name: ntnx_ndb_maintenance_tasks + namespace: "" + - description: module to create, update and delete mainetance window + name: ntnx_ndb_maintenance_window + namespace: "" + - description: module for fetching maintenance windows info + name: ntnx_ndb_maintenance_windows_info + namespace: "" + - description: module for create, update and delete of profiles + name: ntnx_ndb_profiles + namespace: "" + - description: module for database instance registration + name: ntnx_ndb_register_database + namespace: "" + - description: module for registration of database server vm + name: ntnx_ndb_register_db_server_vm + namespace: "" + - description: + module for replicating database snapshots across clusters of time + machine + name: ntnx_ndb_replicate_database_snapshots + namespace: "" + - description: moudle for creating, updating and deleting slas + name: ntnx_ndb_slas + namespace: "" + - description: info module for ndb snapshots info + name: ntnx_ndb_snapshots_info + namespace: "" + - description: Module for create, update and delete of 
stretched vlan. + name: ntnx_ndb_stretched_vlans + namespace: "" + - description: module for create, update and delete of tags + name: ntnx_ndb_tags + namespace: "" + - description: + Module for create, update and delete for data access management + in time machines. + name: ntnx_ndb_time_machine_clusters + namespace: "" + - description: Module for create, update and delete of ndb vlan. + name: ntnx_ndb_vlans + namespace: "" + - description: info module for ndb vlans + name: ntnx_ndb_vlans_info + namespace: "" + release_date: "2023-02-28" 1.8.0-beta.1: modules: - - description: info module for database clones - name: ntnx_ndb_clones_info - namespace: '' - - description: info module for ndb clusters info - name: ntnx_ndb_clusters_info - namespace: '' - - description: Module for create, update and delete of single instance database. - Currently, postgres type database is officially supported. - name: ntnx_ndb_databases - namespace: '' - - description: info module for ndb database instances - name: ntnx_ndb_databases_info - namespace: '' - - description: info module for ndb db server vms info - name: ntnx_ndb_db_servers_info - namespace: '' - - description: info module for ndb profiles - name: ntnx_ndb_profiles_info - namespace: '' - - description: info module for ndb slas - name: ntnx_ndb_slas_info - namespace: '' - - description: info module for ndb time machines - name: ntnx_ndb_time_machines_info - namespace: '' - release_date: '2022-10-20' + - description: info module for database clones + name: ntnx_ndb_clones_info + namespace: "" + - description: info module for ndb clusters info + name: ntnx_ndb_clusters_info + namespace: "" + - description: + Module for create, update and delete of single instance database. + Currently, postgres type database is officially supported. 
+ name: ntnx_ndb_databases + namespace: "" + - description: info module for ndb database instances + name: ntnx_ndb_databases_info + namespace: "" + - description: info module for ndb db server vms info + name: ntnx_ndb_db_servers_info + namespace: "" + - description: info module for ndb profiles + name: ntnx_ndb_profiles_info + namespace: "" + - description: info module for ndb slas + name: ntnx_ndb_slas_info + namespace: "" + - description: info module for ndb time machines + name: ntnx_ndb_time_machines_info + namespace: "" + release_date: "2022-10-20" 1.9.0: changes: bugfixes: - - info modules - [Bug] Multiple filters params are not considered for fetching - entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] - - ntnx_foundation - [Bug] clusters parameters not being passed to Foundation - Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] - - ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml - [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] - - ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using - disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] - - ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets - [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) + - info modules - [Bug] Multiple filters params are not considered for fetching + entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] + - ntnx_foundation - [Bug] clusters parameters not being passed to Foundation + Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] + - ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml + [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] + - ntnx_karbon_clusters - [Bug] impossible to deploy NKE 
cluster with etcd using + disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] + - ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets + [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) deprecated_features: - - ntnx_security_rules - The ``apptier`` option in target group has been removed. - New option called ``apptiers`` has been added to support multi tier policy. + - ntnx_security_rules - The ``apptier`` option in target group has been removed. + New option called ``apptiers`` has been added to support multi tier policy. minor_changes: - - ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs - for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) - - ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in - Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) + - ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs + for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) + - ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in + Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) modules: - - description: Create,Update and Delete a worker node pools with the provided - configuration. - name: ntnx_karbon_clusters_node_pools - namespace: '' - - description: info module for ndb tags info - name: ntnx_ndb_tags_info - namespace: '' - release_date: '2023-07-11' + - description: + Create,Update and Delete a worker node pools with the provided + configuration. 
+ name: ntnx_karbon_clusters_node_pools + namespace: "" + - description: info module for ndb tags info + name: ntnx_ndb_tags_info + namespace: "" + release_date: "2023-07-11" 1.9.1: changes: bugfixes: - - ntnx_foundation - [Bug] Error when Clusters Block is missing in module ntnx_foundation - [[\#397](https://github.com/nutanix/nutanix.ansible/issues/397)] - - ntnx_ndb_time_machines_info - [Bug] ntnx_ndb_time_machines_info not fetching - all attributes when name is used for fetching [[\#418](https://github.com/nutanix/nutanix.ansible/issues/418)] - - ntnx_security_rules - Fix Syntax Errors in Create App Security Rule Example - [[\#394](https://github.com/nutanix/nutanix.ansible/pull/394/files)] - - ntnx_vms - [Bug] Error when updating size_gb using the int filter in module - ntnx_vms [[\#400](https://github.com/nutanix/nutanix.ansible/issues/400)] - - ntnx_vms - [Bug] hard_poweroff has been moved to state from operation [[\#415](https://github.com/nutanix/nutanix.ansible/issues/415)] - - ntnx_vms_clone - [Bug] cannot change boot_config when cloning in module ntnx_vms_clone - [[\#360](https://github.com/nutanix/nutanix.ansible/issues/359)] - - website - [Bug] Github page deployment action is failing. 
[[\#483](https://github.com/nutanix/nutanix.ansible/issues/483)] + - ntnx_foundation - [Bug] Error when Clusters Block is missing in module ntnx_foundation + [[\#397](https://github.com/nutanix/nutanix.ansible/issues/397)] + - ntnx_ndb_time_machines_info - [Bug] ntnx_ndb_time_machines_info not fetching + all attributes when name is used for fetching [[\#418](https://github.com/nutanix/nutanix.ansible/issues/418)] + - ntnx_security_rules - Fix Syntax Errors in Create App Security Rule Example + [[\#394](https://github.com/nutanix/nutanix.ansible/pull/394/files)] + - ntnx_vms - [Bug] Error when updating size_gb using the int filter in module + ntnx_vms [[\#400](https://github.com/nutanix/nutanix.ansible/issues/400)] + - ntnx_vms - [Bug] hard_poweroff has been moved to state from operation [[\#415](https://github.com/nutanix/nutanix.ansible/issues/415)] + - ntnx_vms_clone - [Bug] cannot change boot_config when cloning in module ntnx_vms_clone + [[\#360](https://github.com/nutanix/nutanix.ansible/issues/359)] + - website - [Bug] Github page deployment action is failing. 
[[\#483](https://github.com/nutanix/nutanix.ansible/issues/483)] minor_changes: - - docs - [Imprv] add doc regarding running integration tests locally [[\#435](https://github.com/nutanix/nutanix.ansible/issues/435)] - - info modules - [Imprv] add examples for custom_filter [[\#416](https://github.com/nutanix/nutanix.ansible/issues/416)] - - ndb clones - [Imprv] Enable database clones and clone refresh using latest - snapshot flag [[\#391](https://github.com/nutanix/nutanix.ansible/issues/391)] - - ndb clones - [Imprv] add examples for NDB database clone under examples folder - [[\#386](https://github.com/nutanix/nutanix.ansible/issues/386)] - - ntnx_prism_vm_inventory - Add support for PC Categories [[\#405](https://github.com/nutanix/nutanix.ansible/issues/405)] - - ntnx_prism_vm_inventory - [Imprv] add examples for dynamic inventory using - ntnx_prism_vm_inventory [[\#401](https://github.com/nutanix/nutanix.ansible/issues/401)] - - ntnx_vms - [Imprv] add possibility to specify / modify vm user ownership and - project [[\#378](https://github.com/nutanix/nutanix.ansible/issues/378)] - - ntnx_vms - owner association upon vm creation module [[\#359](https://github.com/nutanix/nutanix.ansible/issues/359)] - - ntnx_vms_info - [Imprv] add examples with guest customization for module ntnx_vms - [[\#395](https://github.com/nutanix/nutanix.ansible/issues/395)] + - docs - [Imprv] add doc regarding running integration tests locally [[\#435](https://github.com/nutanix/nutanix.ansible/issues/435)] + - info modules - [Imprv] add examples for custom_filter [[\#416](https://github.com/nutanix/nutanix.ansible/issues/416)] + - ndb clones - [Imprv] Enable database clones and clone refresh using latest + snapshot flag [[\#391](https://github.com/nutanix/nutanix.ansible/issues/391)] + - ndb clones - [Imprv] add examples for NDB database clone under examples folder + [[\#386](https://github.com/nutanix/nutanix.ansible/issues/386)] + - ntnx_prism_vm_inventory - Add support for PC 
Categories [[\#405](https://github.com/nutanix/nutanix.ansible/issues/405)] + - ntnx_prism_vm_inventory - [Imprv] add examples for dynamic inventory using + ntnx_prism_vm_inventory [[\#401](https://github.com/nutanix/nutanix.ansible/issues/401)] + - ntnx_vms - [Imprv] add possibility to specify / modify vm user ownership and + project [[\#378](https://github.com/nutanix/nutanix.ansible/issues/378)] + - ntnx_vms - owner association upon vm creation module [[\#359](https://github.com/nutanix/nutanix.ansible/issues/359)] + - ntnx_vms_info - [Imprv] add examples with guest customization for module ntnx_vms + [[\#395](https://github.com/nutanix/nutanix.ansible/issues/395)] release_summary: This release included bug fixes and improvement. - release_date: '2023-10-09' + release_date: "2023-10-09" 1.9.2: changes: breaking_changes: - - nutanix.ncp collection - Due to all versions of ansible-core version less - than v2.15.0 are EOL, we are also deprecating support for same and minimum - version to use this collection is ansible-core==2.15.0. [[\#479](https://github.com/nutanix/nutanix.ansible/issues/479)] + - nutanix.ncp collection - Due to all versions of ansible-core version less + than v2.15.0 are EOL, we are also deprecating support for same and minimum + version to use this collection is ansible-core==2.15.0. 
[[\#479](https://github.com/nutanix/nutanix.ansible/issues/479)] release_summary: Deprecating support for ansible-core less than v2.15.0 - release_date: '2024-05-30' + release_date: "2024-05-30" diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 541e0b49c..7b2766a0d 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -11,21 +11,21 @@ prelude_section_name: release_summary prelude_section_title: Release Summary sanitize_changelog: true sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Nutanix.Ncp -trivial_section_name: trivial \ No newline at end of file +trivial_section_name: trivial diff --git a/examples/acp.yml b/examples/acp.yml index 8efb39915..b48af6b08 100644 --- a/examples/acp.yml +++ b/examples/acp.yml @@ -2,8 +2,6 @@ - name: ACP playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,10 +10,9 @@ validate_certs: false tasks: - - name: Create ACP with all specfactions - ntnx_acps: - validate_certs: False + nutanix.ncp.ntnx_acps: + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -41,7 +38,7 @@ collection: ALL - name: Delete ACP - ntnx_acps: + nutanix.ncp.ntnx_acps: state: absent acp_uuid: "{{ acp_uuid }}" register: result diff --git 
a/examples/acp_info.yml b/examples/acp_info.yml index 41850e614..9b57514bd 100644 --- a/examples/acp_info.yml +++ b/examples/acp_info.yml @@ -2,8 +2,6 @@ - name: ACP_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,20 +10,19 @@ validate_certs: false tasks: - - name: List ACPs using ascending, sorting and name filter - ntnx_floating_ips_info: + nutanix.ncp.ntnx_acps_info: filter: name: "{{ acp_name }}" kind: access_control_policy sort_order: "ASCENDING" sort_attribute: "name" register: result - ignore_errors: True + ignore_errors: true - name: List ACPs using length and offset - ntnx_floating_ips_info: + nutanix.ncp.ntnx_acps_info: length: 3 offset: 0 register: result - ignore_errors: True + ignore_errors: true diff --git a/examples/address_groups_crud.yml b/examples/address_groups_crud.yml index 35e5febdb..6cd5ad0a0 100644 --- a/examples/address_groups_crud.yml +++ b/examples/address_groups_crud.yml @@ -1,8 +1,6 @@ - name: Address group crud playbook. Here we will create, update, read and delete the address group. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a address group - ntnx_address_groups: + nutanix.ncp.ntnx_address_groups: state: present name: test-ansible-group-1 desc: test-ansible-group-1-desc @@ -22,8 +20,8 @@ network_prefix: 32 register: ag - - name: update address group - ntnx_address_groups: + - name: Update address group + nutanix.ncp.ntnx_address_groups: state: present address_group_uuid: "{{ ag.address_group_uuid }}" name: test-ansible-group-1-updated @@ -34,16 +32,16 @@ register: updated_ag - name: Read the updated address group - ntnx_address_groups_info: + nutanix.ncp.ntnx_address_groups_info: address_group_uuid: "{{ updated_ag.address_group_uuid }}" register: ag_info - name: Print the address group details - debug: + ansible.builtin.debug: msg: "{{ ag_info }}" - name: Delete the address group. - ntnx_address_groups: + nutanix.ncp.ntnx_address_groups: state: absent address_group_uuid: "{{ updated_ag.address_group_uuid }}" register: op diff --git a/examples/category_crud.yml b/examples/category_crud.yml index 3c4c6b12e..c5c88664b 100644 --- a/examples/category_crud.yml +++ b/examples/category_crud.yml @@ -1,8 +1,6 @@ -- name: categories crud playbook. Here we will create, update, read and delete the category key values. +- name: Categories crud playbook. Here we will create, update, read and delete the category key values. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,14 +9,14 @@ validate_certs: false tasks: - name: Create only category key with description - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-1" desc: "test-cat-1-desc" register: cat1 - name: Add category values to test-cat-1 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-1" values: @@ -26,7 +24,7 @@ - "val2" - name: Create category key with values - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-2" desc: "test-cat-2-desc" @@ -36,7 +34,7 @@ register: cat2 - name: Add more category values to test-cat-2 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: "present" name: "test-cat-2" values: @@ -44,25 +42,25 @@ - "val6" - name: Get categories info - ntnx_categories_info: + nutanix.ncp.ntnx_categories_info: name: "test-cat-1" register: cat1_info - name: Delete val1 category value from test-cat-1 - ntnx_categories: + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-1" values: - val1 - - name: delete all category values from test-cat-1 - ntnx_categories: + - name: Delete all category values from test-cat-1 + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-1" remove_values: true - - name: delete category key test-cat-2 including its all values - ntnx_categories: + - name: Delete category key test-cat-2 including its all values + nutanix.ncp.ntnx_categories: state: absent name: "test-cat-2" remove_values: true diff --git a/examples/clusters_info.yml b/examples/clusters_info.yml index c50b8286b..84eb807e0 100644 --- a/examples/clusters_info.yml +++ b/examples/clusters_info.yml @@ -2,8 +2,6 @@ - name: Clusters_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,27 +10,27 @@ validate_certs: false tasks: - - name: test getting all 
clusters - ntnx_clusters_info: - register: clusters + - name: Test getting all clusters + nutanix.ncp.ntnx_clusters_info: + register: clusters - - name: test getting particular cluster using uuid - ntnx_clusters_info: - cluster_uuid: '{{ clusters.response.entities[0].metadata.uuid }}' - register: result + - name: Test getting particular cluster using uuid + nutanix.ncp.ntnx_clusters_info: + cluster_uuid: "{{ clusters.response.entities[0].metadata.uuid }}" + register: result - - name: List clusters using length, offset, sort order and priority sort attribute - ntnx_clusters_info: - length: 2 - offset: 0 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result + - name: List clusters using length, offset, sort order and priority sort attribute + nutanix.ncp.ntnx_clusters_info: + length: 2 + offset: 0 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result - - name: List clusters using filter and custom_filter - ntnx_clusters_info: - filter: - name: - custom_filter: - external_ip: - register: result + - name: List clusters using filter and custom_filter + nutanix.ncp.ntnx_clusters_info: + filter: + name: + custom_filter: + external_ip: + register: result diff --git a/examples/dr/protection_policy.yml b/examples/dr/protection_policy.yml index 1b5a2d816..218f64a86 100644 --- a/examples/dr/protection_policy.yml +++ b/examples/dr/protection_policy.yml @@ -1,3 +1,4 @@ +--- ######## Description ########### # Tasks done by this playbook: # 1. Create synchronous protection policy and asynchronous protection policy @@ -5,163 +6,160 @@ # 3. Get created protection plans info and associated entities # 4. Delete protection plan from primary site. 
################################# - - - name: PC DR hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Create protection rule with synchronous schedule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: present - wait: True - name: test-ansible - desc: test-ansible-desc - protected_categories: - Environment: - - Dev - - Staging - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: SYNC - auto_suspend_timeout: 20 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: SYNC - auto_suspend_timeout: 10 - register: pr + - name: Create protection rule with synchronous schedule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + name: test-ansible + desc: test-ansible-desc + protected_categories: + Environment: + - Dev + - Staging + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: SYNC + auto_suspend_timeout: 20 + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: SYNC + auto_suspend_timeout: 10 + register: pr - - name: delete the protection rule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "absent" - rule_uuid: "{{ pr.rule_uuid }}" + - name: Delete the protection rule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + rule_uuid: "{{ pr.rule_uuid }}" - - name: Create protection rule with async schedule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: 
present - wait: True - name: test-ansible-1 - desc: test-ansible-desc-1 - protected_categories: - Environment: - - Dev - - Testing - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 1 - rpo_unit: HOUR - snapshot_type: "CRASH_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - rollup_retention_policy: - snapshot_interval_type: HOURLY - multiple: 2 + - name: Create protection rule with async schedule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + name: test-ansible-1 + desc: test-ansible-desc-1 + protected_categories: + Environment: + - Dev + - Testing + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 1 + rpo_unit: HOUR + snapshot_type: CRASH_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + rollup_retention_policy: + snapshot_interval_type: HOURLY + multiple: 2 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 1 - rpo_unit: HOUR - snapshot_type: "CRASH_CONSISTENT" - local_retention_policy: - num_snapshots: 2 - remote_retention_policy: - num_snapshots: 1 - register: result + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 1 + rpo_unit: HOUR + snapshot_type: CRASH_CONSISTENT + local_retention_policy: + num_snapshots: 2 + remote_retention_policy: + num_snapshots: 1 + register: result - - name: Update previously created protection policy - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: present - wait: True - rule_uuid: "{{result.rule_uuid}}" - name: test-ansible-updated - desc: 
test-ansible-desc-updated - protected_categories: - Environment: - - Testing - primary_site: - availability_zone_url: "" - schedules: - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 2 - rpo_unit: DAY - snapshot_type: "APPLICATION_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - rollup_retention_policy: - snapshot_interval_type: YEARLY - multiple: 2 + - name: Update previously created protection policy + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + wait: true + rule_uuid: "{{ result.rule_uuid }}" + name: test-ansible-updated + desc: test-ansible-desc-updated + protected_categories: + Environment: + - Testing + primary_site: + availability_zone_url: + schedules: + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 2 + rpo_unit: DAY + snapshot_type: APPLICATION_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + rollup_retention_policy: + snapshot_interval_type: YEARLY + multiple: 2 - - source: - availability_zone_url: "" - destination: - availability_zone_url: "" - protection_type: ASYNC - rpo: 2 - rpo_unit: DAY - snapshot_type: "APPLICATION_CONSISTENT" - local_retention_policy: - num_snapshots: 1 - remote_retention_policy: - num_snapshots: 2 - register: pr + - source: + availability_zone_url: + destination: + availability_zone_url: + protection_type: ASYNC + rpo: 2 + rpo_unit: DAY + snapshot_type: APPLICATION_CONSISTENT + local_retention_policy: + num_snapshots: 1 + remote_retention_policy: + num_snapshots: 2 + register: pr - - name: Get protection policy info and its associated vms info - ntnx_protection_rules_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - rule_uuid: "{{ pr.rule_uuid }}" - register: result + - name: Get protection policy 
info and its associated vms info + nutanix.ncp.ntnx_protection_rules_info: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + rule_uuid: "{{ pr.rule_uuid }}" + register: result - - debug: - msg: "{{ result }}" + - name: Print protection policy info + ansible.builtin.debug: + msg: "{{ result }}" - - name: delete the protection rule - ntnx_protection_rules: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "absent" - rule_uuid: "{{ pr.rule_uuid }}" + - name: Delete the protection rule + nutanix.ncp.ntnx_protection_rules: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + rule_uuid: "{{ pr.rule_uuid }}" diff --git a/examples/dr/recovery_plan_with_execution.yml b/examples/dr/recovery_plan_with_execution.yml index 561536001..92fed079d 100644 --- a/examples/dr/recovery_plan_with_execution.yml +++ b/examples/dr/recovery_plan_with_execution.yml @@ -1,3 +1,4 @@ +--- ######## Description ########### # Tasks done by this playbook: # 1. 
Create Recovery plan using ntnx_recovery_plans @@ -11,170 +12,169 @@ - name: PC DR hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: -################################# Create recovery plans using ntnx_recovery_plans ############# - - name: Create recovery plan with custom ip network mapping - ntnx_recovery_plans: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: example-rp - desc: recovery plan desc - stages: - - vms: - - name: "test-check" - enable_script_exec: true - delay: 10 - primary_location: - url: "" - recovery_location: - url: "" - network_type: NON_STRETCH - network_mappings: - - primary: - test: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - prod: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - recovery: - test: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - prod: - name: "" - gateway_ip: "xx.xx.xx.xx" - prefix: "24" - custom_ip_config: - - vm: - name: "test-check" - ip: "xx.xx.xx.xx" - register: result + ################################# Create recovery plans using ntnx_recovery_plans ############# + - name: Create recovery plan with custom ip network mapping + nutanix.ncp.ntnx_recovery_plans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: example-rp + desc: recovery plan desc + stages: + - vms: + - name: test-check + enable_script_exec: true + delay: 10 + primary_location: + url: + recovery_location: + url: + network_type: NON_STRETCH + network_mappings: + - primary: + test: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + prod: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: 
test-check + ip: xx.xx.xx.xx + recovery: + test: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + prod: + name: + gateway_ip: xx.xx.xx.xx + prefix: "24" + custom_ip_config: + - vm: + name: test-check + ip: xx.xx.xx.xx + register: result + - name: Print recovery plan details + ansible.builtin.debug: + msg: "{{ result }}" - - debug: - msg: "{{ result }}" + - name: Update recovery plan by adding more stages and remove custom IP to enable dynamic IP allocation + nutanix.ncp.ntnx_recovery_plans: + plan_uuid: "{{ result.plan_uuid }}" + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: example-rp-updated + desc: recovery plan desc updated + stages: + - vms: + - name: test-check + enable_script_exec: true + categories: + - key: Environment + value: Staging + enable_script_exec: true + delay: 2 + - categories: + - key: Environment + value: Dev + primary_location: + url: + recovery_location: + url: + network_type: NON_STRETCH + network_mappings: + - primary: + test: + name: + prod: + name: + recovery: + test: + name: + prod: + name: + register: recovery_plan + #################################### Lets recover the vms on recovery site using ntnx_recovery_plan_jobs ################ - - name: Update recovery plan by adding more stages and remove custom IP to enable dynamic IP allocation - ntnx_recovery_plans: - plan_uuid: "{{result.plan_uuid}}" - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: example-rp-updated - desc: recovery plan desc updated - stages: - - vms: - - name: "test-check" - enable_script_exec: true - categories: - - key: Environment - value: Staging - enable_script_exec: true - delay: 2 - - categories: - - key: Environment - value: Dev - primary_location: - url: "" - recovery_location: - url: "" - network_type: NON_STRETCH - network_mappings: - - primary: - test: - name: "" - 
prod: - name: "" - recovery: - test: - name: "" - prod: - name: "" - register: recovery_plan + - name: Recovery plan info and its affected entities get + nutanix.ncp.ntnx_recovery_plans_info: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + plan_uuid: "{{ recovery_plan.plan_uuid }}" + register: recovery_plan_info + - name: Print recovery plan info + ansible.builtin.debug: + msg: "{{ recovery_plan_info }}" -#################################### Lets recover the vms on recovery site using ntnx_recovery_plan_jobs ################ + # We can also perform FAILOVER, LIVE_MIGRATE and FAILOVER here + - name: Run migrate (planned failover) + nutanix.ncp.ntnx_recovery_plan_jobs: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: present + name: test-failover-123 + recovery_plan: + uuid: "{{ recovery_plan.plan_uuid }}" + failed_site: + url: + recovery_site: + url: + action: MIGRATE + ignore_validation_failures: true + register: migrate_job - - name: recovery plan info and its affected entities get - ntnx_recovery_plans_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - plan_uuid: "{{recovery_plan.plan_uuid}}" - register: recovery_plan_info + - name: Print migrate job + ansible.builtin.debug: + msg: "{{ migrate_job }}" - - debug: - msg: "{{recovery_plan_info}}" + - name: Get recovery plan job status using info module + nutanix.ncp.ntnx_recovery_plan_jobs_info: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + job_uuid: "{{ migrate_job.job_uuid }}" + register: result + ignore_errors: true - # We can also perform FAILOVER, LIVE_MIGRATE and FAILOVER here - - name: Run migrate (planned failover) - ntnx_recovery_plan_jobs: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - state: "present" - name: test-failover-123 - recovery_plan: - uuid: "{{recovery_plan.plan_uuid}}" - failed_site: - url: "" - 
recovery_site: - url: "" - action: MIGRATE - ignore_validation_failures: true - register: migrate_job + - name: Print recovery plan job status + ansible.builtin.debug: + msg: "{{ result }}" - - debug: - msg: "{{migrate_job}}" + ###################################### delete the recovery plan ################# - - name: Get recovery plan job status using info module - ntnx_recovery_plan_jobs_info: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - job_uuid: "{{migrate_job.job_uuid}}" - register: result - ignore_errors: True - - - debug: - msg: "{{ result }}" - - ###################################### delete the recovery plan ################# - - - name: Delete recovery plan - ntnx_recovery_plans: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - job_uuid: "{{migrate_job.job_uuid}}" - plan_uuid: "{{recovery_plan.plan_uuid}}" - state: "absent" - register: result + - name: Delete recovery plan + nutanix.ncp.ntnx_recovery_plans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + job_uuid: "{{ migrate_job.job_uuid }}" + plan_uuid: "{{ recovery_plan.plan_uuid }}" + state: absent + register: result diff --git a/examples/fc/api_keys_create.yml b/examples/fc/api_keys_create.yml index 41a5df90c..6618cf2b8 100644 --- a/examples/fc/api_keys_create.yml +++ b/examples/fc/api_keys_create.yml @@ -2,19 +2,16 @@ - name: API Keys Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Create a new API Key - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - alias: "test-alias" - register: output + - name: Create a new API Key + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: test-alias + register: 
output - - name: output of api_key - debug: - msg: '{{ output }}' + - name: Output of api_key + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/api_keys_info.yml b/examples/fc/api_keys_info.yml index 7ea8513e5..5999781d0 100644 --- a/examples/fc/api_keys_info.yml +++ b/examples/fc/api_keys_info.yml @@ -2,28 +2,25 @@ - name: API Keys Info Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: API key response with alias - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - alias: "test" - register: output + - name: API key response with alias + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: test + register: output - - name: API key response with key_uuid - ntnx_foundation_central_api_keys: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - key_uuid: "" - register: output + - name: API key response with key_uuid + nutanix.ncp.ntnx_foundation_central_api_keys: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + key_uuid: + register: output - - name: output of api_key - debug: - msg: '{{ output }}' + - name: Output of api_key + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/fc.yml b/examples/fc/fc.yml index 00f9732fb..0489d25d1 100644 --- a/examples/fc/fc.yml +++ b/examples/fc/fc.yml @@ -2,99 +2,96 @@ - name: Foundation Central Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Nodes Imaging with Cluster Creation with manual mode. 
- ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - # skip_cluster_creation: false #set this to true to skip cluster creation - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-1" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - name: Nodes Imaging with Cluster Creation with manual mode. + nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + # skip_cluster_creation: false #set this to true to skip cluster creation + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-1 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-2" - imaged_node_uuid: 
"" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-2 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + register: output - - name: Nodes Imaging without Cluster Creation with discovery mode. - ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - skip_cluster_creation: true - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - discovery_override: - cvm_ip: + - name: Nodes Imaging without Cluster Creation with discovery mode. 
+ nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + skip_cluster_creation: true + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + discovery_override: + cvm_ip: - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + register: output - - name: output of list - debug: - msg: '{{ output }}' + - name: Output of list + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/imaged_cluster_info.yml b/examples/fc/imaged_cluster_info.yml index 6e23546a7..f6fe53e88 100644 --- a/examples/fc/imaged_cluster_info.yml +++ b/examples/fc/imaged_cluster_info.yml @@ -2,39 +2,36 @@ - name: Imaged Clusters Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Imaged-Cluster details with imaged_cluster_uuid - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - imaged_cluster_uuid: "" - register: output + - name: Imaged-Cluster details with imaged_cluster_uuid + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_cluster_uuid: + register: output - - name: Imaged-Cluster details with filters - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - archived: true - 
register: output + - name: Imaged-Cluster details with filters + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + archived: true + register: output - - name: Imaged-Cluster details with custom filter - ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - custom_filter: - cvm_gateway: "" - register: output + - name: Imaged-Cluster details with custom filter + nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + custom_filter: + cvm_gateway: + register: output - - name: details of imaged-clusters - debug: - msg: '{{ output }}' + - name: Details of imaged-clusters + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fc/imaged_nodes_info.yml b/examples/fc/imaged_nodes_info.yml index 3eda3012d..0cdf06ae3 100644 --- a/examples/fc/imaged_nodes_info.yml +++ b/examples/fc/imaged_nodes_info.yml @@ -2,39 +2,36 @@ - name: Imaged Nodes Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Imaged-Node details with imaged_node_uuid - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - imaged_node_uuid: "" - register: output + - name: Imaged-Node details with imaged_node_uuid + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_node_uuid: + register: output - - name: Imaged-Node details with filters - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - 
nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - node_state: "STATE_IMAGING" - register: output + - name: Imaged-Node details with filters + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + node_state: STATE_IMAGING + register: output - - name: Imaged-Node details with custom filter - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - custom_filter: - model: "" - register: output + - name: Imaged-Node details with custom filter + nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + custom_filter: + model: + register: output - - name: details of imaged node - debug: - msg: '{{ output }}' + - name: Details of imaged node + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/fip.yml b/examples/fip.yml index 0fb7976c2..985a8b62b 100644 --- a/examples/fip.yml +++ b/examples/fip.yml @@ -2,8 +2,6 @@ - name: FIP playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,20 +10,20 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: external_subnet_name: "" vm_name: "" - name: Create floating ip with external subnet uuid - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: present - wait: True + wait: true external_subnet: name: "{{ external_subnet_name }}" register: result - name: Assign floating ip to vm - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: present external_subnet: name: "{{ external_subnet.name }}" @@ -34,7 +32,7 @@ register: result - name: Delete all created 
floating ips - ntnx_floating_ips: + nutanix.ncp.ntnx_floating_ips: state: absent fip_uuid: "{{ result.fip_uuid }}" register: result diff --git a/examples/fip_info.yml b/examples/fip_info.yml index 3d92c2c20..aa3e9cbd2 100644 --- a/examples/fip_info.yml +++ b/examples/fip_info.yml @@ -2,8 +2,6 @@ - name: FIP_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,27 +10,26 @@ validate_certs: false tasks: - - name: List floating_ips using ascending ip sorting and floating_ip filter - ntnx_floating_ips_info: + - name: List floating_ips using ascending ip sorting and floating_ip filter + nutanix.ncp.ntnx_floating_ips_info: filter: floating_ip: "10.0.1.2" kind: floating_ip sort_order: "ASCENDING" sort_attribute: "floating_ip" register: result - ignore_errors: True + ignore_errors: true - name: List floating_ips using length and offset - ntnx_floating_ips_info: + nutanix.ncp.ntnx_floating_ips_info: length: 3 offset: 0 register: result - ignore_errors: True - + ignore_errors: true - name: List floating_ips using filter and custom_filter - ntnx_floating_ips_info: + nutanix.ncp.ntnx_floating_ips_info: filter: name: custom_filter: diff --git a/examples/foundation/get_images_info.yml b/examples/foundation/get_images_info.yml index 9ee06f18b..31b5095e7 100644 --- a/examples/foundation/get_images_info.yml +++ b/examples/foundation/get_images_info.yml @@ -1,21 +1,22 @@ +--- # pull hypervisor and nos packages info from FVM - name: Get hypervisor and nos packages info from FVM hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: get hypervisor images info from foundation - ntnx_foundation_hypervisor_images_info: - nutanix_host: "10.xx.xx.xx" + - name: Get hypervisor images info from foundation + nutanix.ncp.ntnx_foundation_hypervisor_images_info: + nutanix_host: 10.xx.xx.xx register: hyp - - name: get aos images info from foundation - ntnx_foundation_aos_packages_info: - nutanix_host: "10.xx.xx.xx" + - name: Get aos images info from
foundation + nutanix.ncp.ntnx_foundation_aos_packages_info: + nutanix_host: 10.xx.xx.xx register: nos - - debug: + - name: Print available hypervisor image details + ansible.builtin.debug: msg: "{{ hyp }}" - - debug: + - name: Print available NOS image details + ansible.builtin.debug: msg: "{{ nos }}" diff --git a/examples/foundation/image_nodes.yml b/examples/foundation/image_nodes.yml index 6e2b7cb58..0e486f6c0 100644 --- a/examples/foundation/image_nodes.yml +++ b/examples/foundation/image_nodes.yml @@ -1,63 +1,63 @@ +--- # Here we will image three kind of nodes with different methods. # We will image one node using manual mode. Second node, which can be dos running node, will be imaged using discovery mode using cvm # Third node, which can be nutanix imaged(aos running) node, will be imaged using discovery mode using IPMI - name: Image nodes hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Image nodes using manual and discovery modes. Create cluster - ntnx_foundation: - timeout: 4500 - nutanix_host: "10.xx.xx.xx" - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - hypervisor_gateway: "10.xx.xx.xx" - hypervisor_netmask: "xx.xx.xx.xx" - default_ipmi_user: "" - nos_package: "" - blocks: - - block_id: "xxxxx" - nodes: - - manual_mode: - cvm_ip: "10.xx.xx.xx" - cvm_gb_ram: 50 - hypervisor_hostname: "superman1" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_gateway: "10.xx.xx.xx" - ipmi_ip: "10.xx.xx.xx" - ipmi_password: "" - hypervisor: "kvm" - hypervisor_ip: "10.xx.xx.xx" - node_position: "A" - #dos node using cvm and discover it using discovery mode. Here we have to provide hypervisor details mandatorily as its dos nodes. - #You can skip hypervisor details incase of aos running node and discovery mode. AOS running nodes have hypervisor running and network - #configuration is pulled internally. 
- - discovery_mode: - cvm_gb_ram: 50 - node_serial: "xxxxxx" - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "superman2" - hypervisor_ip: "10.xx.xx.xx" - cvm_ip: "10.xx.xx.xx" - hypervisor: "kvm" - #image aos running node using ipmi and discover it using discovery mode - - discovery_mode: - cvm_gb_ram: 50 - ipmi_password: "" - node_serial: "xxxxxx" - discovery_override: - hypervisor_hostname: "superman3" - clusters: + - name: Image nodes using manual and discovery modes. Create cluster + nutanix.ncp.ntnx_foundation: + timeout: 4500 + nutanix_host: 10.xx.xx.xx + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + hypervisor_gateway: 10.xx.xx.xx + hypervisor_netmask: xx.xx.xx.xx + default_ipmi_user: + nos_package: + blocks: + - block_id: xxxxx + nodes: + - manual_mode: + cvm_ip: 10.xx.xx.xx + cvm_gb_ram: 50 + hypervisor_hostname: superman1 + ipmi_netmask: xx.xx.xx.xx + ipmi_gateway: 10.xx.xx.xx + ipmi_ip: 10.xx.xx.xx + ipmi_password: + hypervisor: kvm + hypervisor_ip: 10.xx.xx.xx + node_position: A + # dos node using cvm and discover it using discovery mode. Here we have to provide hypervisor details mandatorily as its dos nodes. + # You can skip hypervisor details incase of aos running node and discovery mode. AOS running nodes have hypervisor running and network + # configuration is pulled internally. 
+ - discovery_mode: + cvm_gb_ram: 50 + node_serial: xxxxxx + device_hint: vm_installer + discovery_override: + hypervisor_hostname: superman2 + hypervisor_ip: 10.xx.xx.xx + cvm_ip: 10.xx.xx.xx + hypervisor: kvm + # image aos running node using ipmi and discover it using discovery mode + - discovery_mode: + cvm_gb_ram: 50 + ipmi_password: + node_serial: xxxxxx + discovery_override: + hypervisor_hostname: superman3 + clusters: - redundancy_factor: 2 cluster_members: - - "10.xx.xx.xx" - - "10.xx.xx.xx" - - "10.xx.xx.xx" - name: "test-cluster" - register: output + - 10.xx.xx.xx + - 10.xx.xx.xx + - 10.xx.xx.xx + name: test-cluster + register: output - - debug: - msg: '{{ output }}' + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/foundation/image_upload.yml b/examples/foundation/image_upload.yml index cb1463240..cd4d9171b 100644 --- a/examples/foundation/image_upload.yml +++ b/examples/foundation/image_upload.yml @@ -1,22 +1,22 @@ +--- # Here this will upload image from local machine (where this script runs) to the FVM - name: Upload images hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - name: Image upload # check_mode: yes - ntnx_foundation_image_upload: - nutanix_host: "10.xx.xx.xx" + nutanix.ncp.ntnx_foundation_image_upload: + nutanix_host: 10.xx.xx.xx # change state to "absent" to delete this image. 
For delete, source is not required state: present - source: "" - filename: "" + source: + filename: # value of installer_type must be one of: kvm, esx, hyperv, xen or nos installer_type: kvm timeout: 1800 register: upload_result - - debug: - msg: '{{ upload_result }}' + - name: Print upload result + ansible.builtin.debug: + msg: "{{ upload_result }}" diff --git a/examples/foundation/ipmi_config.yml b/examples/foundation/ipmi_config.yml index 20fce2b9f..7f7342a5a 100644 --- a/examples/foundation/ipmi_config.yml +++ b/examples/foundation/ipmi_config.yml @@ -1,23 +1,23 @@ +--- # Here we will configure IPMI of one node - name: Configure IPMI hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: configure ipmi + - name: Configure ipmi # check_mode: yes - ntnx_foundation_bmc_ipmi_config: - nutanix_host: "10.xx.xx.xx" - ipmi_user: "" - ipmi_password: "" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_gateway: "10.xx.xx.xx" - blocks: + nutanix.ncp.ntnx_foundation_bmc_ipmi_config: + nutanix_host: 10.xx.xx.xx + ipmi_user: + ipmi_password: + ipmi_netmask: xx.xx.xx.xx + ipmi_gateway: 10.xx.xx.xx + blocks: - nodes: - ipmi_mac: xx:xx:xx:xx:xx:xx - ipmi_ip: "10.xx.xx.xx" - register: output + ipmi_ip: 10.xx.xx.xx + register: output - - debug: - msg: '{{ output }}' + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/foundation/node_discovery_network_info.yml b/examples/foundation/node_discovery_network_info.yml index 2f81eb083..526d77338 100644 --- a/examples/foundation/node_discovery_network_info.yml +++ b/examples/foundation/node_discovery_network_info.yml @@ -1,25 +1,25 @@ +--- # Here we will discover nodes and also get node network info of particular some discovered nodes - name: Discover nodes and get their network info hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - - name: Discover all nodes - ntnx_foundation_discover_nodes_info: - nutanix_host: "10.xx.xx.xx" + - name: Discover all nodes 
+ nutanix.ncp.ntnx_foundation_discover_nodes_info: + nutanix_host: 10.xx.xx.xx # unskip line 12 to include configured(nodes part of cluster) nodes in the output # include_configured: true - register: discovered_nodes + register: discovered_nodes - # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module - - name: Get node network info of some discovered nodes - ntnx_foundation_node_network_info: - nutanix_host: "10.xx.xx.xx" - nodes: - - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" - - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" - register: result + # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module + - name: Get node network info of some discovered nodes + nutanix.ncp.ntnx_foundation_node_network_info: + nutanix_host: 10.xx.xx.xx + nodes: + - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" + - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" + register: result - - debug: - msg: "{{ result }}" + - name: Print node network info + ansible.builtin.debug: + msg: "{{ result }}" diff --git a/examples/hosts_info.yml b/examples/hosts_info.yml index f478b5249..0d204ea7f 100644 --- a/examples/hosts_info.yml +++ b/examples/hosts_info.yml @@ -2,8 +2,6 @@ - name: Hosts_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,28 +10,28 @@ validate_certs: false tasks: - - name: test getting all hosts - ntnx_hosts_info: - register: hosts + - name: Test getting all hosts + nutanix.ncp.ntnx_hosts_info: + register: hosts_list - - name: test getting particular host using uuid - ntnx_hosts_info: - host_uuid: '{{ hosts.response.entities[0].metadata.uuid }}' - register: result + - name: Test getting particular host using uuid + nutanix.ncp.ntnx_hosts_info: + host_uuid: "{{ hosts_list.response.entities[0].metadata.uuid }}" + register: result - - name: List hosts using length, offset, sort order and name sort 
attribute - ntnx_hosts_info: - length: 2 - offset: 0 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result - ignore_errors: True + - name: List hosts using length, offset, sort order and name sort attribute + nutanix.ncp.ntnx_hosts_info: + length: 2 + offset: 0 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result + ignore_errors: true - - name: List hosts using filter and custom_filter - ntnx_hosts_info: - filter: - name: - custom_filter: - serial_number: - register: result + - name: List hosts using filter and custom_filter + nutanix.ncp.ntnx_hosts_info: + filter: + name: + custom_filter: + serial_number: + register: result diff --git a/examples/iaas/iaas.yml b/examples/iaas/iaas.yml index 1275e3521..5c1059560 100644 --- a/examples/iaas/iaas.yml +++ b/examples/iaas/iaas.yml @@ -2,8 +2,6 @@ - name: IaaS Provisioning hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,17 +10,23 @@ validate_certs: false tasks: - name: Include vars file - include_vars: + ansible.builtin.include_vars: file: vars.yml - - include_role: + - name: Include external_subnet role + ansible.builtin.include_role: name: external_subnet - - include_role: + - name: Include vpc role + ansible.builtin.include_role: name: vpc - - include_role: + - name: Include static_route role + ansible.builtin.include_role: name: static_route - - include_role: + - name: Include overlay_subnet role + ansible.builtin.include_role: name: overlay_subnet - - include_role: + - name: Include vm role + ansible.builtin.include_role: name: vm - - include_role: + - name: Include fip role + ansible.builtin.include_role: name: fip diff --git a/examples/iaas/policies_create.yml b/examples/iaas/policies_create.yml index c0150c2c7..9ae92b544 100644 --- a/examples/iaas/policies_create.yml +++ b/examples/iaas/policies_create.yml @@ -2,8 +2,6 @@ - name: Policy based routing - NACLs hosts: localhost gather_facts: false - 
collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,7 +10,8 @@ validate_certs: false tasks: - name: Include vars file - include_vars: + ansible.builtin.include_vars: file: vars.yml - - include_role: + - name: Include pbr role + ansible.builtin.include_role: name: pbr diff --git a/examples/iaas/policies_delete.yml b/examples/iaas/policies_delete.yml index b2ff1aea0..7bf4ff32f 100644 --- a/examples/iaas/policies_delete.yml +++ b/examples/iaas/policies_delete.yml @@ -2,8 +2,6 @@ - name: Policy based routing - NACLs hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,5 +9,6 @@ nutanix_password: validate_certs: false tasks: - - include_role: + - name: Include pbr role + ansible.builtin.include_role: name: pbr_delete diff --git a/examples/iaas/roles/external_subnet/meta/main.yml b/examples/iaas/roles/external_subnet/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/external_subnet/meta/main.yml +++ b/examples/iaas/roles/external_subnet/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/external_subnet/tasks/external_subnet.yml b/examples/iaas/roles/external_subnet/tasks/external_subnet.yml index b4c232d4e..f7c889d7d 100644 --- a/examples/iaas/roles/external_subnet/tasks/external_subnet.yml +++ b/examples/iaas/roles/external_subnet/tasks/external_subnet.yml @@ -1,22 +1,27 @@ --- - - name: Create {{ item.name }} external subnet - ntnx_subnets: - state: present - name: "{{ item.name }}" - external_subnet: - vlan_id: "{{ item.vlan_id }}" - cluster: - name: "{{ cluster.name }}" - enable_nat: "{{ item.eNat }}" - ipam: - network_ip: "{{ item.ip }}" - network_prefix: "{{ item.prefix }}" - gateway_ip: "{{ item.gip }}" - ip_pools: - - start_ip: "{{ item.sip }}" - end_ip: "{{ item.eip }}" - register: external_network - - debug: - msg: - - "name: Ext_Nat" - - "uuid: 
{{ external_network.subnet_uuid }}" +- name: Start external subnet task + ansible.builtin.debug: + msg: Create {{ item.name }} external subnet + +- name: Create external subnet + nutanix.ncp.ntnx_subnets: + state: present + name: "{{ item.name }}" + external_subnet: + vlan_id: "{{ item.vlan_id }}" + cluster: + name: "{{ cluster.name }}" + enable_nat: "{{ item.eNat }}" + ipam: + network_ip: "{{ item.ip }}" + network_prefix: "{{ item.prefix }}" + gateway_ip: "{{ item.gip }}" + ip_pools: + - start_ip: "{{ item.sip }}" + end_ip: "{{ item.eip }}" + register: external_network +- name: Print external subnet name and uuid + ansible.builtin.debug: + msg: + - "name: Ext_Nat" + - "uuid: {{ external_network.subnet_uuid }}" diff --git a/examples/iaas/roles/external_subnet/tasks/main.yml b/examples/iaas/roles/external_subnet/tasks/main.yml index d0a8dcff9..e1c78b4f7 100644 --- a/examples/iaas/roles/external_subnet/tasks/main.yml +++ b/examples/iaas/roles/external_subnet/tasks/main.yml @@ -1,5 +1,12 @@ --- - name: Inputs for external subnets task - include_tasks: external_subnet.yml + ansible.builtin.include_tasks: external_subnet.yml with_items: - - { name: "{{external_subnet.name}}", vlan_id: "{{external_subnet.vlan_id}}", ip: "{{external_subnet.ip}}", prefix: "{{external_subnet.prefix}}", gip: "{{external_subnet.gip}}", sip: "{{external_subnet.sip}}", eip: "{{external_subnet.eip}}", eNat: "{{external_subnet.eNat}}" } + - name: "{{ external_subnet.name }}" + vlan_id: "{{ external_subnet.vlan_id }}" + ip: "{{ external_subnet.ip }}" + prefix: "{{ external_subnet.prefix }}" + gip: "{{ external_subnet.gip }}" + sip: "{{ external_subnet.sip }}" + eip: "{{ external_subnet.eip }}" + eNat: "{{ external_subnet.eNat }}" diff --git a/examples/iaas/roles/fip/meta/main.yml b/examples/iaas/roles/fip/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/fip/meta/main.yml +++ b/examples/iaas/roles/fip/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- 
+collections: + - nutanix.ncp diff --git a/examples/iaas/roles/fip/tasks/fip.yml b/examples/iaas/roles/fip/tasks/fip.yml index 9db0eda42..56f3c36f7 100644 --- a/examples/iaas/roles/fip/tasks/fip.yml +++ b/examples/iaas/roles/fip/tasks/fip.yml @@ -1,13 +1,14 @@ --- - name: Assign Floating IP for "{{ item.vm_name }}" - ntnx_floating_ips: - state: present - external_subnet: - name: "Ext-Nat" - vm: - name: "{{ item.vm_name }}" + nutanix.ncp.ntnx_floating_ips: + state: present + external_subnet: + name: Ext-Nat + vm: + name: "{{ item.vm_name }}" register: fip -- debug: +- name: Print fip uuid and vm name + ansible.builtin.debug: msg: - - "VM name: {{ item.vm_name }}" - - "uuid: {{ fip.fip_uuid }}" + - "VM name: {{ item.vm_name }}" + - "uuid: {{ fip.fip_uuid }}" diff --git a/examples/iaas/roles/fip/tasks/main.yml b/examples/iaas/roles/fip/tasks/main.yml index 5f20326a5..aa531a7f6 100644 --- a/examples/iaas/roles/fip/tasks/main.yml +++ b/examples/iaas/roles/fip/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Inputs for Floating IP task - include_tasks: fip.yml + ansible.builtin.include_tasks: fip.yml with_items: - - {vm_name: "Prod-Wordpress-App"} - - {vm_name: "Dev-Wordpress-App"} + - { vm_name: Prod-Wordpress-App } + - { vm_name: Dev-Wordpress-App } diff --git a/examples/iaas/roles/overlay_subnet/meta/main.yml b/examples/iaas/roles/overlay_subnet/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/overlay_subnet/meta/main.yml +++ b/examples/iaas/roles/overlay_subnet/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/overlay_subnet/tasks/main.yml b/examples/iaas/roles/overlay_subnet/tasks/main.yml index 357cd3989..3831c3133 100644 --- a/examples/iaas/roles/overlay_subnet/tasks/main.yml +++ b/examples/iaas/roles/overlay_subnet/tasks/main.yml @@ -1,14 +1,44 @@ --- - name: Inputs for overlay subnets - include_tasks: overlay_subnet.yml + ansible.builtin.include_tasks: 
overlay_subnet.yml with_items: - - { name: "{{Prod_SubnetA.name}}", vpc_name: "{{Prod_SubnetA.vpc_name}}", - nip: "{{Prod_SubnetA.nip}}", prefix: "{{Prod_SubnetA.prefix}}", - gip: "{{Prod_SubnetA.gip}}", sip: "{{Prod_SubnetA.sip}}", eip: "{{Prod_SubnetA.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Prod_SubnetB.name}}", vpc_name: "{{Prod_SubnetB.vpc_name}}", nip: "{{Prod_SubnetB.nip}}", prefix: "{{Prod_SubnetB.prefix}}", gip: "{{Prod_SubnetB.gip}}", sip: "{{Prod_SubnetB.sip}}", eip: "{{Prod_SubnetB.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Dev_SubnetA.name}}", vpc_name: "{{Dev_SubnetA.vpc_name}}", nip: "{{Dev_SubnetA.nip}}", prefix: "{{Dev_SubnetA.prefix}}", gip: "{{Dev_SubnetA.gip}}", sip: "{{Dev_SubnetA.sip}}", eip: "{{Dev_SubnetA.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } - - { name: "{{Dev_SubnetB.name}}", vpc_name: "{{Dev_SubnetB.vpc_name}}", nip: "{{Dev_SubnetB.nip}}", prefix: "{{Dev_SubnetB.prefix}}", gip: "{{Dev_SubnetB.gip}}", sip: "{{Dev_SubnetB.sip}}", eip: "{{Dev_SubnetB.eip}}", - domain_name: "{{domain_name}}", dns_servers: "{{dns_servers}}", domain_search: "{{domain_search}}" } + - name: "{{ Prod_SubnetA.name }}" + vpc_name: "{{ Prod_SubnetA.vpc_name }}" + nip: "{{ Prod_SubnetA.nip }}" + prefix: "{{ Prod_SubnetA.prefix }}" + gip: "{{ Prod_SubnetA.gip }}" + sip: "{{ Prod_SubnetA.sip }}" + eip: "{{ Prod_SubnetA.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Prod_SubnetB.name }}" + vpc_name: "{{ Prod_SubnetB.vpc_name }}" + nip: "{{ Prod_SubnetB.nip }}" + prefix: "{{ Prod_SubnetB.prefix }}" + gip: "{{ Prod_SubnetB.gip }}" + sip: "{{ Prod_SubnetB.sip }}" + eip: "{{ Prod_SubnetB.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ 
dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Dev_SubnetA.name }}" + vpc_name: "{{ Dev_SubnetA.vpc_name }}" + nip: "{{ Dev_SubnetA.nip }}" + prefix: "{{ Dev_SubnetA.prefix }}" + gip: "{{ Dev_SubnetA.gip }}" + sip: "{{ Dev_SubnetA.sip }}" + eip: "{{ Dev_SubnetA.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + - name: "{{ Dev_SubnetB.name }}" + vpc_name: "{{ Dev_SubnetB.vpc_name }}" + nip: "{{ Dev_SubnetB.nip }}" + prefix: "{{ Dev_SubnetB.prefix }}" + gip: "{{ Dev_SubnetB.gip }}" + sip: "{{ Dev_SubnetB.sip }}" + eip: "{{ Dev_SubnetB.eip }}" + domain_name: "{{ domain_name }}" + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" diff --git a/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml b/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml index 50866ba23..1e02cb1d3 100644 --- a/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml +++ b/examples/iaas/roles/overlay_subnet/tasks/overlay_subnet.yml @@ -1,6 +1,10 @@ --- -- name: Create {{ item.name }} overlay subnet - ntnx_subnets: +- name: Start overlay subnet task + ansible.builtin.debug: + msg: Create {{ item.name }} overlay subnet + +- name: Create overlay subnet + nutanix.ncp.ntnx_subnets: state: present name: "{{ item.name }}" overlay_subnet: @@ -11,14 +15,15 @@ network_prefix: "{{ item.prefix }}" gateway_ip: "{{ item.gip }}" ip_pools: - - start_ip: "{{ item.sip }}" - end_ip: "{{ item.eip }}" + - start_ip: "{{ item.sip }}" + end_ip: "{{ item.eip }}" dhcp: - dns_servers: "{{ item.dns_servers }}" - domain_name: "{{ item.domain_name }}" - domain_search: "{{ item.domain_search }}" + dns_servers: "{{ item.dns_servers }}" + domain_name: "{{ item.domain_name }}" + domain_search: "{{ item.domain_search }}" register: overlay -- debug: +- name: Print overlay name and uuid + ansible.builtin.debug: msg: - "name: {{ overlay.response.status.name }}" - "uuid: {{ overlay.subnet_uuid }}" diff 
--git a/examples/iaas/roles/pbr/meta/main.yml b/examples/iaas/roles/pbr/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/pbr/meta/main.yml +++ b/examples/iaas/roles/pbr/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/pbr/tasks/main.yml b/examples/iaas/roles/pbr/tasks/main.yml index 72428dd2b..6c1f2e67b 100644 --- a/examples/iaas/roles/pbr/tasks/main.yml +++ b/examples/iaas/roles/pbr/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Include PBR task - include_tasks: pbr.yml + ansible.builtin.include_tasks: pbr.yml with_items: - - { vpc_name: "{{vpc_names[0]}}", priority: 101, nip: 10.1.2.0, prefix: 24 } - - { vpc_name: "{{vpc_names[1]}}", priority: 101, nip: 10.1.2.0, prefix: 24 } + - { vpc_name: "{{ vpc_names[0] }}", priority: 101, nip: 10.1.2.0, prefix: 24 } + - { vpc_name: "{{ vpc_names[1] }}", priority: 101, nip: 10.1.2.0, prefix: 24 } diff --git a/examples/iaas/roles/pbr/tasks/pbr.yml b/examples/iaas/roles/pbr/tasks/pbr.yml index 760c69621..1cd490972 100644 --- a/examples/iaas/roles/pbr/tasks/pbr.yml +++ b/examples/iaas/roles/pbr/tasks/pbr.yml @@ -1,22 +1,23 @@ --- - name: Create PBR for vpc "{{ item.vpc_name }}" - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: present vpc: name: "{{ item.vpc_name }}" priority: "{{ item.priority }}" source: network: - ip: "{{item.nip}}" - prefix: "{{item.prefix}}" + ip: "{{ item.nip }}" + prefix: "{{ item.prefix }}" destination: - external: True + external: true protocol: - any: True + any: true action: - deny: True + deny: true register: pbr -- debug: +- name: Print vpc name and pbr uuid + ansible.builtin.debug: msg: - - "vpc_name: {{ item.vpc_name }}" - - "uuid: {{ pbr.pbr_uuid }}" + - "vpc_name: {{ item.vpc_name }}" + - "uuid: {{ pbr.pbr_uuid }}" diff --git a/examples/iaas/roles/pbr_delete/meta/main.yml b/examples/iaas/roles/pbr_delete/meta/main.yml index a734b2343..5289004d7 100644 --- 
a/examples/iaas/roles/pbr_delete/meta/main.yml +++ b/examples/iaas/roles/pbr_delete/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/pbr_delete/tasks/main.yml b/examples/iaas/roles/pbr_delete/tasks/main.yml index 0129299a5..209c40f68 100644 --- a/examples/iaas/roles/pbr_delete/tasks/main.yml +++ b/examples/iaas/roles/pbr_delete/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Include PBR task - include_tasks: pbr_delete.yml + ansible.builtin.include_tasks: pbr_delete.yml with_items: - { pbr_uuid: 8c6ce427-a63d-482d-bf59-b8a14a062c1d } - { pbr_uuid: fb6fb539-7b24-48a1-b285-9a1fb8b97e5f } diff --git a/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml b/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml index dd2562cf1..d758c66f1 100644 --- a/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml +++ b/examples/iaas/roles/pbr_delete/tasks/pbr_delete.yml @@ -1,9 +1,10 @@ --- - name: Delete PBR "{{ item.pbr_uuid }}" - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: absent pbr_uuid: "{{ item.pbr_uuid }}" register: pbr -- debug: +- name: Print pbr uuid + ansible.builtin.debug: msg: - - "uuid: {{ pbr.pbr_uuid }}" + - "uuid: {{ pbr.pbr_uuid }}" diff --git a/examples/iaas/roles/static_route/meta/main.yml b/examples/iaas/roles/static_route/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/static_route/meta/main.yml +++ b/examples/iaas/roles/static_route/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/static_route/tasks/main.yml b/examples/iaas/roles/static_route/tasks/main.yml index c4a177a8f..3bfe9cf24 100644 --- a/examples/iaas/roles/static_route/tasks/main.yml +++ b/examples/iaas/roles/static_route/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Inputs for static routes task - include_tasks: static_route.yml + ansible.builtin.include_tasks: static_route.yml with_items: - - { vpc_uuid: 
"{{vpc_uuids[0]}}", subnet_name: "{{external_subnet.name}}", destination: "10.2.2.0/24"} - - { vpc_uuid: "{{vpc_uuids[1]}}", subnet_name: "{{external_subnet.name}}", destination: "10.2.3.0/24"} + - { vpc_uuid: "{{ vpc_uuids[0] }}", subnet_name: "{{ external_subnet.name }}", destination: 10.2.2.0/24 } + - { vpc_uuid: "{{ vpc_uuids[1] }}", subnet_name: "{{ external_subnet.name }}", destination: 10.2.3.0/24 } diff --git a/examples/iaas/roles/static_route/tasks/static_route.yml b/examples/iaas/roles/static_route/tasks/static_route.yml index 924571431..fbe1202f9 100644 --- a/examples/iaas/roles/static_route/tasks/static_route.yml +++ b/examples/iaas/roles/static_route/tasks/static_route.yml @@ -1,14 +1,15 @@ --- - - name: Create static route - ntnx_static_routes: - state: present - vpc_uuid: "{{ item.vpc_uuid }}" - static_routes: - - destination: "{{ item.destination }}" - next_hop: - external_subnet_ref: - name: "{{ item.subnet_name }}" - register: static_route - - debug: - msg: - - "uuid: {{ static_route.response.metadata.uuid }}" +- name: Create static route + nutanix.ncp.ntnx_static_routes: + state: present + vpc_uuid: "{{ item.vpc_uuid }}" + static_routes: + - destination: "{{ item.destination }}" + next_hop: + external_subnet_ref: + name: "{{ item.subnet_name }}" + register: static_route +- name: Print static route uuid + ansible.builtin.debug: + msg: + - "uuid: {{ static_route.response.metadata.uuid }}" diff --git a/examples/iaas/roles/vm/meta/main.yml b/examples/iaas/roles/vm/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/vm/meta/main.yml +++ b/examples/iaas/roles/vm/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/vm/tasks/main.yml b/examples/iaas/roles/vm/tasks/main.yml index 7c3949250..829884979 100644 --- a/examples/iaas/roles/vm/tasks/main.yml +++ b/examples/iaas/roles/vm/tasks/main.yml @@ -1,8 +1,28 @@ --- - name: Inputs for vm task - include_tasks: 
vm.yml + ansible.builtin.include_tasks: vm.yml with_items: - - {name: "Prod-Wordpress-App", desc: "Prod-Wordpress-App", is_connected: True, subnet_name: "{{Prod_SubnetA.name}}", image_name: "wordpress-appserver", private_ip: ""} - - {name: "Prod-Wordpress-DB", desc: "Prod-Wordpress-DB", is_connected: True, subnet_name: "{{Prod_SubnetB.name}}", image_name: "wordpress-db", private_ip: 10.1.2.5} - - {name: "Dev-Wordpress-App", desc: "Dev-Wordpress-App", is_connected: True, subnet_name: "{{Dev_SubnetA.name}}", image_name: "wordpress-appserver", private_ip: ""} - - {name: "Dev-Wordpress-DB", desc: "Dev-Wordpress-DB", is_connected: True, subnet_name: "{{Dev_SubnetB.name}}", image_name: "wordpress-db", private_ip: 10.1.2.5} + - name: Prod-Wordpress-App + desc: Prod-Wordpress-App + is_connected: true + subnet_name: "{{ Prod_SubnetA.name }}" + image_name: wordpress-appserver + private_ip: "" + - name: Prod-Wordpress-DB + desc: Prod-Wordpress-DB + is_connected: true + subnet_name: "{{ Prod_SubnetB.name }}" + image_name: wordpress-db + private_ip: 10.1.2.5 + - name: Dev-Wordpress-App + desc: Dev-Wordpress-App + is_connected: true + subnet_name: "{{ Dev_SubnetA.name }}" + image_name: wordpress-appserver + private_ip: "" + - name: Dev-Wordpress-DB + desc: Dev-Wordpress-DB + is_connected: true + subnet_name: "{{ Dev_SubnetB.name }}" + image_name: wordpress-db + private_ip: 10.1.2.5 diff --git a/examples/iaas/roles/vm/tasks/vm.yml b/examples/iaas/roles/vm/tasks/vm.yml index ca2332e4c..e07e64578 100644 --- a/examples/iaas/roles/vm/tasks/vm.yml +++ b/examples/iaas/roles/vm/tasks/vm.yml @@ -1,6 +1,10 @@ --- -- name: Create "{{ item.name }}" VM - ntnx_vms: +- name: Start vm task + ansible.builtin.debug: + msg: Create "{{ item.name }}" VM + +- name: Create VM + nutanix.ncp.ntnx_vms: state: present name: "{{ item.name }}" desc: "{{ item.desc }}" @@ -12,13 +16,14 @@ name: "{{ item.subnet_name }}" private_ip: "{{ item.private_ip }}" disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - 
clone_image: - name: "{{ item.image_name }}" + - type: DISK + size_gb: 30 + bus: SATA + clone_image: + name: "{{ item.image_name }}" register: vm -- debug: +- name: Print vm name and uuid + ansible.builtin.debug: msg: - - "name: {{ vm.response.status.name }}" - - "uuid: {{ vm.vm_uuid }}" + - "name: {{ vm.response.status.name }}" + - "uuid: {{ vm.vm_uuid }}" diff --git a/examples/iaas/roles/vpc/meta/main.yml b/examples/iaas/roles/vpc/meta/main.yml index a734b2343..5289004d7 100644 --- a/examples/iaas/roles/vpc/meta/main.yml +++ b/examples/iaas/roles/vpc/meta/main.yml @@ -1,2 +1,3 @@ - collections: - - nutanix.ncp +--- +collections: + - nutanix.ncp diff --git a/examples/iaas/roles/vpc/tasks/main.yml b/examples/iaas/roles/vpc/tasks/main.yml index 21ce27258..a15150041 100644 --- a/examples/iaas/roles/vpc/tasks/main.yml +++ b/examples/iaas/roles/vpc/tasks/main.yml @@ -1,8 +1,9 @@ --- -- set_fact: +- name: Define vpc_uuids variable + ansible.builtin.set_fact: vpc_uuids: [] - name: Inputs vpcs task - include_tasks: vpc.yml + ansible.builtin.include_tasks: vpc.yml with_items: - - { name: "{{vpc_names[0]}}", subnet_name: "{{external_subnet.name}}"} - - { name: "{{vpc_names[1]}}", subnet_name: "{{external_subnet.name}}"} + - { name: "{{ vpc_names[0] }}", subnet_name: "{{ external_subnet.name }}" } + - { name: "{{ vpc_names[1] }}", subnet_name: "{{ external_subnet.name }}" } diff --git a/examples/iaas/roles/vpc/tasks/vpc.yml b/examples/iaas/roles/vpc/tasks/vpc.yml index a87d5b5d2..9513ff964 100644 --- a/examples/iaas/roles/vpc/tasks/vpc.yml +++ b/examples/iaas/roles/vpc/tasks/vpc.yml @@ -1,14 +1,20 @@ --- -- name: Create {{ item.name }} VPC with external connectivity to "{{ item.subnet_name }}" - ntnx_vpcs: - state: present - name: "{{ item.name }}" - external_subnets: - - subnet_name: "{{ item.subnet_name }}" +- name: Start vpc task + ansible.builtin.debug: + msg: Create {{ item.name }} VPC with external connectivity to "{{ item.subnet_name }}" + +- name: Create VPC with 
external connectivity + nutanix.ncp.ntnx_vpcs: + state: present + name: "{{ item.name }}" + external_subnets: + - subnet_name: "{{ item.subnet_name }}" register: vpc -- debug: +- name: Print vpc response + ansible.builtin.debug: msg: - - "name: {{ vpc.response.status.name }}" - - "uuid: {{ vpc.vpc_uuid }}" -- set_fact: - vpc_uuids: "{{ vpc_uuids + [ vpc.vpc_uuid ] }}" + - "name: {{ vpc.response.status.name }}" + - "uuid: {{ vpc.vpc_uuid }}" +- name: Define vpc_uuids variable + ansible.builtin.set_fact: + vpc_uuids: "{{ vpc_uuids + [vpc.vpc_uuid] }}" diff --git a/examples/iaas/vars.yml b/examples/iaas/vars.yml index ff9daee05..86cf7a804 100644 --- a/examples/iaas/vars.yml +++ b/examples/iaas/vars.yml @@ -9,41 +9,41 @@ external_subnet: gip: 10.44.3.193 sip: 10.44.3.198 eip: 10.44.3.207 - eNat: True -vpc_names: ["Prod", "Dev"] -domain_search: ["calm.nutanix.com", "eng.nutanix.com"] -dns_servers: ["8.8.8.8", "8.8.8.4"] -domain_name: "calm.nutanix.com" + eNat: true +vpc_names: [Prod, Dev] +domain_search: [calm.nutanix.com, eng.nutanix.com] +dns_servers: [8.8.8.8, 8.8.8.4] +domain_name: calm.nutanix.com Prod_SubnetA: - name: Prod_SubnetA - vpc_name: Prod - nip: 10.1.1.0 - prefix: 24 - gip: 10.1.1.1 - sip: 10.1.1.2 - eip: 10.1.1.5 + name: Prod_SubnetA + vpc_name: Prod + nip: 10.1.1.0 + prefix: 24 + gip: 10.1.1.1 + sip: 10.1.1.2 + eip: 10.1.1.5 Prod_SubnetB: - name: Prod_SubnetB - vpc_name: Prod - nip: 10.1.2.0 - prefix: 24 - gip: 10.1.2.1 - sip: 10.1.2.2 - eip: 10.1.2.5 + name: Prod_SubnetB + vpc_name: Prod + nip: 10.1.2.0 + prefix: 24 + gip: 10.1.2.1 + sip: 10.1.2.2 + eip: 10.1.2.5 Dev_SubnetA: - name: Dev_SubnetA - vpc_name: Dev - nip: 10.1.1.0 - prefix: 24 - gip: 10.1.1.1 - sip: 10.1.1.2 - eip: 10.1.1.5 + name: Dev_SubnetA + vpc_name: Dev + nip: 10.1.1.0 + prefix: 24 + gip: 10.1.1.1 + sip: 10.1.1.2 + eip: 10.1.1.5 Dev_SubnetB: - name: Dev_SubnetB - vpc_name: Dev - nip: 10.1.2.0 - prefix: 24 - gip: 10.1.2.1 - sip: 10.1.2.2 - eip: 10.1.2.5 + name: Dev_SubnetB + vpc_name: 
Dev + nip: 10.1.2.0 + prefix: 24 + gip: 10.1.2.1 + sip: 10.1.2.2 + eip: 10.1.2.5 diff --git a/examples/images.yml b/examples/images.yml index 82f11491d..5624ac958 100644 --- a/examples/images.yml +++ b/examples/images.yml @@ -2,8 +2,6 @@ - name: Images playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,14 +10,14 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: image_uuid: "" source_path: "" source_uri: "" - clusters_name: "" + clusters_name: "" - - name: create image from local workstation - ntnx_images: + - name: Create image from local workstation + nutanix.ncp.ntnx_images: state: "present" source_path: "{{source_path}}" clusters: @@ -38,8 +36,8 @@ product_version: "1.2.0" wait: true - - name: create image from with source as remote server file location - ntnx_images: + - name: Create image from with source as remote server file location + nutanix.ncp.ntnx_images: state: "present" source_uri: "{{source_uri}}" clusters: @@ -58,8 +56,8 @@ product_version: "1.2.0" wait: true - - name: override categories of existing image - ntnx_images: + - name: Override categories of existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "{{image-uuid}}" categories: @@ -69,15 +67,15 @@ - Backup wait: true - - name: dettach all categories from existing image - ntnx_images: + - name: Dettach all categories from existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" remove_categories: true wait: true - - name: delete existing image - ntnx_images: + - name: Delete existing image + nutanix.ncp.ntnx_images: state: "absent" image_uuid: "00000000-0000-0000-0000-000000000000" wait: true diff --git a/examples/inventory/nutanix.yaml b/examples/inventory/nutanix.yaml index 89f3c20ce..16abac947 100644 --- a/examples/inventory/nutanix.yaml +++ b/examples/inventory/nutanix.yaml @@ -1,13 
+1,14 @@ +--- plugin: nutanix.ncp.ntnx_prism_vm_inventory nutanix_hostname: nutanix_username: nutanix_password: validate_certs: false -data: {"offset": 0, "length": 1000} +data: { offset: 0, length: 1000 } groups: group_1: "'' in name" group_2: "''==name" keyed_groups: - - prefix: "host" - separator: ':' - key: "ansible_host" + - prefix: host + separator: ":" + key: ansible_host diff --git a/examples/karbon/cluster_info.yml b/examples/karbon/cluster_info.yml index b5e916b25..7c56ebf71 100644 --- a/examples/karbon/cluster_info.yml +++ b/examples/karbon/cluster_info.yml @@ -1,9 +1,7 @@ --- -- name: get k8s cluster info +- name: Get k8s cluster info hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,22 +10,23 @@ validate_certs: false tasks: - - set_fact: - cluster_name: + - name: Set cluster name + ansible.builtin.set_fact: + cluster_name: - - name: test getting cluster using name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - register: result + - name: Test getting cluster using name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + register: result - - name: test getting cluster with ssh config using cluster name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - fetch_ssh_credentials: true - register: result + - name: Test getting cluster with ssh config using cluster name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + fetch_ssh_credentials: true + register: result - - name: test getting cluster with kubeconfig config using cluster name - ntnx_karbon_clusters_info: - cluster_name: "{{cluster_name}}" - fetch_kubeconfig: true - register: result + - name: Test getting cluster with kubeconfig config using cluster name + nutanix.ncp.ntnx_karbon_clusters_info: + cluster_name: "{{ cluster_name }}" + fetch_kubeconfig: true + register: result diff --git a/examples/karbon/create_k8s_cluster.yml 
b/examples/karbon/create_k8s_cluster.yml index 689975485..ae17d9e4d 100644 --- a/examples/karbon/create_k8s_cluster.yml +++ b/examples/karbon/create_k8s_cluster.yml @@ -1,9 +1,7 @@ --- -- name: create k8s cluster +- name: Create k8s cluster hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,171 +10,171 @@ validate_certs: false tasks: - - set_fact: - cluster: - name: - uuid: - node_subnet: - name: - uuid: - storage_class: - name: - storage_container: - name: - cni: - node_cidr_mask_size: 24 - service_ipv4_cidr: "172.19.0.0/16" - pod_ipv4_cidr: "172.20.0.0/16" - karbon_name: test-module21 - k8s_version: "1.19.8-0" - host_os: "ntnx-1.0" - nutanix_cluster_password: - nutanix_cluster_username: - control_plane_virtual_ip: + - name: Set vars + ansible.builtin.set_fact: + cluster: + name: + uuid: + node_subnet: + name: + uuid: + storage_class: + name: + storage_container: + name: + cni: + node_cidr_mask_size: 24 + service_ipv4_cidr: 172.19.0.0/16 + pod_ipv4_cidr: 172.20.0.0/16 + karbon_name: test-module21 + k8s_version: 1.19.8-0 + host_os: ntnx-1.0 + nutanix_cluster_password: + nutanix_cluster_username: + control_plane_virtual_ip: + ############################# + - name: Create DEV cluster with Flannel network provider + nutanix.ncp.ntnx_karbon_clusters: + cluster: + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + name: "{{ node_subnet.name }}" + cluster_type: DEV + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Flannel + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Delete + storage_container: "{{ 
storage_container.name }}" + file_system: ext4 + flash_mode: false + register: result - ############################# - - name: create DEV cluster with Flannel network provider - ntnx_karbon_clusters: - cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - name: "{{node_subnet.name}}" - cluster_type: DEV - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Delete - storage_container: "{{storage_container.name}}" - file_system: ext4 - flash_mode: False - register: result + - name: Delete dev cluster + nutanix.ncp.ntnx_karbon_clusters: + state: absent + name: "{{ result.response.name }}" + register: result - - name: delete dev cluster - ntnx_karbon_clusters: - state: absent - name: "{{result.response.name}}" - register: result + - name: Create DEV cluster with Calico network provider + nutanix.ncp.ntnx_karbon_clusters: + cluster: + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + uuid: "{{ node_subnet.uuid }}" + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Calico + custom_node_configs: + etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 120 + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + 
default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Retain + storage_container: "{{ storage_container.name }}" + file_system: xfs + flash_mode: true + register: result - - name: create DEV cluster with Calico network provider - ntnx_karbon_clusters: - cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - uuid: "{{node_subnet.uuid}}" - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Calico - custom_node_configs: - etcd: - num_instances: 1 + - name: Create worker node pool with subnet uuid + nutanix.ncp.ntnx_karbon_clusters_node_pools: + node_subnet: + uuid: + node_pool_name: "{{ karbon_name }}" + cluster_name: "{{ cluster.name }}" + pool_config: + num_instances: 2 cpu: 4 memory_gb: 8 disk_gb: 120 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 120 - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Retain - storage_container: "{{storage_container.name}}" - file_system: xfs - flash_mode: true - register: result - - - name: Create worker node pool with subnet uuid - ntnx_karbon_clusters_node_pools: - node_subnet: - uuid: "" - node_pool_name: "{{karbon_name}}" - cluster_name: "{{cluster.name}}" - pool_config: - num_instances: 2 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: update pool by increasing cpu,memory_gb,num_instances and add labels - ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "{{karbon_name}}" - cluster_name: "{{cluster.name}}" - pool_config: + - name: Update pool by increasing 
cpu,memory_gb,num_instances and add labels + nutanix.ncp.ntnx_karbon_clusters_node_pools: + wait: true + node_pool_name: "{{ karbon_name }}" + cluster_name: "{{ cluster.name }}" + pool_config: cpu: 6 memory_gb: 10 disk_gb: 150 num_instances: 4 - add_labels: - property1: "test-property1" - register: result - ignore_errors: true + add_labels: + property1: test-property1 + register: result + ignore_errors: true - - name: create prod cluster - ntnx_karbon_clusters: - cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - name: "{{node_subnet.name}}" - cluster_type: PROD - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: "{{storage_class.name}}" - reclaim_policy: Delete - storage_container: "{{storage_container.name}}" - file_system: ext4 - flash_mode: False - control_plane_virtual_ip: "{{control_plane_virtual_ip}}" - custom_node_configs: - etcd: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 240 - register: result + - name: Create prod cluster + nutanix.ncp.ntnx_karbon_clusters: + cluster: + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" + node_subnet: + name: "{{ node_subnet.name }}" + cluster_type: PROD + cni: + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" + network_provider: Flannel + storage_class: + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + 
nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true + name: "{{ storage_class.name }}" + reclaim_policy: Delete + storage_container: "{{ storage_container.name }}" + file_system: ext4 + flash_mode: false + control_plane_virtual_ip: "{{ control_plane_virtual_ip }}" + custom_node_configs: + etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 240 + register: result diff --git a/examples/karbon/create_registries.yml b/examples/karbon/create_registries.yml index 42c75e310..5992fbee8 100644 --- a/examples/karbon/create_registries.yml +++ b/examples/karbon/create_registries.yml @@ -1,9 +1,7 @@ --- -- name: create registeries +- name: Create registeries hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,30 +10,31 @@ validate_certs: false tasks: - - set_fact: - registry_name: - url: - port_number: - username: - password: + - name: Set vars + ansible.builtin.set_fact: + registry_name: + url: + port_number: + username: + password: - - name: create registry - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - port: "{{port_number}}" - register: result + - name: Create registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + port: "{{ port_number }}" + register: result - - name: delete registry - ntnx_karbon_registries: - name: "{{registry_name}}" - state: absent - register: result + - name: Delete registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + state: absent + register: result - - name: create registry with username and password - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - username: "{{username}}" - password: "{{password}}" - register: result + - name: Create registry with username and password + 
nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + username: "{{ username }}" + password: "{{ password }}" + register: result diff --git a/examples/karbon/registries_info.yml b/examples/karbon/registries_info.yml index 81c2d8742..935658ee6 100644 --- a/examples/karbon/registries_info.yml +++ b/examples/karbon/registries_info.yml @@ -1,9 +1,7 @@ --- -- name: get registeries info +- name: Get registeries info hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,11 +10,11 @@ validate_certs: false tasks: - - name: test getting all registries - ntnx_karbon_registries_info: - register: registries + - name: Test getting all registries + nutanix.ncp.ntnx_karbon_registries_info: + register: registries - - name: test getting particular register using name - ntnx_karbon_registries_info: + - name: Test getting particular register using name + nutanix.ncp.ntnx_karbon_registries_info: registry_name: "{{ registries.response[1].name }}" - register: result + register: result diff --git a/examples/ndb/all_day2_actions.yml b/examples/ndb/all_day2_actions.yml index 4322a1302..35dc5e2e7 100644 --- a/examples/ndb/all_day2_actions.yml +++ b/examples/ndb/all_day2_actions.yml @@ -7,13 +7,9 @@ # 4. Restore database to previously created snapshot and latest snapshot # 5. Scale database # 6. 
Add/Remove linked databases - - -- name: perform day2 actions +- name: Perform day2 actions hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -22,110 +18,109 @@ validate_certs: false tasks: - ############################################ snapshots ########################################### - - name: create snapshot with minimal spec - ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}1" - time_machine_uuid: "{{time_machine_uuid}}" + - name: Create snapshot with minimal spec + nutanix.ncp.ntnx_ndb_database_snapshots: + name: "{{ snapshot_name }}1" + time_machine_uuid: "{{ time_machine_uuid }}" register: result - - name: create snapshot with expiry - ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}2" - time_machine_uuid: "{{time_machine_uuid}}" + - name: Create snapshot with expiry + nutanix.ncp.ntnx_ndb_database_snapshots: + name: "{{ snapshot_name }}2" + time_machine_uuid: "{{ time_machine_uuid }}" expiry_days: 4 register: result - - set_fact: - snapshot_uuid: "{{result.snapshot_uuid}}" + - name: Set snapshot uuid + ansible.builtin.set_fact: + snapshot_uuid: "{{ result.snapshot_uuid }}" - - name: rename snapshot - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2-updated" + - name: Rename snapshot + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2-updated" register: result - - name: update expiry - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + - name: Update expiry + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" expiry_days: 5 register: result - - name: remove expiry schedule - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + - name: Remove expiry schedule + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" remove_expiry: true register: result - name: Add expiry schedule 
and rename - ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2" + nutanix.ncp.ntnx_ndb_database_snapshots: + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2" expiry_days: 6 register: result - ############################################ log catchup ###################################### - - name: perform log catchup for restore - ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + - name: Perform log catchup for restore + nutanix.ncp.ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result - - name: perform log catchup - ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + - name: Perform log catchup + nutanix.ncp.ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result ########################################### restore ########################################### - - name: perform using pitr timestamp - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" + - name: Perform using pitr timestamp + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" pitr_timestamp: "2023-01-02 11:02:22" - timezone: "UTC" + timezone: UTC register: result - - name: perform restore using latest snapshot - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + - name: Perform restore using latest snapshot + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result - - name: perform restore using snapshot uuid - ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + - name: Perform restore using snapshot uuid + nutanix.ncp.ntnx_ndb_database_restore: + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result ########################################### scaling 
########################################### - - name: extend database storage for scaling database - ntnx_ndb_database_scale: - db_uuid: "{{db_uuid}}" + - name: Extend database storage for scaling database + nutanix.ncp.ntnx_ndb_database_scale: + db_uuid: "{{ db_uuid }}" storage_gb: 2 - pre_update_cmd: "ls" - post_update_cmd: "ls -a" + pre_update_cmd: ls + post_update_cmd: ls -a register: result ############################################ add / remove linked databases ########################################### - - name: add databases in database instance - ntnx_ndb_linked_databases: - db_instance_uuid: "{{db_uuid}}" + - name: Add databases in database instance + nutanix.ncp.ntnx_ndb_linked_databases: + db_instance_uuid: "{{ db_uuid }}" databases: - test1 - test2 register: result - - name: remove databases in database instance - ntnx_ndb_linked_databases: - state: "absent" - db_instance_uuid: "{{db_uuid}}" - database_uuid: "{{linked_databases.test1}}" + - name: Remove databases in database instance + nutanix.ncp.ntnx_ndb_linked_databases: + state: absent + db_instance_uuid: "{{ db_uuid }}" + database_uuid: "{{ linked_databases.test1 }}" register: result diff --git a/examples/ndb/create_clone.yml b/examples/ndb/create_clone.yml index 468058423..5fd9f98e4 100644 --- a/examples/ndb/create_clone.yml +++ b/examples/ndb/create_clone.yml @@ -2,8 +2,6 @@ - name: Create clone hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,7 +10,8 @@ validate_certs: false tasks: - - set_fact: + - name: Set vars + ansible.builtin.set_fact: clone_db: name: db_params_profile: @@ -32,21 +31,21 @@ snapshot: uuid: - - name: create clone using snapshot - ntnx_ndb_database_clones: - name: "{{clone_db.name}}" - desc: "ansible-created-clone" + - name: Create clone using snapshot + nutanix.ncp.ntnx_ndb_database_clones: + name: "{{ clone_db.name }}" + desc: ansible-created-clone db_params_profile: - name: 
"{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm.name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm.password }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -54,43 +53,44 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm.password}}" + db_password: "{{ vm.password }}" time_machine: - name: "{{time_machine.name}}" - snapshot_uuid: "{{snapshot.uuid}}" + name: "{{ time_machine.name }}" + snapshot_uuid: "{{ snapshot.uuid }}" removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - - name: create clone using point in time - ntnx_ndb_database_clones: - name: "{{clone_db.name}}" - desc: "ansible-created-clone" + - name: Create clone using point in time + nutanix.ncp.ntnx_ndb_database_clones: + name: "{{ clone_db.name }}" + desc: ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm.name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm.password }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -98,24 +98,25 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm.password}}" + db_password: "{{ vm.password }}" time_machine: - name: "{{time_machine.name}}" + name: "{{ time_machine.name }}" pitr_timestamp: "2023-02-28 12:00:00" - timestamp: "Asia/Calcutta" + timestamp: Asia/Calcutta removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: 
Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/create_stretched_vlan.yml b/examples/ndb/create_stretched_vlan.yml index 29e6c9e20..54b19f85e 100644 --- a/examples/ndb/create_stretched_vlan.yml +++ b/examples/ndb/create_stretched_vlan.yml @@ -2,8 +2,6 @@ - name: Create stretched vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,15 +10,15 @@ validate_certs: false tasks: - - name: Create stretched vlan - ntnx_ndb_stretched_vlans: - name: "{{st_vlan.name}}" - desc: "{{st_vlan.desc}}" + nutanix.ncp.ntnx_ndb_stretched_vlans: + name: "{{ st_vlan.name }}" + desc: "{{ st_vlan.desc }}" vlans: - - "" - - "" + - + - register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/create_time_machine_cluster.yml b/examples/ndb/create_time_machine_cluster.yml index 95f23a22a..bf64731d9 100644 --- a/examples/ndb/create_time_machine_cluster.yml +++ b/examples/ndb/create_time_machine_cluster.yml @@ -2,8 +2,6 @@ - name: NDB time machine's cluster creation hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,16 +10,16 @@ validate_certs: false tasks: - - name: NDB time machine's cluster creation - ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" + nutanix.ncp.ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{ time_machine.uuid }}" cluster: - name: "{{cluster.name}}" + name: "{{ cluster.name }}" sla: - name: "{{sla.name}}" - type: "{{type}}" + name: "{{ sla.name }}" + type: "{{ type }}" register: output - - debug: - msg: "{{output}}" + - 
name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/create_vlan.yml b/examples/ndb/create_vlan.yml index a77864d95..29207f24f 100644 --- a/examples/ndb/create_vlan.yml +++ b/examples/ndb/create_vlan.yml @@ -2,8 +2,6 @@ - name: Create Dhcp ndb vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,33 +10,34 @@ validate_certs: false tasks: - - name: Create Dhcp ndb vlan - ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + nutanix.ncp.ntnx_ndb_vlans: + name: "{{ ndb_vlan.name }}" vlan_type: DHCP cluster: - uuid: "{{cluster.uuid}}" + uuid: "{{ cluster.uuid }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - name: Create Static ndb vlan - ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + nutanix.ncp.ntnx_ndb_vlans: + name: "{{ ndb_vlan.name }}" vlan_type: Static - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/db_server_vms.yml b/examples/ndb/db_server_vms.yml index 7ae35cc47..faa0f288a 100644 --- a/examples/ndb/db_server_vms.yml +++ b/examples/ndb/db_server_vms.yml @@ -2,8 +2,6 @@ - name: NDB db server vms hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: 
group/nutanix.ncp.ntnx: nutanix_host: @@ -12,44 +10,43 @@ validate_certs: false tasks: - - - name: create spec for db server vm using time machine - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - name: "ansible-created-vm1-from-time-machine" - desc: "ansible-created-vm1-from-time-machine-time-machine" + - name: Create spec for db server vm using time machine + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + name: ansible-created-vm1-from-time-machine + desc: ansible-created-vm1-from-time-machine-time-machine time_machine: - uuid: "test_uuid" - snapshot_uuid: "test_snapshot_uuid" + uuid: test_uuid + snapshot_uuid: test_snapshot_uuid compute_profile: - uuid: "test_compute_uuid" + uuid: test_compute_uuid network_profile: - uuid: "test_network_uuid" + uuid: test_network_uuid cluster: - uuid: "test_cluster_uuid" - password: "test_password" - pub_ssh_key: "test_public_key" - database_type: "postgres_database" + uuid: test_cluster_uuid + password: test_password + pub_ssh_key: test_public_key + database_type: postgres_database automated_patching: maintenance_window: - uuid: "test_window_uuid" + uuid: test_window_uuid tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: check_mode_result - - name: create spec for db server vm using software profile and names of profile - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True + - name: Create spec for db server vm using software profile and names of profile + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -60,25 +57,25 @@ name: "{{ cluster.cluster1.name }}" password: "{{ 
vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - name: create db server vm using software profile - ntnx_ndb_db_server_vms: - wait: True + - name: Create db server vm using software profile + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -89,232 +86,226 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - - name: update db server vm name, desc, credentials, tags - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - reset_name_in_ntnx_cluster: True - reset_desc_in_ntnx_cluster: True + - name: Update db server vm name, desc, credentials, tags + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" + name: "{{ vm1_name_updated }}" + desc: ansible-created-vm1-updated-desc + 
reset_name_in_ntnx_cluster: true + reset_desc_in_ntnx_cluster: true update_credentials: - - username: "{{vm_username}}" - password: "{{vm_password}}" + - username: "{{ vm_username }}" + password: "{{ vm_password }}" tags: ansible-db-server-vms: ansible-updated register: result - - name: create spec for update db server vm credentials - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" + - name: Create spec for update db server vm credentials + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" update_credentials: - - username: "user" - password: "pass" + - username: user + password: pass register: result - - name: List NDB db_servers - ntnx_ndb_db_servers_info: + nutanix.ncp.ntnx_ndb_db_servers_info: register: db_servers - - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using it's name + nutanix.ncp.ntnx_ndb_db_servers_info: filters: load_metrics: true - load_databases: True + load_databases: true value_type: name - value: "{{db_servers.response[0].name}}" + value: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using it's ip - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using it's ip + nutanix.ncp.ntnx_ndb_db_servers_info: filters: value_type: ip - value: "{{db_servers.response[0].ipAddresses[0]}}" + value: "{{ db_servers.response[0].ipAddresses[0] }}" register: result - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: - name: "{{db_servers.response[0].name}}" + - name: Get NDB db_servers using it's name + nutanix.ncp.ntnx_ndb_db_servers_info: + name: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using it's id - ntnx_ndb_db_servers_info: - uuid: "{{db_servers.response[0].id}}" + - name: Get NDB db_servers using it's id + nutanix.ncp.ntnx_ndb_db_servers_info: + uuid: "{{ db_servers.response[0].id }}" register: result - - name: get 
NDB db_servers using ip - ntnx_ndb_db_servers_info: - server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + - name: Get NDB db_servers using ip + nutanix.ncp.ntnx_ndb_db_servers_info: + server_ip: "{{ db_servers.response[0].ipAddresses[0] }}" register: result ################################### maintenance tasks update tasks ############################# - - name: create spec for adding maintenance window tasks to db server vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec for adding maintenance window tasks to db server vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" - - uuid: "test_vm_1" + - name: "{{ vm1_name_updated }}" + - uuid: test_vm_1 db_server_clusters: - - uuid: "test_cluter_1" - - uuid: "test_cluter_2" + - uuid: test_cluter_1 + - uuid: test_cluter_2 maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result - - name: create spec for removing maintenance window tasks from above created vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec for removing maintenance window tasks from above created vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - - - name: remove maintenance tasks - ntnx_ndb_maintenance_tasks: + - name: Remove maintenance tasks + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: 
"{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - name: Add maitenance window task for vm - ntnx_ndb_maintenance_tasks: + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" + - name: "{{ vm1_name_updated }}" maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result ################################### DB server VM unregistration tasks ############################# - - name: generate check mode spec for unregister with default values - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Generate check mode spec for unregister with default values + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" register: result - - name: genereate check mode spec for delete vm with vgs and snapshots - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + - name: Genereate check mode spec for delete vm with vgs and snapshots + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result - - name: unregister vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: False - soft_remove: True - delete_vgs: True - delete_vm_snapshots: True + - name: Unregister vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + 
delete_from_cluster: false + soft_remove: true + delete_vgs: true + delete_vm_snapshots: true register: result ################################### DB server VM Registration tasks ############################# - - - name: generate spec for registeration of the previous unregistered vm using check mode - check_mode: yes - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Generate spec for registeration of the previous unregistered vm using check mode + check_mode: true + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: - software_path: "{{postgres.software_home}}" - private_ssh_key: "check-key" - username: "{{vm_username}}" + software_path: "{{ postgres.software_home }}" + private_ssh_key: check-key + username: "{{ vm_username }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" - working_directory: "/check" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F + working_directory: /check register: result - - name: register the previous unregistered vm - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Register the previous unregistered vm + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: listener_port: 5432 - software_path: "{{postgres.software_home}}" - username: "{{vm_username}}" - password: "{{vm_password}}" + software_path: "{{ postgres.software_home }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" automated_patching: 
maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result ################################### DB server VM Delete tasks ############################# - - - name: unregister db server vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Unregister db server vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" delete_from_cluster: false - delete_vgs: True - delete_vm_snapshots: True + delete_vgs: true + delete_vm_snapshots: true register: result diff --git a/examples/ndb/provision_database_on_registered_db_server.yml b/examples/ndb/provision_database_on_registered_db_server.yml index ca196cc88..110d010f1 100644 --- a/examples/ndb/provision_database_on_registered_db_server.yml +++ b/examples/ndb/provision_database_on_registered_db_server.yml @@ -2,8 +2,6 @@ - name: Single instance postgres database creation on registered db server hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,9 +10,8 @@ validate_certs: false tasks: - - name: Create single instance postgres database on registered db server vm - ntnx_ndb_databases: + nutanix.ncp.ntnx_ndb_databases: name: POSTGRES_DATABASE_ANSIBLE @@ -46,5 +43,6 @@ register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/provision_postgres_ha_instance_with_ips.yml b/examples/ndb/provision_postgres_ha_instance_with_ips.yml index 00e95fc68..e9c3621b8 100644 --- a/examples/ndb/provision_postgres_ha_instance_with_ips.yml +++ b/examples/ndb/provision_postgres_ha_instance_with_ips.yml @@ 
-4,8 +4,6 @@ - name: Create stretched vlan hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -14,64 +12,64 @@ validate_certs: false tasks: - - name: create HA instance postgres database with static IP assignments to vms and cluster IP - ntnx_ndb_databases: + - name: Create HA instance postgres database with static IP assignments to vms and cluster IP + nutanix.ncp.ntnx_ndb_databases: wait: true timeout: 5400 - name: "" - desc: "ansible-created-db-desc" + name: + desc: ansible-created-db-desc db_params_profile: - name: "" + name: db_server_cluster: new_cluster: - name: "" + name: cluster: - name: "" + name: ips: - cluster: - name: "" - ip: "" + name: + ip: software_profile: - name: "" + name: network_profile: - name: "" + name: compute_profile: - name: "" - password: "" - pub_ssh_key: "" + name: + password: + pub_ssh_key: vms: - - name: "vm-1" - node_type: "database" - role: "Primary" - ip: "" + - name: vm-1 + node_type: database + role: Primary + ip: - - name: "vm-2" - node_type: "database" - role: "Secondary" - ip: "" + - name: vm-2 + node_type: database + role: Secondary + ip: - - name: "vm-3" - node_type: "database" - role: "Secondary" - ip: "" + - name: vm-3 + node_type: database + role: Secondary + ip: - - name: "vm-ha-proxy1" - node_type: "haproxy" - ip: "" + - name: vm-ha-proxy1 + node_type: haproxy + ip: - - name: "vm-ha-proxy2" - node_type: "haproxy" - ip: "" + - name: vm-ha-proxy2 + node_type: haproxy + ip: postgres: - type: "ha" + type: ha db_name: testAnsible - db_password: "" + db_password: db_size: 200 - patroni_cluster_name: "" + patroni_cluster_name: ha_proxy: provision_virtual_ip: true @@ -79,7 +77,7 @@ name: TM2 desc: TM-desc sla: - name: "" + name: schedule: daily: "11:10:02" weekly: WEDNESDAY @@ -88,22 +86,23 @@ log_catchup: 30 snapshots_per_day: 2 clusters: - - name: "" + - name: tags: - ansible-databases: "ha-instance-dbs" + ansible-databases: ha-instance-dbs 
automated_patching: maintenance_window: - name: "" + name: tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - debug: + - name: Print output + ansible.builtin.debug: msg: "{{ result }}" diff --git a/examples/ndb/refresh_clone.yml b/examples/ndb/refresh_clone.yml index 3806fb9d9..a420d28f4 100644 --- a/examples/ndb/refresh_clone.yml +++ b/examples/ndb/refresh_clone.yml @@ -2,8 +2,6 @@ - name: Create Refresh clone hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,29 +10,31 @@ validate_certs: false tasks: - - set_fact: + - name: Set vars + ansible.builtin.set_fact: clone_db: uuid: snapshot: uuid: - - name: create spec for refresh clone to a pitr timestamp - check_mode: yes - ntnx_ndb_database_clone_refresh: - uuid: "{{clone_db.uuid}}" + - name: Create spec for refresh clone to a pitr timestamp + check_mode: true + nutanix.ncp.ntnx_ndb_database_clone_refresh: + uuid: "{{ clone_db.uuid }}" pitr_timestamp: "2023-02-04 07:29:36" - timezone: "UTC" + timezone: UTC register: output + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" - - debug: - msg: "{{output}}" - - - name: refresh db clone - ntnx_ndb_database_clone_refresh: - uuid: "{{clone_db.uuid}}" - snapshot_uuid: "{{snapshot.uuid}}" + - name: Refresh db clone + nutanix.ncp.ntnx_ndb_database_clone_refresh: + uuid: "{{ clone_db.uuid }}" + snapshot_uuid: "{{ snapshot.uuid }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/registr_cluster.yml b/examples/ndb/registr_cluster.yml index 80108160d..c425abba4 100644 --- a/examples/ndb/registr_cluster.yml +++ 
b/examples/ndb/registr_cluster.yml @@ -2,8 +2,6 @@ - name: NDB cluster creation hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,34 +10,34 @@ validate_certs: false tasks: - - name: NDB cluster creation - ntnx_ndb_clusters: - name: "{{cluster.name}}" - desc: "{{cluster.desc}}" - name_prefix: "{{cluster.name_prefix}}" - cluster_ip: "{{cluster.cluster_ip}}" + nutanix.ncp.ntnx_ndb_clusters: + name: "{{ cluster.name }}" + desc: "{{ cluster.desc }}" + name_prefix: "{{ cluster.name_prefix }}" + cluster_ip: "{{ cluster.cluster_ip }}" cluster_credentials: - username: "{{cluster_credentials.username}}" - password: "{{cluster_credentials.password}}" + username: "{{ cluster_credentials.username }}" + password: "{{ cluster_credentials.password }}" agent_network: dns_servers: - - "{{agent_network.dns_servers[0]}}" - - "{{agent_network.dns_servers[1]}}" + - "{{ agent_network.dns_servers[0] }}" + - "{{ agent_network.dns_servers[1] }}" ntp_servers: - - "{{agent_network.ntp_servers[0]}}" - - "{{agent_network.ntp_servers[1]}}" - - "{{agent_network.ntp_servers[2]}}" - - "{{agent_network.ntp_servers[3]}}" + - "{{ agent_network.ntp_servers[0] }}" + - "{{ agent_network.ntp_servers[1] }}" + - "{{ agent_network.ntp_servers[2] }}" + - "{{ agent_network.ntp_servers[3] }}" vlan_access: prism_vlan: - vlan_name: "{{prism_vlan.vlan_name}}" - vlan_type: "{{prism_vlan.vlan_type}}" - static_ip: "{{prism_vlan.static_ip}}" - gateway: "{{prism_vlan.gateway}}" - subnet_mask: "{{prism_vlan.subnet_mask}}" - storage_container: "{{storage_container.name}}" + vlan_name: "{{ prism_vlan.vlan_name }}" + vlan_type: "{{ prism_vlan.vlan_type }}" + static_ip: "{{ prism_vlan.static_ip }}" + gateway: "{{ prism_vlan.gateway }}" + subnet_mask: "{{ prism_vlan.subnet_mask }}" + storage_container: "{{ storage_container.name }}" register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output 
}}" diff --git a/examples/ndb/single_instance_postgress_database.yml b/examples/ndb/single_instance_postgress_database.yml index 8f8f83339..62f8e5351 100644 --- a/examples/ndb/single_instance_postgress_database.yml +++ b/examples/ndb/single_instance_postgress_database.yml @@ -2,8 +2,6 @@ - name: Single instance postgres database creation with new db server VM hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,9 +10,8 @@ validate_certs: false tasks: - - name: Create single instance postgres database - ntnx_ndb_databases: + nutanix.ncp.ntnx_ndb_databases: name: POSTGRES_DATABASE_ANSIBLE @@ -33,7 +30,7 @@ name: DEFAULT_OOB_POSTGRESQL_NETWORK compute_profile: name: DEFAULT_OOB_SMALL_COMPUTE - pub_ssh_key: "" + pub_ssh_key: postgres: listener_port: "5432" @@ -56,5 +53,6 @@ register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/soft_delete_database_instance.yml b/examples/ndb/soft_delete_database_instance.yml index 07b15fab1..0ba663012 100644 --- a/examples/ndb/soft_delete_database_instance.yml +++ b/examples/ndb/soft_delete_database_instance.yml @@ -2,8 +2,6 @@ - name: Soft delete single instance database and time machine associated hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,14 +10,14 @@ validate_certs: false tasks: - - name: Soft delete single instance database and time machine associated - ntnx_ndb_databases: - state: "absent" + nutanix.ncp.ntnx_ndb_databases: + state: absent db_uuid: c0a4433a-49f2-40f3-ae52-d88788d2824b soft_delete: true delete_time_machine: true register: output - - debug: - msg: "{{output}}" + - name: Print output + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/ndb/software_profiles.yml b/examples/ndb/software_profiles.yml index fa22d873c..3af55b2b2 100644 --- 
a/examples/ndb/software_profiles.yml +++ b/examples/ndb/software_profiles.yml @@ -10,8 +10,6 @@ - name: Create software profiles hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -20,168 +18,168 @@ validate_certs: false tasks: - - name: create software profile create spec - check_mode: yes - ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "{{profile1_name}}-desc" - type: "software" - database_type: "postgres" + - name: Create software profile create spec + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile1_name }}" + desc: "{{ profile1_name }}-desc" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" clusters: - - name: "" - - uuid: "" + - name: + - uuid: register: result - - name: create software profile with base version and cluster instance topology. Replicate to multiple clusters - ntnx_ndb_profiles: - name: "{{profile1_name}}-replicated" - desc: "{{profile1_name}}-desc-replicated" - type: "software" - database_type: "postgres" + - name: Create software profile with base version and cluster instance topology. 
Replicate to multiple clusters + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile1_name }}-replicated" + desc: "{{ profile1_name }}-desc-replicated" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "" - - uuid: "" + - name: + - uuid: register: result - - name: create software profile with base version and single instance topology - ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "{{profile2_name}}-desc" - type: "software" - database_type: "postgres" + - name: Create software profile with base version and single instance topology + nutanix.ncp.ntnx_ndb_profiles: + name: "{{ profile2_name }}" + desc: "{{ profile2_name }}-desc" + type: software + database_type: postgres software: - topology: "single" - name: "v1.0" - desc: "v1.0-desc" + topology: single + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "" + - name: register: result - - name: update software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - name: "{{profile1_name}}-updated1" - desc: "{{profile1_name}}-desc-updated" + - name: Update software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + name: "{{ profile1_name }}-updated1" + desc: "{{ profile1_name }}-desc-updated" register: result - - name: create software profile version spec - check_mode: yes - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create software profile version spec + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: 
postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" register: result - - name: create software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" register: result - - name: create spec for update software profile version - check_mode: yes - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Create spec for update software profile version + check_mode: true + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - name: update software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + - name: Update software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - name: publish software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Publish software 
profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - publish: True + version_uuid: "{{ version_uuid }}" + publish: true register: result - - name: unpublish software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Unpublish software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" + version_uuid: "{{ version_uuid }}" publish: false register: result - - name: deprecate software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Deprecate software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - deprecate: True + version_uuid: "{{ version_uuid }}" + deprecate: true register: result - - name: delete software profile version - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Delete software profile version + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - state: "absent" + version_uuid: "{{ version_uuid }}" + state: absent register: result - - name: replicate software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + - name: Replicate software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" register: result - - name: delete software profile - ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - state: "absent" + - name: Delete software profile + nutanix.ncp.ntnx_ndb_profiles: + profile_uuid: "{{ profile_uuid }}" + state: absent register: result diff --git a/examples/pbr.yml b/examples/pbr.yml index 3e36f8097..eca3ac302 100644 --- a/examples/pbr.yml +++ b/examples/pbr.yml @@ -2,8 +2,6 @@ - name: PBR playbook hosts: localhost gather_facts: false - 
collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,28 +10,28 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: cluster_name: "" cluster_uuid: "" priority: "" vpc_uuid: "" - - name: create PBR with vpc uuid with any source or destination or protocol with deny action - ntnx_pbrs: - state: present - priority: "{{ priority }}" - vpc: - uuid: "{{ vpc_uuid }}" - source: - any: True - destination: - any: True - action: - deny: True - protocol: - any: True + - name: Create PBR with vpc uuid with any source or destination or protocol with deny action + nutanix.ncp.ntnx_pbrs: + state: present + priority: "{{ priority }}" + vpc: + uuid: "{{ vpc_uuid }}" + source: + any: true + destination: + any: true + action: + deny: true + protocol: + any: true register: result - name: Delete pbrs - ntnx_pbrs: + nutanix.ncp.ntnx_pbrs: state: absent pbr_uuid: "{{ result.pbr_uuid }}" diff --git a/examples/pbr_info.yml b/examples/pbr_info.yml index d59512893..ae712a1ad 100644 --- a/examples/pbr_info.yml +++ b/examples/pbr_info.yml @@ -2,8 +2,6 @@ - name: PBR_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,17 +10,16 @@ validate_certs: false tasks: - - name: List pbrs using length and offset - ntnx_pbrs_info: + nutanix.ncp.ntnx_pbrs_info: length: 1 offset: 0 register: result - ignore_errors: True + ignore_errors: true - name: List pbrs using ascending priority sorting - ntnx_pbrs_info: + nutanix.ncp.ntnx_pbrs_info: sort_order: "ASCENDING" sort_attribute: "priority" register: result - ignore_errors: True + ignore_errors: true diff --git a/examples/permissions_info.yml b/examples/permissions_info.yml index 8b8bc3960..d84376659 100644 --- a/examples/permissions_info.yml +++ b/examples/permissions_info.yml @@ -2,31 +2,29 @@ - name: PC permissions hosts: localhost gather_facts: false - collections: - - nutanix.ncp 
module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false tasks: - - name: get all permissions - ntnx_permissions_info: - register: op1 + - name: Get all permissions + nutanix.ncp.ntnx_permissions_info: + register: op1 - - name: get permissions using filter - ntnx_permissions_info: - filter: - name: - register: op2 + - name: Get permissions using filter + nutanix.ncp.ntnx_permissions_info: + filter: + name: + register: op2 - - name: get permission using uuid - ntnx_permissions_info: - permission_uuid: - register: op3 + - name: Get permission using uuid + nutanix.ncp.ntnx_permissions_info: + permission_uuid: + register: op3 - - name: output - debug: - msg: "{{ op3 }}" + - name: Output + ansible.builtin.debug: + msg: "{{ op3 }}" diff --git a/examples/projects_crud.yml b/examples/projects_crud.yml index ff785196a..1a715edf5 100644 --- a/examples/projects_crud.yml +++ b/examples/projects_crud.yml @@ -1,8 +1,6 @@ -- name: projects crud playbook. Here we will create, update, read and delete the project. +- name: Projects crud playbook. Here we will create, update, read and delete the project. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a project - ntnx_projects: + nutanix.ncp.ntnx_projects: name: "test-ansible-project-1" desc: "desc-123" subnets: @@ -31,8 +29,8 @@ - name: register: project1 - - name: update project - ntnx_projects: + - name: Update project + nutanix.ncp.ntnx_projects: state: present project_uuid: "{{project1.project_uuid}}" name: "test-ansible-project-1" @@ -45,16 +43,16 @@ register: updated_project - name: Read the updated project - ntnx_projects_info: + nutanix.ncp.ntnx_projects_info: project_uuid: "{{updated_project.project_uuid}}" register: project_info - name: Print the project details - debug: + ansible.builtin.debug: msg: "{{project_info}}" - name: Delete the project - ntnx_projects: + nutanix.ncp.ntnx_projects: state: absent project_uuid: "{{updated_project.project_uuid}}" register: op diff --git a/examples/projects_with_role_mapping.yml b/examples/projects_with_role_mapping.yml index c66fb47d5..e289cc271 100644 --- a/examples/projects_with_role_mapping.yml +++ b/examples/projects_with_role_mapping.yml @@ -1,8 +1,6 @@ -- name: projects crud playbook. Here we will create, update, read and delete the project with role mappings. +- name: Projects crud playbook. Here we will create, update, read and delete the project with role mappings. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Create a project with role mappings - ntnx_projects: + nutanix.ncp.ntnx_projects: name: "test-ansible-project-1" desc: "desc-123" clusters: @@ -30,18 +28,18 @@ - name: accounts: - name: - collaboration: True + collaboration: true role_mappings: - user: uuid: role: name: "Project Admin" - user_group: - uuid: + uuid: role: name: "Developer" - user: - uuid: + uuid: role: name: "Consumer" - user: @@ -57,13 +55,13 @@ name: "Consumer" register: project1 - - name: update role mappings of project - ntnx_projects: + - name: Update role mappings of project + nutanix.ncp.ntnx_projects: state: present project_uuid: "{{project1.project_uuid}}" name: "test-ansible-project-1" desc: "test-ansible-project-1-updated" - collaboration: True + collaboration: true role_mappings: - user: uuid: @@ -76,17 +74,17 @@ register: updated_project - name: Read the updated project - ntnx_projects_info: + nutanix.ncp.ntnx_projects_info: project_uuid: "{{updated_project.project_uuid}}" include_acps: true register: project_info - name: Print the project details - debug: + ansible.builtin.debug: msg: "{{project_info}}" - name: Delete the project - ntnx_projects: + nutanix.ncp.ntnx_projects: state: absent project_uuid: "{{updated_project.project_uuid}}" register: op diff --git a/examples/roles_crud.yml b/examples/roles_crud.yml index b01c02eca..d364c804f 100644 --- a/examples/roles_crud.yml +++ b/examples/roles_crud.yml @@ -1,8 +1,6 @@ - name: Roles crud playbook. Here we will create, update, read and delete the role. 
hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -10,12 +8,12 @@ nutanix_password: validate_certs: false tasks: - - name: get some permissions for adding in roles - ntnx_permissions_info: + - name: Get some permissions for adding in roles + nutanix.ncp.ntnx_permissions_info: register: permissions - name: Create a role with 2 permissions. Here we will be using name or uuid for referenceing permissions - ntnx_roles: + nutanix.ncp.ntnx_roles: state: present name: test-ansible-role-1 desc: @@ -26,7 +24,7 @@ register: role1 - name: Update role - ntnx_roles: + nutanix.ncp.ntnx_roles: state: present role_uuid: "{{ role1.role_uuid }}" name: test-ansible-role-1 @@ -36,16 +34,16 @@ register: updated_role1 - name: Read the updated role - ntnx_roles_info: + nutanix.ncp.ntnx_roles_info: role_uuid: "{{ updated_role1.role_uuid }}" register: role1_info - name: Print the role details - debug: + ansible.builtin.debug: msg: "{{role1_info}}" - name: Delete the role. 
- ntnx_roles: + nutanix.ncp.ntnx_roles: state: absent role_uuid: "{{ updated_role1.role_uuid }}" wait: true diff --git a/examples/static_routes.yml b/examples/static_routes.yml index 846168e3a..c3a54a6ce 100644 --- a/examples/static_routes.yml +++ b/examples/static_routes.yml @@ -2,8 +2,6 @@ - name: Static Routes playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,15 +10,15 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vpc_uuid: "" vpn_uuid: "" external_nat_subnet: name: "" - uuid: "" + uuid: "" - - name: create static routes and default static routes with external nat subnet - ntnx_static_routes: + - name: Create static routes and default static routes with external nat subnet + nutanix.ncp.ntnx_static_routes: vpc_uuid: "{{ vpc_uuid }}" static_routes: - destination: "0.0.0.0/0" @@ -40,7 +38,7 @@ vpn_connection_ref: uuid: "{{ vpn_uuid }}" - - name: remove all routes excluding dynamic and local routes - ntnx_static_routes: + - name: Remove all routes excluding dynamic and local routes + nutanix.ncp.ntnx_static_routes: vpc_uuid: "{{ vpc_uuid }}" remove_all_routes: true diff --git a/examples/subnet.yml b/examples/subnet.yml index c77be095a..68003406b 100644 --- a/examples/subnet.yml +++ b/examples/subnet.yml @@ -1,8 +1,6 @@ - name: Subnet playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,7 +9,7 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: cluster_name: "" cluster_uuid: "" virtual_switch_name: "" @@ -30,71 +28,71 @@ vpc_name: "" vpc_uuid: "" - - name: 'VLAN subnet with IPAM, IP pools and DHCP' - ntnx_subnets: + - name: "VLAN subnet with IPAM, IP pools and DHCP" + nutanix.ncp.ntnx_subnets: state: present name: VLAN subnet with IPAM IP pools and DHCP vlan_subnet: vlan_id: 29 virtual_switch: - 
name: '{{ virtual_switch_name }}' + name: "{{ virtual_switch_name }}" cluster: - name: '{{ cluster_name }}' + name: "{{ cluster_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" dhcp: - dns_servers: '{{ dns_servers }}' - domain_search: '{{ domain_search }}' - domain_name: '{{ domain_name }}' - tftp_server_name: '{{ tftp_server_name }}' - boot_file: '{{ boot_file }}' - dhcp_server_ip: '{{ dhcp_server_address }}' + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file: "{{ boot_file }}" + dhcp_server_ip: "{{ dhcp_server_address }}" register: result ignore_errors: true - name: External subnet with NAT - ntnx_subnets: + nutanix.ncp.ntnx_subnets: state: present - name: ' External subnet with NAT ' + name: " External subnet with NAT " external_subnet: vlan_id: 30 enable_nat: true cluster: - name: '{{ cluster_name }}' + name: "{{ cluster_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" register: result ignore_errors: true - name: Overlay Subnet with IP_pools and DHCP - ntnx_subnets: + nutanix.ncp.ntnx_subnets: state: present name: Overlay Subnet with IP_pools and DHCP overlay_subnet: vpc: - name: '{{ vpc_name }}' + name: "{{ vpc_name }}" ipam: - network_ip: '{{ network_ip }}' - network_prefix: '{{ 
network_prefix }}' - gateway_ip: '{{ gateway_ip_address }}' + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" ip_pools: - - start_ip: '{{ start_address }}' - end_ip: '{{ end_address }}' + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" dhcp: - dns_servers: '{{ dns_servers }}' - domain_search: '{{ domain_search }}' - domain_name: '{{ domain_name }}' - tftp_server_name: '{{ tftp_server_name }}' - boot_file_name: '{{ boot_file }}' + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file_name: "{{ boot_file }}" register: result ignore_errors: true diff --git a/examples/subnet_info.yml b/examples/subnet_info.yml index ce5bb045d..e4cf5e36d 100644 --- a/examples/subnet_info.yml +++ b/examples/subnet_info.yml @@ -2,8 +2,6 @@ - name: Subnet_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -13,25 +11,25 @@ tasks: - name: List subnets using subnet_type filter criteria - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: filter: - subnet_type: "VLAN" + subnet_type: "VLAN" kind: subnet register: result - ignore_errors: True + ignore_errors: true - name: List subnets using length, offset and vlan_id ascending sorting - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: length: 1 offset: 2 sort_order: "ASCENDING" sort_attribute: "vlan_id" check_mode: true register: result - ignore_errors: True + ignore_errors: true - name: List subnets filter and custom_filter - ntnx_subnets_info: + nutanix.ncp.ntnx_subnets_info: filter: name: custom_filter: diff --git a/examples/user-groups.yml b/examples/user-groups.yml index 9dcc96e91..2075ee485 100644 --- a/examples/user-groups.yml +++ b/examples/user-groups.yml @@ -1,9 +1,7 @@ --- -- name: user_group playbook +- name: User_group playbook hosts: localhost 
gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,8 +9,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: distinguished_name: "" principal_name: "" directory_service_uuid: "" @@ -20,31 +18,31 @@ project: uuid: "" - - name: create user group - ntnx_user_groups: - distinguished_name: "{{distinguished_name}}" - project: - uuid: "{{project.uuid}}" - categories: - Environment: - - "Dev" - register: result + - name: Create user group + nutanix.ncp.ntnx_user_groups: + distinguished_name: "{{distinguished_name}}" + project: + uuid: "{{project.uuid}}" + categories: + Environment: + - "Dev" + register: result - - name: delete user group - ntnx_user_groups: - state: absent - user_group_uuid: "{{result.user_group_uuid}}" - register: result + - name: Delete user group + nutanix.ncp.ntnx_user_groups: + state: absent + user_group_uuid: "{{result.user_group_uuid}}" + register: result - - name: create user group with idp - ntnx_user_groups: - idp: - idp_uuid: "{{identity_provider_uuid}}" - group_name: test_group_987 - register: result + - name: Create user group with idp + nutanix.ncp.ntnx_user_groups: + idp: + idp_uuid: "{{identity_provider_uuid}}" + group_name: test_group_987 + register: result - - name: delete user group - ntnx_user_groups: - state: absent - user_group_uuid: "{{result.user_group_uuid}}" - register: result + - name: Delete user group + nutanix.ncp.ntnx_user_groups: + state: absent + user_group_uuid: "{{result.user_group_uuid}}" + register: result diff --git a/examples/user.yml b/examples/user.yml index e70b61b14..1afca3252 100644 --- a/examples/user.yml +++ b/examples/user.yml @@ -1,9 +1,7 @@ --- -- name: users playbook +- name: Users playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,51 +9,51 @@ nutanix_password: 
validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: directory_service_uuid: "" principal_name: "" project: uuid: "" identity_provider_uuid: "" - - name: create local user - ntnx_users: - principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" - register: result + - name: Create local user + nutanix.ncp.ntnx_users: + principal_name: "{{principal_name}}" + directory_service_uuid: "{{directory_service_uuid}}" + register: result - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ result.user_uuid }}" - - name: create local user with project and categories - ntnx_users: - principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" - project: - uuid: "{{project.uuid}}" - categories: - Environment: - - "Dev" - AppType: - - "Default" - register: result + - name: Create local user with project and categories + nutanix.ncp.ntnx_users: + principal_name: "{{principal_name}}" + directory_service_uuid: "{{directory_service_uuid}}" + project: + uuid: "{{project.uuid}}" + categories: + Environment: + - "Dev" + AppType: + - "Default" + register: result - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ result.user_uuid }}" - - name: create idp user - ntnx_users: - identity_provider_uuid: "{{identity_provider_uuid}}" - username: testing_user - register: result - ignore_errors: true + - name: Create idp user + nutanix.ncp.ntnx_users: + identity_provider_uuid: "{{identity_provider_uuid}}" + username: testing_user + register: result + ignore_errors: true - - name: Delete created user - ntnx_users: - state: absent - user_uuid: "{{ result.user_uuid }}" + - name: Delete created user + 
nutanix.ncp.ntnx_users: + state: absent + user_uuid: "{{ result.user_uuid }}" diff --git a/examples/vm.yml b/examples/vm.yml index f88ab7064..e6c83b471 100644 --- a/examples/vm.yml +++ b/examples/vm.yml @@ -2,8 +2,6 @@ - name: VM playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -11,8 +9,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -20,55 +18,56 @@ password: "" fqdn: "" - - name: Create Cloud-init Script file - copy: - dest: "cloud_init.yml" - content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: "{{ fqdn }}" + - name: Create Cloud-init Script file + ansible.builtin.copy: + mode: "0644" + dest: "cloud_init.yml" + content: | + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: "{{ fqdn }}" - - name: create Vm - ntnx_vms: - state: present - name: "ansible_automation_demo" - desc: "ansible_vm_description" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{cluster_name}}" - networks: - - is_connected: True - subnet: - name: "{{ subnet_name }}" - # mention cluster only when there are multiple subnets with same name accross clusters - # and subnet name is set above - cluster: - name: "{{cluster_name}}" - disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ image_name }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: output + - name: Create Vm + nutanix.ncp.ntnx_vms: + state: present + name: "ansible_automation_demo" + desc: "ansible_vm_description" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{cluster_name}}" + networks: + - is_connected: true + subnet: + name: "{{ subnet_name }}" + # 
mention cluster only when there are multiple subnets with same name accross clusters + # and subnet name is set above + cluster: + name: "{{cluster_name}}" + disks: + - type: "DISK" + size_gb: 30 + bus: "SATA" + clone_image: + name: "{{ image_name }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true + register: output - - name: output of vm created - debug: - msg: '{{ output }}' + - name: Output of vm created + ansible.builtin.debug: + msg: "{{ output }}" - - name: delete VM - ntnx_vms: - state: absent - vm_uuid: "{{output.vm_uuid}}" + - name: Delete VM + nutanix.ncp.ntnx_vms: + state: absent + vm_uuid: "{{output.vm_uuid}}" diff --git a/examples/vm_info.yml b/examples/vm_info.yml index 309102064..6ae7dc8e3 100644 --- a/examples/vm_info.yml +++ b/examples/vm_info.yml @@ -2,8 +2,6 @@ - name: VM_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,34 +10,34 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vm_name: "" - name: List vms using name filter criteria - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter: vm_name: "{{ vm_name }}" kind: vm register: result - ignore_errors: True + ignore_errors: true - name: List vms using FIQL filter string - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter_string: "vm_name=={{vm.name}};power_state==off" register: result - ignore_errors: True + ignore_errors: true - name: List vms using length, offset and ascending vm_name sorting - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: length: 10 offset: 1 sort_order: "ASCENDING" sort_attribute: "vm_name" register: result - ignore_errors: True + ignore_errors: true - name: List vms using filter and custom_filter - ntnx_vms_info: + nutanix.ncp.ntnx_vms_info: filter: vm_name: custom_filter: diff --git a/examples/vm_operations.yml 
b/examples/vm_operations.yml index c6bef4dbc..18bd53c18 100644 --- a/examples/vm_operations.yml +++ b/examples/vm_operations.yml @@ -3,8 +3,6 @@ - name: VM operations playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,31 +10,31 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: script_path: "" subnet_name: "" vm_uuid: "" - - name: hard power off the vm - ntnx_vms: + - name: Hard power off the vm + nutanix.ncp.ntnx_vms: state: hard_poweroff vm_uuid: "{{ vm_uuid }}" - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: create_ova_image while vm is on - ntnx_vms_ova: + - name: Create_ova_image while vm is on + nutanix.ncp.ntnx_vms_ova: state: present src_vm_uuid: "{{ vm_uuid }}" name: integration_test_VMDK_ova file_format: VMDK wait: true - register: result - ignore_errors: true + register: result + ignore_errors: true - - name: clone vm while it's off also add network and script - ntnx_vms_clone: + - name: Clone vm while it's off also add network and script + nutanix.ncp.ntnx_vms_clone: state: present src_vm_uuid: "{{ vm_uuid }}" networks: @@ -46,6 +44,6 @@ guest_customization: type: "cloud_init" script_path: "{{ script_path }}" - is_overridable: True - register: result - ignore_errors: true + is_overridable: true + register: result + ignore_errors: true diff --git a/examples/vm_update.yml b/examples/vm_update.yml index 45de3d642..33865a528 100644 --- a/examples/vm_update.yml +++ b/examples/vm_update.yml @@ -3,8 +3,6 @@ - name: VM update playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,8 +10,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + ansible.builtin.set_fact: cluster_name: "" 
script_path: "" subnet_name: "" @@ -26,56 +24,56 @@ remove_disk_uuid: "" subnet_uuid: "" - - name: Update VM - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: updated - desc: updated - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ image_name }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 3 - bus: PCI - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network_dhcp_uuid }}" - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network_static_ip }}" - register: result + - name: Update VM + nutanix.ncp.ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: updated + desc: updated + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ image_name }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 3 + bus: PCI + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + uuid: "{{ storage_container_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network_dhcp_uuid }}" + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ network_static_ip }}" + register: result - - name: Update VM by deleting and editing disks and subnets - ntnx_vms: - vm_uuid: "{{ vm_uuid }}" - name: update diks - desc: update disks - disks: - - type: "DISK" - uuid: "{{ disk_uuid }}" - size_gb: 30 - - state: absent - uuid: "{{ remove_disk_uuid }}" - networks: - - state: absent - uuid: "{{ subnet_uuid }}" - register: result + - name: Update VM by deleting and editing disks and subnets + nutanix.ncp.ntnx_vms: + vm_uuid: "{{ vm_uuid }}" + name: update diks + desc: update disks + disks: + - type: "DISK" + uuid: "{{ disk_uuid }}" + size_gb: 30 + - state: absent + uuid: "{{ remove_disk_uuid }}" + networks: + - state: absent + uuid: "{{ subnet_uuid }}" + register: result - - name: Update VM by deleting it - ntnx_vms: - state: absent - vm_uuid: "{{ 
vm_uuid }}" - register: result + - name: Update VM by deleting it + nutanix.ncp.ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result diff --git a/examples/vpc.yml b/examples/vpc.yml index 3af08fa53..5bf24dc1e 100644 --- a/examples/vpc.yml +++ b/examples/vpc.yml @@ -2,8 +2,6 @@ - name: VPC playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,21 +10,21 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: external_subnet_name: "" vm_name: "" - name: Create min VPC with subnet name - ntnx_vpcs: + nutanix.ncp.ntnx_vpcs: state: present - wait: True + wait: true name: MinVPC external_subnets: - subnet_name: "{{ external_subnet.name }}" register: result - name: Delete all created vpcs - ntnx_vpcs: + nutanix.ncp.ntnx_vpcs: state: absent vpc_uuid: "{{ result.vpc_uuid }}" register: result diff --git a/examples/vpc_info.yml b/examples/vpc_info.yml index 652d157fd..583a2ba6f 100644 --- a/examples/vpc_info.yml +++ b/examples/vpc_info.yml @@ -2,8 +2,6 @@ - name: VPC_Info playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,24 +10,23 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: vpc_name: "" - name: List VPC using name filter criteria - ntnx_vpcs_info: + nutanix.ncp.ntnx_vpcs_info: filter: name: "{{ vpc_name }}" kind: vpc register: result - ignore_errors: True - + ignore_errors: true - name: List VPC using length, offset and descending name sorting - ntnx_vpcs_info: + nutanix.ncp.ntnx_vpcs_info: length: 4 offset: 1 sort_order: "DESCENDING" sort_attribute: "name" check_mode: true register: result - ignore_errors: True + ignore_errors: true diff --git a/galaxy.yml b/galaxy.yml index 07f60d06e..1201aba7b 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: "nutanix" name: "ncp" 
-version: "1.9.2" +version: "2.0.0" readme: "README.md" authors: - "Abhishek Chaudhary (@abhimutant)" diff --git a/meta/runtime.yml b/meta/runtime.yml index 3265405b3..7bbd5369b 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -61,6 +61,7 @@ action_groups: - ntnx_foundation_central_api_keys_info - ntnx_karbon_clusters - ntnx_karbon_clusters_info + - ntnx_karbon_clusters_node_pools - ntnx_karbon_registries - ntnx_karbon_registries_info - ntnx_ndb_databases_info @@ -96,4 +97,95 @@ action_groups: - ntnx_ndb_maintenance_window - ntnx_ndb_maintenance_windows_info - ntnx_ndb_slas - - ntnx_karbon_clusters_node_pools + - ntnx_floating_ips_info_v2 + - ntnx_floating_ips_v2 + - ntnx_pbrs_v2 + - ntnx_pbrs_info_v2 + - ntnx_subnets_v2 + - ntnx_subnets_info_v2 + - ntnx_vpcs_info_v2 + - ntnx_vpcs_v2 + - ntnx_permissions_info_v2 + - ntnx_roles_info_v2 + - ntnx_roles_v2 + - ntnx_images_v2 + - ntnx_images_info_v2 + - ntnx_image_placement_policies_v2 + - ntnx_image_placement_policies_info_v2 + - ntnx_vms_ngt_v2 + - ntnx_vms_ngt_update_v2 + - ntnx_vms_ngt_upgrade_v2 + - ntnx_vms_ngt_insert_iso_v2 + - ntnx_vms_ngt_info_v2 + - ntnx_vms_disks_v2 + - ntnx_vms_disks_info_v2 + - ntnx_vms_v2 + - ntnx_vms_info_v2 + - ntnx_vms_categories_v2 + - ntnx_vms_nics_v2 + - ntnx_vms_nics_info_v2 + - ntnx_vms_nics_ip_v2 + - ntnx_vms_nics_migrate_v2 + - ntnx_vms_cd_rom_v2 + - ntnx_vms_cd_rom_info_v2 + - ntnx_vms_cd_rom_iso_v2 + - ntnx_vms_stage_guest_customization_v2 + - ntnx_vms_serial_port_v2 + - ntnx_vms_serial_port_info_v2 + - ntnx_templates_deploy_v2 + - ntnx_templates_guest_os_v2 + - ntnx_templates_v2 + - ntnx_templates_info_v2 + - ntnx_templates_version_v2 + - ntnx_templates_versions_info_v2 + - ntnx_vms_clone_v2 + - ntnx_vms_power_actions_v2 + - ntnx_routes_v2 + - ntnx_routes_info_v2 + - ntnx_route_tables_info_v2 + - ntnx_categories_v2 + - ntnx_categories_info_v2 + - ntnx_volume_groups_v2 + - ntnx_volume_groups_info_v2 + - ntnx_volume_groups_disks_v2 + - ntnx_volume_groups_disks_info_v2 + - 
ntnx_volume_groups_vms_v2 + - ntnx_volume_groups_iscsi_clients_v2 + - ntnx_volume_groups_iscsi_clients_info_v2 + - ntnx_roles_v2 + - ntnx_roles_info_v2 + - ntnx_directory_services_v2 + - ntnx_directory_services_info_v2 + - ntnx_saml_identity_providers_v2 + - ntnx_saml_identity_providers_info_v2 + - ntnx_user_groups_v2 + - ntnx_user_groups_info_v2 + - ntnx_users_v2 + - ntnx_users_info_v2 + - ntnx_operations_info_v2 + - ntnx_authorization_policies_v2 + - ntnx_authorization_policies_info_v2 + - ntnx_security_rules_v2 + - ntnx_security_rules_info_v2 + - ntnx_service_groups_v2 + - ntnx_service_groups_info_v2 + - ntnx_address_groups_v2 + - ntnx_address_groups_info_v2 + - ntnx_clusters_v2 + - ntnx_clusters_info_v2 + - ntnx_hosts_info_v2 + - ntnx_recovery_points_info_v2 + - ntnx_vm_recovery_point_info_v2 + - ntnx_recovery_points_v2 + - ntnx_recovery_point_restore_v2 + - ntnx_vm_revert_v2 + - ntnx_recovery_point_replicate_v2 + - ntnx_gpus_v2 + - ntnx_gpus_info_v2 + - ntnx_clusters_nodes_v2 + - ntnx_nodes_network_info_v2 + - ntnx_pc_registration_v2 + - ntnx_discover_unconfigured_nodes_v2 + - ntnx_storage_containers_stats_v2 + - ntnx_storage_containers_info_v2 + - ntnx_storage_containers_v2 diff --git a/plugins/doc_fragments/ntnx_info_v2.py b/plugins/doc_fragments/ntnx_info_v2.py new file mode 100644 index 000000000..0791d047a --- /dev/null +++ b/plugins/doc_fragments/ntnx_info_v2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Plugin options for ntnx files info + DOCUMENTATION = r""" +options: + filter: + description: + - The filter in FIQL syntax used for the results + type: str + page: + description: + - The number of page + type: int + limit: + description: + - The number of records + type: int + orderby: + 
description: + - The sort order in which results are returned + type: str + select: + description: + - The attribute name to select + type: str +""" diff --git a/plugins/doc_fragments/ntnx_operations_v2.py b/plugins/doc_fragments/ntnx_operations_v2.py new file mode 100644 index 000000000..3bd90af21 --- /dev/null +++ b/plugins/doc_fragments/ntnx_operations_v2.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Plugin options for ntnx files CRUD operations + DOCUMENTATION = r""" +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the item + - >- + If C(state) is set to C(absent) and if the item exists, then + item is removed. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the CRUD operation to complete. 
+ type: bool + required: false + default: True +""" diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index dd6d7fe9b..d87875f1d 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -73,7 +73,7 @@ from ansible.plugins.inventory import BaseInventoryPlugin, Constructable # noqa: E402 -from ..module_utils.prism import vms # noqa: E402 +from ..module_utils.v3.prism import vms # noqa: E402 class Mock_Module: diff --git a/plugins/module_utils/base_module.py b/plugins/module_utils/base_module.py index 3695b4947..39ce0dd1b 100644 --- a/plugins/module_utils/base_module.py +++ b/plugins/module_utils/base_module.py @@ -12,6 +12,7 @@ class BaseModule(AnsibleModule): """Basic module with common arguments""" + unsupported_spec_keys = ["obj"] argument_spec = dict( nutanix_host=dict( type="str", fallback=(env_fallback, ["NUTANIX_HOST"]), required=True ) @@ -38,10 +39,22 @@ class BaseModule(AnsibleModule): def __init__(self, **kwargs): argument_spec = deepcopy(self.argument_spec) if kwargs.get("argument_spec"): - argument_spec.update(kwargs["argument_spec"]) + argument_spec.update(deepcopy(kwargs["argument_spec"])) + self.argument_spec_with_extra_keys = deepcopy(argument_spec) + self.strip_extra_attributes(argument_spec) kwargs["argument_spec"] = argument_spec if not kwargs.get("supports_check_mode"): kwargs["supports_check_mode"] = True super(BaseModule, self).__init__(**kwargs) + + def strip_extra_attributes(self, argument_spec): + """ + This recursive method checks argument spec and removes extra spec definitions which are not allowed in ansible + """ + for spec in argument_spec.values(): + for k in self.unsupported_spec_keys: + spec.pop(k, None) + if spec.get("options"): + self.strip_extra_attributes(spec["options"]) diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 37eb139d7..93feec8d0 100644 --- a/plugins/module_utils/utils.py +++
b/plugins/module_utils/utils.py @@ -38,25 +38,6 @@ def strip_extra_attrs(spec1, spec2, deep=True): break -def check_for_idempotency(spec, resp, **kwargs): - state = kwargs.get("state") - if spec == resp: - if ( - state == "present" - # only for VMs - or ( - state in ["soft_shutdown", "hard_poweroff", "power_off"] - and resp["spec"]["resources"]["power_state"] == "OFF" - ) - # only for VMs - or ( - state == "power_on" and resp["spec"]["resources"]["power_state"] == "ON" - ) - ): - return True - return False - - def intersection(first_obj, second_obj): if isinstance(first_obj, dict): for key, value in first_obj.items(): @@ -99,6 +80,13 @@ def extract_uuids_from_references_list(reference_lists): return uuids +def list_to_string(lst): + """ + This routine creates a comma separated string from a list of strings + """ + return ",".join(lst) + + def format_filters_map(filters, except_keys=None): if filters: mapped_filters = {} @@ -111,6 +99,12 @@ def format_filters_map(filters, except_keys=None): return filters +def conv_mb_to_bytes(val): + if not isinstance(val, int): + return None, "Invalid value type passed for conv_mb_to_bytes" + return val * 1024 * 1024, None + + def create_filter_criteria_string(filters): """ This method creates filter criteria string as per filters map for v3 apis diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/v3/__init__.py similarity index 100% rename from plugins/module_utils/__init__.py rename to plugins/module_utils/v3/__init__.py diff --git a/plugins/module_utils/base_info_module.py b/plugins/module_utils/v3/base_info_module.py similarity index 90% rename from plugins/module_utils/base_info_module.py rename to plugins/module_utils/v3/base_info_module.py index 5adefc4ea..e46fefd63 100644 --- a/plugins/module_utils/base_info_module.py +++ b/plugins/module_utils/v3/base_info_module.py @@ -4,12 +4,16 @@ from copy import deepcopy -from ..module_utils.base_module import BaseModule +from ..base_module import BaseModule __metaclass__
= type class BaseInfoModule(BaseModule): + """ + Base Info module class for Nutanix PC v3 list APIs based modules + """ + info_argument_spec = dict( offset=dict(type="int"), length=dict(type="int"), diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/v3/constants.py similarity index 100% rename from plugins/module_utils/constants.py rename to plugins/module_utils/v3/constants.py diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/v3/entity.py similarity index 78% rename from plugins/module_utils/entity.py rename to plugins/module_utils/v3/entity.py index 953b7828d..79bcc552f 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/v3/entity.py @@ -12,7 +12,7 @@ from ansible.module_utils._text import to_text from ansible.module_utils.urls import fetch_url -from ..module_utils import utils +from .. import utils try: from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse @@ -20,6 +20,110 @@ from urlparse import urlparse # python2 +class EntityV4(object): + module = None + + def __init__(self, module): + self.module = module + + # old_spec is used for updating entity, where there is already spec object present. + def get_spec(self, module_args, params=None, obj=None, old_spec=None): + """ + For given module parameters input, it will create new spec object or update old_spec object. + It will pick module.params if 'params' is not given. 
+ Args: + module_args (dict): module argument spec for reference + obj (object): spec class from sdk + params (dict): input for creating spec + old_spec (object): Old state obj of entity + Returns: + spec (object): spec object + """ + + if not params: + params = copy.deepcopy(self.module.params) + + if not old_spec and not obj: + return ( + None, + "Either 'old_spec' or 'obj' is required to create/update spec object", + ) + + # create spec obj or shallow copy of old spec as per entity spec - new or existing + spec = copy.copy(old_spec) if old_spec else obj() + + # Resolve each input param w.r.t its module argument spec + for attr, schema in module_args.items(): + + if attr in params: + + type = schema.get("type") + if not type: + return ( + None, + "Invalid module argument: 'type' is required parameter for attribute {0}".format( + attr + ), + ) + + options = schema.get("options") + _obj = schema.get("obj") + elements = schema.get("elements") + + # for dict type attribute, recursively create spec objects + if type == "dict" and options is not None and _obj is not None: + s, err = self.get_spec( + module_args=options, + obj=_obj, + params=params[attr], + old_spec=getattr(spec, attr), + ) + if err: + return None, err + setattr(spec, attr, s) + + # for list type attribute, create list of spec objects recursively + elif ( + type == "list" + and elements == "dict" + and options is not None + and _obj is not None + ): + lst = [] + for item in params[attr]: + s, err = self.get_spec( + module_args=options, obj=_obj, params=item + ) + if err: + return None, err + lst.append(s) + setattr(spec, attr, lst) + + # for other types directly assign + else: + setattr(spec, attr, params[attr]) + + return spec, None + + def get_info_spec(self, params=None): + + if not params: + params = copy.deepcopy(self.module.params) + spec = {} + all_params = ["page", "limit", "filter", "orderby", "select"] + if params.get("name"): + _filter = params.get("filter") + if _filter: + _filter += f"""and 
name eq '{params["name"]}'""" + else: + _filter = f"""name eq '{params["name"]}'""" + params["filter"] = _filter + for key, val in params.items(): + if key in all_params: + spec[f"_{key}"] = val + return spec + + class Entity(object): entities_limitation = 20 entity_type = "entities" @@ -67,6 +171,8 @@ def read( raise_error=True, no_response=False, timeout=30, + method="GET", + **kwargs, ): url = self.base_url + "/{0}".format(uuid) if uuid else self.base_url if endpoint: @@ -75,10 +181,11 @@ def read( url = self._build_url_with_query(url, query) return self._fetch_url( url, - method="GET", + method=method, raise_error=raise_error, no_response=no_response, timeout=timeout, + **kwargs, ) def update( @@ -91,6 +198,7 @@ def update( no_response=False, timeout=30, method="PUT", + **kwargs, ): url = self.base_url + "/{0}".format(uuid) if uuid else self.base_url if endpoint: @@ -104,6 +212,7 @@ def update( raise_error=raise_error, no_response=no_response, timeout=timeout, + **kwargs, ) # source is the file path of resource where ansible yaml runs @@ -342,18 +451,29 @@ def _build_url_with_query(self, url, query): return urlunparse(url) def _fetch_url( - self, url, method, data=None, raise_error=True, no_response=False, timeout=30 + self, + url, + method, + data=None, + raise_error=True, + no_response=False, + timeout=30, + **kwargs, ): # only jsonify if content-type supports, added to avoid incase of form-url-encodeded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) + headers = copy.deepcopy(self.headers) + if kwargs.get("additional_headers"): + headers.update(kwargs.get("additional_headers")) + resp, info = fetch_url( self.module, url, data=data, method=method, - headers=self.headers, + headers=headers, cookies=self.cookies, timeout=timeout, ) @@ -394,7 +514,9 @@ def _fetch_url( return resp_json if status_code >= 300: - if resp_json and resp_json.get("message"): # for ndb apis + if ( + resp_json and 
isinstance(resp_json, dict) and resp_json.get("message") + ): # for ndb apis err = resp_json["message"] elif info.get("msg"): err = info["msg"] @@ -418,6 +540,8 @@ def _fetch_url( response=resp_json, ) + if kwargs.get("include_etag"): + resp_json["etag"] = info.get("etag") return resp_json # upload file in chunks to the given url diff --git a/plugins/module_utils/fc/__init__.py b/plugins/module_utils/v3/fc/__init__.py similarity index 100% rename from plugins/module_utils/fc/__init__.py rename to plugins/module_utils/v3/fc/__init__.py diff --git a/plugins/module_utils/fc/api_keys.py b/plugins/module_utils/v3/fc/api_keys.py similarity index 100% rename from plugins/module_utils/fc/api_keys.py rename to plugins/module_utils/v3/fc/api_keys.py diff --git a/plugins/module_utils/fc/fc.py b/plugins/module_utils/v3/fc/fc.py similarity index 100% rename from plugins/module_utils/fc/fc.py rename to plugins/module_utils/v3/fc/fc.py diff --git a/plugins/module_utils/fc/imaged_clusters.py b/plugins/module_utils/v3/fc/imaged_clusters.py similarity index 100% rename from plugins/module_utils/fc/imaged_clusters.py rename to plugins/module_utils/v3/fc/imaged_clusters.py diff --git a/plugins/module_utils/fc/imaged_nodes.py b/plugins/module_utils/v3/fc/imaged_nodes.py similarity index 100% rename from plugins/module_utils/fc/imaged_nodes.py rename to plugins/module_utils/v3/fc/imaged_nodes.py diff --git a/plugins/module_utils/foundation/__init__.py b/plugins/module_utils/v3/foundation/__init__.py similarity index 100% rename from plugins/module_utils/foundation/__init__.py rename to plugins/module_utils/v3/foundation/__init__.py diff --git a/plugins/module_utils/foundation/base_module.py b/plugins/module_utils/v3/foundation/base_module.py similarity index 100% rename from plugins/module_utils/foundation/base_module.py rename to plugins/module_utils/v3/foundation/base_module.py diff --git a/plugins/module_utils/foundation/bmc_ipmi_config.py 
b/plugins/module_utils/v3/foundation/bmc_ipmi_config.py similarity index 100% rename from plugins/module_utils/foundation/bmc_ipmi_config.py rename to plugins/module_utils/v3/foundation/bmc_ipmi_config.py diff --git a/plugins/module_utils/foundation/discover_nodes.py b/plugins/module_utils/v3/foundation/discover_nodes.py similarity index 100% rename from plugins/module_utils/foundation/discover_nodes.py rename to plugins/module_utils/v3/foundation/discover_nodes.py diff --git a/plugins/module_utils/foundation/enumerate_aos_packages.py b/plugins/module_utils/v3/foundation/enumerate_aos_packages.py similarity index 82% rename from plugins/module_utils/foundation/enumerate_aos_packages.py rename to plugins/module_utils/v3/foundation/enumerate_aos_packages.py index a1088d26c..feada64a9 100644 --- a/plugins/module_utils/foundation/enumerate_aos_packages.py +++ b/plugins/module_utils/v3/foundation/enumerate_aos_packages.py @@ -2,9 +2,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -from ansible_collections.nutanix.ncp.plugins.module_utils.foundation.foundation import ( - Foundation, -) +from .foundation import Foundation __metaclass__ = type diff --git a/plugins/module_utils/foundation/enumerate_hypervisor_isos.py b/plugins/module_utils/v3/foundation/enumerate_hypervisor_isos.py similarity index 80% rename from plugins/module_utils/foundation/enumerate_hypervisor_isos.py rename to plugins/module_utils/v3/foundation/enumerate_hypervisor_isos.py index 53a0f9ddc..3ff087b6d 100644 --- a/plugins/module_utils/foundation/enumerate_hypervisor_isos.py +++ b/plugins/module_utils/v3/foundation/enumerate_hypervisor_isos.py @@ -2,9 +2,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -from 
ansible_collections.nutanix.ncp.plugins.module_utils.foundation.foundation import ( - Foundation, -) +from .foundation import Foundation __metaclass__ = type diff --git a/plugins/module_utils/foundation/foundation.py b/plugins/module_utils/v3/foundation/foundation.py similarity index 100% rename from plugins/module_utils/foundation/foundation.py rename to plugins/module_utils/v3/foundation/foundation.py diff --git a/plugins/module_utils/foundation/image_nodes.py b/plugins/module_utils/v3/foundation/image_nodes.py similarity index 100% rename from plugins/module_utils/foundation/image_nodes.py rename to plugins/module_utils/v3/foundation/image_nodes.py diff --git a/plugins/module_utils/foundation/image_upload.py b/plugins/module_utils/v3/foundation/image_upload.py similarity index 100% rename from plugins/module_utils/foundation/image_upload.py rename to plugins/module_utils/v3/foundation/image_upload.py diff --git a/plugins/module_utils/foundation/node_discovery.py b/plugins/module_utils/v3/foundation/node_discovery.py similarity index 100% rename from plugins/module_utils/foundation/node_discovery.py rename to plugins/module_utils/v3/foundation/node_discovery.py diff --git a/plugins/module_utils/foundation/node_network_details.py b/plugins/module_utils/v3/foundation/node_network_details.py similarity index 100% rename from plugins/module_utils/foundation/node_network_details.py rename to plugins/module_utils/v3/foundation/node_network_details.py diff --git a/plugins/module_utils/foundation/progress.py b/plugins/module_utils/v3/foundation/progress.py similarity index 95% rename from plugins/module_utils/foundation/progress.py rename to plugins/module_utils/v3/foundation/progress.py index e2c25d9d7..e6c894f72 100644 --- a/plugins/module_utils/foundation/progress.py +++ b/plugins/module_utils/v3/foundation/progress.py @@ -4,9 +4,7 @@ import time -from ansible_collections.nutanix.ncp.plugins.module_utils.foundation.foundation import ( - Foundation, -) +from 
.foundation import Foundation __metaclass__ = type diff --git a/plugins/module_utils/karbon/clusters.py b/plugins/module_utils/v3/karbon/clusters.py similarity index 100% rename from plugins/module_utils/karbon/clusters.py rename to plugins/module_utils/v3/karbon/clusters.py diff --git a/plugins/module_utils/karbon/karbon.py b/plugins/module_utils/v3/karbon/karbon.py similarity index 100% rename from plugins/module_utils/karbon/karbon.py rename to plugins/module_utils/v3/karbon/karbon.py diff --git a/plugins/module_utils/karbon/node_pools.py b/plugins/module_utils/v3/karbon/node_pools.py similarity index 100% rename from plugins/module_utils/karbon/node_pools.py rename to plugins/module_utils/v3/karbon/node_pools.py diff --git a/plugins/module_utils/karbon/registries.py b/plugins/module_utils/v3/karbon/registries.py similarity index 100% rename from plugins/module_utils/karbon/registries.py rename to plugins/module_utils/v3/karbon/registries.py diff --git a/plugins/module_utils/ndb/__init__.py b/plugins/module_utils/v3/ndb/__init__.py similarity index 100% rename from plugins/module_utils/ndb/__init__.py rename to plugins/module_utils/v3/ndb/__init__.py diff --git a/plugins/module_utils/ndb/base_info_module.py b/plugins/module_utils/v3/ndb/base_info_module.py similarity index 100% rename from plugins/module_utils/ndb/base_info_module.py rename to plugins/module_utils/v3/ndb/base_info_module.py diff --git a/plugins/module_utils/ndb/base_module.py b/plugins/module_utils/v3/ndb/base_module.py similarity index 100% rename from plugins/module_utils/ndb/base_module.py rename to plugins/module_utils/v3/ndb/base_module.py diff --git a/plugins/module_utils/ndb/clusters.py b/plugins/module_utils/v3/ndb/clusters.py similarity index 100% rename from plugins/module_utils/ndb/clusters.py rename to plugins/module_utils/v3/ndb/clusters.py diff --git a/plugins/module_utils/ndb/database_clones.py b/plugins/module_utils/v3/ndb/database_clones.py similarity index 100% rename from 
plugins/module_utils/ndb/database_clones.py rename to plugins/module_utils/v3/ndb/database_clones.py diff --git a/plugins/module_utils/ndb/database_engines/database_engine.py b/plugins/module_utils/v3/ndb/database_engines/database_engine.py similarity index 100% rename from plugins/module_utils/ndb/database_engines/database_engine.py rename to plugins/module_utils/v3/ndb/database_engines/database_engine.py diff --git a/plugins/module_utils/ndb/database_engines/db_engine_factory.py b/plugins/module_utils/v3/ndb/database_engines/db_engine_factory.py similarity index 100% rename from plugins/module_utils/ndb/database_engines/db_engine_factory.py rename to plugins/module_utils/v3/ndb/database_engines/db_engine_factory.py diff --git a/plugins/module_utils/ndb/database_engines/postgres.py b/plugins/module_utils/v3/ndb/database_engines/postgres.py similarity index 100% rename from plugins/module_utils/ndb/database_engines/postgres.py rename to plugins/module_utils/v3/ndb/database_engines/postgres.py diff --git a/plugins/module_utils/ndb/database_instances.py b/plugins/module_utils/v3/ndb/database_instances.py similarity index 100% rename from plugins/module_utils/ndb/database_instances.py rename to plugins/module_utils/v3/ndb/database_instances.py diff --git a/plugins/module_utils/ndb/db_server_cluster.py b/plugins/module_utils/v3/ndb/db_server_cluster.py similarity index 100% rename from plugins/module_utils/ndb/db_server_cluster.py rename to plugins/module_utils/v3/ndb/db_server_cluster.py diff --git a/plugins/module_utils/ndb/db_server_vm.py b/plugins/module_utils/v3/ndb/db_server_vm.py similarity index 100% rename from plugins/module_utils/ndb/db_server_vm.py rename to plugins/module_utils/v3/ndb/db_server_vm.py diff --git a/plugins/module_utils/ndb/maintenance_window.py b/plugins/module_utils/v3/ndb/maintenance_window.py similarity index 100% rename from plugins/module_utils/ndb/maintenance_window.py rename to plugins/module_utils/v3/ndb/maintenance_window.py diff 
--git a/plugins/module_utils/ndb/nutanix_database.py b/plugins/module_utils/v3/ndb/nutanix_database.py similarity index 100% rename from plugins/module_utils/ndb/nutanix_database.py rename to plugins/module_utils/v3/ndb/nutanix_database.py diff --git a/plugins/module_utils/ndb/operations.py b/plugins/module_utils/v3/ndb/operations.py similarity index 100% rename from plugins/module_utils/ndb/operations.py rename to plugins/module_utils/v3/ndb/operations.py diff --git a/plugins/module_utils/ndb/profiles/profile_types.py b/plugins/module_utils/v3/ndb/profiles/profile_types.py similarity index 100% rename from plugins/module_utils/ndb/profiles/profile_types.py rename to plugins/module_utils/v3/ndb/profiles/profile_types.py diff --git a/plugins/module_utils/ndb/profiles/profiles.py b/plugins/module_utils/v3/ndb/profiles/profiles.py similarity index 100% rename from plugins/module_utils/ndb/profiles/profiles.py rename to plugins/module_utils/v3/ndb/profiles/profiles.py diff --git a/plugins/module_utils/ndb/slas.py b/plugins/module_utils/v3/ndb/slas.py similarity index 100% rename from plugins/module_utils/ndb/slas.py rename to plugins/module_utils/v3/ndb/slas.py diff --git a/plugins/module_utils/ndb/snapshots.py b/plugins/module_utils/v3/ndb/snapshots.py similarity index 100% rename from plugins/module_utils/ndb/snapshots.py rename to plugins/module_utils/v3/ndb/snapshots.py diff --git a/plugins/module_utils/ndb/stretched_vlans.py b/plugins/module_utils/v3/ndb/stretched_vlans.py similarity index 100% rename from plugins/module_utils/ndb/stretched_vlans.py rename to plugins/module_utils/v3/ndb/stretched_vlans.py diff --git a/plugins/module_utils/ndb/tags.py b/plugins/module_utils/v3/ndb/tags.py similarity index 100% rename from plugins/module_utils/ndb/tags.py rename to plugins/module_utils/v3/ndb/tags.py diff --git a/plugins/module_utils/ndb/time_machines.py b/plugins/module_utils/v3/ndb/time_machines.py similarity index 100% rename from 
plugins/module_utils/ndb/time_machines.py rename to plugins/module_utils/v3/ndb/time_machines.py diff --git a/plugins/module_utils/ndb/vlans.py b/plugins/module_utils/v3/ndb/vlans.py similarity index 100% rename from plugins/module_utils/ndb/vlans.py rename to plugins/module_utils/v3/ndb/vlans.py diff --git a/plugins/module_utils/prism/__init__.py b/plugins/module_utils/v3/prism/__init__.py similarity index 100% rename from plugins/module_utils/prism/__init__.py rename to plugins/module_utils/v3/prism/__init__.py diff --git a/plugins/module_utils/prism/accounts.py b/plugins/module_utils/v3/prism/accounts.py similarity index 100% rename from plugins/module_utils/prism/accounts.py rename to plugins/module_utils/v3/prism/accounts.py diff --git a/plugins/module_utils/prism/acps.py b/plugins/module_utils/v3/prism/acps.py similarity index 100% rename from plugins/module_utils/prism/acps.py rename to plugins/module_utils/v3/prism/acps.py diff --git a/plugins/module_utils/prism/address_groups.py b/plugins/module_utils/v3/prism/address_groups.py similarity index 100% rename from plugins/module_utils/prism/address_groups.py rename to plugins/module_utils/v3/prism/address_groups.py diff --git a/plugins/module_utils/prism/categories.py b/plugins/module_utils/v3/prism/categories.py similarity index 100% rename from plugins/module_utils/prism/categories.py rename to plugins/module_utils/v3/prism/categories.py diff --git a/plugins/module_utils/prism/clusters.py b/plugins/module_utils/v3/prism/clusters.py similarity index 100% rename from plugins/module_utils/prism/clusters.py rename to plugins/module_utils/v3/prism/clusters.py diff --git a/plugins/module_utils/prism/floating_ips.py b/plugins/module_utils/v3/prism/floating_ips.py similarity index 100% rename from plugins/module_utils/prism/floating_ips.py rename to plugins/module_utils/v3/prism/floating_ips.py diff --git a/plugins/module_utils/prism/groups.py b/plugins/module_utils/v3/prism/groups.py similarity index 100% rename 
from plugins/module_utils/prism/groups.py rename to plugins/module_utils/v3/prism/groups.py diff --git a/plugins/module_utils/prism/hosts.py b/plugins/module_utils/v3/prism/hosts.py similarity index 100% rename from plugins/module_utils/prism/hosts.py rename to plugins/module_utils/v3/prism/hosts.py diff --git a/plugins/module_utils/prism/idempotence_identifiers.py b/plugins/module_utils/v3/prism/idempotence_identifiers.py similarity index 100% rename from plugins/module_utils/prism/idempotence_identifiers.py rename to plugins/module_utils/v3/prism/idempotence_identifiers.py diff --git a/plugins/module_utils/prism/image_placement_policy.py b/plugins/module_utils/v3/prism/image_placement_policy.py similarity index 100% rename from plugins/module_utils/prism/image_placement_policy.py rename to plugins/module_utils/v3/prism/image_placement_policy.py diff --git a/plugins/module_utils/prism/images.py b/plugins/module_utils/v3/prism/images.py similarity index 100% rename from plugins/module_utils/prism/images.py rename to plugins/module_utils/v3/prism/images.py diff --git a/plugins/module_utils/prism/pbrs.py b/plugins/module_utils/v3/prism/pbrs.py similarity index 100% rename from plugins/module_utils/prism/pbrs.py rename to plugins/module_utils/v3/prism/pbrs.py diff --git a/plugins/module_utils/prism/permissions.py b/plugins/module_utils/v3/prism/permissions.py similarity index 100% rename from plugins/module_utils/prism/permissions.py rename to plugins/module_utils/v3/prism/permissions.py diff --git a/plugins/module_utils/prism/prism.py b/plugins/module_utils/v3/prism/prism.py similarity index 100% rename from plugins/module_utils/prism/prism.py rename to plugins/module_utils/v3/prism/prism.py diff --git a/plugins/module_utils/prism/projects.py b/plugins/module_utils/v3/prism/projects.py similarity index 100% rename from plugins/module_utils/prism/projects.py rename to plugins/module_utils/v3/prism/projects.py diff --git 
a/plugins/module_utils/prism/projects_internal.py b/plugins/module_utils/v3/prism/projects_internal.py similarity index 100% rename from plugins/module_utils/prism/projects_internal.py rename to plugins/module_utils/v3/prism/projects_internal.py diff --git a/plugins/module_utils/prism/protection_rules.py b/plugins/module_utils/v3/prism/protection_rules.py similarity index 99% rename from plugins/module_utils/prism/protection_rules.py rename to plugins/module_utils/v3/prism/protection_rules.py index ed4bd751a..b26f25790 100644 --- a/plugins/module_utils/prism/protection_rules.py +++ b/plugins/module_utils/v3/prism/protection_rules.py @@ -4,7 +4,7 @@ from copy import deepcopy -from ..utils import convert_to_secs +from ...utils import convert_to_secs from .prism import Prism __metaclass__ = type diff --git a/plugins/module_utils/prism/recovery_plan_jobs.py b/plugins/module_utils/v3/prism/recovery_plan_jobs.py similarity index 100% rename from plugins/module_utils/prism/recovery_plan_jobs.py rename to plugins/module_utils/v3/prism/recovery_plan_jobs.py diff --git a/plugins/module_utils/prism/recovery_plans.py b/plugins/module_utils/v3/prism/recovery_plans.py similarity index 100% rename from plugins/module_utils/prism/recovery_plans.py rename to plugins/module_utils/v3/prism/recovery_plans.py diff --git a/plugins/module_utils/prism/roles.py b/plugins/module_utils/v3/prism/roles.py similarity index 100% rename from plugins/module_utils/prism/roles.py rename to plugins/module_utils/v3/prism/roles.py diff --git a/plugins/module_utils/prism/security_rules.py b/plugins/module_utils/v3/prism/security_rules.py similarity index 100% rename from plugins/module_utils/prism/security_rules.py rename to plugins/module_utils/v3/prism/security_rules.py diff --git a/plugins/module_utils/prism/service_groups.py b/plugins/module_utils/v3/prism/service_groups.py similarity index 100% rename from plugins/module_utils/prism/service_groups.py rename to 
plugins/module_utils/v3/prism/service_groups.py diff --git a/plugins/module_utils/prism/spec/categories_mapping.py b/plugins/module_utils/v3/prism/spec/categories_mapping.py similarity index 100% rename from plugins/module_utils/prism/spec/categories_mapping.py rename to plugins/module_utils/v3/prism/spec/categories_mapping.py diff --git a/plugins/module_utils/prism/spec/vms.py b/plugins/module_utils/v3/prism/spec/vms.py similarity index 100% rename from plugins/module_utils/prism/spec/vms.py rename to plugins/module_utils/v3/prism/spec/vms.py diff --git a/plugins/module_utils/prism/static_routes.py b/plugins/module_utils/v3/prism/static_routes.py similarity index 100% rename from plugins/module_utils/prism/static_routes.py rename to plugins/module_utils/v3/prism/static_routes.py diff --git a/plugins/module_utils/prism/subnets.py b/plugins/module_utils/v3/prism/subnets.py similarity index 100% rename from plugins/module_utils/prism/subnets.py rename to plugins/module_utils/v3/prism/subnets.py diff --git a/plugins/module_utils/prism/tasks.py b/plugins/module_utils/v3/prism/tasks.py similarity index 100% rename from plugins/module_utils/prism/tasks.py rename to plugins/module_utils/v3/prism/tasks.py diff --git a/plugins/module_utils/prism/user_groups.py b/plugins/module_utils/v3/prism/user_groups.py similarity index 100% rename from plugins/module_utils/prism/user_groups.py rename to plugins/module_utils/v3/prism/user_groups.py diff --git a/plugins/module_utils/prism/users.py b/plugins/module_utils/v3/prism/users.py similarity index 100% rename from plugins/module_utils/prism/users.py rename to plugins/module_utils/v3/prism/users.py diff --git a/plugins/module_utils/prism/virtual_switches.py b/plugins/module_utils/v3/prism/virtual_switches.py similarity index 95% rename from plugins/module_utils/prism/virtual_switches.py rename to plugins/module_utils/v3/prism/virtual_switches.py index 4f6b1f7be..bac4d0b5d 100644 --- a/plugins/module_utils/prism/virtual_switches.py 
+++ b/plugins/module_utils/v3/prism/virtual_switches.py @@ -4,7 +4,8 @@ __metaclass__ = type -from ..utils import create_filter_criteria_string + +from ...utils import create_filter_criteria_string from .groups import Groups # Helper functions diff --git a/plugins/module_utils/prism/vms.py b/plugins/module_utils/v3/prism/vms.py similarity index 100% rename from plugins/module_utils/prism/vms.py rename to plugins/module_utils/v3/prism/vms.py diff --git a/plugins/module_utils/prism/vpcs.py b/plugins/module_utils/v3/prism/vpcs.py similarity index 100% rename from plugins/module_utils/prism/vpcs.py rename to plugins/module_utils/v3/prism/vpcs.py diff --git a/plugins/module_utils/prism/vpn_connections.py b/plugins/module_utils/v3/prism/vpn_connections.py similarity index 100% rename from plugins/module_utils/prism/vpn_connections.py rename to plugins/module_utils/v3/prism/vpn_connections.py diff --git a/plugins/module_utils/v3/utils.py b/plugins/module_utils/v3/utils.py new file mode 100644 index 000000000..30f35329c --- /dev/null +++ b/plugins/module_utils/v3/utils.py @@ -0,0 +1,25 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +def check_for_idempotency(spec, resp, **kwargs): + state = kwargs.get("state") + if spec == resp: + if ( + state == "present" + # only for VMs + or ( + state in ["soft_shutdown", "hard_poweroff", "power_off"] + and resp["spec"]["resources"]["power_state"] == "OFF" + ) + # only for VMs + or ( + state == "power_on" and resp["spec"]["resources"]["power_state"] == "ON" + ) + ): + return True + return False diff --git a/plugins/module_utils/v4/__init__.py b/plugins/module_utils/v4/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/module_utils/v4/base_info_module.py b/plugins/module_utils/v4/base_info_module.py new file mode 100644 index 
000000000..b9b44ebd8 --- /dev/null +++ b/plugins/module_utils/v4/base_info_module.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +from ..base_module import BaseModule + +__metaclass__ = type + + +class BaseInfoModule(BaseModule): + """ + Base Info module class for Nutanix PC v4 list APIs based modules + """ + + info_argument_spec = dict( + filter=dict(type="str"), + page=dict(type="int"), + limit=dict(type="int"), + orderby=dict(type="str"), + select=dict(type="str"), + ) + + def __init__(self, skip_info_args=False, **kwargs): + self.argument_spec = deepcopy(BaseModule.argument_spec) + self.argument_spec.pop("state") + self.argument_spec.pop("wait") + if not skip_info_args: + self.argument_spec.update(self.info_argument_spec) + super(BaseInfoModule, self).__init__(**kwargs) diff --git a/plugins/module_utils/v4/clusters_mgmt/api_client.py b/plugins/module_utils/v4/clusters_mgmt/api_client.py new file mode 100644 index 000000000..ea0b49001 --- /dev/null +++ b/plugins/module_utils/v4/clusters_mgmt/api_client.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_clustermgmt_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. 
+ """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + + config = ntnx_clustermgmt_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_clustermgmt_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + """ + return ntnx_clustermgmt_py_client.ApiClient.get_etag(data) + + +def get_clusters_api_instance(module): + """ + This method will return clusters api instance from sdk + Args: + module (AnsibleModule): AnsibleModule instance + Returns: + ClustersApi: ClustersApi instance + """ + client = get_api_client(module) + return ntnx_clustermgmt_py_client.ClustersApi(client) + + +def get_storage_containers_api_instance(module): + """ + This method will return storage containers api instance from sdk + Args: + module (AnsibleModule): AnsibleModule instance + Returns: + StorageContainersApi: StorageContainersApi instance + """ + client = get_api_client(module) + return ntnx_clustermgmt_py_client.StorageContainersApi(client) diff --git a/plugins/module_utils/v4/clusters_mgmt/helpers.py b/plugins/module_utils/v4/clusters_mgmt/helpers.py new file mode 100644 index 000000000..59341ce32 --- /dev/null +++ b/plugins/module_utils/v4/clusters_mgmt/helpers.py @@ 
-0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_cluster(module, api_instance, ext_id): + """ + This method will return cluster info using external ID. + Args: + module: Ansible module + api_instance: ClusterApi instance from sdk + ext_id (str): cluster external ID + return: + cluster info (object): cluster info + """ + try: + return api_instance.get_cluster_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching cluster info using ext_id", + ) + + +def get_host(module, api_instance, ext_id, cluster_ext_id): + """ + This method will return host info using external ID. + Args: + module: Ansible module + api_instance: ClusterApi instance from sdk + ext_id (str): host external ID + cluster_ext_id (str): cluster external ID + return: + host info (object): host info + """ + try: + return api_instance.get_host_by_id( + clusterExtId=cluster_ext_id, extId=ext_id + ).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching host info using ext_id", + ) + + +def get_storage_container(module, api_instance, ext_id): + """ + This method will return storage container info using external ID. 
+ Args: + module: Ansible module + api_instance: ClusterApi instance from sdk + ext_id (str): storage container external ID + return: + storage container info (object): storage container info + """ + try: + return api_instance.get_storage_container_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching storage container info using ext_id", + ) diff --git a/plugins/module_utils/v4/clusters_mgmt/spec/clusters.py b/plugins/module_utils/v4/clusters_mgmt/spec/clusters.py new file mode 100644 index 000000000..dec9093be --- /dev/null +++ b/plugins/module_utils/v4/clusters_mgmt/spec/clusters.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from copy import deepcopy + +SDK_IMP_ERROR = None +try: + import ntnx_clustermgmt_py_client as clusters_sdk # noqa: E402 +except ImportError: + from ....v4.sdk_mock import mock_sdk as clusters_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +class ClusterSpecs: + """Module specs related to cluster and its sub entities""" + + ipv4_address = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False, default=32), + ) + + ipv6_address = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False, default=128), + ) + + fqdn = dict(value=dict(type="str", required=True)) + + ip_address_or_fqdn = dict( + ipv4=dict( + type="dict", + options=ipv4_address, + obj=clusters_sdk.IPv4Address, + required=False, + ), + ipv6=dict( + type="dict", + options=ipv6_address, + obj=clusters_sdk.IPv6Address, + required=False, + ), + fqdn=dict(type="dict", options=fqdn, obj=clusters_sdk.FQDN, required=False), + ) + + ip_address = dict( 
+ ipv4=dict( + type="dict", + options=ipv4_address, + obj=clusters_sdk.IPv4Address, + required=False, + ), + ipv6=dict( + type="dict", + options=ipv6_address, + obj=clusters_sdk.IPv6Address, + required=False, + ), + ) + + smtp_network = dict( + ip_address=dict( + type="dict", options=ip_address_or_fqdn, obj=clusters_sdk.IPAddress + ), + port=dict(type="int"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + ) + + smtp_server = dict( + email_address=dict(type="str"), + server=dict(type="dict", options=smtp_network, obj=clusters_sdk.SmtpNetwork), + type=dict(type="str", choices=["PLAIN", "STARTTLS", "SSL"]), + ) + + management_server = dict( + ip=dict(type="dict", options=ip_address, obj=clusters_sdk.IPAddress), + type=dict(type="str", choices=["VCENTER"]), + is_registered=dict(type="bool"), + in_use=dict(type="bool"), + is_drs_enabled=dict(type="bool"), + ) + + backplane_network = dict( + is_segmentation_enabled=dict(type="bool"), + vlan_tag=dict(type="int"), + subnet=dict(type="dict", options=ipv4_address, obj=clusters_sdk.IPv4Address), + netmask=dict(type="dict", options=ipv4_address, obj=clusters_sdk.IPv4Address), + ) + http_proxy_list = dict( + ip_address=dict(type="dict", options=ip_address, obj=clusters_sdk.IPAddress), + port=dict(type="int"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + name=dict(type="str", required=True), + proxy_types=dict( + type="list", elements="str", choices=["HTTP", "HTTPS", "SOCKS"] + ), + ) + http_proxy_white_list = dict( + target_type=dict( + type="str", + choices=[ + "IPV6_ADDRESS", + "HOST_NAME", + "DOMAIN_NAME_SUFFIX", + "IPV4_NETWORK_MASK", + "IPV4_ADDRESS", + ], + required=True, + ), + target=dict(type="str", required=True), + ) + cluster_network_config = dict( + external_address=dict( + type="dict", options=ip_address, obj=clusters_sdk.IPAddress + ), + external_data_service_ip=dict( + type="dict", options=ip_address, obj=clusters_sdk.IPAddress + ), + 
nfs_subnet_whitelist=dict(type="list", elements="str"), + ntp_server_ip_list=dict( + type="list", + elements="dict", + options=ip_address_or_fqdn, + obj=clusters_sdk.IPAddressOrFQDN, + ), + name_server_ip_list=dict( + type="list", + elements="dict", + options=ip_address_or_fqdn, + obj=clusters_sdk.IPAddressOrFQDN, + ), + smtp_server=dict( + type="dict", options=smtp_server, obj=clusters_sdk.SmtpServerRef + ), + masquerading_ip=dict( + type="dict", options=ip_address, obj=clusters_sdk.IPAddress + ), + management_server=dict( + type="dict", options=management_server, obj=clusters_sdk.ManagementServerRef + ), + fqdn=dict(type="str"), + key_management_server_type=dict( + type="str", choices=["LOCAL", "PRISM_CENTRAL", "EXTERNAL"] + ), + backplane=dict( + type="dict", + options=backplane_network, + obj=clusters_sdk.BackplaneNetworkParams, + ), + http_proxy_list=dict( + type="list", + elements="dict", + options=http_proxy_list, + obj=clusters_sdk.HttpProxyConfig, + ), + http_proxy_white_list=dict( + type="list", + elements="dict", + options=http_proxy_white_list, + obj=clusters_sdk.HttpProxyWhiteListConfig, + ), + ) + node = dict( + controller_vm_ip=dict( + type="dict", options=ip_address, obj=clusters_sdk.IPAddress, required=True + ), + host_ip=dict( + type="dict", options=ip_address, obj=clusters_sdk.IPAddress, required=False + ), + ) + + nodes = dict( + node_list=dict( + type="list", + elements="dict", + options=node, + obj=clusters_sdk.NodeListItemReference, + required=True, + ) + ) + + public_key = dict( + name=dict(type="str", required=True), + key=dict(type="str", required=True, no_log=False), + ) + + fault_tolerance_state = dict( + domain_awareness_level=dict( + type="str", required=True, choices=["NODE", "BLOCK", "RACK", "DISK"] + ), + desired_cluster_fault_tolerance=dict( + type="str", + choices=["CFT_1N_OR_1D", "CFT_2N_OR_2D", "CFT_1N_AND_1D", "CFT_0N_AND_0D"], + ), + ) + cluster_config = dict( + cluster_function=dict( + type="list", elements="str", 
choices=["AOS", "ONE_NODE", "TWO_NODE"] + ), + authorized_public_key_list=dict( + type="list", elements="dict", options=public_key, obj=clusters_sdk.PublicKey + ), + redundancy_factor=dict(type="int"), + cluster_arch=dict(type="str", choices=["X86_64", "PPC64LE"]), + fault_tolerance_state=dict( + type="dict", + options=fault_tolerance_state, + obj=clusters_sdk.FaultToleranceState, + ), + operation_mode=dict( + type="str", + choices=[ + "NORMAL", + "READ_ONLY", + "STAND_ALONE", + "SWITCH_TO_TWO_NODE", + "OVERRIDE", + ], + ), + encryption_in_transit_status=dict(type="str", choices=["ENABLED", "DISABLED"]), + ) + + cluster = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + nodes=dict(type="dict", options=nodes, obj=clusters_sdk.NodeReference), + config=dict( + type="dict", options=cluster_config, obj=clusters_sdk.ClusterConfigReference + ), + network=dict( + type="dict", + options=cluster_network_config, + obj=clusters_sdk.ClusterNetworkReference, + ), + container_name=dict(type="str"), + categories=dict(type="dict"), + ) + + @classmethod + def get_cluster_spec(cls): + return deepcopy(cls.cluster) diff --git a/plugins/module_utils/v4/constants.py b/plugins/module_utils/v4/constants.py new file mode 100644 index 000000000..9c9b90bc7 --- /dev/null +++ b/plugins/module_utils/v4/constants.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class NutanixFiles: + + API_REQUEST_TIMEOUT_SECS = 30 + + +class Tasks: + TASK_SERVICE = "ergon" + + class RelEntityType: + """Relation Entity Types for the task entities affected""" + + FLOATING_IPS = "networking:config:floating-ips" + VM = "vmm:ahv:config:vm" + IMAGES = "vmm:content:image" + IMAGE_PLACEMENT_POLICY = "vmm:images:config:placement-policy" + TEMPLATES = "vmm:content:template" + 
VOLUME_GROUP = "volumes:config:volume-group" + VOLUME_GROUP_DISK = "volumes:config:volume-groups:disk" + ISCSI_CLIENT = "volumes:config:iscsi-client" + VPC = "networking:config:vpc" + SUBNET = "networking:config:subnet" + FLOATING_IP = "networking:config:floating-ip" + PBRS = "networking:config:routing-policy" + SECURITY_POLICY = "microseg:config:policy" + SERVICE_GROUP = "microseg:config:service-group" + ADDRESS_GROUP = "microseg:config:address-group" + VM_DISK = "vmm:ahv:config:vm:disk" + CD_ROM = "vmm:ahv:config:vm:cdrom" + SERIAL_PORT = "vmm:ahv:config:vm:serialport" + VM_NIC = "vmm:ahv:config:vm:nic" + RECOVERY_POINT = "dataprotection:config:recovery-point" + VM_RECOVERY_POINT = "dataprotection:config:vm-recovery-point" + STORAGE_CONTAINER = "clustermgmt:config:storage-containers" + ROUTE = "networking:config:route" + + class CompletetionDetailsName: + """Completion details name for the task entities affected""" + + RECOVERY_POINT = "recoveryPointExtId" + VM_EXT_IDS = "vmExtIds" + VG_EXT_IDS = "volumeGroupExtIds" diff --git a/plugins/module_utils/v4/data_protection/api_client.py b/plugins/module_utils/v4/data_protection/api_client.py new file mode 100644 index 000000000..bf25588c9 --- /dev/null +++ b/plugins/module_utils/v4/data_protection/api_client.py @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_dataprotection_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. 
+ """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_dataprotection_py_client"), + exception=SDK_IMP_ERROR, + ) + + config = ntnx_dataprotection_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_dataprotection_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + Returns: + str: etag value + """ + return ntnx_dataprotection_py_client.ApiClient.get_etag(data) + + +def get_recovery_point_api_instance(module): + """ + This method will return data protection api instance. 
+ Args: + module (object): Ansible module object + Returns: + api_instance (object): data protection api instance + """ + client = get_api_client(module) + return ntnx_dataprotection_py_client.RecoveryPointsApi(client) diff --git a/plugins/module_utils/v4/data_protection/helpers.py b/plugins/module_utils/v4/data_protection/helpers.py new file mode 100644 index 000000000..19f8fb478 --- /dev/null +++ b/plugins/module_utils/v4/data_protection/helpers.py @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_vm_recovery_point( + module, api_instance, recovery_point_ext_id, vm_recovery_point_ext_id +): + """ + This method will return vm recovery point info which is part of given top level recovery_point_ext_id + Args: + module: Ansible module + api_instance: VmRecoveryPointsApi instance from ntnx_dataprotection_py_client sdk + recovery_point_ext_id (str): top level recovery point external ID + vm_recovery_point_ext_id (str): vm recovery point external ID + Returns: + vm_recovery_point_info (object): vm recovery point info + """ + try: + return api_instance.get_vm_recovery_point_by_id( + recoveryPointExtId=recovery_point_ext_id, extId=vm_recovery_point_ext_id + ).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm recovery point info using ext_id", + ) + + +def get_recovery_point(module, api_instance, ext_id): + """ + This method will return recovery point info using external ID. 
+ Args: + module: Ansible module + api_instance: AddressGroupsApi instance from ntnx_dataprotection_py_client sdk + ext_id (str): top level recovery point external ID + Returns: + recovery_point_info (object): recovery point info + """ + try: + return api_instance.get_recovery_point_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching recovery point info using ext_id", + ) diff --git a/plugins/module_utils/v4/flow/api_client.py b/plugins/module_utils/v4/flow/api_client.py new file mode 100644 index 000000000..45e46172c --- /dev/null +++ b/plugins/module_utils/v4/flow/api_client.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_microseg_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. 
+ """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_microseg_py_client"), exception=SDK_IMP_ERROR + ) + + config = ntnx_microseg_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_microseg_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + Returns: + str: etag value + """ + return ntnx_microseg_py_client.ApiClient.get_etag(data) + + +def get_service_groups_api_instance(module): + """ + This method will return service groups api instance. + Args: + module (object): Ansible module object + Returns: + api_instance (object): service group api instance + """ + client = get_api_client(module) + return ntnx_microseg_py_client.ServiceGroupsApi(client) + + +def get_address_groups_api_instance(module): + """ + This method will return address groups api instance. + Args: + module (object): Ansible module object + Returns: + api_instance (object): address group api instance + """ + client = get_api_client(module) + return ntnx_microseg_py_client.AddressGroupsApi(client) + + +def get_network_security_policy_api_instance(module): + """ + This method will return NetworkSecurityPoliciesApi instance. 
+ Args: + module (object): Ansible module object + return: + api_instance (object): network security rule api instance + """ + client = get_api_client(module) + return ntnx_microseg_py_client.NetworkSecurityPoliciesApi(client) diff --git a/plugins/module_utils/v4/flow/helpers.py b/plugins/module_utils/v4/flow/helpers.py new file mode 100644 index 000000000..ab9b809da --- /dev/null +++ b/plugins/module_utils/v4/flow/helpers.py @@ -0,0 +1,86 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_network_security_policy(module, api_instance, ext_id): + """ + This method will return network security rule info using external ID. + Args: + module: Ansible module + api_instance: NetworkSecurityPoliciesApi instance from ntnx_microseg_py_client sdk + ext_id (str): network security rule info external ID + return: + security_policy_info (object): network security rule info + """ + try: + return api_instance.get_network_security_policy_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching network security rule info using ext_id", + ) + + +def get_service_group(module, api_instance, ext_id): + """ + This method will return service group info using external ID. 
+ Args: + module: Ansible module + api_instance: ServiceGroupsApi instance from ntnx_microseg_py_client sdk + ext_id (str): service group info external ID + return: + service_group_info (object): service group info + """ + try: + return api_instance.get_service_group_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching service group info using ext_id", + ) + + +def get_address_group(module, api_instance, ext_id): + """ + This method will return address group info using external ID. + Args: + module: Ansible module + api_instance: AddressGroupsApi instance from ntnx_microseg_py_client sdk + ext_id (str): address group info external ID + return: + address_group_info (object): address group info + """ + try: + return api_instance.get_address_group_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching address group info using ext_id", + ) + + +def strip_service_group_extra_attributes(obj): + """ + This method will remove antivirus object's extra fields from given object. 
+ Args: + obj (object): antivirus object + """ + + extra_fields = ["is_system_defined"] + + for field in extra_fields: + setattr(obj, field, None) + + return obj diff --git a/plugins/module_utils/v4/iam/api_client.py b/plugins/module_utils/v4/iam/api_client.py new file mode 100644 index 000000000..f6b234c35 --- /dev/null +++ b/plugins/module_utils/v4/iam/api_client.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. + """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + config = ntnx_iam_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_iam_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. 
+ Args: + data (dict): v4 api response + """ + return ntnx_iam_py_client.ApiClient.get_etag(data) + + +def get_authorization_policy_api_instance(module): + """ + This method will create authorization policies api instance + Args: + module (object): Ansible module object + Returns: + api_instance (object): Authorization policy api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.AuthorizationPoliciesApi(api_client=api_client) + + +def get_directory_service_api_instance(module): + """ + This method will return directory service api instance. + Args: + module (object): Ansible module object + Returns: + api_instance (object): Directory service api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.DirectoryServicesApi(api_client=api_client) + + +def get_user_group_api_instance(module): + """ + This method will return user group api instance. + Args: + module (AnsibleModule): Ansible module object + Returns: + api_instance (object): User group api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.UserGroupsApi(api_client=api_client) + + +def get_role_api_instance(module): + """ + This method will return role api instance. + Args: + module (object): Ansible module object + Returns: + api_instance (object): Role api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.RolesApi(api_client=api_client) + + +def get_permission_api_instance(module): + """ + This method will return permission api instance. + Args: + module (object): Ansible module object + Returns: + api_instance (object): Permission api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.OperationsApi(api_client=api_client) + + +def get_user_api_instance(module): + """ + This method will return user api instance. 
+ Args: + module (object): Ansible module object + Returns: + api_instance (object): User api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.UsersApi(api_client=api_client) + + +def get_identity_provider_api_instance(module): + """ + This method will return SAMLIdentityProvidersApi instance. + Args: + module (object): Ansible module object + return: + api_instance (object): Identity provider api instance + """ + api_client = get_api_client(module) + return ntnx_iam_py_client.SAMLIdentityProvidersApi(api_client=api_client) diff --git a/plugins/module_utils/v4/iam/helpers.py b/plugins/module_utils/v4/iam/helpers.py new file mode 100644 index 000000000..2141a6bef --- /dev/null +++ b/plugins/module_utils/v4/iam/helpers.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_authorization_policy(module, api_instance, ext_id): + """ + This method will return authorization policy info using ext_id. + Args: + module (object): Ansible module object + api_instance (object): Authorization policy api instance + ext_id (str): External id of authorization policy + Returns: + authorization_policy_info (dict): Authorization policy info + """ + try: + return api_instance.get_authorization_policy_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching authorization policy using ext_id", + ) + + +def get_role(module, api_instance, ext_id): + """ + This method will return role info using ext_id. 
+ Args: + module (object): Ansible module object + api_instance (object): Role api instance + ext_id (str): External id of role + Returns: + role_info (dict): Role info + """ + try: + return api_instance.get_role_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching role info using ext_id", + ) + + +def get_permission(module, api_instance, ext_id): + """ + This method will return permission info using ext_id. + Args: + module (object): Ansible module object + api_instance (object): Permission api instance + ext_id (str): External id of permission + Returns: + permission_info (dict): Permission info + """ + try: + return api_instance.get_operation_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching permission info", + ) + + +def get_user_group(module, api_instance, ext_id): + """ + This method will return user group info using ext_id. + Args: + module (AnsibleModule): Ansible module object + api_instance (ApiClient): ApiClient object + ext_id (str): External ID of the user group + Returns: + user_group_info (dict): User group info + """ + try: + return api_instance.get_user_group_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching user group info", + ) + + +def get_user(module, api_instance, ext_id): + """ + This method will return user info using ext_id. 
+ Args: + module (object): Ansible module object + api_instance (object): User api instance + ext_id (str): External id of user + Returns: + user_info (dict): User info + """ + try: + return api_instance.get_user_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching user info", + ) + + +def get_identity_provider(module, api_instance, ext_id): + """ + Get identity provider by ext_id + Args: + module: Ansible module + api_instance: SAMLIdentityProvidersApi instance from ntnx_iam_py_client sdk + ext_id: External id of identity provider + Returns: + identity provider (obj): identity provider info object + """ + try: + return api_instance.get_saml_identity_provider_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching directory service info", + ) + + +def get_directory_service(module, api_instance, ext_id): + """ + This method will return directory service info using ext_id. 
+ Args: + module (object): Ansible module object + api_instance (object): Directory service api instance + ext_id (str): External id of directory service + Returns: + directory_service_info (dict): Directory service info + """ + try: + return api_instance.get_directory_service_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching directory service info", + ) diff --git a/plugins/module_utils/v4/network/api_client.py b/plugins/module_utils/v4/network/api_client.py new file mode 100644 index 000000000..c156cf641 --- /dev/null +++ b/plugins/module_utils/v4/network/api_client.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. 
+ Args: + module (object): Ansible module object + return: + client (object): api client object + """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + config = ntnx_networking_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_networking_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + return: + etag (str): etag value + """ + return ntnx_networking_py_client.ApiClient.get_etag(data) + + +def get_routing_policies_api_instance(module): + """ + This method will return RoutingPoliciesApi instance. + Args: + module (object): Ansible module object + return: + api_instance (object): routing policies api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.RoutingPoliciesApi(api_client=api_client) + + +def get_floating_ip_api_instance(module): + """ + This method will return FloatingIpsApi instance. + Args: + module (object): Ansible module object + return: + api_instance (object): floating ip api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.FloatingIpsApi(api_client=api_client) + + +def get_vpc_api_instance(module): + """ + This method will return VpcsApi instance. 
+ Args: + module (object): Ansible module object + return: + api_instance (object): vpc api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.VpcsApi(api_client=api_client) + + +def get_subnet_api_instance(module): + """ + This method will return subnet api instance. + Args: + module (object): Ansible module object + return: + api_instance (object): subnet api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.SubnetsApi(api_client=api_client) + + +def get_route_tables_api_instance(module): + """ + This method will return RouteTablesApi instance. + Args: + module (object): Ansible module object + return: + api_instance (object): route tables api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.RouteTablesApi(api_client=api_client) + + +def get_routes_api_instance(module): + """ + This method will return RoutesApi instance. + Args: + module (object): Ansible module object + return: + api_instance (object): routes api instance + """ + api_client = get_api_client(module) + return ntnx_networking_py_client.RoutesApi(api_client=api_client) diff --git a/plugins/module_utils/v4/network/helpers.py b/plugins/module_utils/v4/network/helpers.py new file mode 100644 index 000000000..dfc37eec2 --- /dev/null +++ b/plugins/module_utils/v4/network/helpers.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_routing_policy(module, api_instance, ext_id): + """ + This method will return routing policy info using its ext_id + Args: + module: Ansible module + api_instance: RoutingPoliciesApi instance from ntnx_networking_py_client sdk + ext_id (str): routing policy info external 
ID + return: + info (object): routing policy info + """ + try: + return api_instance.get_routing_policy_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching routing policy info using ext_id", + ) + + +def get_floating_ip(module, api_instance, ext_id): + """ + Get Floating ip by ext_id + Args: + module: Ansible module + api_instance: FloatingIpsApi instance from ntnx_networking_py_client sdk + ext_id: ext_id of Floating ip + Returns: + floating_ip (obj): Floating ip info object + """ + try: + return api_instance.get_floating_ip_by_id(extId=ext_id).data + + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching floating_ip info using ext_id", + ) + + +def get_vpc(module, api_instance, ext_id): + """ + Get vpc by ext_id + Args: + module: Ansible module + api_instance: VpcsApi instance from ntnx_networking_py_client sdk + ext_id: ext_id of vpc + Returns: + vpc (obj): Vpc info object + """ + try: + return api_instance.get_vpc_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vpc info using ext_id", + ) + + +def get_subnet(module, api_instance, ext_id): + """ + This method will return subnet info using subnet external ID. + Args: + module (object): Ansible module object + api_instance (object): Api client instance + ext_id (str): subnet external ID + return: + subnet_info (object): subnet info + """ + try: + return api_instance.get_subnet_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching subnet info using ext_id", + ) + + +def get_route_table(module, api_instance, ext_id): + """ + This method will return route table info using route table external ID. 
+ Args: + module (object): Ansible module object + api_instance (object): Route table API instance from SDK + ext_id (str): route table external ID + return: + route_table_info (object): route table info + """ + try: + return api_instance.get_route_table_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching route table info using ext_id", + ) + + +def get_route(module, api_instance, ext_id, route_table_ext_id): + """ + This method will return route info using route external ID and table external ID. + Args: + module (object): Ansible module object + api_instance (object): Route API instance from SDK + ext_id (str): route external ID + route_table_ext_id (str): route table external ID + return: + route_info (object): route info + """ + try: + return api_instance.get_route_for_route_table_by_id( + extId=ext_id, routeTableExtId=route_table_ext_id + ).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching route info using ext_id and table ext_id", + ) diff --git a/plugins/module_utils/v4/prism/pc_api_client.py b/plugins/module_utils/v4/prism/pc_api_client.py new file mode 100644 index 000000000..9da763af1 --- /dev/null +++ b/plugins/module_utils/v4/prism/pc_api_client.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +PRISM_SDK_IMP_ERROR = None +try: + import ntnx_prism_py_client +except ImportError: + PRISM_SDK_IMP_ERROR = traceback.format_exc() + + +def get_pc_api_client(module): + """ + This method will return client to be used in api connection using + given
connection details. + """ + if PRISM_SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_prism_py_client"), exception=PRISM_SDK_IMP_ERROR + ) + + config = ntnx_prism_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_prism_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + """ + return ntnx_prism_py_client.ApiClient.get_etag(data) + + +def get_domain_manager_api_instance(module): + """ + This method will return domain manager api instance.
+ Args: + module (object): Ansible module object + return: + api_instance (object): domain manager api instance + """ + api_client = get_pc_api_client(module) + return ntnx_prism_py_client.DomainManagerApi(api_client=api_client) diff --git a/plugins/module_utils/v4/prism/tasks.py b/plugins/module_utils/v4/prism/tasks.py new file mode 100644 index 000000000..f83b632d2 --- /dev/null +++ b/plugins/module_utils/v4/prism/tasks.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +from ..utils import strip_internal_attributes # noqa: E40 + +__metaclass__ = type + +import base64 # noqa: E40 +import time # noqa: E40 +import traceback # noqa: E40 + +PRISM_SDK_IMP_ERROR = None +try: + import ntnx_prism_py_client # noqa: E40 +except ImportError: + PRISM_SDK_IMP_ERROR = traceback.format_exc() + + +from ..constants import Tasks # noqa: E40 +from .pc_api_client import get_pc_api_client # noqa: E40 + + +def wait_for_completion( + module, + ext_id, + task_service=None, + polling_gap=2, + raise_error=True, + add_task_service=False, +): + """ + Wait for a task to complete. + Args: + module: The Ansible module. + ext_id: The external ID of the task. + task_service: The service of the task. + polling_gap: The time interval between polling. + raise_error: Flag to raise an error if the task fails. + add_task_service: Flag to add the task service to the external ID. + Returns: + Task data. 
+ """ + api_client = get_pc_api_client(module=module) + tasks = ntnx_prism_py_client.TasksApi(api_client=api_client) + + # add encoded service prefix to create complete task external ID + if add_task_service: + if task_service: + ext_id = base64.b64encode(task_service.encode()).decode() + ":" + ext_id + else: + ext_id = ( + base64.b64encode(Tasks.TASK_SERVICE.encode()).decode() + ":" + ext_id + ) + + status = "" + + timeout_time = None + if module.params.get("timeout", None): + timeout_time = time.time() + module.params.get("timeout") + + while status != "SUCCEEDED": + task = tasks.get_task_by_id(ext_id).data + # convert to dict to output as module output in case of errors + resp = deepcopy(task) + if not isinstance(resp, dict): + resp = resp.to_dict() + + status = resp["status"] + + if not status: + module.fail_json( + msg="Unable to fetch task status", + response=strip_internal_attributes(resp), + ) + if status == "FAILED": + if not raise_error: + break + module.fail_json( + msg="Task Failed", + response=strip_internal_attributes(resp), + ) + time.sleep(polling_gap) + if timeout_time: + if time.time() > timeout_time: + module.fail_json( + msg="Timeout Error: Task did not complete in time", + response=strip_internal_attributes(resp), + ) + + return task + + +def get_entity_ext_id_from_task(data, rel=None): + """ + Get the external ID of an entity from a task. + Args: + data: The task data. + rel: Entity type identified as 'namespace:module[:submodule]:entityType' + Returns: + The external ID of the entity, or None if not found. + """ + entities_affected = getattr(data, "entities_affected", []) + if not entities_affected: + return None + + ext_id = None + for entity in entities_affected: + if rel: + if entity.rel == rel: + ext_id = entity.ext_id + break + else: + ext_id = entity.ext_id + break + + return ext_id + + +def get_ext_id_from_task_completion_details(data, name=None): + """ + Get the external ID of an entity from a task info. 
+ Args: + data(object): The task info object. + name(str): Name of the entity. + Returns: + The external ID of the entity, or None if not found. + """ + completion_details = getattr(data, "completion_details", []) + if not completion_details: + return None + + ext_id = None + for entity in completion_details: + if name: + if entity.name == name: + ext_id = entity.value + break + else: + ext_id = entity.value + break + + return ext_id + + +def wait_for_entity_ext_id_in_task(module, ext_id, rel, time_out=300): + """ + Wait for an entity external ID in a task. + Args: + module: The Ansible module. + ext_id: The external ID of the task. + rel: Entity type identified as 'namespace:module[:submodule]:entityType' + Returns: + ext_id(str): The external ID of the entity. + err(str): Error message. + """ + data = wait_for_completion(module=module, ext_id=ext_id) + err = None + while time_out > 0: + entity_ext_id = get_entity_ext_id_from_task(data=data, rel=rel) + if entity_ext_id: + return entity_ext_id, err + time.sleep(2) + time_out -= 2 + data = wait_for_completion(module=module, ext_id=ext_id) + err = "Timeout Error: Timeout while waiting for ext_id of entity to come in task" + return None, err diff --git a/plugins/module_utils/v4/sdk_mock.py b/plugins/module_utils/v4/sdk_mock.py new file mode 100644 index 000000000..b60bb595c --- /dev/null +++ b/plugins/module_utils/v4/sdk_mock.py @@ -0,0 +1,20 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class MockSDK(object): + def __getattribute__(self, item): + try: + return object.__getattribute__(self, item) + except AttributeError: + return None + + +mock_sdk = MockSDK() diff --git a/plugins/module_utils/v4/spec_generator.py b/plugins/module_utils/v4/spec_generator.py new file mode 100644 index 
000000000..12e3de1cd --- /dev/null +++ b/plugins/module_utils/v4/spec_generator.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import copy + + +class SpecGenerator(object): + module = None + + def __init__(self, module): + self.module = module + + def generate_spec(self, obj, attr=None, **kwargs): + """ + This method will populate spec obj as per input attrs + Args: + obj (object): spec object + attr (dict): Optional params for generating spec. By default module.params will be used + kwargs (dict): Keyword arguments + Returns: + spec (object): spec object + error (str): error string if any + """ + + if not isinstance(obj, object): + return None, "Spec object is of invalid type" + + module_args = kwargs.get( + "module_args", self.module.argument_spec_with_extra_keys + ) + + if not attr: + attr = copy.deepcopy(self.module.params) + + # Resolve each input param w.r.t its module argument spec + for key, schema in module_args.items(): + + if key in attr and hasattr(obj, key): + + attr_type = schema.get("type") + if not attr_type: + return ( + None, + "Invalid module argument: 'type' is required parameter for attribute {0}".format( + key + ), + ) + + options = schema.get("options") + kwargs["module_args"] = options + + _obj = schema.get("obj") + + # for attributes with dynamic obj + if type(_obj) is dict: + + for dynamic_obj_key, dynamic_obj_value in attr[key].items(): + + _obj = _obj.get(dynamic_obj_key) + attr[key] = dynamic_obj_value + kwargs["module_args"] = options.get(dynamic_obj_key).get( + "options" + ) + break + + elements = schema.get("elements") + + # for dict type attribute, recursively create spec objects + if attr_type == "dict" and options is not None and _obj is not None: + + # check if spec obj exist else create a new 
obj to populate spec + o = getattr(obj, key) + if not o: + o = _obj() + + s, err = self.generate_spec(obj=o, attr=attr[key], **kwargs) + if err: + return None, err + setattr(obj, key, s) + + # for list type attribute, create list of spec objects recursively + elif ( + attr_type == "list" + and elements == "dict" + and options is not None + and _obj is not None + ): + lst = [] + for item in attr[key]: + s, err = self.generate_spec(obj=_obj(), attr=item, **kwargs) + if err: + return None, err + lst.append(s) + setattr(obj, key, lst) + + # for other types directly assign + else: + setattr(obj, key, attr[key]) + + return obj, None + + def get_info_spec(self, attr=None, extra_params=None): + + if not attr: + attr = copy.deepcopy(self.module.params) + spec = {} + all_params = ["page", "limit", "filter", "orderby", "select"] + if extra_params is None: + extra_params = [] + all_params.extend(extra_params) + if attr.get("name"): + _filter = attr.get("filter") + if _filter: + _filter += f"""and name eq '{attr["name"]}'""" + else: + _filter = f"""name eq '{attr["name"]}'""" + attr["filter"] = _filter + for key, val in attr.items(): + if key in all_params: + spec[f"_{key}"] = val + return spec, None + + def get_stats_spec(self, attr=None): + attribute_map = { + "start_time": "_startTime", + "end_time": "_endTime", + "sampling_interval": "_samplingInterval", + "stat_type": "_statType", + } + if not attr: + attr = copy.deepcopy(self.module.params) + spec = {} + for key, val in attr.items(): + if key in attribute_map: + spec[attribute_map[key]] = val + return spec, None + + def map_bytes_to_mb(self, mb, byte): + if self.module.params.get(mb): + self.module.params[byte] = self.module.params[mb] * 1024 * 1024 diff --git a/plugins/module_utils/v4/utils.py b/plugins/module_utils/v4/utils.py new file mode 100644 index 000000000..ff5df56cf --- /dev/null +++ b/plugins/module_utils/v4/utils.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import json + +__metaclass__ = type + + +def strip_internal_attributes(data, exclude_attributes=None): + """ + This method will remove v4 api internal fields like _reserved, _object_type and _unknown_fields + from given data. + Args: + data (dict): v4 api data + exclude_attributes (list): list of attributes that need to exclude + """ + internal_attributes = [ + "_object_type", + "_reserved", + "_unknown_fields", + "$dataItemDiscriminator", + ] + if exclude_attributes is None: + exclude_attributes = [] + if isinstance(data, dict): + for attr in internal_attributes: + if attr in data and attr not in exclude_attributes: + data.pop(attr) + + for key, val in data.items(): + if isinstance(val, dict): + strip_internal_attributes(val, exclude_attributes) + elif isinstance(val, list) and val and isinstance(val[0], dict): + for item in val: + strip_internal_attributes(item, exclude_attributes) + elif isinstance(data, list): + for item in data: + strip_internal_attributes(item, exclude_attributes) + + return data + + +def raise_api_exception(module, exception, msg=None): + """ + This routine raise module failure as per exception + Args: + module (AnsibleModule): certain ansible module + exception(ApiException): api exception object + msg (str): error message + """ + kwargs = { + "msg": msg, + "status": getattr(exception, "status", ""), + "error": getattr(exception, "reason", ""), + } + if getattr(exception, "body", None): + kwargs["response"] = json.loads(exception.body) + else: + kwargs["response"] = str(exception) + module.fail_json(**kwargs) + + +def strip_antivirus_extra_attributes(obj): + """ + This method will remove antivirus object's extra fields from given object. 
+ Args: + obj (object): antivirus object + Returns: + object: antivirus object with stripped extra fields + """ + + extra_fields = ["connection_status", "partner"] + + for field in extra_fields: + setattr(obj, field, None) + + return obj + + +def strip_users_empty_attributes(obj): + """ + This method will remove empty attributes from given user object. + Args: + obj (object): user object + Returns: + object: user object with stripped empty attributes + """ + exclude = ["password"] + + for key, value in obj.to_dict().items(): + if value == "" and key not in exclude: + setattr(obj, key, None) + + +def remove_empty_ip_config(obj): + """ + This method will remove empty ip_config from given object. + Args: + obj (object): object + Returns: + object: object with stripped empty ip_config + """ + internal_attributes = [ + "_object_type", + "_reserved", + "_unknown_fields", + "$dataItemDiscriminator", + ] + + ip_config = obj.to_dict().get("ip_config", []) + empty_ipv4 = False + empty_ipv6 = False + for item in ip_config.copy(): + if not item.get("ipv4") or all( + value is None + for key, value in item["ipv4"].items() + if key not in internal_attributes + ): + empty_ipv4 = True + if not item.get("ipv6") or all( + value is None + for key, value in item["ipv6"].items() + if key not in internal_attributes + ): + empty_ipv6 = True + + if empty_ipv6 and empty_ipv4: + ip_config.remove(item) + setattr(obj, "ip_config", ip_config) diff --git a/plugins/module_utils/v4/vmm/api_client.py b/plugins/module_utils/v4/vmm/api_client.py new file mode 100644 index 000000000..0a4ab16dc --- /dev/null +++ b/plugins/module_utils/v4/vmm/api_client.py @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from 
ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. + """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + config = ntnx_vmm_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_vmm_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_vm_api_instance(module): + """ + This method will return VMM API instance + Args: + api_instance (obj): v4 VMM api instance + """ + api_client = get_api_client(module) + return ntnx_vmm_py_client.VmApi(api_client=api_client) + + +def get_image_api_instance(module): + """ + This method will return Image API instance + Args: + api_instance (obj): v4 Image api instance + """ + api_client = get_api_client(module) + return ntnx_vmm_py_client.ImagesApi(api_client=api_client) + + +def get_image_placement_policy_api_instance(module): + """ + This method will return Image Placement policy API instance + Args: + api_instance (obj): v4 Image placement policy api instance + """ + api_client = get_api_client(module) + return ntnx_vmm_py_client.ImagePlacementPoliciesApi(api_client=api_client) 
+ + +def get_templates_api_instance(module): + """ + This method will return Templates API instance + Args: + api_instance (obj): v4 Templates api instance + """ + api_client = get_api_client(module) + return ntnx_vmm_py_client.TemplatesApi(api_client=api_client) + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + """ + return ntnx_vmm_py_client.ApiClient.get_etag(data) diff --git a/plugins/module_utils/v4/vmm/helpers.py b/plugins/module_utils/v4/vmm/helpers.py new file mode 100644 index 000000000..3f9a3e1a1 --- /dev/null +++ b/plugins/module_utils/v4/vmm/helpers.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_vm(module, api_instance, ext_id): + """ + Get VM by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of VM + Returns: + vm (obj): VM info object + """ + try: + return api_instance.get_vm_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching Vms info using ext_id", + ) + + +def get_nic(module, api_instance, ext_id, vm_ext_id): + """ + Get NIC by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of NIC + vm_ext_id: ext_id of VM + Returns: + nic (obj): NIC info object + """ + try: + return api_instance.get_nic_by_id(vmExtId=vm_ext_id, extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm nic info using ext_id", + ) + + +def get_ngt_status(module, api_instance, vm_ext_id): + """ 
+ Get NGT info by vm ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + vm_ext_id: ext_id of VM + Returns: + ngt (obj): NGT info object + """ + try: + return api_instance.get_guest_tools_by_id(extId=vm_ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching NGT info for given vm", + ) + + +def get_disk(module, api_instance, ext_id, vm_ext_id): + """ + Get Disk by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of Disk + vm_ext_id: ext_id of VM + Returns: + disk (obj): Disk info object + """ + try: + return api_instance.get_disk_by_id(vmExtId=vm_ext_id, extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm disk info using ext_id", + ) + + +def get_serial_port(module, api_instance, ext_id, vm_ext_id): + """ + Get Serial Port by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of Serial Port + vm_ext_id: ext_id of VM + Returns: + serial_port (obj): Serial Port info object + """ + try: + return api_instance.get_serial_port_by_id(vmExtId=vm_ext_id, extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm serial port info using ext_id", + ) + + +def get_template(module, api_instance, ext_id): + """ + Get Template by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of Template + Returns: + template (obj): Template info object + """ + try: + return api_instance.get_template_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching template info using ext_id", + ) + + +def 
get_cd_rom(module, api_instance, ext_id, vm_ext_id): + """ + Get CD ROM by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of CD ROM + vm_ext_id: ext_id of VM + Returns: + cd_rom (obj): CD ROM info object + """ + try: + return api_instance.get_cd_rom_by_id(vmExtId=vm_ext_id, extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm cd rom info using ext_id", + ) + + +def get_gpu(module, api_instance, ext_id, vm_ext_id): + """ + Get GPU by ext_id + Args: + module: Ansible module + api_instance: VmApi instance from ntnx_vmm_py_client sdk + ext_id: ext_id of GPU + vm_ext_id: ext_id of VM + Returns: + gpu (obj): GPU info object + """ + try: + return api_instance.get_gpu_by_id(vmExtId=vm_ext_id, extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching GPU info using ext_id", + ) diff --git a/plugins/module_utils/v4/vmm/spec/vms.py b/plugins/module_utils/v4/vmm/spec/vms.py new file mode 100644 index 000000000..4d9b39483 --- /dev/null +++ b/plugins/module_utils/v4/vmm/spec/vms.py @@ -0,0 +1,504 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from copy import deepcopy + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ...sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +class VmSpecs: + """Module specs related to Vms""" + + # Allowed object types for variouse attributes + + disk_backing_info_allowed_types = { + "vm_disk": vmm_sdk.AhvConfigVmDisk, + "adsf_volume_group": 
vmm_sdk.ADSFVolumeGroupReference, + } + + guest_customization_param_allowed_types = { + "sysprep": vmm_sdk.Sysprep, + "cloudinit": vmm_sdk.CloudInit, + } + + sysprep_param_allowed_types = { + "unattendxml": vmm_sdk.Unattendxml, + "custom_key_values": vmm_sdk.CustomKeyValues, + } + + cloud_init_script_allowed_types = { + "user_data": vmm_sdk.Userdata, + "custom_key_values": vmm_sdk.CustomKeyValues, + } + + boot_device_allowed_types = { + "boot_device_disk": vmm_sdk.BootDeviceDisk, + "boot_device_nic": vmm_sdk.BootDeviceNic, + } + + data_source_reference_allowed_types = { + "image_reference": vmm_sdk.ImageReference, + "vm_disk_reference": vmm_sdk.VmDiskReference, + } + + boot_config_allowed_types = { + "legacy_boot": vmm_sdk.LegacyBoot, + "uefi_boot": vmm_sdk.UefiBoot, + } + + reference_spec = dict( + ext_id=dict(type="str", required=True), + ) + + disk_storage_config_spec = dict(is_flash_mode_enabled=dict(type="bool")) + + image_reference_spec = dict( + image_ext_id=dict(type="str"), + ) + + disk_address_spec = dict( + bus_type=dict( + type="str", choices=["SCSI", "IDE", "PCI", "SATA", "SPAPR"], required=True + ), + index=dict(type="int"), + ) + + vm_disk_reference_spec = dict( + disk_ext_id=dict(type="str"), + disk_address=dict( + type="dict", options=disk_address_spec, obj=vmm_sdk.AhvConfigDiskAddress + ), + vm_reference=dict( + type="dict", options=reference_spec, obj=vmm_sdk.AhvConfigVmReference + ), + ) + + data_source_reference_spec = dict( + image_reference=dict(type="dict", options=image_reference_spec), + vm_disk_reference=dict(type="dict", options=vm_disk_reference_spec), + ) + + data_source_spec = dict( + reference=dict( + type="dict", + options=data_source_reference_spec, + obj=data_source_reference_allowed_types, + mutually_exclusive=[("image_reference", "vm_disk_reference")], + ), + ) + + vm_disk_spec = dict( + disk_size_bytes=dict(type="int"), + storage_container=dict( + type="dict", + options=reference_spec, + 
obj=vmm_sdk.AhvConfigVmDiskContainerReference, + ), + storage_config=dict( + type="dict", + options=disk_storage_config_spec, + obj=vmm_sdk.AhvConfigVmDiskStorageConfig, + ), + data_source=dict(type="dict", options=data_source_spec, obj=vmm_sdk.DataSource), + ) + + adsf_volume_group_spec = dict( + volume_group_ext_id=dict(type="str"), + ) + + disk_backing_info_spec = dict( + vm_disk=dict(type="dict", options=vm_disk_spec), + adsf_volume_group=dict(type="dict", options=adsf_volume_group_spec), + ) + + disk_spec = dict( + backing_info=dict( + type="dict", + options=disk_backing_info_spec, + obj=disk_backing_info_allowed_types, + mutually_exclusive=[("vm_disk", "adsf_volume_group")], + ), + disk_address=dict( + type="dict", options=disk_address_spec, obj=vmm_sdk.AhvConfigDiskAddress + ), + ) + + cd_rom_address_spec = dict( + bus_type=dict(type="str", choices=["IDE", "SATA"]), index=dict(type="int") + ) + + cd_rom_spec = dict( + backing_info=dict( + type="dict", options=vm_disk_spec, obj=vmm_sdk.AhvConfigVmDisk + ), + disk_address=dict( + type="dict", options=cd_rom_address_spec, obj=vmm_sdk.AhvConfigCdRomAddress + ), + ) + + ip_address_sub_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int"), + ) + + ipv4_config_spec = dict( + should_assign_ip=dict(type="bool"), + ip_address=dict( + type="dict", options=ip_address_sub_spec, obj=vmm_sdk.IPv4Address + ), + secondary_ip_address_list=dict( + type="list", + elements="dict", + options=ip_address_sub_spec, + obj=vmm_sdk.IPv4Address, + ), + ) + + nic_backup_info_spec = dict( + model=dict(type="str", choices=["VIRTIO", "E1000"]), + mac_address=dict(type="str"), + is_connected=dict(type="bool"), + num_queues=dict(type="int"), + ) + + nic_info_spec = dict( + nic_type=dict( + type="str", + choices=[ + "NORMAL_NIC", + "DIRECT_NIC", + "NETWORK_FUNCTION_NIC", + "SPAN_DESTINATION_NIC", + ], + ), + network_function_chain=dict( + type="dict", + options=reference_spec, + 
obj=vmm_sdk.NetworkFunctionChainReference, + ), + network_function_nic_type=dict( + type="str", choices=["INGRESS", "EGRESS", "TAP"] + ), + subnet=dict(type="dict", options=reference_spec, obj=vmm_sdk.SubnetReference), + vlan_mode=dict(type="str", choices=["ACCESS", "TRUNK"]), + trunked_vlans=dict(type="list", elements="int"), + should_allow_unknown_macs=dict(type="bool"), + ipv4_config=dict(type="dict", options=ipv4_config_spec, obj=vmm_sdk.Ipv4Config), + ) + + nic_spec = dict( + backing_info=dict( + type="dict", options=nic_backup_info_spec, obj=vmm_sdk.EmulatedNic + ), + network_info=dict( + type="dict", options=nic_info_spec, obj=vmm_sdk.AhvConfigNicNetworkInfo + ), + ) + + unattendxml_spec = dict( + value=dict(type="str"), + ) + + kvpair_spec = dict(name=dict(type="str"), value=dict(type="raw", no_log=False)) + + custom_key_values_spec = dict( + key_value_pairs=dict( + type="list", + elements="dict", + options=kvpair_spec, + obj=vmm_sdk.KVPair, + no_log=False, + ), + ) + + sysprep_param_spec = dict( + unattendxml=dict( + type="dict", options=unattendxml_spec, obj=vmm_sdk.Unattendxml + ), + custom_key_values=dict( + type="dict", + options=custom_key_values_spec, + obj=vmm_sdk.CustomKeyValues, + no_log=False, + ), + ) + + sysprep_spec = dict( + install_type=dict(type="str", choices=["FRESH", "PREPARED"]), + sysprep_script=dict( + type="dict", + options=sysprep_param_spec, + no_log=False, + obj=sysprep_param_allowed_types, + mutually_exclusive=[("unattendxml", "custom_key_values")], + ), + ) + + user_data = dict( + value=dict(type="str", required=True), + ) + + cloud_init_script = dict( + user_data=dict(type="dict", options=user_data, obj=vmm_sdk.Userdata), + custom_key_values=dict( + type="dict", + options=custom_key_values_spec, + obj=vmm_sdk.CustomKeyValues, + no_log=False, + ), + ) + + cloudinit_spec = dict( + datasource_type=dict(type="str", choices=["CONFIG_DRIVE_V2"]), + metadata=dict(type="str"), + cloud_init_script=dict( + type="dict", + 
options=cloud_init_script, + obj=cloud_init_script_allowed_types, + mutually_exclusive=[("user_data", "custom_key_values")], + ), + ) + + guest_customization_param_spec = dict( + sysprep=dict(type="dict", options=sysprep_spec, obj=vmm_sdk.Sysprep), + cloudinit=dict(type="dict", options=cloudinit_spec, obj=vmm_sdk.CloudInit), + ) + + guest_customization_spec = dict( + config=dict( + type="dict", + options=guest_customization_param_spec, + obj=guest_customization_param_allowed_types, + mutually_exclusive=[("sysprep", "cloudinit")], + ), + ) + + boot_device_disk_spec = dict( + disk_address=dict( + type="dict", options=disk_address_spec, obj=vmm_sdk.AhvConfigDiskAddress + ), + ) + + boot_device_nic_spec = dict( + mac_address=dict(type="str"), + ) + + boot_device_spec = dict( + boot_device_disk=dict(type="dict", options=boot_device_disk_spec), + boot_device_nic=dict(type="dict", options=boot_device_nic_spec), + ) + + legacy_boot_spec = dict( + boot_device=dict( + type="dict", + options=boot_device_spec, + obj=boot_device_allowed_types, + mutually_exclusive=[("boot_device_disk", "boot_device_nic")], + ), + boot_order=dict( + type="list", elements="str", choices=["CDROM", "NETWORK", "DISK"] + ), + ) + + nvram_device_spec = dict( + backing_storage_info=dict( + type="dict", options=vm_disk_spec, obj=vmm_sdk.AhvConfigVmDisk + ), + ) + + uefi_boot_spec = dict( + is_secure_boot_enabled=dict(type="bool"), + nvram_device=dict( + type="dict", options=nvram_device_spec, obj=vmm_sdk.NvramDevice + ), + ) + + boot_config_spec = dict( + legacy_boot=dict(type="dict", options=legacy_boot_spec), + uefi_boot=dict(type="dict", options=uefi_boot_spec), + ) + + vtpm_config_spec = dict( + is_vtpm_enabled=dict(type="bool"), + version=dict(type="str"), + ) + + cpu_model_reference_spec = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + ) + + apc_config_spec = dict( + is_apc_enabled=dict(type="bool"), + cpu_model=dict( + type="dict", options=cpu_model_reference_spec, 
obj=vmm_sdk.CpuModelReference + ), + ) + + qos_config_spec = dict( + throttled_iops=dict(type="int"), + ) + + storage_config_spec = dict( + is_flash_mode_enabled=dict(type="bool"), + qos_config=dict(type="dict", options=qos_config_spec, obj=vmm_sdk.QosConfig), + ) + + pci_address_spec = dict( + segment=dict(type="int"), + bus=dict(type="int"), + device=dict(type="int"), + func=dict(type="int"), + ) + + gpu_spec = dict( + name=dict(type="str"), + mode=dict( + type="str", + choices=["PASSTHROUGH_GRAPHICS", "PASSTHROUGH_COMPUTE", "VIRTUAL"], + ), + device_id=dict(type="int"), + vendor=dict(type="str", choices=["NVIDIA", "INTEL", "AMD"]), + pci_address=dict(type="dict", options=pci_address_spec, obj=vmm_sdk.SBDF), + ) + + serial_port_spec = dict( + ext_id=dict(type="str"), + is_connected=dict(type="bool"), + index=dict(type="int"), + ) + + guest_tools_spec = dict( + is_enabled=dict(type="bool"), + capabilities=dict( + type="list", + elements="str", + choices=["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"], + ), + ) + + vm_spec = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + num_sockets=dict(type="int"), + num_cores_per_socket=dict(type="int"), + num_threads_per_core=dict(type="int"), + num_numa_nodes=dict(type="int"), + memory_size_bytes=dict(type="int"), + is_vcpu_hard_pinning_enabled=dict(type="bool"), + is_cpu_passthrough_enabled=dict(type="bool"), + enabled_cpu_features=dict( + type="list", elements="str", choices=["HARDWARE_VIRTUALIZATION"] + ), + is_memory_overcommit_enabled=dict(type="bool"), + is_gpu_console_enabled=dict(type="bool"), + categories=dict( + type="list", + elements="dict", + options=reference_spec, + obj=vmm_sdk.AhvConfigCategoryReference, + ), + cluster=dict( + type="dict", options=reference_spec, obj=vmm_sdk.AhvConfigClusterReference + ), + availability_zone=dict( + type="dict", options=reference_spec, obj=vmm_sdk.AvailabilityZoneReference + ), + guest_customization=dict( + type="dict", + 
options=guest_customization_spec, + obj=vmm_sdk.GuestCustomizationParams, + ), + guest_tools=dict(type="dict", options=guest_tools_spec, obj=vmm_sdk.GuestTools), + hardware_clock_timezone=dict(type="str"), + is_branding_enabled=dict(type="bool"), + boot_config=dict( + type="dict", + options=boot_config_spec, + obj=boot_config_allowed_types, + mutually_exclusive=[("legacy_boot", "uefi_boot")], + ), + is_vga_console_enabled=dict(type="bool"), + machine_type=dict(type="str", choices=["PC", "PSERIES", "Q35"]), + vtpm_config=dict(type="dict", options=vtpm_config_spec, obj=vmm_sdk.VtpmConfig), + is_agent_vm=dict(type="bool"), + apc_config=dict(type="dict", options=apc_config_spec, obj=vmm_sdk.ApcConfig), + storage_config=dict( + type="dict", options=storage_config_spec, obj=vmm_sdk.ADSFVmStorageConfig + ), + disks=dict( + type="list", elements="dict", options=disk_spec, obj=vmm_sdk.AhvConfigDisk + ), + cd_roms=dict( + type="list", + elements="dict", + options=cd_rom_spec, + obj=vmm_sdk.AhvConfigCdRom, + ), + nics=dict( + type="list", elements="dict", options=nic_spec, obj=vmm_sdk.AhvConfigNic + ), + gpus=dict(type="list", elements="dict", options=gpu_spec, obj=vmm_sdk.Gpu), + serial_ports=dict( + type="list", + elements="dict", + options=serial_port_spec, + obj=vmm_sdk.SerialPort, + ), + ) + + @classmethod + def get_vm_spec(cls): + return deepcopy(cls.vm_spec) + + @classmethod + def get_nic_spec(cls): + return deepcopy(cls.nic_spec) + + @classmethod + def get_gc_spec(cls): + return deepcopy(cls.guest_customization_spec) + + @classmethod + def get_boot_config_spec(cls): + return deepcopy(cls.boot_config_spec) + + @classmethod + def get_boot_config_allowed_types(cls): + return deepcopy(cls.boot_config_allowed_types) + + @classmethod + def get_disk_spec(cls): + return deepcopy(cls.disk_spec) + + @classmethod + def get_cd_rom_spec(cls): + return deepcopy(cls.cd_rom_spec) + + @classmethod + def get_vm_disk_spec(cls): + return deepcopy(cls.vm_disk_spec) + + @classmethod + def 
get_gc_param_spec(cls): + return deepcopy(cls.guest_customization_param_spec) + + @classmethod + def get_gc_allowed_types_spec(cls): + return deepcopy(cls.guest_customization_param_allowed_types) diff --git a/plugins/module_utils/v4/volumes/api_client.py b/plugins/module_utils/v4/volumes/api_client.py new file mode 100644 index 000000000..f644af1cf --- /dev/null +++ b/plugins/module_utils/v4/volumes/api_client.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from base64 import b64encode + +from ansible.module_utils.basic import missing_required_lib + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client +except ImportError: + SDK_IMP_ERROR = traceback.format_exc() + + +def get_api_client(module): + """ + This method will return client to be used in api connection using + given connection details. 
+ """ + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + config = ntnx_volumes_py_client.Configuration() + config.host = module.params.get("nutanix_host") + config.port = module.params.get("nutanix_port") + config.username = module.params.get("nutanix_username") + config.password = module.params.get("nutanix_password") + config.verify_ssl = module.params.get("validate_certs") + client = ntnx_volumes_py_client.ApiClient(configuration=config) + + cred = "{0}:{1}".format(config.username, config.password) + try: + encoded_cred = b64encode(bytes(cred, encoding="ascii")).decode("ascii") + except BaseException: + encoded_cred = b64encode(bytes(cred).encode("ascii")).decode("ascii") + auth_header = "Basic " + encoded_cred + client.add_default_header(header_name="Authorization", header_value=auth_header) + return client + + +def get_etag(data): + """ + This method will fetch etag from a v4 api response. + Args: + data (dict): v4 api response + """ + return ntnx_volumes_py_client.ApiClient.get_etag(data) + + +def get_vg_api_instance(module): + """ + This method will return VolumeGroupApi instance. + """ + client = get_api_client(module) + return ntnx_volumes_py_client.VolumeGroupsApi(api_client=client) + + +def get_iscsi_client_api_instance(module): + """ + This method will return IscsiClientApi instance. 
+ """ + client = get_api_client(module) + return ntnx_volumes_py_client.IscsiClientsApi(api_client=client) diff --git a/plugins/module_utils/v4/volumes/helpers.py b/plugins/module_utils/v4/volumes/helpers.py new file mode 100644 index 000000000..ff1cde4f4 --- /dev/null +++ b/plugins/module_utils/v4/volumes/helpers.py @@ -0,0 +1,31 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ..utils import raise_api_exception # noqa: E402 + + +def get_volume_group(module, api_instance, ext_id): + """ + Get volume group by ext_id + Args: + module: Ansible module + api_instance: VolumeGroupApi instance from ntnx_volumes_py_client sdk + ext_id: ext_id of volume group + Returns: + vg (obj): VolumeGroup info object + """ + try: + return api_instance.get_volume_group_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching Volume group info using ext_id", + ) diff --git a/plugins/module_utils/v4/volumes/spec/volume_group.py b/plugins/module_utils/v4/volumes/spec/volume_group.py new file mode 100644 index 000000000..a9a4a522f --- /dev/null +++ b/plugins/module_utils/v4/volumes/spec/volume_group.py @@ -0,0 +1,79 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import traceback +from copy import deepcopy + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client as volumes_sdk # noqa: E402 +except ImportError: + + from ...sdk_mock import mock_sdk as volumes_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +class VGSpecs: + """Module specs 
related to Volume Groups""" + + flash_mode = dict( + is_enabled=dict(type="bool", required=True), + ) + storage_features = dict( + flash_mode=dict( + type="dict", options=flash_mode, required=True, obj=volumes_sdk.FlashMode + ) + ) + iscsi_features = dict( + target_secret=dict(type="str", required=True, no_log=True), + enabled_authentications=dict( + type="str", choices=["CHAP", "NONE"], required=False + ), + ) + volume_group = dict( + ext_id=dict(type="str", required=False), + name=dict(type="str", required=False), + description=dict(type="str", required=False), + should_load_balance_vm_attachments=dict(type="bool", required=False), + sharing_status=dict( + type="str", choices=["SHARED", "NOT_SHARED"], required=False + ), + target_prefix=dict(type="str", required=False), + target_name=dict(type="str", required=False), + enabled_authentications=dict( + type="str", choices=["CHAP", "NONE"], required=False + ), + cluster_reference=dict(type="str", required=False), + usage_type=dict( + type="str", choices=["BACKUP_TARGET", "INTERNAL", "TEMPORARY", "USER"] + ), + is_hidden=dict(type="bool", required=False), + storage_features=dict( + type="dict", + required=False, + options=storage_features, + obj=volumes_sdk.StorageFeatures, + ), + iscsi_features=dict( + type="dict", + required=False, + options=iscsi_features, + obj=volumes_sdk.IscsiFeatures, + ), + ) + + @classmethod + def get_volume_group_spec(cls): + return deepcopy(cls.volume_group) + + @classmethod + def get_storage_features_spec(cls): + return deepcopy(cls.storage_features) diff --git a/plugins/modules/ntnx_acps.py b/plugins/modules/ntnx_acps.py index 04f1f1f56..51e4aca05 100644 --- a/plugins/modules/ntnx_acps.py +++ b/plugins/modules/ntnx_acps.py @@ -117,7 +117,7 @@ EXAMPLES = r""" - name: Create min ACP ntnx_acps: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -129,7 +129,7 @@ - name: Create ACP with user reference ntnx_acps: - 
validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -142,7 +142,7 @@ - name: Create ACP with user ad user group reference ntnx_acps: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -157,7 +157,7 @@ - name: Create ACP with all specfactions ntnx_acps: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -303,9 +303,9 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.acps import ACP # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.acps import ACP # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_acps_info.py b/plugins/modules/ntnx_acps_info.py index 02362e3d3..dadc0bb63 100644 --- a/plugins/modules/ntnx_acps_info.py +++ b/plugins/modules/ntnx_acps_info.py @@ -39,28 +39,28 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List acp using name filter criteria - ntnx_acps_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: "{{ acp.name }}" - kind: access_control_policy - register: result - - - name: List acp using length, offset, sort order and name sort attribute - ntnx_acps_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 1 - offset: 1 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result +- name: List acp using name filter criteria + ntnx_acps_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password 
}}" + validate_certs: false + filter: + name: "{{ acp.name }}" + kind: access_control_policy + register: result + +- name: List acp using length, offset, sort order and name sort attribute + ntnx_acps_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 1 + offset: 1 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result """ RETURN = r""" api_version: @@ -193,9 +193,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.acps import ACP # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.acps import ACP # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_address_groups.py b/plugins/modules/ntnx_address_groups.py index 55b950c73..9cffc125d 100644 --- a/plugins/modules/ntnx_address_groups.py +++ b/plugins/modules/ntnx_address_groups.py @@ -82,7 +82,6 @@ - network_ip: "10.1.2.2" network_prefix: 32 register: result - - name: delete address group ntnx_address_groups: nutanix_host: @@ -118,7 +117,7 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.address_groups import AddressGroup # noqa: E402 +from ..module_utils.v3.prism.address_groups import AddressGroup # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_address_groups_info.py b/plugins/modules/ntnx_address_groups_info.py index 7c4bafe10..6290d3c93 100644 --- a/plugins/modules/ntnx_address_groups_info.py +++ b/plugins/modules/ntnx_address_groups_info.py @@ -97,9 +97,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.address_groups import AddressGroup # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from 
..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.address_groups import AddressGroup # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_address_groups_info_v2.py b/plugins/modules/ntnx_address_groups_info_v2.py new file mode 100644 index 000000000..72ba19abf --- /dev/null +++ b/plugins/modules/ntnx_address_groups_info_v2.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_address_groups_info_v2 +short_description: Get address groups info +version_added: 2.0.0 +description: + - Fetch specific address group info using external ID + - Fetch list of multiple address groups info if external ID is not provided with optional filters + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External ID to fetch specific address group info + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: Fetch address groups using external id + nutanix.ncp.ntnx_address_groups_info_v2: + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "{{ test_address_groups_uuid }}" + register: result + +- name: List all address groups + nutanix.ncp.ntnx_address_groups_info_v2: + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + register: result +""" +RETURN = r""" +response: + description: + - Response for fetching address groups info + - One address group info if External ID is provided + - 
List of multiple address groups info if External ID is not provided + returned: always + type: dict + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "description": "test-ansible-group-3-desc", + "ext_id": "63311404-8b2e-4dbf-9e33-7848cc88d330", + "ip_ranges": null, + "ipv4_addresses": [ + { + "prefix_length": 32, + "value": "10.1.4.1" + } + ], + "links": null, + "name": "yclaDaQKtEGIansible-ag1", + "policy_references": null, + "tenant_id": null + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_address_groups_api_instance, +) +from ..module_utils.v4.flow.helpers import get_address_group # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_address_group_using_ext_id(module, result): + ext_id = module.params.get("ext_id") + address_groups = get_address_groups_api_instance(module) + resp = get_address_group(module, address_groups, ext_id) + result["ext_id"] = ext_id + result["response"] = 
strip_internal_attributes(resp.to_dict()) + + +def get_address_groups(module, result): + address_groups = get_address_groups_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating address groups info Spec", **result) + + try: + resp = address_groups.list_address_groups(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching address groups info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_address_group_using_ext_id(module, result) + else: + get_address_groups(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_address_groups_v2.py b/plugins/modules/ntnx_address_groups_v2.py new file mode 100644 index 000000000..d0353cc3f --- /dev/null +++ b/plugins/modules/ntnx_address_groups_v2.py @@ -0,0 +1,392 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_address_groups_v2 +short_description: Create, Update, Delete address groups +version_added: 2.0.0 +description: + - Create, Update, Delete address groups + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - State of the address group, whether to 
create, update or delete. + - present -> Create address group if external ID is not provided, Update address group if external ID is provided. + - absent -> Delete address group with the given external ID. + type: str + choices: ['present', 'absent'] + + ext_id: + description: + - Address group External ID. + - Required for updating or deleting address group. + type: str + + name: + description: + - Address group name. + type: str + + description: + description: + - Address group description. + type: str + + ipv4_addresses: + description: List of IPv4 addresses. + type: list + elements: dict + suboptions: + value: + description: The IPv4 address value. + type: str + prefix_length: + description: The prefix length of the IPv4 address. + type: int + ip_ranges: + description: List of IP ranges. + type: list + elements: dict + suboptions: + start_ip: + description: Starting IP address of the range. + type: str + end_ip: + description: Ending IP address of the range. + type: str + wait: + description: + - Wait for the task to complete + type: bool + default: true + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create address group + nutanix.ncp.ntnx_address_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + name: "{{ag1}}" + description: test-ansible-group-1-desc + ipv4_addresses: + - value: "10.1.1.0" + prefix_length: 24 + - value: "10.1.2.2" + prefix_length: 32 + +- name: delete address group + nutanix.ncp.ntnx_address_groups_v2: + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "{{ todelete }}" +""" + +RETURN = r""" +response: + description: + - Response for address groups operations + - Address group 
details if C(wait) is True + - Task details if C(wait) is False + returned: always + type: dict + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "description": "test-ansible-group-1-desc", + "ext_id": "63311404-8b2e-4dbf-9e33-7848cc88d330", + "ip_ranges": null, + "ipv4_addresses": [ + { + "prefix_length": 24, + "value": "10.1.1.0" + }, + { + "prefix_length": 32, + "value": "10.1.2.2" + } + ], + "links": null, + "name": "yclaDaQKtEGIansible-ag2", + "policy_references": null, + "tenant_id": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field indicates if the task execution failed + returned: always + type: bool + sample: false + +ext_id: + description: The created address group ext_id + returned: always + type: str + sample: "63311404-8b2e-4dbf-9e33-7848cc88d330" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_address_groups_api_instance, + get_etag, +) +from ..module_utils.v4.flow.helpers import get_address_group # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + 
+SDK_IMP_ERROR = None +try: + import ntnx_microseg_py_client as mic_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as mic_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + ip_address_sub_spec = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + ip_range_spec = dict( + start_ip=dict(type="str"), + end_ip=dict(type="str"), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + ipv4_addresses=dict( + type="list", + elements="dict", + options=ip_address_sub_spec, + obj=mic_sdk.IPv4Address, + ), + ip_ranges=dict( + type="list", elements="dict", options=ip_range_spec, obj=mic_sdk.IPv4Range + ), + ) + + return module_args + + +def create_address_group(module, result): + address_groups = get_address_groups_api_instance(module) + + sg = SpecGenerator(module) + default_spec = mic_sdk.AddressGroup() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create address groups Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = address_groups.create_address_group(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating address group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, 
rel=TASK_CONSTANTS.RelEntityType.ADDRESS_GROUP + ) + if ext_id: + resp = get_address_group(module, address_groups, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_address_groups_idempotency(old_spec, update_spec): + strip_internal_attributes(old_spec) + strip_internal_attributes(update_spec) + if old_spec != update_spec: + return False + + return True + + +def update_address_group(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + address_groups = get_address_groups_api_instance(module) + + current_spec = get_address_group(module, address_groups, ext_id) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating address_groups update spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if check_address_groups_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = None + try: + resp = address_groups.update_address_group_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating address_group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id, True) + resp = get_address_group(module, address_groups, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_address_group(module, result): + address_groups = get_address_groups_api_instance(module) + ext_id = module.params.get("ext_id") + 
result["ext_id"] = ext_id + + current_spec = get_address_group(module, address_groups, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for deleting address group", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = address_groups.delete_address_group_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting address_group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_microseg_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_address_group(module, result) + else: + create_address_group(module, result) + else: + delete_address_group(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_authorization_policies_info_v2.py b/plugins/modules/ntnx_authorization_policies_info_v2.py new file mode 100644 index 000000000..4aecaf002 --- /dev/null +++ b/plugins/modules/ntnx_authorization_policies_info_v2.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License 
v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_authorization_policies_info_v2 +short_description: Fetch Authorization policies info from Nutanix PC. +version_added: 2.0.0 +description: + - Get authorization policies info + - It will fetch specific authorization policy if external ID is provided + - It will fetch multiple authorization policies if external ID is not provided + - Use filters to fetch specific authorization policies + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - authorization_policy external ID + - if provided, it will fetch the authorization policy with the given external ID + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: Get 10 auth policies + nutanix.ncp.ntnx_authorization_policies_info_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + limit: 10 + register: result + ignore_errors: true + +- name: Get specific auth policy + nutanix.ncp.ntnx_authorization_policies_info_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "ebbfbd38-794b-5529-adcc-dcb6b4177387" + register: result + ignore_errors: true + +- name: Fetch using filters + nutanix.ncp.ntnx_authorization_policies_info_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + filter: "displayName eq 'acp1'" + register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - It will have specific authorization policy if external ID is provided + - It will have list of multiple authorization policies if external ID is 
not provided + returned: always + type: dict + sample: + { + "authorizationPolicyType": "PREDEFINED_READ_ONLY", + "clientName": "", + "createdBy": "", + "createdTime": "2024-03-20T09:54:34.846946Z", + "description": "", + "displayName": "Super Admin_acp", + "entities": [ + { + "$reserved": { + "*": { + "*": { + "eq": "*" + } + } + } + } + ], + "extId": "00000000-0000-0000-0300-000000000000", + "identities": [ + { + "$reserved": { + "user": { + "uuid": { + "anyof": [ + "00000002-0000-0000-0000-000000000000" + ] + } + } + } + } + ], + "isSystemDefined": true, + "lastUpdatedTime": "2024-03-20T09:54:34.846946Z", + "role": "00000001-0000-0000-0000-000000000000" + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +ext_id: + description: External Id of the authorization policy + returned: always + type: bool + sample: "00000000-0000-0000-0000-000000000000" +""" + +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_authorization_policy_api_instance, +) +from ..module_utils.v4.iam.helpers import ( # noqa: E402 + get_authorization_policy as get_authorization_policy_by_id, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def 
format_acp_spec(spec): + identities = strip_internal_attributes( + deepcopy(spec.pop("identities")), ["_reserved"] + ) + entities = strip_internal_attributes(deepcopy(spec.pop("entities")), ["_reserved"]) + spec = strip_internal_attributes(spec) + + # taking out identity and entity present under $reserved in spec + formatted_identities = [] + if identities: + for identity in identities: + formatted_identities.append(identity["_reserved"]) + spec["identities"] = formatted_identities + + formatted_entities = [] + if entities: + for entity in entities: + formatted_entities.append(entity["_reserved"]) + spec["entities"] = formatted_entities + + return spec + + +def get_authorization_policies(module, api_instance, result): + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating authorization_policies info Spec", **result + ) + + try: + resp = api_instance.list_authorization_policies(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching authorization_policies info", + ) + + policies = [] + if getattr(resp, "data", []): + policies = resp.to_dict().get("data") + + for policy in policies: + format_acp_spec(policy) + + result["response"] = policies + + +def get_authorization_policy(module, api_instance, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + policy = get_authorization_policy_by_id(module, api_instance, ext_id) + result["response"] = format_acp_spec(policy.to_dict()) + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + + result = {"changed": False, "error": None, "response": None} + + authorization_policies = get_authorization_policy_api_instance(module) + if module.params.get("ext_id"): + 
get_authorization_policy(module, authorization_policies, result) + else: + get_authorization_policies(module, authorization_policies, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_authorization_policies_v2.py b/plugins/modules/ntnx_authorization_policies_v2.py new file mode 100644 index 000000000..719ce6b0f --- /dev/null +++ b/plugins/modules/ntnx_authorization_policies_v2.py @@ -0,0 +1,478 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +module: ntnx_authorization_policies_v2 +short_description: Manage Nutanix PC IAM authorization policies +description: + - This module allows you to create, update, and delete authorization policies in Nutanix PC. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + state: + description: + - If C(state) is C(present) and C(ext_id) is not provided, create a new authorization policy. + - If C(state) is C(present) and C(ext_id) is provided, update the authorization policy. + - If C(state) is C(absent), it will delete the authorization policy with the given External ID. + type: str + choices: ['present', 'absent'] + ext_id: + description: + - Role External ID. + - Required for updating or deleting the auth policy. + type: str + required: false + display_name: + description: + - The display name for the Authorization Policy. + required: false + type: str + description: + description: + - Description of the Authorization Policy. + required: false + type: str + identities: + description: + - List of expressions representing identities for access to given entities. + - During update, the identities are replaced with the new given identities. 
+ - Check examples for more information. + required: false + type: list + elements: dict + entities: + description: + - List of expressions representing entities access to identities. + - During update, the entities are replaced with the new given entities. + required: false + type: list + elements: dict + role: + description: + - The Role associated with the Authorization Policy + required: false + type: str + authorization_policy_type: + description: + - Type of Authorization Policy. + required: false + type: str + choices: + - USER_DEFINED + default: USER_DEFINED + wait: + description: + - Wait for the task to complete. + - Not supported for this module. + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: Create auth policy with access to images, certain directory service and self owned marketplace items + nutanix.ncp.ntnx_authorization_policies_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + display_name: "ansible-created-acp" + description: "ansible created acps" + role: "ebbfbd38-122b-5529-adcc-dcb6b4177382" + authorization_policy_type: "USER_DEFINED" + entities: + - "images": + "*": + "eq": "*" + - "directory_service": + "uuid": + "anyof": + - "ebbfbd38-794b-5529-adcc-dcb6b4177382" + - "ebbfbd38-794b-5529-adcc-dcb6b4177383" + - "marketplace_item": + "owner_uuid": + "eq": "SELF_OWNED" + identities: + - "user": + "uuid": + "anyof": + - "ebbfbd38-794b-5529-adcc-dcb6b4177384" + - "ebbfbd38-794b-5529-adcc-dcb6b4177385" + - "user": + "group": + "anyof": + - "ebbfbd38-794b-5529-adcc-dcb6b4177386" + - "ebbfbd38-794b-5529-adcc-dcb6b4177387" + register: result + ignore_errors: true + +- name: Create a basic auth policy with access to all images + 
nutanix.ncp.ntnx_authorization_policies_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + display_name: "acp1" + role: "ebb123232-794b-5529-adcc-dcb6b4137384" + entities: + - "images": + "*": + "eq": "*" + identities: + - "user": + "group": + "anyof": + - "" + register: result + ignore_errors: true + +- name: Update access to users and remove access of user groups + nutanix.ncp.ntnx_authorization_policies_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + ext_id: "ebbfbd38-794b-5529-adcc-dcb6b4137384" + identities: + - "user": + "uuid": + "anyof": + - "ebbfbd38-794b-1529-adcc-dcb6b4177384" + - "ebbfbd38-794b-5521-adcc-dcb6b4177384" + register: result + ignore_errors: true + +- name: delete auth policy + nutanix.ncp.ntnx_authorization_policies_v2: + nutanix_host: "{{ nutanix_host }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: absent + ext_id: "ebbfbd38-794b-5529-adcc-dcb6b4137384" + register: result + ignore_errors: true +""" + +RETURN = r""" +response: + description: Authorization policy current state spec + returned: always + type: dict + sample: + { + "authorizationPolicyType": "PREDEFINED_READ_ONLY", + "clientName": "", + "createdBy": "", + "createdTime": "2024-03-20T09:54:34.846946Z", + "description": "", + "displayName": "Super Admin_acp", + "entities": [ + { + "$reserved": { + "*": { + "*": { + "eq": "*" + } + } + } + } + ], + "extId": "00000000-0000-0000-0000-000000000000", + "identities": [ + { + "$reserved": { + "user": { + "uuid": { + "anyof": [ + "00000000-0000-0000-0000-000000000000" + ] + } + } + } + } + ], + "isSystemDefined": true, + "lastUpdatedTime": "2024-03-20T09:54:34.846946Z", + "role": "00000000-0000-0000-0000-000000000000" + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + 
sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +ext_id: + description: authorization policy external id + returned: always + type: str + sample: "00001000-0000-0000-0000-000000000000" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_authorization_policy_api_instance, + get_etag, +) +from ..module_utils.v4.iam.helpers import get_authorization_policy # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + display_name=dict(type="str"), + description=dict(type="str"), + identities=dict(type="list", elements="dict"), + entities=dict(type="list", elements="dict"), + role=dict(type="str"), + authorization_policy_type=dict( + type="str", + choices=["USER_DEFINED"], + default="USER_DEFINED", + ), + ) + return module_args + + +def create_entities_spec(entities): + entities_spec = [] + for entity in entities: + entity_spec = iam_sdk.EntityFilter(entity_filter=entity) + entities_spec.append(entity_spec) + return entities_spec + + +def 
create_identities_spec(identities): + identities_spec = [] + for identity in identities: + identity_spec = iam_sdk.IdentityFilter(identity_filter=identity) + identities_spec.append(identity_spec) + return identities_spec + + +def create_authorization_policy(module, result): + authorization_policies = get_authorization_policy_api_instance(module) + + sg = SpecGenerator(module) + default_spec = iam_sdk.AuthorizationPolicy() + spec, err = sg.generate_spec(obj=default_spec) + + # handling identities and entities spec creation separately as their dicts are to be passed in $reserved + identities = module.params.get("identities") + if identities: + spec.identities = create_identities_spec(identities) + entities = module.params.get("entities") + if entities: + spec.entities = create_entities_spec(entities) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create authorization policy spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = authorization_policies.create_authorization_policy(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating authorization policy", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + result["changed"] = True + + +def check_authorization_policies_idempotency(old_spec, update_spec): + if old_spec == update_spec: + return True + return False + + +def update_authorization_policy(module, result): + authorization_policies = get_authorization_policy_api_instance(module) + + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_authorization_policy( + module, authorization_policies, ext_id=ext_id + ) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json( + 
msg="Failed generating authorization policy update spec", **result + ) + + # handling identities and entities spec creation separately as their dicts are to be passed in $reserved + identities = module.params.get("identities") + if identities: + update_spec.identities = create_identities_spec(identities) + entities = module.params.get("entities") + if entities: + update_spec.entities = create_entities_spec(entities) + # check for idempotency + if check_authorization_policies_idempotency( + strip_internal_attributes(current_spec.to_dict()), + strip_internal_attributes(update_spec.to_dict()), + ): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = authorization_policies.update_authorization_policy_by_id( + extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating authorization policy", + ) + + if getattr(resp.data, "severity", None) == "ERROR": + result["error"] = resp.data.message + module.fail_json( + msg="Failed to update authorization policy", + **result, + ) + + resp = get_authorization_policy(module, authorization_policies, ext_id=ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def delete_authorization_policy(module, result): + authorization_policies = get_authorization_policy_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_authorization_policy( + module, authorization_policies, ext_id=ext_id + ) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for deleting authorization policy", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = authorization_policies.delete_authorization_policy_by_id( + extId=ext_id, **kwargs 
+ ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting authorization policy", + ) + + result["changed"] = True + if resp is None: + result[ + "msg" + ] = "Authorization policy with ext_id: {} deleted successfully".format(ext_id) + else: + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ( + "state", + "present", + ("display_name", "identities", "entities", "role"), + True, + ), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_authorization_policy(module, result) + else: + create_authorization_policy(module, result) + else: + delete_authorization_policy(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_categories.py b/plugins/modules/ntnx_categories.py index a2855894e..149c9491d 100644 --- a/plugins/modules/ntnx_categories.py +++ b/plugins/modules/ntnx_categories.py @@ -56,7 +56,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "present" name: "{{first_category.name}}" desc: "{{first_category.desc}}" @@ -66,7 +66,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "present" name: "{{first_category.name}}" desc: "{{first_category.update_desc}}" @@ -74,12 +74,12 @@ - "{{values.0}}" - "{{values.1}}" 
register: result -- name: update existing category by deleting some values +- name: Update existing category by deleting some values ntnx_categories: nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "absent" name: "{{first_category.name}}" desc: "{{first_category.update_desc}}" @@ -91,7 +91,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "absent" name: "{{first_category.name}}" remove_values: true @@ -101,7 +101,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "absent" name: "{{first_category.name}}" register: result @@ -110,7 +110,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false state: "present" name: "{{second_category.name}}" desc: test description @@ -155,7 +155,7 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.categories import CategoryKey, CategoryValue # noqa: E402 +from ..module_utils.v3.prism.categories import CategoryKey, CategoryValue # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_categories_info.py b/plugins/modules/ntnx_categories_info.py index 7cf558af7..78ad31e0b 100644 --- a/plugins/modules/ntnx_categories_info.py +++ b/plugins/modules/ntnx_categories_info.py @@ -34,32 +34,32 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" -- name: test getting all categories +- name: Test getting all categories ntnx_categories_info: nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false register: result ignore_errors: true -- name: test getting the category 
with filter by it's name +- name: Test getting the category with filter by it's name ntnx_categories_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: "{{category_name}}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + name: "{{category_name}}" register: result -- name: test getting the category by it's name +- name: Test getting the category by it's name ntnx_categories_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - name: "{{category_name}}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: "{{category_name}}" register: result """ RETURN = r""" @@ -99,9 +99,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.categories import Category, CategoryKey # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.categories import Category, CategoryKey # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_categories_info_v2.py b/plugins/modules/ntnx_categories_info_v2.py new file mode 100644 index 000000000..6bf10c2b9 --- /dev/null +++ b/plugins/modules/ntnx_categories_info_v2.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_categories_info_v2 +short_description: Nutanix PC categories info module +version_added: 2.0.0 +description: + - Get 
categories info + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - category UUID + type: str + expand: + description: + - Additional query param to expand the response with more details + type: str + choices: ['associations', 'detailedAssociations'] +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: list all categories + nutanix.ncp.ntnx_categories_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + +- name: List all categories with keyname & expand associations + nutanix.ncp.ntnx_categories_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + expand: associations + filter: "key eq '{{category_name}}'" +""" +RETURN = r""" +response: + description: + - By default, List of category key values. + - If ext_id is provided then it will have dictionary consisting category key value info. + type: dict + returned: always + sample: + { + "associations": [ + { + "category_id": null, + "count": 18, + "resource_group": "ENTITY", + "resource_type": "VM" + } + ], + "description": "Created by CALM", + "detailed_associations": null, + "ext_id": "cc16efb4-6591-4b89-a643-8c835f035393", + "key": "OSType", + "links": [ + { + "href": "https://00.00.00.00:9440/api/prism/v4.0.b1/config/categories/cc16efb4-6591-4b89-a643-8c835f035393", + "rel": "self" + } + ], + "owner_uuid": null, + "tenant_id": null, + "type": "USER", + "value": "Linux" + } + +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the category if given in input. 
+ type: str + returned: always + sample: "dded1b87-e566-419a-aac0-fb282792fb83" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.prism.pc_api_client import get_pc_api_client # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_prism_py_client as prism_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as prism_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + expand=dict(type="str", choices=["associations", "detailedAssociations"]), + ) + + return module_args + + +def get_category_api_instance(module): + api_client = get_pc_api_client(module) + return prism_sdk.CategoriesApi(api_client=api_client) + + +def get_category(module, result): + categories = get_category_api_instance(module) + ext_id = module.params.get("ext_id") + expand = module.params.get("expand") + + try: + resp = categories.get_category_by_id(ext_id, expand) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching category info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_categories(module, result): + categories = get_category_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params, extra_params=["expand"]) + + 
if err: + result["error"] = err + module.fail_json(msg="Failed generating categories info Spec", **result) + + try: + resp = categories.list_categories(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching categories info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_prism_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_category(module, result) + else: + get_categories(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_categories_v2.py b/plugins/modules/ntnx_categories_v2.py new file mode 100644 index 000000000..7f35cbc87 --- /dev/null +++ b/plugins/modules/ntnx_categories_v2.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_categories_v2 +short_description: Manage categories in Nutanix Prism Central +version_added: "2.0.0" +description: + - This module allows you to create, update, and delete categories in Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the category. + required: false + type: str + key: + description: + - The key of the category. 
+ required: false + type: str + + value: + description: + - The value of the category. + required: false + type: str + type: + description: + - The type of the category. + required: false + choices: ['USER'] + default: 'USER' + type: str + owner_uuid: + description: + - The owner UUID of the category. + required: false + type: str + description: + description: + - The description of the category. + required: false + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create category key & value + nutanix.ncp.ntnx_categories_v2: + nutanix_host: + nutanix_username: + nutanix_password: + key: "key1" + value: "val1" + description: "ansible test" + +- name: Update category value and description + nutanix.ncp.ntnx_categories_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: "" + key: "key1" + value: "val2" + description: "ansible test New value" + +- name: Delete created category key value pair + nutanix.ncp.ntnx_categories_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: "" + state: absent +""" + +RETURN = r""" +response: + description: + - when wait is false, the response will be task status. + - Else The response from the Nutanix PC category v4 API. + type: dict + returned: always + sample: + { + "associations": [ + { + "category_id": null, + "count": 18, + "resource_group": "ENTITY", + "resource_type": "VM" + } + ], + "description": "Created by CALM", + "detailed_associations": null, + "ext_id": "cc16efb4-6591-4b89-a643-8c835f035393", + "key": "OSType", + "links": [ + { + "href": "https://00.00.00.00:9440/api/prism/v4.0.b1/config/categories/cc16efb4-6591-4b89-a643-8c835f035393", + "rel": "self" + } + ], + "owner_uuid": null, + "tenant_id": null, + "type": "USER", + "value": "Linux" + } + +error: + description: The error message if an error occurs. 
+ type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the category is fetched. + type: str + returned: always + sample: "dded1b87-e566-419a-aac0-fb282792fb83" +changed: + description: Indicates whether the resource has changed. + type: bool + returned: always + sample: true +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.pc_api_client import ( # noqa: E402 + get_etag, + get_pc_api_client, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_prism_py_client as prism_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as prism_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + key=dict(type="str", no_log=False), + value=dict(type="str"), + type=dict(type="str", choices=["USER"], default="USER"), + owner_uuid=dict(type="str"), + description=dict(type="str"), + ) + + return module_args + + +_PRISM_SDK = None + + +def get_category_api_instance(module): + global _PRISM_SDK + if not _PRISM_SDK: + api_client = get_pc_api_client(module) + _PRISM_SDK = prism_sdk.CategoriesApi(api_client=api_client) + + return _PRISM_SDK + + +def get_category(module, ext_id): + categories = get_category_api_instance(module) + try: + return categories.get_category_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( 
+ module=module, + exception=e, + msg="Api Exception raised while fetching category info using ext_id", + ) + + +def create_category(module, result): + categories = get_category_api_instance(module) + + sg = SpecGenerator(module) + default_spec = prism_sdk.Category() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create categories Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = categories.create_category(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating category", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def check_categories_idempotency(old_spec, update_spec): + old_spec = deepcopy(old_spec) + update_spec = deepcopy(update_spec) + strip_internal_attributes(old_spec) + strip_internal_attributes(update_spec) + if old_spec != update_spec: + return False + + return True + + +def update_category(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_category(module, ext_id=ext_id) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating categories update spec", **result) + + # check for idempotency + if check_categories_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + categories = get_category_api_instance(module) + try: + resp = categories.update_category_by_id(extId=ext_id, body=update_spec) + except Exception as e: + 
raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating category", + ) + result["response"] = strip_internal_attributes(resp.data[0].to_dict()) + + try: + resp = categories.get_category_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching category info", + ) + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + result["changed"] = True + + +def delete_category(module, result): + categories = get_category_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_category(module, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("Unable to fetch etag for deleting category", **result) + + kwargs = {"if_match": etag} + + try: + resp = categories.delete_category_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting category", + ) + + result["response"] = strip_internal_attributes(resp) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("ext_id", "key", "value"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_prism_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_category(module, result) + else: + create_category(module, result) + else: + delete_category(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git 
a/plugins/modules/ntnx_clusters_info.py b/plugins/modules/ntnx_clusters_info.py index 33b100eb3..90737e386 100644 --- a/plugins/modules/ntnx_clusters_info.py +++ b/plugins/modules/ntnx_clusters_info.py @@ -32,23 +32,23 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List clusterss - ntnx_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - kind: cluster - register: result - - - name: test getting particular cluster using uuid - ntnx_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster_uuid: cluster_uuid - register: result +- name: List clusterss + ntnx_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + kind: cluster + register: result + +- name: test getting particular cluster using uuid + ntnx_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_uuid: cluster_uuid + register: result """ RETURN = r""" api_version: @@ -188,9 +188,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.clusters import Cluster # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.clusters import Cluster # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_clusters_info_v2.py b/plugins/modules/ntnx_clusters_info_v2.py new file mode 100644 index 000000000..8a35ca5a9 --- /dev/null +++ b/plugins/modules/ntnx_clusters_info_v2.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_clusters_info_v2 +short_description: Retrieve information about Nutanix clusters from PC +version_added: 2.0.0 +description: + - This module retrieves information about Nutanix clusters from PC. + - Fetch particular cluster info using external ID + - Fetch multiple clusters info with/without using filters, limit, etc. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the cluster. + - If not provided, multiple clusters info will be fetched. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: fetch cluster info using external ID + nutanix.ncp.ntnx_clusters_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + register: result + +- name: fetch all clusters info + nutanix.ncp.ntnx_clusters_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + register: result + +- name: fetch all clusters info with filter + nutanix.ncp.ntnx_clusters_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + filter: "name eq 'cluster1'" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching cluster info. + - Returns cluster info if ext_id is provided or list of multiple clusters. 
+ type: dict + returned: always + sample: + { + "config": + { + "authorized_public_key_list": + [ + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6", + "name": "key1", + }, + ], + "build_info": + { + "build_type": "release", + "commit_id": "9b27c8bcb5fcaac58016f3bed74009655a157049", + "full_version": "el8.5-release-fraser-6.8-stable-9b27c8bcb5fcaac58016f3bed74009655a157049", + "short_commit_id": "9b27c8", + "version": "fraser-6.8-stable", + }, + "cluster_arch": "X86_64", + "cluster_function": ["AOS", "ONE_NODE"], + "cluster_software_map": + [ + { "software_type": "NCC", "version": "ncc-5.0.0" }, + { + "software_type": "NOS", + "version": "el8.5-release-fraser-6.8-stable-9b27c8bcb5fcaac58016f3bed74009655a157049", + }, + ], + "encryption_in_transit_status": null, + "encryption_option": null, + "encryption_scope": null, + "fault_tolerance_state": + { + "current_max_fault_tolerance": 0, + "desired_max_fault_tolerance": 0, + "domain_awareness_level": "DISK", + }, + "hypervisor_types": ["AHV"], + "incarnation_id": 1283123882137, + "is_lts": false, + "operation_mode": "NORMAL", + "password_remote_login_enabled": true, + "redundancy_factor": 1, + "remote_support": false, + "timezone": "UTC", + }, + "container_name": null, + "ext_id": "00061de6-4a87-6b06-185b-ac1f6b6f97e2", + "inefficient_vm_count": null, + "links": null, + "name": "ansible_ag", + "network": + { + "backplane": + { + "is_segmentation_enabled": false, + "netmask": null, + "subnet": null, + "vlan_tag": null, + }, + "external_address": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.1" }, + "ipv6": null, + }, + "external_data_service_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.2" }, + "ipv6": null, + }, + "fqdn": null, + "key_management_server_type": null, + "management_server": null, + "masquerading_ip": null, + "masquerading_port": null, + "name_server_ip_list": + [ + { + "fqdn": null, + "ipv4": { "prefix_length": 32, "value": "10.0.0.6" }, + "ipv6": null, + }, + ], + 
"nfs_subnet_whitelist": null, + "ntp_server_ip_list": + [ + { + "fqdn": { "value": "0.ntp.org" }, + "ipv4": null, + "ipv6": null, + }, + ], + "smtp_server": + { + "email_address": "test@test.com", + "server": + { + "ip_address": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.8" }, + "ipv6": null, + }, + "password": null, + "port": 25, + "username": "username", + }, + "type": "STARTTLS", + }, + }, + "nodes": + { + "node_list": + [ + { + "controller_vm_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.6" }, + "ipv6": null, + }, + "host_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.10" }, + "ipv6": null, + }, + "node_uuid": "af49a0bb-b3d7-41c0-b9c2-f4ca0e8763e9", + }, + ], + "number_of_nodes": 1, + }, + "run_prechecks_only": null, + "tenant_id": null, + "upgrade_status": "SUCCEEDED", + "vm_count": 1, +} +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the cluster if given in input. 
+ type: str + returned: always + sample: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, +) +from ..module_utils.v4.clusters_mgmt.helpers import get_cluster # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_cluster_by_ext_id(module, result): + ext_id = module.params.get("ext_id") + clusters = get_clusters_api_instance(module) + resp = get_cluster(module, clusters, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_clusters(module, result): + clusters = get_clusters_api_instance(module) + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(module.params) + if err: + module.fail_json("Failed creating query parameters for fetching clusters info") + resp = None + try: + resp = clusters.list_clusters(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching clusters info", + ) + + if getattr(resp, "data", None): + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + else: + result["response"] = [] + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + skip_info_args=False, + mutually_exclusive=[("ext_id", "filter")], + ) + + remove_param_with_none_value(module.params) + result = 
{"changed": False, "error": None, "response": None} + if module.params.get("ext_id") or module.params.get("name"): + get_cluster_by_ext_id(module, result) + else: + get_clusters(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_clusters_nodes_v2.py b/plugins/modules/ntnx_clusters_nodes_v2.py new file mode 100644 index 000000000..51df2add7 --- /dev/null +++ b/plugins/modules/ntnx_clusters_nodes_v2.py @@ -0,0 +1,970 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_clusters_nodes_v2 +short_description: Add or Remove nodes from cluster using Nutanix PC +description: + - This module allows you to manage Nutanix cluster nodes. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + node_params: + description: + - parameters for adding or removing cluster nodes. + type: dict + suboptions: + block_list: + description: + - List of blocks to which the nodes belong. + type: list + elements: dict + suboptions: + block_id: + description: ID of the block. + type: str + required: false + rack_name: + description: Name of the rack. + type: str + required: false + node_list: + description: + - List of nodes to be added or removed. + type: list + elements: dict + suboptions: + node_uuid: + description: UUID of the node. + type: str + required: false + block_id: + description: ID of the block to which the node belongs. + type: str + required: false + node_position: + description: Position of the node. + type: str + required: false + hypervisor_type: + description: Type of the hypervisor. 
+ type: str + choices: ['AHV', 'ESX', 'HYPERV', 'XEN', 'NATIVEHOST'] + required: false + is_robo_mixed_hypervisor: + description: Whether the node is a mixed hypervisor in a ROBO deployment. + type: bool + required: false + hypervisor_hostname: + description: Hostname of the hypervisor. + type: str + required: false + hypervisor_version: + description: Version of the hypervisor. + type: str + required: false + nos_version: + description: Version of the Nutanix Operating System (NOS). + type: str + required: false + is_light_compute: + description: Whether the node is a light compute node. + type: bool + required: false + ipmi_ip: + description: IP address for IPMI. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + digital_certificate_map_list: + description: List of digital certificates. + type: list + elements: dict + suboptions: + key: + description: Key of the digital certificate. + type: str + value: + description: Value of the digital certificate. + type: str + required: false + cvm_ip: + description: IP address for CVM. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. 
+ type: int + required: false + required: false + hypervisor_ip: + description: IP address for the hypervisor. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + model: + description: Model of the node. + type: str + required: false + current_network_interface: + description: Current network interface of the node. + type: str + required: false + networks: + description: List of networks for the node. + type: list + elements: dict + suboptions: + name: + description: Name of the network. + type: str + networks: + description: List of network names. + type: list + elements: str + uplinks: + description: uplink information. + type: dict + suboptions: + active: + description: List of active uplinks. + type: list + elements: dict + suboptions: + mac: + description: MAC address of the uplink. + type: str + name: + description: Name of the uplink. + type: str + value: + description: Value of the uplink. + type: str + standby: + description: List of standby uplinks. + type: list + elements: dict + suboptions: + mac: + description: MAC address of the uplink. + type: str + name: + description: Name of the uplink. + type: str + value: + description: Value of the uplink. + type: str + required: false + compute_node_list: + description: + - List of compute nodes to be added or removed. + type: list + elements: dict + suboptions: + node_uuid: + description: UUID of the compute node. + type: str + required: false + block_id: + description: ID of the block to which the compute node belongs. 
+ type: str + required: false + node_position: + description: Position of the compute node. + type: str + required: false + hypervisor_ip: + description: IP address for the hypervisor. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + ipmi_ip: + description: IP address for IPMI. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + digital_certificate_map_list: + description: List of digital certificates. + type: list + elements: dict + suboptions: + key: + description: Key of the digital certificate. + type: str + value: + description: Value of the digital certificate. + type: str + required: false + hypervisor_hostname: + description: Hostname of the hypervisor. + type: str + required: false + model: + description: Model of the compute node. + type: str + required: false + hyperv_sku: + description: write + type: str + bundle_info: + description: write + type: dict + suboptions: + name: + description: write + type: str + should_skip_host_networking: + description: write + type: bool + hypervisor_isos: + description: + - List of hypervisor ISOs. 
+ type: list + elements: dict + suboptions: + type: + description: Type of the hypervisor. + type: str + choices: ['AHV', 'ESX', 'HYPERV', 'XEN', 'NATIVEHOST'] + md5_sum: + description: MD5 sum of the ISO. + type: str + required: false + config_params: + description: + - configuration parameters. + type: dict + suboptions: + should_skip_discovery: + description: Whether to skip the discovery process. + type: bool + required: false + should_skip_imaging: + description: Whether to skip the imaging process. + type: bool + required: false + should_validate_rack_awareness: + description: Whether to validate rack awareness. + type: bool + required: false + is_nos_compatible: + description: Whether the nodes are compatible with the Nutanix Operating System (NOS). + type: bool + required: false + is_compute_only: + description: Whether the nodes are compute-only nodes. + type: bool + required: false + is_never_scheduleable: + description: Whether the nodes are never scheduleable. + type: bool + required: false + target_hypervisor: + description: Target hypervisor for the nodes. + type: str + required: false + hyperv: + description: hyper-v credentials. + type: dict + suboptions: + domain_details: + description: domain details. + type: dict + suboptions: + username: + description: Username for the domain. + type: str + password: + description: Password for the domain. + type: str + cluster_name: + description: Name of the cluster. + type: str + required: false + failover_cluster_details: + description: failover cluster details. + type: dict + suboptions: + username: + description: Username for the failover cluster. + type: str + password: + description: Password for the failover cluster. + type: str + cluster_name: + description: Name of the cluster. + type: str + required: false + required: false + should_skip_add_node: + description: Whether to skip adding nodes. 
+ type: bool + required: false + should_skip_pre_expand_checks: + description: Whether to skip pre-expand checks. + type: bool + required: false + cluster_ext_id: + description: External ID of the cluster. + type: str + required: true + should_skip_prechecks: + description: Whether to skip pre-checks. + type: bool + required: false + should_skip_remove: + description: Whether to skip removing nodes. + type: bool + required: false + node_uuids: + description: List of UUIDs of the nodes to be removed. + type: list + elements: str + required: false + extra_params: + description: + - extra parameters. + type: dict + suboptions: + should_skip_upgrade_check: + description: Whether to skip the upgrade check. + type: bool + required: false + should_skip_space_check: + description: Whether to skip the space check. + type: bool + required: false + should_skip_add_check: + description: Whether to skip the add check. + type: bool + required: false + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Expand cluster + nutanix.ncp.ntnx_clusters_nodes_v2: + nutanix_host: + nutanix_username: + nutanix_password: + cluster_ext_id: "000628e4-4c8f-1239-5575-0cc47a9a3e6d" + node_params: + node_list: + - block_id: "18SM8B010159" + current_network_interface: "eth1" + cvm_ip: + ipv4: + prefix_length: 32 + value: "10.0.0.1" + hypervisor_hostname: "test" + hypervisor_ip: + ipv4: + prefix_length: 32 + value: "10.0.0.2" + hypervisor_type: "AHV" + hypervisor_version: "10.0-793" + ipmi_ip: + ipv4: + prefix_length: 32 + value: "10.0.0.3" + is_light_compute: false + is_robo_mixed_hypervisor: true + model: "NX-3060-G5" + networks: + - name: "br0" + networks: + - "Management" + uplinks: + active: + - mac: "1c:f4:7b:5f:a9:2a" + name: "eth1" + value: "eth1" + standby: + - mac: "12:ee:23:33:2f:43" + name: "eth2" + value: 
"eth2" + node_position: "B" + node_uuid: "54b7581b-2e35-413e-8608-0531b065a5d8" + nos_version: "7.0" + config_params: + is_compute_only: false + is_never_scheduleable: false + is_nos_compatible: false + should_skip_discovery: false + should_skip_imaging: true + register: result + +- name: Remove node from cluster + nutanix.ncp.ntnx_clusters_nodes_v2: + nutanix_host: + nutanix_username: + nutanix_password: + cluster_ext_id: "000628e4-4c8f-1239-5575-0cc47a9a3e6d" + node_uuids: + - "54b7581b-2e35-413e-8608-0531b065a5d8" + register: result +""" + +RETURN = r""" +response: + description: Task response for adding or removing cluster nodes. + type: dict + returned: always + sample: + { + "cluster_ext_ids": [ + "000628e4-4c8f-1239-5575-0cc47a9a3e6d" + ], + "completed_time": "2024-12-10T06:26:48.551062+00:00", + "completion_details": null, + "created_time": "2024-12-10T06:17:52.467169+00:00", + "entities_affected": [ + { + "ext_id": "000628e4-4c8f-1239-5575-0cc47a9a3e6d", + "name": null, + "rel": "clustermgmt:config:cluster" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:a2734e72-f034-49de-a3c8-d50e2dbaf44a", + "is_background_task": false, + "is_cancelable": false, + "last_updated_time": "2024-12-10T06:26:48.551061+00:00", + "legacy_error_message": null, + "number_of_entities_affected": 1, + "number_of_subtasks": 1, + "operation": "Expand Cluster", + "operation_description": "Expand Cluster", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "root_task": null, + "started_time": "2024-12-10T06:17:52.492332+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:74cb5bb6-f888-4c4a-7c99-74d95d76443a", + "href": "https://10.44.76.117:9440/api/prism/v4.0/config/tasks/ZXJnb24=:74cb5bb6-f888-4c4a-7c99-74d95d76443a", + "rel": "subtask" + } + ], + "warnings": null + } +task_ext_id: + description: The external ID of the task. 
+ type: str + returned: always +ext_id: + description: The external ID of the cluster. + type: str + returned: always +changed: + description: Whether the state of the cluster nodes has changed. + type: bool + returned: always +error: + description: The error message, if any. + type: str + returned: on error +cluster_ext_id: + description: The external ID of the cluster. + type: str + returned: always +""" + + +import traceback # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, +) +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_clustermgmt_py_client as clustermgmt_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as clustermgmt_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + block_list_spec = dict( + block_id=dict(type="str", required=False), + rack_name=dict(type="str", required=False), + ) + ipv4_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ipv6_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + + ip_address_spec = dict( + ipv4=dict( + type="dict", + options=ipv4_spec, + obj=clustermgmt_sdk.IPv4Address, + required=False, + ), + ipv6=dict( + type="dict", + options=ipv6_spec, + obj=clustermgmt_sdk.IPv6Address, + required=False, + ), + ) + digital_certificate_map_list_spec = dict( + key=dict(type="str", required=False, no_log=False), + 
value=dict(type="str", required=False), + ) + uplinks_types_spec = dict( + mac=dict(type="str", required=False), + name=dict(type="str", required=False), + value=dict(type="str", required=False), + ) + uplinks_spec = dict( + active=dict( + type="list", + elements="dict", + options=uplinks_types_spec, + obj=clustermgmt_sdk.UplinksField, + required=False, + ), + standby=dict( + type="list", + elements="dict", + options=uplinks_types_spec, + obj=clustermgmt_sdk.UplinksField, + required=False, + ), + ) + networks_spec = dict( + name=dict(type="str", required=False), + networks=dict(type="list", elements="str", required=False), + uplinks=dict( + type="dict", + options=uplinks_spec, + obj=clustermgmt_sdk.Uplinks, + required=False, + ), + ) + node_list_spec = dict( + node_uuid=dict(type="str", required=False), + block_id=dict(type="str", required=False), + node_position=dict(type="str", required=False), + hypervisor_type=dict( + type="str", + choices=["AHV", "ESX", "HYPERV", "XEN", "NATIVEHOST"], + obj=clustermgmt_sdk.HypervisorType, + required=False, + ), + is_robo_mixed_hypervisor=dict(type="bool", required=False), + hypervisor_hostname=dict(type="str", required=False), + hypervisor_version=dict(type="str", required=False), + nos_version=dict(type="str", required=False), + is_light_compute=dict(type="bool", required=False), + ipmi_ip=dict( + type="dict", + options=ip_address_spec, + obj=clustermgmt_sdk.IPAddress, + required=False, + ), + digital_certificate_map_list=dict( + type="list", + elements="dict", + options=digital_certificate_map_list_spec, + obj=clustermgmt_sdk.DigitalCertificateMapReference, + required=False, + ), + cvm_ip=dict( + type="dict", + options=ip_address_spec, + obj=clustermgmt_sdk.IPAddress, + required=False, + ), + hypervisor_ip=dict( + type="dict", + options=ip_address_spec, + obj=clustermgmt_sdk.IPAddress, + required=False, + ), + model=dict(type="str", required=False), + current_network_interface=dict(type="str", required=False), + 
networks=dict( + type="list", + elements="dict", + options=networks_spec, + obj=clustermgmt_sdk.UplinkNetworkItem, + required=False, + ), + ) + compute_node_list_spec = dict( + node_uuid=dict(type="str", required=False), + block_id=dict(type="str", required=False), + node_position=dict(type="str", required=False), + hypervisor_ip=dict(type="dict", options=ip_address_spec, required=False), + ipmi_ip=dict(type="dict", options=ip_address_spec, required=False), + digital_certificate_map_list=dict( + type="list", + elements="dict", + options=digital_certificate_map_list_spec, + required=False, + ), + hypervisor_hostname=dict(type="str", required=False), + model=dict(type="str", required=False), + ) + hypervisor_isos_spec = dict( + type=dict( + type="str", + choices=["AHV", "ESX", "HYPERV", "XEN", "NATIVEHOST"], + required=False, + ), + md5_sum=dict(type="str", required=False), + ) + bundle_info_spec = dict( + name=dict(type="str", required=False), + ) + + node_params_spec = dict( + block_list=dict( + type="list", + elements="dict", + options=block_list_spec, + obj=clustermgmt_sdk.BlockItem, + required=False, + ), + node_list=dict( + type="list", + elements="dict", + options=node_list_spec, + obj=clustermgmt_sdk.NodeItem, + required=False, + ), + compute_node_list=dict( + type="list", + elements="dict", + options=compute_node_list_spec, + obj=clustermgmt_sdk.ComputeNodeItem, + required=False, + ), + hypervisor_isos=dict( + type="list", + elements="dict", + options=hypervisor_isos_spec, + obj=clustermgmt_sdk.HypervisorIsoMap, + required=False, + ), + hyperv_sku=dict(type="str", required=False), + bundle_info=dict( + type="dict", + options=bundle_info_spec, + obj=clustermgmt_sdk.BundleInfo, + required=False, + ), + should_skip_host_networking=dict(type="bool", required=False), + ) + user_info_spec = dict( + username=dict(type="str", required=False), + password=dict(type="str", required=False, no_log=False), + cluster_name=dict(type="str", required=False), + ) + hyperv_spec 
= dict( + domain_details=dict(type="dict", options=user_info_spec, required=False), + failover_cluster_details=dict( + type="dict", options=user_info_spec, required=False + ), + ) + config_params_spec = dict( + should_skip_discovery=dict(type="bool", required=False), + should_skip_imaging=dict(type="bool", required=False), + should_validate_rack_awareness=dict(type="bool", required=False), + is_nos_compatible=dict(type="bool", required=False), + is_compute_only=dict(type="bool", required=False), + is_never_scheduleable=dict(type="bool", required=False), + target_hypervisor=dict(type="str", required=False), + hyperv=dict(type="dict", options=hyperv_spec, required=False), + ) + extra_params_spec = dict( + should_skip_upgrade_check=dict(type="bool", required=False), + should_skip_space_check=dict(type="bool", required=False), + should_skip_add_check=dict(type="bool", required=False), + ) + module_args = dict( + node_params=dict( + type="dict", + options=node_params_spec, + obj=clustermgmt_sdk.NodeParam, + required=False, + ), + config_params=dict( + type="dict", + options=config_params_spec, + obj=clustermgmt_sdk.ConfigParams, + required=False, + ), + should_skip_add_node=dict(type="bool", required=False), + should_skip_pre_expand_checks=dict(type="bool", required=False), + cluster_ext_id=dict(type="str", required=True), + should_skip_prechecks=dict(type="bool", required=False), + should_skip_remove=dict(type="bool", required=False), + node_uuids=dict(type="list", elements="str", required=False), + extra_params=dict( + type="dict", + options=extra_params_spec, + obj=clustermgmt_sdk.NodeRemovalExtraParam, + required=False, + ), + ) + return module_args + + +def add_cluster_node(module, cluster_node_api, result): + sg = SpecGenerator(module) + default_spec = clustermgmt_sdk.ExpandClusterParams() + spec, err = sg.generate_spec(default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating spec for adding cluster node", **result) + 
cluster_ext_id = module.params.get("cluster_ext_id") + result["cluster_ext_id"] = cluster_ext_id + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + resp = None + + try: + resp = cluster_node_api.expand_cluster(clusterExtId=cluster_ext_id, body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while expanding cluster", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + + result["changed"] = True + + +def remove_cluster_node(module, cluster_node_api, result): + cluster_ext_id = module.params.get("cluster_ext_id") + result["cluster_ext_id"] = cluster_ext_id + sg = SpecGenerator(module) + default_spec = clustermgmt_sdk.NodeRemovalParams() + spec, err = sg.generate_spec(default_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for removing cluster node", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + try: + resp = cluster_node_api.remove_node(clusterExtId=cluster_ext_id, body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while removing node", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", 
"present", ("node_params",)), + ("state", "absent", ("node_uuids",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + cluster_node_api = get_clusters_api_instance(module) + if state == "present": + add_cluster_node(module, cluster_node_api, result) + elif state == "absent": + remove_cluster_node(module, cluster_node_api, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_clusters_v2.py b/plugins/modules/ntnx_clusters_v2.py new file mode 100644 index 000000000..b87952129 --- /dev/null +++ b/plugins/modules/ntnx_clusters_v2.py @@ -0,0 +1,1227 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_clusters_v2 +short_description: Manage Nutanix clusters in Prism Central +description: + - This module allows you to create, update, and destroy Nutanix clusters using Prism Central. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the cluster. + - if C(state) is set to C(present) and C(ext_id) is given then it will update that cluster. + - If C(state) is set to C(absent) and if the cluster exists, then cluster is destroyed. + - After cluster create, register the cluster to PC for update or destroy. + choices: + - present + - absent + type: str + default: present + ext_id: + description: + - The external ID of the cluster. 
+ - Mandatory to trigger update or destroy operation. + type: str + name: + description: + - The name of the cluster. + type: str + nodes: + description: + - The list of nodes in the cluster. + type: dict + suboptions: + node_list: + description: + - The list of nodes in the cluster. + type: list + elements: dict + required: true + suboptions: + controller_vm_ip: + description: + - The IP address of the controller VM. + type: dict + required: true + suboptions: + ipv4: + description: + - The IPv4 address of the controller VM. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the controller VM. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + host_ip: + description: + - The IP address of the host. + - Not required for cluster creation. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the host. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the host. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + config: + description: + - The configuration of the cluster. + type: dict + suboptions: + cluster_function: + description: + - The function of the cluster. 
+ type: list + elements: str + choices: + - AOS + - ONE_NODE + - TWO_NODE + authorized_public_key_list: + description: + - The list of authorized public keys. + - Cannot be set during cluster create. + - Use cluster update to update the list. + - Given public keys will override the existing public keys. + type: list + elements: dict + suboptions: + name: + description: + - The name of the public key. + type: str + required: true + key: + description: + - The key of the public key. + type: str + required: true + redundancy_factor: + description: + - The redundancy factor of the cluster. + type: int + cluster_arch: + description: + - The architecture of the cluster. + type: str + choices: + - X86_64 + - PPC64LE + fault_tolerance_state: + description: + - The fault tolerance state of the cluster. + type: dict + suboptions: + domain_awareness_level: + description: + - The domain awareness level of the fault tolerance state. + type: str + required: true + choices: + - NODE + - BLOCK + - RACK + - DISK + desired_cluster_fault_tolerance: + description: + - The desired cluster fault tolerance of the fault tolerance state. + type: str + choices: + - CFT_1N_OR_1D + - CFT_2N_OR_2D + - CFT_1N_AND_1D + - CFT_0N_AND_0D + operation_mode: + description: + - The operation mode of the cluster. + type: str + choices: + - NORMAL + - READ_ONLY + - STAND_ALONE + - SWITCH_TO_TWO_NODE + - OVERRIDE + encryption_in_transit_status: + description: + - The encryption in transit status of the cluster. + type: str + choices: + - ENABLED + - DISABLED + network: + description: + - The network configuration of the cluster. + type: dict + suboptions: + external_address: + description: + - The external address of the cluster. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the external address. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. 
+ type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the external address. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + external_data_service_ip: + description: + - The external data service IP of the cluster. + - Cannot be set during cluster create. + - Use cluster update to add/update. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the external data service IP. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the external data service IP. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + nfs_subnet_whitelist: + description: + - The list of NFS subnet whitelist. + type: list + elements: str + ntp_server_ip_list: + description: + - The list of NTP servers. + type: list + elements: dict + suboptions: + ipv4: + description: + - The IPv4 address of the NTP server. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the NTP server. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + fqdn: + description: + - The FQDN of the NTP server. 
+ type: dict + suboptions: + value: + description: + - The FQDN value. + type: str + required: true + name_server_ip_list: + description: + - The list of name servers. + type: list + elements: dict + suboptions: + ipv4: + description: + - The IPv4 address of the name server. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the name server. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + fqdn: + description: + - The FQDN of the name server. + type: dict + suboptions: + value: + description: + - The FQDN value. + type: str + required: true + smtp_server: + description: + - The SMTP server configuration. + - Cannot be set during cluster create. + - Add/Update smtp server during cluster update. + type: dict + suboptions: + email_address: + description: + - The email address of the SMTP server. + type: str + server: + description: + - The server configuration of the SMTP server. + type: dict + suboptions: + ip_address: + description: + - The IP address of the SMTP server. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the SMTP server. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the SMTP server. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. 
+ type: int + required: false + default: 128 + fqdn: + description: + - The FQDN of the SMTP server. + type: dict + suboptions: + value: + description: + - The FQDN value. + type: str + required: true + port: + description: + - The port of the SMTP server. + type: int + username: + description: + - The username of the SMTP server. + type: str + password: + description: + - The password of the SMTP server. + - If password is set, then idempotency checks will be skipped. + type: str + type: + description: + - The type of the SMTP server. + type: str + choices: + - PLAIN + - STARTTLS + - SSL + masquerading_ip: + description: + - The masquerading IP of the cluster. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the masquerading IP. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the masquerading IP. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + management_server: + description: + - The management server configuration. + type: dict + suboptions: + ip: + description: + - The IP address of the management server. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the management server. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the management server. + type: dict + suboptions: + value: + description: + - The IPv6 address value. 
+ type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + type: + description: + - The type of the management server. + type: str + choices: + - VCENTER + is_registered: + description: + - Whether the management server is registered. + type: bool + in_use: + description: + - Whether the management server is in use. + type: bool + is_drs_enabled: + description: + - Whether DRS is enabled for the management server. + type: bool + fqdn: + description: + - The FQDN of the cluster. + type: str + key_management_server_type: + description: + - The key management server type of the cluster. + type: str + choices: + - LOCAL + - PRISM_CENTRAL + - EXTERNAL + backplane: + description: + - The backplane network configuration. + type: dict + suboptions: + is_segmentation_enabled: + description: + - Whether segmentation is enabled for the backplane network. + type: bool + vlan_tag: + description: + - The VLAN tag of the backplane network. + type: int + subnet: + description: + - The subnet of the backplane network. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + netmask: + description: + - The netmask of the backplane network. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + http_proxy_list: + description: + - The list of HTTP proxies. + type: list + elements: dict + suboptions: + ip_address: + description: + - The IP address of the HTTP proxy. + type: dict + suboptions: + ipv4: + description: + - The IPv4 address of the HTTP proxy. + type: dict + suboptions: + value: + description: + - The IPv4 address value. 
+ type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the HTTP proxy. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. + type: int + required: false + default: 128 + port: + description: + - The port of the HTTP proxy. + type: int + username: + description: + - The username of the HTTP proxy. + type: str + password: + description: + - The password of the HTTP proxy. + type: str + name: + description: + - The name of the HTTP proxy. + type: str + required: true + proxy_types: + description: + - The types of the HTTP proxy. + type: list + elements: str + choices: + - HTTP + - HTTPS + - SOCKS + http_proxy_white_list: + description: + - The list of HTTP proxy white list. + type: list + elements: dict + suboptions: + target_type: + description: + - The target type of the HTTP proxy white list. + type: str + required: true + choices: + - IPV6_ADDRESS + - HOST_NAME + - DOMAIN_NAME_SUFFIX + - IPV4_NETWORK_MASK + - IPV4_ADDRESS + target: + description: + - The target of the HTTP proxy white list. + type: str + required: true + categories: + description: + - The categories of the cluster. + type: dict + container_name: + description: + - The name of the container. + type: str + dryrun: + description: + - Whether to run prechecks only. + type: bool + timeout: + description: + - The timeout for the operation. + - The timeout in seconds. + - By default there is no timeout + type: int + wait: + description: + - Whether to wait for the operation to complete. 
+ type: bool + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: Create cluster + nutanix.ncp.ntnx_clusters_v2: + nutanix_host: + nutanix_username: + nutanix_password: + name: "cluster1" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "10.0.0.1" + config: + cluster_function: ["AOS"] + redundancy_factor: 1 + cluster_arch: "X86_64" + fault_tolerance_state: + domain_awareness_level: "DISK" + +- name: Create cluster with network configuration + nutanix.ncp.ntnx_clusters_v2: + nutanix_host: + nutanix_username: + nutanix_password: + name: "cluster1" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "10.0.0.1" + config: + cluster_function: ["AOS"] + authorized_public_key_list: + - name: "key1" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6" + redundancy_factor: 1 + cluster_arch: X86_64 + fault_tolerance_state: + domain_awareness_level: "DISK" + network: + external_address: + ipv4: + value: "10.0.0.2" + ntp_server_ip_list: + - fqdn: + value: "test.ntp.org" + name_server_ip_list: + - ipv4: + value: "10.0.0.9" + timeout: 1800 + +- name: Update cluster + nutanix.ncp.ntnx_clusters_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: "" + name: "cluster1" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "10.1.0.1" + config: + cluster_function: ["AOS"] + redundancy_factor: 1 + cluster_arch: "X86_64" + fault_tolerance_state: + domain_awareness_level: "DISK" + +- name: Destroy cluster + nutanix.ncp.ntnx_clusters_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + state: absent +""" + +RETURN = r""" +response: + description: + - Response for the cluster operation. + - For update, it will be cluster details if C(wait) is True. + - For update, it will be task details if C(wait) is False. 
+ - For create and delete, it will be always task details. + type: dict + returned: always + sample: + { + "config": + { + "authorized_public_key_list": + [ + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6", + "name": "key1", + }, + ], + "build_info": + { + "build_type": "release", + "commit_id": "9b27c8bcb5fcaac58016f3bed74009655a157049", + "full_version": "el8.5-release-fraser-6.8-stable-9b27c8bcb5fcaac58016f3bed74009655a157049", + "short_commit_id": "9b27c8", + "version": "fraser-6.8-stable", + }, + "cluster_arch": "X86_64", + "cluster_function": ["AOS", "ONE_NODE"], + "cluster_software_map": + [ + { "software_type": "NCC", "version": "ncc-5.0.0" }, + { + "software_type": "NOS", + "version": "el8.5-release-fraser-6.8-stable-9b27c8bcb5fcaac58016f3bed74009655a157049", + }, + ], + "encryption_in_transit_status": null, + "encryption_option": null, + "encryption_scope": null, + "fault_tolerance_state": + { + "current_max_fault_tolerance": 0, + "desired_max_fault_tolerance": 0, + "domain_awareness_level": "DISK", + }, + "hypervisor_types": ["AHV"], + "incarnation_id": 1283123882137, + "is_lts": false, + "operation_mode": "NORMAL", + "password_remote_login_enabled": true, + "redundancy_factor": 1, + "remote_support": false, + "timezone": "UTC", + }, + "container_name": null, + "ext_id": "00061de6-4a87-6b06-185b-ac1f6b6f97e2", + "inefficient_vm_count": null, + "links": null, + "name": "ansible_ag", + "network": + { + "backplane": + { + "is_segmentation_enabled": false, + "netmask": null, + "subnet": null, + "vlan_tag": null, + }, + "external_address": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.1" }, + "ipv6": null, + }, + "external_data_service_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.2" }, + "ipv6": null, + }, + "fqdn": null, + "key_management_server_type": null, + "management_server": null, + "masquerading_ip": null, + "masquerading_port": null, + "name_server_ip_list": + [ + { + "fqdn": null, + "ipv4": { "prefix_length": 32, 
"value": "10.0.0.6" }, + "ipv6": null, + }, + ], + "nfs_subnet_whitelist": null, + "ntp_server_ip_list": + [ + { + "fqdn": { "value": "0.ntp.org" }, + "ipv4": null, + "ipv6": null, + }, + ], + "smtp_server": + { + "email_address": "test@test.com", + "server": + { + "ip_address": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.8" }, + "ipv6": null, + }, + "password": null, + "port": 25, + "username": "username", + }, + "type": "STARTTLS", + }, + }, + "nodes": + { + "node_list": + [ + { + "controller_vm_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.6" }, + "ipv6": null, + }, + "host_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.0.0.10" }, + "ipv6": null, + }, + "node_uuid": "af49a0bb-b3d7-41c0-b9c2-f4ca0e8763e9", + }, + ], + "number_of_nodes": 1, + }, + "run_prechecks_only": null, + "tenant_id": null, + "upgrade_status": "SUCCEEDED", + "vm_count": 1, +} +ext_id: + description: + - The external ID of the cluster. + type: str + returned: always + sample: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" +task_ext_id: + description: + - The task external ID. + type: str + returned: always + sample: "ZXJnb24=:100a5778-9824-49c7-9444-222aa97f5874" +changed: + description: + - Indicates if any changes were made during the operation. + type: bool + returned: always + sample: true +msg: + description: + - The message from module operation if any. + type: str + returned: always + sample: "Cluster with external ID '00061de6-4a87-6b06-185b-ac1f6b6f97e2' will be deleted." +error: + description: + - The error message if an error occurs. + type: str + returned: when an error occurs +skipped: + description: + - Indicates if the operation was skipped. 
+ type: bool + returned: when the operation was skipped + sample: true +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, + get_etag, +) +from ..module_utils.v4.clusters_mgmt.helpers import get_cluster # noqa: E402 +from ..module_utils.v4.clusters_mgmt.spec.clusters import ClusterSpecs # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_clustermgmt_py_client as clusters_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as clusters_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = ClusterSpecs.get_cluster_spec() + module_args["timeout"] = dict(type="int") + module_args["dryrun"] = dict(type="bool") + return module_args + + +def create_cluster(module, result): + clusters = get_clusters_api_instance(module) + sg = SpecGenerator(module) + default_spec = clusters_sdk.Cluster() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating cluster create spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + dry_run = module.params.get("dryrun", False) + try: + resp = 
clusters.create_cluster(body=spec, _dryrun=dry_run) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating cluster", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, polling_gap=15) + + # Post cluster create, cluster is still not registered to PC + # So there will be no cluster info in PC, hence respond with task status + result["response"] = strip_internal_attributes(resp.to_dict()) + + if module.params.get("dryrun", False): + result["changed"] = False + else: + result["changed"] = True + + +def check_cluster_idempotency(current_spec, update_spec): + + # trigger update if smtp server password is set + if getattr(update_spec.network, "smtp_server", None): + if getattr(update_spec.network.smtp_server, "server", None): + if getattr(update_spec.network.smtp_server.server, "password", None): + return False + if current_spec != update_spec: + return False + return True + + +def update_cluster(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + sg = SpecGenerator(module) + default_spec = clusters_sdk.Cluster() + spec, err = sg.generate_spec(obj=default_spec) + clusters = get_clusters_api_instance(module) + current_spec = get_cluster(module, clusters, ext_id=ext_id) + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("Unable to fetch etag for updating cluster", **result) + + kwargs = {"if_match": etag} + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating clusters update spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + if check_cluster_idempotency(current_spec, update_spec): + 
result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + resp = None + try: + resp = clusters.update_cluster_by_id(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating cluster", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_cluster(module, clusters, ext_id=ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def destroy_cluster(module, result): + ext_id = module.params.get("ext_id") + + result["ext_id"] = ext_id + + if module.check_mode: + result["msg"] = "Cluster with external ID '{0}' will be deleted.".format(ext_id) + return + + clusters = get_clusters_api_instance(module) + current_spec = get_cluster(module, clusters, ext_id=ext_id) + + etag = get_etag(current_spec) + if not etag: + return module.fail_json("unable to fetch etag for destroying cluster", **result) + + kwargs = {"if_match": etag} + dry_run = module.params.get("dryrun", False) + resp = None + try: + resp = clusters.delete_cluster_by_id(extId=ext_id, _dryrun=dry_run, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting cluster", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + 
msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_cluster(module, result) + else: + create_cluster(module, result) + else: + destroy_cluster(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_directory_services_info_v2.py b/plugins/modules/ntnx_directory_services_info_v2.py new file mode 100644 index 000000000..d9fe978a4 --- /dev/null +++ b/plugins/modules/ntnx_directory_services_info_v2.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_directory_services_info_v2 +short_description: Fetch directory services info +version_added: 2.0.0 +description: + - This module is used to fetch directory services. + - Fetch a directory service using ext_id or multiple directory services. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - directory service external ID. + - If used, only the directory service with the specified external ID will be fetched. + Else, multiple directory services will be fetched as per query params. 
+ required: false + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all directory services + nutanix.ncp.ntnx_directory_services_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: Fetch directory service by ext_id + nutanix.ncp.ntnx_directory_services_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a" + register: result + +- name: List all directory services with filter + nutanix.ncp.ntnx_directory_services_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "name eq 'directory_service_name'" + register: result +""" +RETURN = r""" +response: + description: + - Response for fetching directory services. + - Returns directory service info using directory service external ID or list of directory services. 
+ type: dict + returned: always + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-05-29T08:34:50.438254+00:00", + "directory_type": "ACTIVE_DIRECTORY", + "domain_name": "nutanix", + "ext_id": "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a", + "group_search_type": "NON_RECURSIVE", + "last_updated_time": "2024-05-29T13:10:40.771273+00:00", + "links": null, + "name": "qa_nucalm_io", + "open_ldap_configuration": null, + "secondary_urls": null, + "service_account": { + "password": "****", + "username": "admin@email.com" + }, + "tenant_id": "59d5de78-a964-5746-8c6e-677c4c7a79df", + "url": "ldap://10.0.0.1:256", + "white_listed_groups": [ + "cn=group1,cn=users_test,dc=nutanix", + "cn=group2,cn=users_test,dc=nutanix", + "cn=group3,cn=users_test,dc=nutanix" + ] + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_directory_service_api_instance, +) +from ..module_utils.v4.iam.helpers import get_directory_service # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + 
ext_id=dict(type="str"), + ) + + return module_args + + +def get_directory_service_by_ext_id(module, directory_services, result): + ext_id = module.params.get("ext_id") + resp = get_directory_service(module, directory_services, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_directory_services(module, directory_services, result): + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating directory services info Spec", **result) + + try: + resp = directory_services.list_directory_services(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching directory services info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + directory_services = get_directory_service_api_instance(module) + if module.params.get("ext_id"): + get_directory_service_by_ext_id(module, directory_services, result) + else: + get_directory_services(module, directory_services, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_directory_services_v2.py b/plugins/modules/ntnx_directory_services_v2.py new file mode 100644 index 000000000..8a954d8ad --- /dev/null +++ b/plugins/modules/ntnx_directory_services_v2.py @@ -0,0 +1,481 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_directory_services_v2 +short_description: Module to create, update and delete directory services in Nutanix PC. +version_added: "2.0.0" +description: + - This module is used to create, update and delete directory services in Nutanix PC. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - State of the directory service. Whether to create, update, or delete. + - If C(state) is C(present) and C(ext_id) is not provided, create a new directory service. + - If C(state) is C(present) and C(ext_id) is provided, update the directory service. + - If C(state) is C(absent), it will delete the directory service with the given External ID. + type: str + choices: ['present', 'absent'] + ext_id: + description: + - External ID of the Directory Service. + - Required for updating or deleting the Directory Service. + required: false + type: str + name: + description: + - Name for the Directory Service. + - Required for creating directory service. + type: str + url: + description: + - URL for the Directory Service. + - Required for creating directory service. + type: str + secondary_urls: + description: + - Secondary URL for the Directory Service. + required: false + type: list + elements: str + domain_name: + description: + - Domain name for the Directory Service. + - Required for creating directory service. + type: str + directory_type: + description: + - Type of Directory Service. + choices: ['ACTIVE_DIRECTORY', 'OPEN_LDAP'] + type: str + service_account: + description: + - Information of Service account to connect to the Directory Service. + - Required for creating directory service. + type: dict + suboptions: + username: + description: + - Username to connect to the Directory Service. + type: str + password: + description: + - Password to connect to the Directory Service. + - If provided, idempotency check will be skipped. 
+ type: str + open_ldap_configuration: + description: + - Configuration for OpenLDAP Directory Service. + required: false + type: dict + suboptions: + user_configuration: + description: + - User configuration for OpenLDAP Directory Service. + type: dict + suboptions: + user_object_class: + description: + - Object class in the OpenLDAP system that corresponds to Users. + type: str + user_search_base: + description: + - Base DN for User search. + type: str + username_attribute: + description: + - Unique Identifier for each User which can be used in Authentication. + type: str + user_group_configuration: + description: + - User Group configuration for OpenLDAP Directory Service. + type: dict + suboptions: + group_object_class: + description: + - Object class in the OpenLDAP system that corresponds to groups. + type: str + group_search_base: + description: + - Base DN for group search. + type: str + group_member_attribute: + description: + - Attribute in a group that associates Users to the group. + type: str + group_member_attribute_value: + description: + - User attribute value that will be used in group entity to associate User to the group. + type: str + group_search_type: + description: + - Group membership search type for the Directory Service. + required: false + choices: ['NON_RECURSIVE', 'RECURSIVE'] + type: str + white_listed_groups: + description: + - List of allowed User Groups for the Directory Service. + required: false + type: list + elements: str + wait: + description: + - Wait for the operation to complete. + - it is not supported here as this module does not have task polling required. 
+ type: bool + required: false + default: True +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create ACTIVE_DIRECTORY service + nutanix.ncp.ntnx_directory_services_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: directory_service_name + url: ldap://10.0.0.0:389 + directory_type: "ACTIVE_DIRECTORY" + domain_name: "nutanix" + service_account: + username: admin + password: Nutanix@123456 + register: result + +- name: Update ACTIVE_DIRECTORY service + nutanix.ncp.ntnx_directory_services_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + ext_id: "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a" + name: directory_service_name + url: ldap://10.0.0.0:389 + directory_type: "ACTIVE_DIRECTORY" + domain_name: "nutanix" + service_account: + username: admin + password: Nutanix@123456 + white_listed_groups: + - test_group_updated + register: result + +- name: Delete ACTIVE_DIRECTORY service + nutanix.ncp.ntnx_directory_services_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a" + register: result +""" +RETURN = r""" +response: + description: + - Response for creating, updating or deleting directory services. + - Directory service details in case of creating or updating directory service. + - None in case of deleting directory service. 
+ type: dict + returned: always + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-07-02T05:34:25.878533+00:00", + "directory_type": "ACTIVE_DIRECTORY", + "domain_name": "nutanix", + "ext_id": "075b79bb-fc36-5cdd-9296-0b141d531266", + "group_search_type": "NON_RECURSIVE", + "last_updated_time": "2024-07-02T05:34:25.878533+00:00", + "links": null, + "name": "stxRVGlcSTMhansible-ag", + "open_ldap_configuration": null, + "secondary_urls": null, + "service_account": { + "password": "****", + "username": "nutanix@email.com" + }, + "tenant_id": "59d5de78-a964-5746-8c6e-677c4c7a79df", + "url": "ldap://10.0.0.2:485", + "white_listed_groups": [ + "test_updated" + ] + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: External ID of the Directory Service + returned: always + type: str + sample: "075b79bb-fc36-5cdd-9296-0b141d531266" + +failed: + description: This indicates whether the task failed + returned: always + type: bool + sample: false + +msg: + description: String containing any message from module. 
+ returned: always + type: str + sample: "Directory Service with ext_id: 075b79bb-fc36-5cdd-9296-0b141d531266 deleted successfully" +""" + + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_directory_service_api_instance, + get_etag, +) +from ..module_utils.v4.iam.helpers import get_directory_service # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + account_spec = dict( + username=dict(type="str"), + password=dict(type="str", no_log=True), + ) + + user_config_spec = dict( + user_object_class=dict(type="str"), + user_search_base=dict(type="str"), + username_attribute=dict(type="str"), + ) + + user_group_config_spec = dict( + group_object_class=dict(type="str"), + group_search_base=dict(type="str"), + group_member_attribute=dict(type="str"), + group_member_attribute_value=dict(type="str"), + ) + + open_ldap_config_spec = dict( + user_configuration=dict( + type="dict", options=user_config_spec, obj=iam_sdk.UserConfiguration + ), + user_group_configuration=dict( + type="dict", + options=user_group_config_spec, + obj=iam_sdk.UserGroupConfiguration, + ), + ) + + module_args = dict( + ext_id=dict(type="str"), + 
name=dict(type="str"), + url=dict(type="str"), + secondary_urls=dict(type="list", elements="str"), + domain_name=dict(type="str"), + directory_type=dict(type="str", choices=["ACTIVE_DIRECTORY", "OPEN_LDAP"]), + service_account=dict( + type="dict", options=account_spec, obj=iam_sdk.DsServiceAccount + ), + open_ldap_configuration=dict( + type="dict", options=open_ldap_config_spec, obj=iam_sdk.OpenLdapConfig + ), + group_search_type=dict(type="str", choices=["NON_RECURSIVE", "RECURSIVE"]), + white_listed_groups=dict(type="list", elements="str"), + ) + return module_args + + +def create_directory_service(module, directory_services, result): + sg = SpecGenerator(module) + default_spec = iam_sdk.DirectoryService() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create directory services spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = directory_services.create_directory_service(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating directory service", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def check_directory_services_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + + return True + + +def update_directory_service(module, directory_services, result): + + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_directory_service(module, directory_services, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating directory services update spec", **result + ) + + if module.check_mode: + result["response"] = 
strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if not module.params.get("service_account", {}).get("password"): + if check_directory_services_idempotency( + current_spec.to_dict(), update_spec.to_dict() + ): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + resp = None + try: + resp = directory_services.update_directory_service_by_id( + extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating directory service", + ) + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + result["changed"] = True + + +def delete_directory_service(module, directory_services, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + current_spec = get_directory_service(module, directory_services, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for deleting directory service", **result + ) + + kwargs = {"if_match": etag} + try: + resp = directory_services.delete_directory_service_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting directory service", + ) + + result["changed"] = True + if resp is None: + result["msg"] = "Directory Service with ext_id: {} deleted successfully".format( + ext_id + ) + else: + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + directory_services = 
get_directory_service_api_instance(module) + if state == "present": + if module.params.get("ext_id"): + update_directory_service(module, directory_services, result) + else: + create_directory_service(module, directory_services, result) + else: + delete_directory_service(module, directory_services, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_discover_unconfigured_nodes_v2.py b/plugins/modules/ntnx_discover_unconfigured_nodes_v2.py new file mode 100644 index 000000000..1e08cae16 --- /dev/null +++ b/plugins/modules/ntnx_discover_unconfigured_nodes_v2.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_discover_unconfigured_nodes_v2 +short_description: Discover unconfigured nodes from Nutanix Prism Central +description: + - Discover unconfigured nodes from Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + address_type: + description: + - Specifies the type of address, either IPv4 or IPv6. + type: str + choices: + - IPV4 + - IPV6 + ip_filter_list: + description: + - IP addresses of the unconfigured nodes. + type: list + elements: dict + suboptions: + ipv4: + description: + - Configuration for IPv4 address. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv4 address. + type: int + required: false + ipv6: + description: + - Configuration for IPv6 address. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The prefix length of the IPv6 address. 
+ type: int + required: false + uuid_filter_list: + description: + - Unconfigured node UUIDs. + type: list + elements: str + interface_filter_list: + description: + - Interface name that is used for packet broadcasting. + type: list + elements: str + is_manual_discovery: + description: + - Indicates if the discovery is manual or not. + type: bool + timeout: + description: + - Timeout for the workflow in seconds. + type: int + cluster_ext_id: + description: + - External ID of the cluster. + - If not provided, Prism Central cluster will be fetched. + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Discover unconfigured node + nutanix.ncp.ntnx_discover_unconfigured_nodes_v2: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + address_type: "IPV4" + ip_filter_list: + - ipv4: + value: "10.12.102.22" + register: result + ignore_errors: true +""" + +RETURN = r""" +response: + description: + - Response for the discover unconfigured node operation. + - This field typically holds the task details. 
+ type: dict + returned: always + sample: + { + "node_list": + [ + { + "arch": null, + "attributes": null, + "cluster_id": null, + "cpu_type": null, + "current_cvm_vlan_tag": "null", + "current_network_interface": null, + "cvm_ip": + { + "ipv4": { "prefix_length": 32, "value": "10.37.186.154" }, + "ipv6": null, + }, + "foundation_version": null, + "host_name": "ntnx-nahv-671ddc077298f65ebce337ed-1\n", + "host_type": "HYPER_CONVERGED", + "hypervisor_ip": null, + "hypervisor_type": "AHV", + "hypervisor_version": "10.0-727", + "interface_ipv6": null, + "ipmi_ip": null, + "is_one_node_cluster_supported": true, + "is_secure_booted": null, + "is_two_node_cluster_supported": true, + "node_position": "A", + "node_serial_number": "10-37-186-154", + "node_uuid": null, + "nos_version": "6.9", + "rackable_unit_max_nodes": null, + "rackable_unit_model": "NestedAHV", + "rackable_unit_serial": "10-37-186-154", + }, + ], + } + +task_ext_id: + description: Task external ID. + type: str + returned: always + sample: "ZXJnb24=:5235a162-25c2-41b7-50ba-71b2e545fdba" + +error: + description: Error message if an error occurs. 
+ type: str + returned: when an error occurs +""" + +import traceback # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, +) +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_clustermgmt_py_client as clustermgmt_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as clustermgmt_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + ipv4_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ipv6_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ip_address_spec = dict( + ipv4=dict(type="dict", options=ipv4_spec), + ipv6=dict(type="dict", options=ipv6_spec), + ) + module_args = dict( + address_type=dict(type="str", choices=["IPV4", "IPV6"]), + ip_filter_list=dict(type="list", elements="dict", options=ip_address_spec), + uuid_filter_list=dict(type="list", elements="str"), + interface_filter_list=dict(type="list", elements="str"), + is_manual_discovery=dict(type="bool"), + timeout=dict(type="int"), + cluster_ext_id=dict(type="str"), + ) + return module_args + + +def discover_unconfigured_cluster_node(module, cluster_node_api, result): + sg = SpecGenerator(module) + default_spec = clustermgmt_sdk.NodeDiscoveryParams() + spec, err = sg.generate_spec(default_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for discovering 
cluster nodes", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + resp = None + if module.params.get("cluster_ext_id"): + cluster_ext_id = module.params.get("cluster_ext_id") + else: + params = { + "filter": "config/clusterFunction/any(t:t eq Clustermgmt.Config.ClusterFunctionRef'PRISM_CENTRAL')" + } + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=params) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for fetching prism central cluster", + **result, + ) + try: + pc_cluster = cluster_node_api.list_clusters(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching clusters info", + ) + if not pc_cluster.data: + module.fail_json( + msg="No Prism Central cluster found in the environment", **result + ) + cluster_ext_id = pc_cluster.data[0].ext_id + result["cluster_ext_id"] = cluster_ext_id + try: + resp = cluster_node_api.discover_unconfigured_nodes( + clusterExtId=cluster_ext_id, body=spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while discovering cluster nodes", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + if ":" in task_ext_id: + task_ext_id = task_ext_id.split(":")[1] + task_status = cluster_node_api.fetch_task_response( + extId=task_ext_id, taskResponseType="UNCONFIGURED_NODES" + ) + result["response"] = strip_internal_attributes(task_status.data.to_dict()) + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + 
remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "cluster_ext_id": None, + } + cluster_node_api = get_clusters_api_instance(module) + discover_unconfigured_cluster_node(module, cluster_node_api, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_floating_ips.py b/plugins/modules/ntnx_floating_ips.py index 826083bbe..8ac80dd92 100644 --- a/plugins/modules/ntnx_floating_ips.py +++ b/plugins/modules/ntnx_floating_ips.py @@ -74,7 +74,7 @@ EXAMPLES = r""" - name: create Floating IP with External Subnet Name ntnx_floating_ips: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -84,7 +84,7 @@ - name: create Floating IP with vpc Name with external subnet uuid ntnx_floating_ips: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -92,12 +92,12 @@ external_subnet: uuid: "{{external_subnet.subnet_uuiid}}" vpc: - name: "{{vpc.vpc_name}}" + name: "{{vpc.vpc_name}}" private_ip: "{{private_ip}}" - name: create Floating IP with External Subnet with vm ntnx_floating_ips: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -178,9 +178,9 @@ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.floating_ips import FloatingIP # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.floating_ips import FloatingIP # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_floating_ips_info.py b/plugins/modules/ntnx_floating_ips_info.py index bb0389e5f..9e4b03171 100644 --- 
a/plugins/modules/ntnx_floating_ips_info.py +++ b/plugins/modules/ntnx_floating_ips_info.py @@ -32,29 +32,28 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List Floating ip using ip starts with 10 filter criteria - ntnx_floating_ips_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - floating_ip: "10." - kind: floating_ip - register: result - - - name: List Floating ip using length, offset, sort order and floating_ip sort attribute - ntnx_floating_ips_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 3 - offset: 0 - sort_order: "DESCENDING" - sort_attribute: "floating_ip" - register: result - +- name: List Floating ip using ip starts with 10 filter criteria + ntnx_floating_ips_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + floating_ip: "10." 
+ kind: floating_ip + register: result + +- name: List Floating ip using length, offset, sort order and floating_ip sort attribute + ntnx_floating_ips_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 3 + offset: 0 + sort_order: "DESCENDING" + sort_attribute: "floating_ip" + register: result """ RETURN = r""" api_version: @@ -125,9 +124,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.floating_ips import FloatingIP # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.floating_ips import FloatingIP # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_floating_ips_info_v2.py b/plugins/modules/ntnx_floating_ips_info_v2.py new file mode 100644 index 000000000..6f53540b3 --- /dev/null +++ b/plugins/modules/ntnx_floating_ips_info_v2.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_floating_ips_info_v2 +short_description: floating_ip info module +version_added: 2.0.0 +description: + - Get floating_ips info + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - floating_ip external ID + type: str + expand: + description: + - flag to expand related resources for the floating IP + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: List Floating_ips + nutanix.ncp.ntnx_floating_ips_info_v2: + 
nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: Get floating_ips using ext_id + nutanix.ncp.ntnx_floating_ips_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" +""" +RETURN = r""" +response: + description: + - The response from the floating ips v4 API. + - it can be floating ip or list of floating ips as per spec. + returned: always + type: dict + sample: { + "data": [ + { + "extId": "00000000-0000-0000-0000-000000000000", + "metadata": { + "ownerReferenceId": "00000000-0000-0000-0000-000000000000", + "ownerUserName": "admin" + }, + "floatingIp": { + "ipv4": { + "value": "192.168.1.69" + } + }, + "externalSubnetReference": "00000000-0000-0000-0000-000000000000" + } + ], + "metadata": { + "flags": [ + { + "name": "hasError", + "value": false + }, + { + "name": "isPaginated", + "value": false + } + ], + "totalAvailableResults": 1 + } +} + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: + - The external ID of the floating ip when specific floating ip is fetched. 
+ type: str + returned: always + sample: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_floating_ip_api_instance, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + expand=dict(type="str"), + ) + + return module_args + + +def get_floating_ips(module, result): + floating_ips = get_floating_ip_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params, extra_params=["expand"]) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating floating_ips info Spec", **result) + + try: + resp = floating_ips.list_floating_ips(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching floating_ips info", + ) + + if not resp or not getattr(resp, "data", []): + result["response"] = [] + else: + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_floating_ip(module, result): + floating_ips = get_floating_ip_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = floating_ips.get_floating_ip_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching floating IP info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + 
argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_floating_ip(module, result) + else: + get_floating_ips(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_floating_ips_v2.py b/plugins/modules/ntnx_floating_ips_v2.py new file mode 100644 index 000000000..f61dd6dec --- /dev/null +++ b/plugins/modules/ntnx_floating_ips_v2.py @@ -0,0 +1,529 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_floating_ips_v2 +short_description: floating_ips module which supports floating_ip CRUD operations +version_added: 2.0.0 +description: + - Create, Update, Delete floating_ips + - For floating IP create and delete, module will return tasks status in response after operation. + - For floating IP update, module will return floating IP info if C(wait) is true, else task status. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - if C(state) is present, it will create or update the floating IP. + - If C(state) is set to C(present) and ext_id is not provided then the operation will be create the floating IP + - If C(state) is set to C(present) and ext_id is provided then the operation will be update the floating IP + - If C(state) is set to C(absent) and ext_id is provided , then operation will be delete the floating IP + type: str + choices: ['present', 'absent'] + ext_id: + description: + - Subnet external ID + - Required only for updating or deleting the subnet. 
+ type: str + association: + description: Spec to associating Floating IP with either VM NIC or Private IP + type: dict + suboptions: + private_ip_association: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + private_ip: + description: Private IP address specification. + type: dict + required: true + suboptions: + ipv4: + description: IPv4 address specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 address specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + vpc_reference: + description: VPC in which the private IP exists. + type: str + required: true + vm_nic_association: + description: VM NIC reference. + type: dict + suboptions: + vpc_reference: + description: VPC reference ID. + type: str + vm_nic_reference: + description: VM NIC reference ID. + type: str + required: true + description: + description: Description for the Floating IP. + type: str + external_subnet_reference: + description: External subnet reference ID. + type: str + floating_ip: + description: Floating IP address. + type: dict + suboptions: + ipv4: + description: IPv4 floating IP details. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 floating IP details. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + metadata: + description: Metadata for the floating ip. + type: dict + suboptions: + category_ids: + description: A list of globally unique identifiers that represent all the categories the resource is associated with. 
+ elements: str + type: list + owner_reference_id: + description: A globally unique identifier that represents the owner of this resource. + type: str + owner_user_name: + description: The userName of the owner of this resource. + type: str + project_name: + description: The name of the project this resource belongs to. + type: str + project_reference_id: + description: A globally unique identifier that represents the project this resource belongs to. + type: str + name: + description: + - Name of the floating ip. + - Required for create. + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create floating ip using private IP in VPC + nutanix.ncp.ntnx_floating_ips_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + vpc_reference: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + name: "test" + association: + private_ip_association: + private_ip: + ipv4: + value: "192.168.1.2" + vpc_reference: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + +- name: Delete floating IP + nutanix.ncp.ntnx_floating_ips_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + +- name: Create floating ip with external subnet and vm nic reference + nutanix.ncp.ntnx_floating_ips_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + external_subnet_reference: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + association: + vm_nic_association: + vm_nic_reference: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + name: "test" +""" + +RETURN = r""" +response: + description: + - Floating IP info for update operation + - Task 
details for create or delete operation + returned: always + type: dict + sample: { + "data": { + "extId": "00000000-0000-0000-0000-000000000000", + "metadata": { + "ownerReferenceId": "00000000-0000-0000-0000-000000000000", + "ownerUserName": "admin" + }, + "floatingIp": { + "ipv4": { + "value": "192.168.1.69" + } + }, + "externalSubnetReference": "00000000-0000-0000-0000-000000000000" + }, + "metadata": { + "flags": [ + { + "name": "hasError", + "value": false + }, + { + "name": "isPaginated", + "value": false + } + ], + "totalAvailableResults": 1 + } +} +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: Floating IP + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_etag, + get_floating_ip_api_instance, +) +from ..module_utils.v4.network.helpers import get_floating_ip # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client as net_sdk # noqa: E402 +except ImportError: + + from 
..module_utils.v4.sdk_mock import mock_sdk as net_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + association_obj_map = { + "vm_nic_association": net_sdk.VmNicAssociation, + "private_ip_association": net_sdk.PrivateIpAssociation, + } + + ip_address_sub_spec = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + ip_address_spec = dict( + ipv4=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + ipv6=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + ) + + floating_ip_spec = dict( + ipv4=dict( + type="dict", options=ip_address_sub_spec, obj=net_sdk.FloatingIPv4Address + ), + ipv6=dict( + type="dict", options=ip_address_sub_spec, obj=net_sdk.FloatingIPv6Address + ), + ) + + metadata_spec = dict( + owner_reference_id=dict(type="str"), + owner_user_name=dict(type="str"), + project_reference_id=dict(type="str"), + project_name=dict(type="str"), + category_ids=dict(type="list", elements="str"), + ) + + vm_nic_association_spec = dict( + vm_nic_reference=dict(type="str", required=True), + vpc_reference=dict(type="str"), + ) + + private_ip_association_spec = dict( + private_ip=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress, required=True + ), + vpc_reference=dict(type="str", required=True), + ) + + association_spec = dict( + vm_nic_association=dict(type="dict", options=vm_nic_association_spec), + private_ip_association=dict(type="dict", options=private_ip_association_spec), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + association=dict( + type="dict", + options=association_spec, + obj=association_obj_map, + mutually_exclusive=[("vm_nic_association", "private_ip_association")], + ), + floating_ip=dict( + type="dict", options=floating_ip_spec, 
obj=net_sdk.FloatingIPAddress + ), + external_subnet_reference=dict(type="str"), + metadata=dict(type="dict", options=metadata_spec, obj=net_sdk.Metadata), + ) + + return module_args + + +def create_floating_ip(module, result): + floating_ips = get_floating_ip_api_instance(module) + + sg = SpecGenerator(module) + default_spec = net_sdk.FloatingIp() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create floating_ips Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = floating_ips.create_floating_ip(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating floating_ip", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, TASK_CONSTANTS.RelEntityType.FLOATING_IP + ) + if ext_id: + resp = get_floating_ip(module, floating_ips, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def check_floating_ips_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + return True + + +def update_floating_ip(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + floating_ips = get_floating_ip_api_instance(module) + + current_spec = get_floating_ip(module, floating_ips, ext_id=ext_id) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating floating_ips update spec", 
**result) + + # handle update of association type + if getattr(current_spec, "association", None): + association_type = current_spec.association.get_object_type() + if module.params.get("association").get("private_ip_association", None): + + if association_type != "networking.v4.config.PrivateIpAssociation": + associationSpec = net_sdk.PrivateIpAssociation() + params = module.params.get("association").get("private_ip_association") + update_spec.association, err = sg.generate_spec(associationSpec, params) + + elif module.params.get("association").get("vm_nic_association", None): + + if association_type != "networking.v4.config.VmNicAssociation": + associationSpec = net_sdk.VmNicAssociation() + params = module.params.get("association").get("vm_nic_association") + update_spec.association, err = sg.generate_spec(associationSpec, params) + + # check for idempotency + if check_floating_ips_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = floating_ips.update_floating_ip_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating floating_ip", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_floating_ip(module, floating_ips, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_floating_ip(module, result): + floating_ips = get_floating_ip_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = 
get_floating_ip(module, floating_ips, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for deleting floating_ip", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = floating_ips.delete_floating_ip_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting floating_ip", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_floating_ip(module, result) + else: + create_floating_ip(module, result) + else: + delete_floating_ip(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_foundation.py b/plugins/modules/ntnx_foundation.py index f5b7a3d15..b4b7a7573 100644 --- a/plugins/modules/ntnx_foundation.py +++ b/plugins/modules/ntnx_foundation.py @@ -936,71 +936,64 @@ EXAMPLES = r""" # in this example, we will image three nodes with new aos package and create cluster - name: Image nodes - hosts: localhost - gather_facts: false - 
collections: - - nutanix.ncp - tasks: - - name: Image nodes - ntnx_foundation: - timeout : 3660 - nutanix_host: "10.xx.xx.xx" - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - hypervisor_gateway: "10.xx.xx.xx" - hypervisor_netmask: "xx.xx.xx.xx" - default_ipmi_user: "username" - nos_package: "nutanix_aos_installer.tar.gz" - blocks: - - block_id: "" - nodes: - # manually added node / baremetal - - manual_mode : - current_cvm_vlan_tag: xx - cvm_gb_ram: 50 - ipmi_password: "password" - ipmi_ip: "10.xx.xx.xx" + ntnx_foundation: + timeout: 3660 + nutanix_host: "10.xx.xx.xx" + cvm_gateway: "10.xx.xx.xx" + cvm_netmask: "xx.xx.xx.xx" + hypervisor_gateway: "10.xx.xx.xx" + hypervisor_netmask: "xx.xx.xx.xx" + default_ipmi_user: "username" + nos_package: "nutanix_aos_installer.tar.gz" + blocks: + - block_id: "" + nodes: + # manually added node / baremetal + - manual_mode: + current_cvm_vlan_tag: xx + cvm_gb_ram: 50 + ipmi_password: "password" + ipmi_ip: "10.xx.xx.xx" + cvm_ip: "10.xx.xx.xx" + hypervisor: "kvm" + hypervisor_ip: "10.xx.xx.xx" + hypervisor_hostname: "superman-1" + node_position: "D" + # dos based node + - discovery_mode: + cvm_gb_ram: 50 + ipmi_password: "password" + node_serial: "node_serial" + discovery_override: + hypervisor_hostname: "superman-2" + hypervisor_ip: "10.xx.xx.xx" cvm_ip: "10.xx.xx.xx" hypervisor: "kvm" - hypervisor_ip: "10.xx.xx.xx" - hypervisor_hostname: "superman-1" - node_position: "D" - # dos based node - - discovery_mode: - cvm_gb_ram: 50 - ipmi_password : "password" - node_serial : "node_serial" - discovery_override: - hypervisor_hostname: "superman-2" - hypervisor_ip: "10.xx.xx.xx" - cvm_ip: "10.xx.xx.xx" - hypervisor: "kvm" - # aos based node - - discovery_mode: - cvm_gb_ram: 50 - ipmi_password : "password" - node_serial : "node_serial" - discovery_override: - hypervisor_hostname: "superman-3" - cvm_ip : "10.xx.xx.xx" - - clusters: - - name : "superman" - redundancy_factor: 2 - cluster_members: - - "10.xx.xx.xx" - - 
"10.xx.xx.xx" - - "10.xx.xx.xx" + # aos based node + - discovery_mode: + cvm_gb_ram: 50 + ipmi_password: "password" + node_serial: "node_serial" + discovery_override: + hypervisor_hostname: "superman-3" + cvm_ip: "10.xx.xx.xx" + clusters: + - name: "superman" + redundancy_factor: 2 + cluster_members: + - "10.xx.xx.xx" + - "10.xx.xx.xx" + - "10.xx.xx.xx" """ RETURN = r""" """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.image_nodes import ImageNodes # noqa: E402 -from ..module_utils.foundation.progress import Progress # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.image_nodes import ImageNodes # noqa: E402 +from ..module_utils.v3.foundation.progress import Progress # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_aos_packages_info.py b/plugins/modules/ntnx_foundation_aos_packages_info.py index e5f030b8b..a6294e327 100644 --- a/plugins/modules/ntnx_foundation_aos_packages_info.py +++ b/plugins/modules/ntnx_foundation_aos_packages_info.py @@ -24,9 +24,9 @@ """ EXAMPLES = r""" - - name: List packages - ntnx_foundation_aos_packages_info: - nutanix_host: "{{ ip }}" +- name: List packages + ntnx_foundation_aos_packages_info: + nutanix_host: "{{ ip }}" """ RETURN = r""" @@ -39,11 +39,11 @@ "package2",s ] """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.enumerate_aos_packages import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.enumerate_aos_packages import ( # noqa: E402 EnumerateAOSPackages, ) -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def 
get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_bmc_ipmi_config.py b/plugins/modules/ntnx_foundation_bmc_ipmi_config.py index 3c1a921d3..61b78a4ac 100644 --- a/plugins/modules/ntnx_foundation_bmc_ipmi_config.py +++ b/plugins/modules/ntnx_foundation_bmc_ipmi_config.py @@ -78,15 +78,14 @@ """ EXAMPLES = r""" - """ RETURN = r""" """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.bmc_ipmi_config import BMC # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.bmc_ipmi_config import BMC # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_central.py b/plugins/modules/ntnx_foundation_central.py index 7c8acdb76..edc099052 100644 --- a/plugins/modules/ntnx_foundation_central.py +++ b/plugins/modules/ntnx_foundation_central.py @@ -299,67 +299,67 @@ """ EXAMPLES = r""" - - name: image nodes and create cluster - ntnx_foundation_central: - state: present - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test-cls" - common_network_settings: - cvm_dns_servers: - - xx.xx.xx.xx - hypervisor_dns_servers: - - xx.xx.xx.xx - cvm_ntp_servers: - - xx.x.x.xx - hypervisor_ntp_servers: - - xx.x.x.xx - nodes_list: +- name: image nodes and create cluster + ntnx_foundation_central: + state: present + nutanix_host: '{{ pc }}' + nutanix_username: '{{ username }}' + nutanix_password: '{{ password }}' + validate_certs: false + cluster_name: test-cls + common_network_settings: + cvm_dns_servers: + - xx.xx.xx.xx + hypervisor_dns_servers: + - xx.xx.xx.xx + cvm_ntp_servers: + - xx.x.x.xx + hypervisor_ntp_servers: + - xx.x.x.xx + nodes_list: # manual based nodes - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: 
"10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-1" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-2" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" - # discovery nodes based on node serial - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - discovery_override: - hypervisor_hostname: "" - cvm_ip: "" - ipmi_ip: "" - redundancy_factor: 2 - skip_cluster_creation: true - aos_package_url: "" + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-1 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-2 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm + # discovery nodes based on node serial + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + discovery_override: + hypervisor_hostname: + cvm_ip: + ipmi_ip: + 
redundancy_factor: 2 + skip_cluster_creation: true + aos_package_url: """ RETURN = r""" @@ -488,9 +488,9 @@ import time # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.fc.imaged_clusters import ImagedCluster # noqa: E402 -from ..module_utils.fc.imaged_nodes import ImagedNode # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.fc.imaged_clusters import ImagedCluster # noqa: E402 +from ..module_utils.v3.fc.imaged_nodes import ImagedNode # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_central_api_keys.py b/plugins/modules/ntnx_foundation_central_api_keys.py index d8d9056dd..8312bd14e 100644 --- a/plugins/modules/ntnx_foundation_central_api_keys.py +++ b/plugins/modules/ntnx_foundation_central_api_keys.py @@ -30,13 +30,13 @@ """ EXAMPLES = r""" - - name: Create API key - ntnx_foundation_central_api_keys_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - alias: "test" +- name: Create API key + ntnx_foundation_central_api_keys_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: "test" """ RETURN = r""" @@ -56,8 +56,8 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.fc.api_keys import ApiKey # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.fc.api_keys import ApiKey # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_central_api_keys_info.py b/plugins/modules/ntnx_foundation_central_api_keys_info.py index a083706cf..624653650 100644 --- a/plugins/modules/ntnx_foundation_central_api_keys_info.py +++ b/plugins/modules/ntnx_foundation_central_api_keys_info.py @@ -36,28 +36,28 @@ """ EXAMPLES = r""" - - name: Get API key with alias filter 
- ntnx_foundation_central_api_keys_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - alias: "test" - - - name: Get API key with key_uuid filter - ntnx_foundation_central_api_keys_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - key_uuid : "{{ uuid of key }}" - - - name: List all the API key within FC - ntnx_foundation_central_api_keys_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False +- name: Get API key with alias filter + ntnx_foundation_central_api_keys_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + alias: "test" + +- name: Get API key with key_uuid filter + ntnx_foundation_central_api_keys_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + key_uuid: "{{ uuid of key }}" + +- name: List all the API key within FC + ntnx_foundation_central_api_keys_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false """ RETURN = r""" @@ -77,8 +77,8 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.fc.api_keys import ApiKey # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.fc.api_keys import ApiKey # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py b/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py index deae2d704..961f61c57 100644 --- a/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py +++ b/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py @@ -58,24 +58,24 @@ """ EXAMPLES = r""" - - name: 
Get cluster details using uuid - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - imaged_cluster_uuid: "{{node_uuid}}" +- name: Get cluster details using uuid + ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_cluster_uuid: "{{node_uuid}}" - - name: Get imaged clusters list based on filters - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - archived: false - length: 5 - offset: 1 +- name: Get imaged clusters list based on filters + ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + archived: false + length: 5 + offset: 1 """ RETURN = r""" @@ -248,8 +248,8 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.fc.imaged_clusters import ImagedCluster # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.fc.imaged_clusters import ImagedCluster # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py b/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py index 4a62ec521..aa979c588 100644 --- a/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py +++ b/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py @@ -62,24 +62,24 @@ """ EXAMPLES = r""" - - name: Get node details using uuid - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - imaged_node_uuid: "{{node_uuid}}" - - - name: Get 
imaged node list based on filters - ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - filters: - node_state: STATE_AVAILABLE - length: 5 - offset: 1 +- name: Get node details using uuid + ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + imaged_node_uuid: "{{node_uuid}}" + +- name: Get imaged node list based on filters + ntnx_foundation_central_imaged_nodes_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filters: + node_state: STATE_AVAILABLE + length: 5 + offset: 1 """ RETURN = r""" @@ -139,8 +139,8 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.fc.imaged_nodes import ImagedNode # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.fc.imaged_nodes import ImagedNode # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_discover_nodes_info.py b/plugins/modules/ntnx_foundation_discover_nodes_info.py index 740917864..08e09b60d 100644 --- a/plugins/modules/ntnx_foundation_discover_nodes_info.py +++ b/plugins/modules/ntnx_foundation_discover_nodes_info.py @@ -34,18 +34,19 @@ """ EXAMPLES = r""" - - name: Discover nodes - ntnx_foundation_discover_nodes_info: - nutanix_host: "{{ ip }}" - - - name: Discover all nodes - ntnx_foundation_discover_nodes_info: - nutanix_host: "{{ ip }}" - include_configured: true - - name: Discover nodes and include network info - ntnx_foundation_discover_nodes_info: - nutanix_host: "{{ ip }}" - include_network_details: true +- name: Discover nodes + ntnx_foundation_discover_nodes_info: + nutanix_host: '{{ ip }}' + +- name: Discover all nodes + ntnx_foundation_discover_nodes_info: + nutanix_host: 
'{{ ip }}' + include_configured: true + +- name: Discover nodes and include network info + ntnx_foundation_discover_nodes_info: + nutanix_host: '{{ ip }}' + include_network_details: true """ RETURN = r""" @@ -74,9 +75,9 @@ }, ] """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.node_discovery import NodeDiscovery # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.node_discovery import NodeDiscovery # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_hypervisor_images_info.py b/plugins/modules/ntnx_foundation_hypervisor_images_info.py index b5404f523..87aef4dd2 100644 --- a/plugins/modules/ntnx_foundation_hypervisor_images_info.py +++ b/plugins/modules/ntnx_foundation_hypervisor_images_info.py @@ -24,9 +24,9 @@ """ EXAMPLES = r""" - - name: List hypervisor images - ntnx_foundation_hypervisor_images_info: - nutanix_host: "{{ ip }}" +- name: List hypervisor images + ntnx_foundation_hypervisor_images_info: + nutanix_host: "{{ ip }}" """ RETURN = r""" @@ -39,11 +39,11 @@ "package2", ] """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.enumerate_hypervisor_isos import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.enumerate_hypervisor_isos import ( # noqa: E402 EnumerateHypervisorIsos, ) -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_image_upload.py b/plugins/modules/ntnx_foundation_image_upload.py index 719f66eaa..e0ea9b0f7 100644 --- a/plugins/modules/ntnx_foundation_image_upload.py +++ 
b/plugins/modules/ntnx_foundation_image_upload.py @@ -53,7 +53,7 @@ - name: Delete Image with esx installer_type ntnx_foundation_image_upload: nutanix_host: "{{ ip }}" - state : "absent" + state: "absent" filename: "temptar_dont_use.iso" installer_type: "esx" """ @@ -61,9 +61,9 @@ RETURN = r""" """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.image_upload import Image # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.image_upload import Image # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_foundation_node_network_info.py b/plugins/modules/ntnx_foundation_node_network_info.py index 35f48856e..bdbe73106 100644 --- a/plugins/modules/ntnx_foundation_node_network_info.py +++ b/plugins/modules/ntnx_foundation_node_network_info.py @@ -29,15 +29,13 @@ """ EXAMPLES = r""" - - name: Get node network info - ntnx_foundation_node_network_info: - nutanix_host: "{{ ip }}" - nodes: - - node_1_ipv6 - - node_2_ipv6 - - node_3_ipv6 - - +- name: Get node network info + ntnx_foundation_node_network_info: + nutanix_host: '{{ ip }}' + nodes: + - node_1_ipv6 + - node_2_ipv6 + - node_3_ipv6 """ RETURN = r""" @@ -64,11 +62,11 @@ } ] """ -from ..module_utils.foundation.base_module import FoundationBaseModule # noqa: E402 -from ..module_utils.foundation.node_network_details import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.foundation.base_module import FoundationBaseModule # noqa: E402 +from ..module_utils.v3.foundation.node_network_details import ( # noqa: E402 NodeNetworkDetails, ) -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_gpus_info_v2.py b/plugins/modules/ntnx_gpus_info_v2.py new 
file mode 100644 index 000000000..878f19902 --- /dev/null +++ b/plugins/modules/ntnx_gpus_info_v2.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_gpus_info_v2 +short_description: Fetches GPU(s) information attached to VM in a Nutanix prism central. +description: + - This module fetches GPU(s) information attached to a virtual machine in a Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External ID of the GPU. + - It can be used to get specific GPU info. + required: false + type: str + vm_ext_id: + description: + - External ID of the virtual machine. + required: true + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - George Ghawali (@george-ghawali) + +""" + +EXAMPLES = r""" +- name: Fetch GPU information by GPU external ID and VM external ID + nutanix.ncp.ntnx_gpus_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "d7db5aa1-34cc-4f96-a436-eb2d85c7ff9e" + vm_ext_id: "9a1aa5ca-20ff-4703-672a-f41ad0a401b9" + register: result + +- name: Fetch all GPUs attached to a VM + nutanix.ncp.ntnx_gpus_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "9a1aa5ca-20ff-4703-672a-f41ad0a401b9" + register: result + +- name: Fetch all GPUs attached to a VM using filter + nutanix.ncp.ntnx_gpus_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "mode eq 'PASSTHROUGH_COMPUTE'" + vm_ext_id: 
"9a1aa5ca-20ff-4703-672a-f41ad0a401b9" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching GPU(s) information. + - Returns GPU information if GPU external ID and VM external ID are provided. + - Returns list of multiple GPUs information if only VM external ID is provided. + type: dict + returned: always + sample: + { + "device_id": 5053, + "ext_id": "ca1f8f73-88f2-4ded-879e-da623c374bd4", + "fraction": 0, + "frame_buffer_size_bytes": 0, + "guest_driver_version": null, + "links": null, + "mode": "PASSTHROUGH_GRAPHICS", + "name": "Tesla_M10", + "num_virtual_display_heads": 0, + "pci_address": { "bus": 8, "device": 0, "func": 0, "segment": 0 }, + "tenant_id": null, + "vendor": "NVIDIA", + } +ext_id: + description: The external ID of the GPU. + type: str + returned: always +changed: + description: Indicates whether the state of the GPU has changed. + type: bool + returned: always +error: + description: The error message, if any. + type: str + returned: always +failed: + description: Indicates whether the task failed. 
+ type: bool + returned: always +""" + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_gpu # noqa: E402 + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_gpu_by_ext_id(module, gpus, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + resp = get_gpu(module, gpus, ext_id, vm_ext_id) + result["ext_id"] = module.params.get("ext_id") + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_gpus(module, gpus, result): + sg = SpecGenerator(module=module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating GPUs list Spec", **result) + + try: + resp = gpus.list_gpus_by_vm_id(vmExtId=module.params.get("vm_ext_id"), **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching GPUs list using VM external ID", + ) + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + gpus = get_vm_api_instance(module) + if module.params.get("ext_id"): + get_gpu_by_ext_id(module, gpus, result) + else: + get_gpus(module, gpus, result) + 
module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_gpus_v2.py b/plugins/modules/ntnx_gpus_v2.py new file mode 100644 index 000000000..adfeef3a9 --- /dev/null +++ b/plugins/modules/ntnx_gpus_v2.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_gpus_v2 +short_description: Module to attach/detach GPUs to/from VMs in Nutanix prism central. +description: + - This module allows you to attach or detach GPUs to or from virtual machines in Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - State of the GPU. Whether to attach or detach the GPU. + - Present -> Attaches the GPU to the VM. + - Absent -> Detaches the GPU from the VM. + required: false + type: str + choices: ["present", "absent"] + ext_id: + description: + - The external ID of the GPU. + - Required for attaching or detaching a GPU. + required: false + type: str + vm_ext_id: + description: + - The external ID of the virtual machine. + - Required for attaching or detaching a GPU. + required: true + type: str + mode: + description: + - The mode of the GPU. + choices: ["PASSTHROUGH_GRAPHICS", "PASSTHROUGH_COMPUTE", "VIRTUAL"] + required: false + type: str + device_id: + description: + - The ID of the GPU device. + required: false + type: int + vendor: + description: + - The vendor of the GPU. + choices: ["NVIDIA", "AMD", "INTEL"] + required: false + type: str + wait: + description: + - Wait for the task to complete. 
+ type: bool + required: false + default: True +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Attach GPU to VM + nutanix.ncp.ntnx_gpus_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "d7db5aa1-34cc-4f96-a436-eb2d85c7ff9e" + vm_ext_id: "9a1aa5ca-20ff-4703-672a-f41ad0a401b9" + name: test-gpu + mode: VIRTUAL + device_id: 123 + vendor: NVIDIA + +- name: Detach GPU from VM + nutanix.ncp.ntnx_gpus_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "d7db5aa1-34cc-4f96-a436-eb2d85c7ff9e" + vm_ext_id: "9a1aa5ca-20ff-4703-672a-f41ad0a401b9" + state: absent + wait: true +""" + +RETURN = r""" +response: + description: + - The response of the GPU operation. + - It will have updated list of GPUs attached to the VM. + type: dict + returned: always + sample: + [ + { + "device_id": 5053, + "ext_id": "16bbf848-5926-46f2-857a-87d4825bd347", + "fraction": null, + "frame_buffer_size_bytes": null, + "guest_driver_version": null, + "links": null, + "mode": "PASSTHROUGH_GRAPHICS", + "name": null, + "num_virtual_display_heads": null, + "pci_address": null, + "tenant_id": null, + "vendor": "NVIDIA", + }, + ] + +task_ext_id: + description: The external ID of the task. + type: str + returned: always +vm_ext_id: + description: The external ID of the virtual machine. + type: str + returned: always +changed: + description: Indicates whether the state of the GPU has changed. + type: bool + returned: always +error: + description: The error message, if any. 
+ type: str + returned: when an error occurs +""" + +import traceback # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402) + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_gpu, get_vm # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str", required=False), + vm_ext_id=dict(type="str", required=True), + mode=dict( + type="str", + choices=["PASSTHROUGH_GRAPHICS", "PASSTHROUGH_COMPUTE", "VIRTUAL"], + obj=vmm_sdk.GpuMode, + required=False, + ), + device_id=dict(type="int", required=False), + vendor=dict( + type="str", + choices=["NVIDIA", "INTEL", "AMD"], + obj=vmm_sdk.GpuVendor, + required=False, + ), + ) + return module_args + + +def attach_gpu(module, vms, result): + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + sg = SpecGenerator(module) + default_spec = vmm_sdk.Gpu() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating attach GPU Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.create_gpu(vmExtId=vm_ext_id, 
body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while attaching GPU", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id, True) + resp = get_vm(module, vms, vm_ext_id) + if not resp.gpus: + return module.fail_json( + "GPUs list is empty when fetching VM info", **result + ) + result["response"] = strip_internal_attributes(resp.gpus.to_dict()) + result["changed"] = True + + +def detach_gpu(module, vms, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + current_spec = get_gpu(module, vms, ext_id, vm_ext_id) + result["vm_ext_id"] = vm_ext_id + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for detaching GPU", **result) + kwargs = {"if_match": etag} + + try: + resp = vms.delete_gpu_by_id( + vmExtId=vm_ext_id, + extId=ext_id, + **kwargs, + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while detaching GPU", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id, True) + resp = get_vm(module, vms, vm_ext_id) + if not resp.gpus: + result["response"] = [] + else: + result["response"] = strip_internal_attributes(resp.gpus.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + 
remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "vm_ext_id": None, + } + state = module.params["state"] + vms = get_vm_api_instance(module) + if state == "present": + attach_gpu(module, vms, result) + else: + detach_gpu(module, vms, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_hosts_info.py b/plugins/modules/ntnx_hosts_info.py index 8b7331677..91e4fd878 100644 --- a/plugins/modules/ntnx_hosts_info.py +++ b/plugins/modules/ntnx_hosts_info.py @@ -32,23 +32,23 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List hosts - ntnx_hosts_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - kind: host - register: result - - - name: test getting particular host using uuid - ntnx_hosts_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - host_uuid: - register: result +- name: List hosts + ntnx_hosts_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + kind: host + register: result + +- name: test getting particular host using uuid + ntnx_hosts_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + host_uuid: + register: result """ RETURN = r""" api_version: @@ -149,9 +149,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.hosts import Host # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.hosts import Host # noqa: E402 def get_module_spec(): diff --git 
a/plugins/modules/ntnx_hosts_info_v2.py b/plugins/modules/ntnx_hosts_info_v2.py new file mode 100644 index 000000000..3b6c5c8db --- /dev/null +++ b/plugins/modules/ntnx_hosts_info_v2.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_hosts_info_v2 +short_description: Retrieve information about Nutanix hosts from PC. +version_added: 2.0.0 +description: + - This module retrieves information about Nutanix hosts from PC. + - Fetch particular host info using external ID. + - Fetch multiple hosts info with/without using filters, limit, etc. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the host. + - If not provided, multiple hosts info will be fetched. + type: str + cluster_ext_id: + description: + - The external ID of the cluster to filter hosts by. + - If provided, hosts info will be fetched for the specified cluster. 
+ type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: Get host by external ID + nutanix.ncp.ntnx_hosts_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: af49a0bb-b3d7-41c0-b9c2-f4ca0e8763e9 + register: result + +- name: Get hosts by cluster external ID + nutanix.ncp.ntnx_hosts_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + cluster_ext_id: 3a5b8c0e-1d7b-4c1d-9c3e-6f6e4d5b7a8c + register: result + +- name: List all hosts + nutanix.ncp.ntnx_hosts_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + register: result + +- name: List all hosts with filter + nutanix.ncp.ntnx_hosts_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + filter: hostName eq 'host1' + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching host info. + - Returns host info if ext_id is provided or list of multiple hosts. 
+ type: dict + returned: always + sample: + { + "extId": "af49a0bb-b3d7-41c0-b9c2-f4ca0e8763e9", + "hostName": "ansible_ag", + "hostType": "HYPER_CONVERGED", + "hypervisor": + { + "externalAddress": + { "ipv4": { "value": "10.0.0.1", "prefixLength": 32 } }, + "userName": "root", + "fullName": "Nutanix124", + "type": "AHV", + "numberOfVms": 1, + "state": "ACROPOLIS_NORMAL", + "acropolisConnectionState": "CONNECTED", + }, + "cluster": + { + "uuid": "00061de8-3ccd-9d88-185b-ac1f6b6f97e2", + "name": "ansible_ag", + }, + "controllerVm": + { + "id": 2, + "externalAddress": + { "ipv4": { "value": "10.0.0.2", "prefixLength": 32 } }, + "backplaneAddress": + { "ipv4": { "value": "10.0.0.3", "prefixLength": 32 } }, + "ipmi": + { + "ip": { "ipv4": { "value": "10.0.0.4", "prefixLength": 32 } }, + "username": "ADMIN", + }, + "rackableUnitUuid": "f2522411-7085-4771-9007-262286cbaa9b", + }, + "disk": + [ + { + "uuid": "742fd128-721e-4cbb-96af-8f1211c27e95", + "mountPath": "/home/nutnanix/temp", + "sizeInBytes": 1800937370625, + "serialId": "ABCD", + "storageTier": "HDD", + } + ], + "isSecureBooted": false, + "isHardwareVirtualized": false, + "hasCsr": false, + "numberOfCpuCores": 16, + "numberOfCpuThreads": 32, + "numberOfCpuSockets": 2, + "cpuFrequencyHz": 2100000000, + "cpuModel": "Model_Name", + "bootTimeUsecs": 1720164675311071, + "memorySizeBytes": 269813284864, + "blockSerial": "128MSLLI", + "blockModel": "NX-1065-G5", + "maintenanceState": "normal", + "nodeStatus": "NORMAL", + } +ext_id: + description: + - The external ID of the host. + type: str + returned: always + sample: af49a0bb-b3d7-41c0-b9c2-f4ca0e8763e9 +error: + description: Error message if any. 
+ type: str + returned: always +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, +) +from ..module_utils.v4.clusters_mgmt.helpers import get_host # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + cluster_ext_id=dict(type="str"), + ) + + return module_args + + +def get_host_by_ext_id(module, result): + ext_id = module.params.get("ext_id") + cluster_ext_id = module.params.get("cluster_ext_id") + clusters = get_clusters_api_instance(module) + resp = get_host(module, clusters, ext_id, cluster_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_hosts(module, result): + clusters = get_clusters_api_instance(module) + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(module.params) + if err: + module.fail_json("Failed creating query parameters for fetching hosts info") + resp = None + try: + cluster_ext_id = module.params.get("cluster_ext_id") + if not cluster_ext_id: + resp = clusters.list_hosts(**kwargs) + else: + resp = clusters.list_hosts_by_cluster_id( + clusterExtId=cluster_ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching hosts info", + ) + + if getattr(resp, "data", None): + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + else: + result["response"] = [] + + +def run_module(): + module = 
BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + skip_info_args=False, + mutually_exclusive=[("ext_id", "filter")], + ) + + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id") or module.params.get("name"): + get_host_by_ext_id(module, result) + else: + get_hosts(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_image_placement_policies_info.py b/plugins/modules/ntnx_image_placement_policies_info.py index 4e0644c95..4fe42f0d3 100644 --- a/plugins/modules/ntnx_image_placement_policies_info.py +++ b/plugins/modules/ntnx_image_placement_policies_info.py @@ -31,36 +31,36 @@ - Pradeepsingh Bhati (@bhati-pradeep) """ EXAMPLES = r""" - - name: Get image placement policy using policy_uuid - ntnx_image_placement_policies_info: - policy_uuid: "" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - - name: test_policy - register: result +- name: Get image placement policy using policy_uuid + ntnx_image_placement_policies_info: + policy_uuid: "" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + - name: test_policy + register: result - - name: List image placement policies using name filter criteria - ntnx_image_placement_policies_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - - name: test_policy - register: result +- name: List image placement policies using name filter criteria + ntnx_image_placement_policies_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + - name: test_policy + register: result - - 
name: List image placement policies using length, offset - ntnx_image_placement_policies_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 2 - offset: 1 - register: result +- name: List image placement policies using length, offset + ntnx_image_placement_policies_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 2 + offset: 1 + register: result """ RETURN = r""" api_version: @@ -224,11 +224,11 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.image_placement_policy import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.image_placement_policy import ( # noqa: E402 ImagePlacementPolicy, ) -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_image_placement_policies_info_v2.py b/plugins/modules/ntnx_image_placement_policies_info_v2.py new file mode 100644 index 000000000..8f5cbe405 --- /dev/null +++ b/plugins/modules/ntnx_image_placement_policies_info_v2.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_image_placement_policies_info_v2 +short_description: Fetches information about Nutanix PC image placement policies. +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +description: + - This module fetches information about Nutanix PC image placement policies. 
+ - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the image placement policy. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Get image placement policy by ID + nutanix.ncp.ntnx_image_placement_policies_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "policy-12345" + +- name: Get all image placement policies + nutanix.ncp.ntnx_image_placement_policies_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" + + +RETURN = r""" +response: + description: + - The response from the Nutanix PC Image Placement policies. + - it can be single policy or list of policies as per spec. + type: dict + returned: always + sample: { + "cluster_entity_filter": { + "category_ext_ids": [ + "605a0cf9-d04e-3be7-911b-1e6f193f6ebe" + ], + "type": "CATEGORIES_MATCH_ANY" + }, + "create_time": "2024-03-25T23:03:17.610346+00:00", + "description": "new1-description-updated", + "enforcement_state": "SUSPENDED", + "ext_id": "54fe0ed5-02d8-4588-b10b-3b9736bf3d06", + "image_entity_filter": { + "category_ext_ids": [ + "98b9dc89-be08-3c56-b554-692b8b676fd1" + ], + "type": "CATEGORIES_MATCH_ALL" + }, + "last_update_time": "2024-03-25T23:44:01.955468+00:00", + "links": null, + "name": "new1-updated", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "placement_type": "SOFT", + "tenant_id": null + } +ext_id: + description: + - The external ID of the policy. + type: str + sample: "98b9dc89-be08-3c56-b554-692b8b676fd2" + returned: always +error: + description: The error message if an error occurs. 
+ type: str + returned: when an error occurs +""" +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import ( # noqa: E402 + get_image_placement_policy_api_instance, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_policy(module, result): + policies = get_image_placement_policy_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = policies.get_placement_policy_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching image placement policy info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_policies(module, result): + policies = get_image_placement_policy_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating image placement policies info Spec", **result + ) + + try: + resp = policies.list_placement_policies(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching image placement policies info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + 
remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_policy(module, result) + else: + get_policies(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_image_placement_policies_v2.py b/plugins/modules/ntnx_image_placement_policies_v2.py new file mode 100644 index 000000000..f2bda2cd2 --- /dev/null +++ b/plugins/modules/ntnx_image_placement_policies_v2.py @@ -0,0 +1,546 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_image_placement_policies_v2 +short_description: Manage image placement policies in Nutanix Prism Central +description: + - This module allows you to create, update, and delete image placement policies in Nutanix Prism Central. + - This module allows you to suspend and resume image placement policies in Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The unique identifier of the image placement policy. + - This parameter is required for update and delete operations. + required: false + type: str + name: + description: + - The name of the image placement policy. + required: false + type: str + description: + description: + - The description of the image placement policy. + required: false + type: str + placement_type: + description: + - The placement type of the image placement policy. + required: false + choices: + - HARD + - SOFT + type: str + image_entity_filter: + description: + - The filter for selecting images for the image placement policy. 
+ required: false + type: dict + suboptions: + type: + description: + - The type of filter to apply. + required: true + choices: + - CATEGORIES_MATCH_ALL + - CATEGORIES_MATCH_ANY + type: str + category_ext_ids: + description: + - The list of category external IDs to match. + required: true + type: list + elements: str + cluster_entity_filter: + description: + - The filter for selecting clusters for the image placement policy. + required: false + type: dict + suboptions: + type: + description: + - The type of filter to apply. + required: true + choices: + - CATEGORIES_MATCH_ALL + - CATEGORIES_MATCH_ANY + type: str + category_ext_ids: + description: + - The list of category external IDs to match. + required: true + type: list + elements: str + enforcement_state: + description: + - The enforcement state of the image placement policy. + - This parameter is required for suspending and resuming the image placement policy. + required: false + choices: + - ACTIVE + - SUSPENDED + type: str + should_cancel_running_tasks: + description: + - Whether to cancel running tasks when suspending the image placement policy. + required: false + type: bool + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be to create the item. + - if C(state) is set to C(present) and C(ext_id) is given then it will update that policy. + - if C(state) is set to C(present) then C(ext_id) or C(name) needs to be set. + - >- + If C(state) is set to C(absent) and if the item exists, then + item is removed. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the CRUD operation to complete. + type: bool + required: false + default: True +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +notes: + - This module follows two steps update process. Configuration update and enforcement state update. 
+ - If enforcement state is changed, then task_ext_id will have the task id of enforcement state update. + - Else it will be create, update config or delete task id as per C(state). +""" + +EXAMPLES = r""" +- name: Create an image placement policy + nutanix.ncp.ntnx_image_placement_policies_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: my_policy + description: My image placement policy + placement_type: HARD + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - category1 + - category2 + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - category3 + - category4 + enforcement_state: ACTIVE + state: present + wait: true + +- name: Delete an image placement policy + nutanix.ncp.ntnx_image_placement_policies_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 605a0cf9-d04e-3be7-911b-1e6f193f6eb9 + state: absent + wait: true +""" + + +RETURN = r""" +response: + description: + - The response from the image placement policy operation. + - It will be task response if C(wait) is false. 
+ type: dict + returned: always + sample: { + "cluster_entity_filter": { + "category_ext_ids": [ + "605a0cf9-d04e-3be7-911b-1e6f193f6ebe" + ], + "type": "CATEGORIES_MATCH_ANY" + }, + "create_time": "2024-03-25T23:03:17.610346+00:00", + "description": "new1-description-updated", + "enforcement_state": "SUSPENDED", + "ext_id": "54fe0ed5-02d8-4588-b10b-3b9736bf3d06", + "image_entity_filter": { + "category_ext_ids": [ + "98b9dc89-be08-3c56-b554-692b8b676fd1" + ], + "type": "CATEGORIES_MATCH_ALL" + }, + "last_update_time": "2024-03-25T23:44:01.955468+00:00", + "links": null, + "name": "new1-updated", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "placement_type": "SOFT", + "tenant_id": null + } +task_ext_id: + description: + - The external ID of the task associated with the image placement policy operation. + - If enforcement state is changed, then task_ext_id will have the task id of enforcement state update. + type: str + returned: when a task is created + sample: "98b9dc89-be08-3c56-b554-692b8b676fd2" +ext_id: + description: + - The external ID of the policy + type: str + sample: "98b9dc89-be08-3c56-b554-692b8b676fd2" + returned: always +changed: + description: Indicates whether the image placement policy was changed. + type: bool + returned: always +error: + description: The error message if an error occurred during the image placement policy operation. + type: str + returned: when an error occurs +skipped: + description: Indicates whether the image placement policy operation was skipped. 
+ type: bool + returned: when the operation is skipped +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import ( # noqa: E402 + get_etag, + get_image_placement_policy_api_instance, +) + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + """ + Returns the module specification. 
+ """ + entity_filter = dict( + type=dict( + type="str", + required=True, + choices=["CATEGORIES_MATCH_ALL", "CATEGORIES_MATCH_ANY"], + ), + category_ext_ids=dict(type="list", required=True, elements="str"), + ) + module_args = dict( + ext_id=dict(type="str", required=False), + name=dict(type="str", required=False), + description=dict(type="str", required=False), + placement_type=dict(type="str", choices=["HARD", "SOFT"], required=False), + image_entity_filter=dict( + type="dict", required=False, options=entity_filter, obj=vmm_sdk.Filter + ), + cluster_entity_filter=dict( + type="dict", required=False, options=entity_filter, obj=vmm_sdk.Filter + ), + enforcement_state=dict( + type="str", choices=["ACTIVE", "SUSPENDED"], required=False + ), + should_cancel_running_tasks=dict(type="bool", required=False), + ) + return module_args + + +def get_policy(module, api_instance, ext_id): + try: + return api_instance.get_placement_policy_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching image placement policy info using ext_id", + ) + + +def create_policy(module, result): + policies = get_image_placement_policy_api_instance(module) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.PlacementPolicy() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create Image Placement Policy Spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = policies.create_placement_policy(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating Image Placement Policy", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and 
module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + ext_id = get_entity_ext_id_from_task( + task, rel=Tasks.RelEntityType.IMAGE_PLACEMENT_POLICY + ) + policy = get_policy(module, policies, ext_id) + if ext_id: + result["ext_id"] = ext_id + + # update policy enforcement state if needed + if check_if_state_update_required(policy, spec): + task_ext_id = update_policy_state(module, result, policies, ext_id) + if task_ext_id: + wait_for_completion(module, task_ext_id) + result["task_ext_id"] = task_ext_id + + policy = get_policy(module, policies, ext_id) + result["response"] = strip_internal_attributes(policy.to_dict()) + + result["changed"] = True + + +def check_if_config_update_required(current_spec, update_spec): + if current_spec == update_spec: + return False + + lhs = current_spec.to_dict() + rhs = update_spec.to_dict() + for key in lhs.keys(): + if key != "enforcement_state" and lhs.get(key) != rhs.get(key): + return True + return False + + +def check_if_state_update_required(current_spec, update_spec): + if current_spec.enforcement_state == update_spec.enforcement_state: + return False + return True + + +def update_policy_config(module, api_instance, update_spec, ext_id): + resp = None + try: + resp = api_instance.update_placement_policy_by_id( + extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating Image Placement Policy", + ) + + task_ext_id = resp.data.ext_id + return task_ext_id + + +def update_policy_state(module, result, policies, ext_id): + desired_state = module.params.get("enforcement_state") + + policy = get_policy(module, policies, ext_id) + etag = get_etag(data=policy) + if not etag: + return module.fail_json( + "unable to fetch etag for updating Placement Policy enforcement state", + **result, + ) + + kwargs = {"if_match": etag} + resp = None + if desired_state == vmm_sdk.EnforcementState.SUSPENDED: + 
should_cancel_running_tasks = module.params.get("should_cancel_running_tasks") + spec = vmm_sdk.SuspendPlacementPolicyConfig( + should_cancel_running_tasks=should_cancel_running_tasks + ) + try: + resp = policies.suspend_placement_policy(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while suspending given Placement Policy", + ) + else: + try: + resp = policies.resume_placement_policy(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while resuming Placement Policy", + ) + task_ext_id = resp.data.ext_id + return task_ext_id + + +def update_policy(module, result): + policies = get_image_placement_policy_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_policy(module, policies, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating Image Placement Policy update spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + state_update_required = check_if_state_update_required(current_spec, update_spec) + config_update_required = check_if_config_update_required(current_spec, update_spec) + + if not (config_update_required or state_update_required): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + previous_task_ext_id = None + if config_update_required: + previous_task_ext_id = update_policy_config( + module, policies, update_spec, ext_id + ) + + if state_update_required: + + if previous_task_ext_id: + wait_for_completion(module, previous_task_ext_id) + + previous_task_ext_id = update_policy_state(module, result, policies, ext_id) + + if previous_task_ext_id and module.params.get("wait", False): + 
wait_for_completion(module, previous_task_ext_id) + + result["ext_id"] = ext_id + + updated_policy = get_policy(module, policies, ext_id) + result["response"] = strip_internal_attributes(updated_policy.to_dict()) + result["task_ext_id"] = previous_task_ext_id + result["changed"] = True + + +def delete_policy(module, result): + policies = get_image_placement_policy_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_policy(module, policies, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for deleting Image Placement Policy", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = policies.delete_placement_policy_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting Image Placement Policy", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_policy(module, result) + else: + create_policy(module, result) + else: + delete_policy(module, result) + module.exit_json(**result) + + +def 
main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_image_placement_policy.py b/plugins/modules/ntnx_image_placement_policy.py index 087e7ea56..af3f56116 100644 --- a/plugins/modules/ntnx_image_placement_policy.py +++ b/plugins/modules/ntnx_image_placement_policy.py @@ -104,7 +104,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: "test_policy_1" placement_type: soft image_categories: @@ -121,7 +121,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: "test_policy_2" desc: "test_policy_2_desc" placement_type: hard @@ -146,7 +146,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: "test_policy_2-uodated" desc: "test_policy_2_desc-updated" placement_type: hard @@ -168,8 +168,8 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False - remove_categories: True + validate_certs: false + remove_categories: true register: result - name: Delete image placement policy @@ -178,7 +178,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false policy_uuid: "" register: result """ @@ -279,10 +279,10 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.image_placement_policy import ( # noqa: E402 +from ..module_utils.v3.prism.image_placement_policy import ( # noqa: E402 ImagePlacementPolicy, ) -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_images.py 
b/plugins/modules/ntnx_images.py index 8733b9df7..3eca3c0f7 100644 --- a/plugins/modules/ntnx_images.py +++ b/plugins/modules/ntnx_images.py @@ -133,89 +133,89 @@ """ EXAMPLES = r""" - - name: create image from local workstation - ntnx_images: - state: "present" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - source_path: "/Users/ubuntu/Downloads/alpine-virt-3.8.1-x86_64.iso" - clusters: - - name: "temp_cluster" - categories: - AppFamily: - - Backup - checksum: - checksum_algorithm: SHA_1 - checksum_value: 44610efd741a3ab4a548a81ea94869bb8b692977 - name: "ansible-test-with-categories-mapping" - desc: "description" - image_type: "ISO_IMAGE" - version: - product_name: "test" - product_version: "1.2.0" - wait: true - - - name: create image from with source as remote server file location - ntnx_images: - state: "present" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - source_uri: "https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img" - clusters: - - name: "temp_cluster" - categories: - AppFamily: - - Backup - checksum: - checksum_algorithm: SHA_1 - checksum_value: 44610efd741a3ab4a548a81ea94869bb8b692977 - name: "ansible-test-with-categories-mapping" - desc: "description" - image_type: "DISK_IMAGE" - version: - product_name: "test" - product_version: "1.2.0" - wait: true - - - name: override categories of existing image - ntnx_images: - state: "present" - image_uuid: "" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - categories: - AppTier: - - Default - AppFamily: - - Backup - wait: true - - - name: dettach all categories from existing image - ntnx_images: - state: "present" - image_uuid: "00000000-0000-0000-0000-000000000000" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" 
- nutanix_password: "{{ password }}" - validate_certs: False - remove_categories: true - wait: true - - - name: delete existing image - ntnx_images: - state: "absent" - image_uuid: "00000000-0000-0000-0000-000000000000" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - wait: true +- name: create image from local workstation + ntnx_images: + state: "present" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + source_path: "/Users/ubuntu/Downloads/alpine-virt-3.8.1-x86_64.iso" + clusters: + - name: "temp_cluster" + categories: + AppFamily: + - Backup + checksum: + checksum_algorithm: SHA_1 + checksum_value: 44610efd741a3ab4a548a81ea94869bb8b692977 + name: "ansible-test-with-categories-mapping" + desc: "description" + image_type: "ISO_IMAGE" + version: + product_name: "test" + product_version: "1.2.0" + wait: true + +- name: create image from with source as remote server file location + ntnx_images: + state: "present" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + source_uri: "https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img" + clusters: + - name: "temp_cluster" + categories: + AppFamily: + - Backup + checksum: + checksum_algorithm: SHA_1 + checksum_value: 44610efd741a3ab4a548a81ea94869bb8b692977 + name: "ansible-test-with-categories-mapping" + desc: "description" + image_type: "DISK_IMAGE" + version: + product_name: "test" + product_version: "1.2.0" + wait: true + +- name: override categories of existing image + ntnx_images: + state: "present" + image_uuid: "" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + categories: + AppTier: + - Default + AppFamily: + - Backup + wait: true + +- name: dettach all categories 
from existing image + ntnx_images: + state: "present" + image_uuid: "00000000-0000-0000-0000-000000000000" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + remove_categories: true + wait: true + +- name: delete existing image + ntnx_images: + state: "absent" + image_uuid: "00000000-0000-0000-0000-000000000000" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + wait: true """ RETURN = r""" @@ -308,8 +308,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.images import Image # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.images import Image # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_images_info.py b/plugins/modules/ntnx_images_info.py index 96fbd78f2..20413bd90 100644 --- a/plugins/modules/ntnx_images_info.py +++ b/plugins/modules/ntnx_images_info.py @@ -31,15 +31,15 @@ - Pradeepsingh Bhati (@bhati-pradeep) """ EXAMPLES = r""" - - name: List images using name filter criteria - ntnx_images_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: Ubuntu - register: result +- name: List images using name filter criteria + ntnx_images_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + name: Ubuntu + register: result """ RETURN = r""" api_version: @@ -170,9 +170,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.images import Image # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from 
..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.images import Image # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_images_info_v2.py b/plugins/modules/ntnx_images_info_v2.py new file mode 100644 index 000000000..7283e30f1 --- /dev/null +++ b/plugins/modules/ntnx_images_info_v2.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_images_info_v2 +short_description: Fetch information about Nutanix images +description: + - This module fetches information about Nutanix images. + - The module can fetch information about all images or a specific image. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the image. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all images + nutanix.ncp.ntnx_images_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: Fetch information about a specific image + nutanix.ncp.ntnx_images_info_v2: + ext_id: abc123 + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" + + +RETURN = r""" +response: + description: + - The response from the Nutanix PC Images. + - it can be single image or list of image as per spec. 
+ type: dict + returned: always + sample: { + "category_ext_ids": [ + "98b9dc89-be08-3c56-b554-692b8b676fd1" + ], + "checksum": null, + "cluster_location_ext_ids": [ + "00061413-990f-363a-185b-ac1f6b6f97e2" + ], + "create_time": "2024-03-25T19:28:55.724068+00:00", + "description": "from disk", + "ext_id": "172e0d73-74ee-4d05-95f2-1894d83d7e09", + "last_update_time": "2024-03-25T19:28:55.724068+00:00", + "links": null, + "name": "image1_from_disk", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "placement_policy_status": null, + "size_bytes": 262472192, + "source": { + "ext_id": "05de8919-3e8c-4f5c-a9e4-a6955cedd764" + }, + "tenant_id": null, + "type": "DISK_IMAGE" + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_image_api_instance # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_image(module, result): + images = get_image_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = images.get_image_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching image info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_images(module, result): + images = get_image_api_instance(module) + + sg = SpecGenerator(module) 
+ kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating images info Spec", **result) + + try: + resp = images.list_images(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching images info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_image(module, result) + else: + get_images(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_images_v2.py b/plugins/modules/ntnx_images_v2.py new file mode 100644 index 000000000..f6a3e8ead --- /dev/null +++ b/plugins/modules/ntnx_images_v2.py @@ -0,0 +1,527 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_images_v2 +short_description: Manage Nutanix Prism Central images. +description: + - This module allows you to create, update, and delete images in Nutanix. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the item. + - if C(state) is set to C(present) and C(ext_id) is given then it will update that image. + - if C(state) is set to C(present) then C(ext_id) or C(name) needs to be set. 
+ - >- + If C(state) is set to C(absent) and if the item exists, then + item is removed. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the CRUD operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The unique identifier of the image. + - Required to do update and delete. + type: str + required: false + name: + description: + - The name of the image. + type: str + required: false + description: + description: + - The description of the image. + type: str + required: false + type: + description: + - The type of the image. + type: str + choices: + - DISK_IMAGE + - ISO_IMAGE + required: false + checksum: + description: + - The checksum of the image. + - C(sha1) and C(sha256) are mutually exclusive. + type: dict + suboptions: + sha1: + description: + - The SHA1 checksum of the image. + type: dict + suboptions: + hex_digest: + description: + - The hexadecimal digest of the SHA1 checksum. + type: str + required: true + sha256: + description: + - The SHA256 checksum of the image. + type: dict + suboptions: + hex_digest: + description: + - The hexadecimal digest of the SHA256 checksum. + type: str + required: true + required: false + source: + description: + - The source of the image. + - Required to create an image. + - C(url_source) and C(vm_disk_source) are mutually exclusive. + type: dict + suboptions: + url_source: + description: + - The URL source of the image and its config. + type: dict + suboptions: + url: + description: + - The URL of the image. + type: str + required: true + should_allow_insecure_url: + description: + - Whether to allow insecure URLs. + type: bool + required: false + default: false + basic_auth: + description: + - The basic authentication credentials for the URL source. + type: dict + suboptions: + username: + description: + - The username for basic authentication. 
+ type: str + required: true + password: + description: + - The password for basic authentication. + type: str + required: true + vm_disk_source: + description: + - The VM disk source of the image. + type: dict + suboptions: + ext_id: + description: + - The unique identifier of the VM disk. + type: str + required: true + required: false + category_ext_ids: + description: + - The list of category key-value external IDs to be associated with the image. + - Use `[]` to remove all category key-value external IDs. + type: list + elements: str + required: false + cluster_location_ext_ids: + description: + - The list of cluster location external IDs for placing the images. + - Required to create an image. + type: list + elements: str + required: false + tenant_id: + description: + - The tenant ID to be associated with the image. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create an image + nutanix.ncp.ntnx_images_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: my-image + description: My image + type: DISK_IMAGE + checksum: + sha1: + hex_digest: abcdef1234567890 + source: + url_source: + url: http://example.com/image.qcow2 + should_allow_insecure_url: true + basic_auth: + username: myuser + password: mypassword + category_ext_ids: + - category1 + - category2 + cluster_location_ext_ids: + - cluster1 + - cluster2 + tenant_id: tenant1 + state: present + +- name: Update an image + nutanix.ncp.ntnx_images_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 98b9dc89-be08-3c56-b554-692b8b676fd1 + name: updated-image + description: Updated image + state: present + +- name: Delete an image + nutanix.ncp.ntnx_images_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + 
nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 98b9dc89-be08-3c56-b554-692b8b676fd1 + state: absent +""" + +RETURN = r""" +response: + description: + - The response from CRUD operation. + - For delete, it returns task response data. + - If update operation is idempotent, it skips the update. + type: dict + returned: always + sample: { + "category_ext_ids": null, + "checksum": null, + "cluster_location_ext_ids": [ + "00061413-990f-363a-185b-ac1f6b6f97e2" + ], + "create_time": "2024-03-25T19:18:16.547125+00:00", + "description": "image1-updated1", + "ext_id": "015e6709-d4a6-44bd-8df7-9898f062635b", + "last_update_time": "2024-03-25T19:37:02.628625+00:00", + "links": null, + "name": "image1-updated1", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "placement_policy_status": null, + "size_bytes": 262472192, + "source": { + "basic_auth": null, + "should_allow_insecure_url": false, + "url": "http://example.com/image.qcow2" + }, + "tenant_id": null, + "type": "DISK_IMAGE" + } +task_ext_id: + description: The task external ID associated with the operation. + type: str + sample: "015e6709-d4a6-44bd-8df7-9898f062635b" + returned: always +ext_id: + description: The external ID of the image. + type: str + returned: always +changed: + description: Indicates whether the state of the image was changed. + type: bool + returned: always +skipped: + description: Indicates whether the image was skipped due to idempotency. + type: bool + returned: always +error: + description: The error message, if any. 
+ type: str + returned: always +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import ( # noqa: E402 + get_etag, + get_image_api_instance, +) + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + # maps of spec classes for attributes having more than one type of objects allowed as it value + checksum_allowed_objs = { + "sha1": vmm_sdk.ImageSha1Checksum, + "sha256": vmm_sdk.ImageSha256Checksum, + } + source_allowed_objs = { + "url_source": vmm_sdk.UrlSource, + "vm_disk_source": vmm_sdk.VmDiskSource, + } + + # module specs + hex_digest = dict(hex_digest=dict(type="str", required=True)) + checksum = dict( + sha1=dict(type="dict", options=hex_digest), + sha256=dict(type="dict", options=hex_digest), + ) + basic_auth = dict( + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + ) + vm_disk_source = dict(ext_id=dict(type="str", required=True)) + url_source = dict( + url=dict(type="str", required=True), + should_allow_insecure_url=dict(type="bool", 
default=False), + basic_auth=dict(type="dict", options=basic_auth, obj=vmm_sdk.UrlBasicAuth), + ) + source = dict( + url_source=dict(type="dict", options=url_source), + vm_disk_source=dict(type="dict", options=vm_disk_source), + ) + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + type=dict(type="str", choices=["DISK_IMAGE", "ISO_IMAGE"]), + checksum=dict( + type="dict", + options=checksum, + obj=checksum_allowed_objs, + mutually_exclusive=[("sha1", "sha256")], + ), + source=dict( + type="dict", + options=source, + obj=source_allowed_objs, + mutually_exclusive=[("url_source", "vm_disk_source")], + ), + category_ext_ids=dict(type="list", elements="str"), + cluster_location_ext_ids=dict(type="list", elements="str"), + tenant_id=dict(type="str"), + ) + return module_args + + +def get_image(module, api_instance, ext_id): + try: + return api_instance.get_image_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching image info using ext_id", + ) + + +def create_image(module, result): + images = get_image_api_instance(module) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.Image() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create Image Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = images.create_image(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating Image", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + ext_id = get_entity_ext_id_from_task(task, 
rel=Tasks.RelEntityType.IMAGES) + if ext_id: + resp = get_image(module, images, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + if current_spec == update_spec: + return True + return False + + +def update_image(module, result): + images = get_image_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_image(module, images, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating image update spec", **result) + + # check for idempotency + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = images.update_image_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating image", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["ext_id"] = ext_id + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + resp = get_image(module, images, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def delete_image(module, result): + images = get_image_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_image(module, images, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for deleting image", **result) + + kwargs = 
{"if_match": etag} + + try: + resp = images.delete_image_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting image", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_image(module, result) + else: + create_image(module, result) + else: + delete_image(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_karbon_clusters.py b/plugins/modules/ntnx_karbon_clusters.py index aeb1b64ec..87b930d21 100644 --- a/plugins/modules/ntnx_karbon_clusters.py +++ b/plugins/modules/ntnx_karbon_clusters.py @@ -185,121 +185,118 @@ """ EXAMPLES = r""" - - name: create DEV cluster with Flannel network provider - ntnx_karbon_clusters: - cluster: - uuid: "00000000-0000-0000-0000-000000000000" - name: test-module21 - k8s_version: "1.19.8-0" - host_os: "ntnx-1.0" - node_subnet: - name: "vlan.800" - cluster_type: DEV - cni: - node_cidr_mask_size: 24 - service_ipv4_cidr: "172.19.0.0/16" - pod_ipv4_cidr: "172.20.0.0/16" - network_provider: 
Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: test-storage-class - reclaim_policy: Delete - storage_container: "default-container-48394901932577" - file_system: ext4 - flash_mode: False - register: result - - - name: delete cluster - ntnx_karbon_clusters: - state: absent - name: "{{cluster_name}" - register: result - - - name: create DEV cluster with Calico network provider - ntnx_karbon_clusters: - cluster: - name: auto_cluster_prod_f34ce3677ecf - name: test-module21 - k8s_version: "1.19.8-0" - host_os: "ntnx-1.0" - node_subnet: - uuid: "00000000-0000-0000-0000-000000000000" - cni: - node_cidr_mask_size: 24 - service_ipv4_cidr: "172.19.0.0/16" - pod_ipv4_cidr: "172.20.0.0/16" - network_provider: Calico - custom_node_configs: - etcd: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 120 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 120 - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: test-storage-class - reclaim_policy: Retain - storage_container: "default-container-48394901932577" - file_system: xfs - flash_mode: true - register: result - - - name: create prod cluster - ntnx_karbon_clusters: - cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - name: "{{node_subnet.name}}" - cluster_type: PROD - cni: - node_cidr_mask_size: 24 - service_ipv4_cidr: "172.19.0.0/16" - pod_ipv4_cidr: "172.20.0.0/16" - network_provider: Flannel - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: test-storage-class - reclaim_policy: Delete - 
storage_container: "{{storage_container.name}}" - file_system: ext4 - flash_mode: False - control_plane_virtual_ip: "{{control_plane_virtual_ip}}" - custom_node_configs: - etcd: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - masters: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 240 - workers: - num_instances: 1 - cpu: 8 - memory_gb: 8 - disk_gb: 240 - register: result +- name: create DEV cluster with Flannel network provider + ntnx_karbon_clusters: + cluster: + uuid: 00000000-0000-0000-0000-000000000000 + name: test-module21 + k8s_version: 1.19.8-0 + host_os: ntnx-1.0 + node_subnet: + name: vlan.800 + cluster_type: DEV + cni: + node_cidr_mask_size: 24 + service_ipv4_cidr: 172.19.0.0/16 + pod_ipv4_cidr: 172.20.0.0/16 + network_provider: Flannel + storage_class: + nutanix_cluster_password: '{{nutanix_cluster_password}}' + nutanix_cluster_username: '{{nutanix_cluster_username}}' + default_storage_class: true + name: test-storage-class + reclaim_policy: Delete + storage_container: default-container-48394901932577 + file_system: ext4 + flash_mode: false + register: result +- name: delete cluster + ntnx_karbon_clusters: + state: absent + name: '{{cluster_name}' + register: result +- name: create DEV cluster with Calico network provider + ntnx_karbon_clusters: + cluster: + name: auto_cluster_prod_f34ce3677ecf + name: test-module21 + k8s_version: 1.19.8-0 + host_os: ntnx-1.0 + node_subnet: + uuid: 00000000-0000-0000-0000-000000000000 + cni: + node_cidr_mask_size: 24 + service_ipv4_cidr: 172.19.0.0/16 + pod_ipv4_cidr: 172.20.0.0/16 + network_provider: Calico + custom_node_configs: + etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 120 + storage_class: + nutanix_cluster_password: '{{nutanix_cluster_password}}' + nutanix_cluster_username: '{{nutanix_cluster_username}}' + default_storage_class: true + name: 
test-storage-class + reclaim_policy: Retain + storage_container: default-container-48394901932577 + file_system: xfs + flash_mode: true + register: result +- name: create prod cluster + ntnx_karbon_clusters: + cluster: + uuid: '{{cluster.uuid}}' + name: '{{karbon_name}}' + k8s_version: '{{k8s_version}}' + host_os: '{{host_os}}' + node_subnet: + name: '{{node_subnet.name}}' + cluster_type: PROD + cni: + node_cidr_mask_size: 24 + service_ipv4_cidr: 172.19.0.0/16 + pod_ipv4_cidr: 172.20.0.0/16 + network_provider: Flannel + storage_class: + nutanix_cluster_password: '{{nutanix_cluster_password}}' + nutanix_cluster_username: '{{nutanix_cluster_username}}' + default_storage_class: true + name: test-storage-class + reclaim_policy: Delete + storage_container: '{{storage_container.name}}' + file_system: ext4 + flash_mode: false + control_plane_virtual_ip: '{{control_plane_virtual_ip}}' + custom_node_configs: + etcd: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + masters: + num_instances: 1 + cpu: 4 + memory_gb: 8 + disk_gb: 240 + workers: + num_instances: 1 + cpu: 8 + memory_gb: 8 + disk_gb: 240 + register: result """ RETURN = r""" @@ -371,8 +368,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.karbon.clusters import Cluster # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.karbon.clusters import Cluster # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_karbon_clusters_info.py b/plugins/modules/ntnx_karbon_clusters_info.py index bd4a8b51b..afb6ba862 100644 --- a/plugins/modules/ntnx_karbon_clusters_info.py +++ b/plugins/modules/ntnx_karbon_clusters_info.py @@ -32,42 +32,42 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List clusters - ntnx_karbon_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - 
nutanix_password: "{{ password }}" - validate_certs: False - register: result - - - name: Get clusters using name - ntnx_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster_name: "cluster-name" - register: result - - - name: Get clusters with ssh credential - ntnx_karbon_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster_name: "cluster-name" - fetch_ssh_credentials: true - register: result - - - name: Get clusters with kubeconfig - ntnx_karbon_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster_name: "cluster-name" - fetch_kubeconfig: true - register: result +- name: List clusters + ntnx_karbon_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: Get clusters using name + ntnx_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: "cluster-name" + register: result + +- name: Get clusters with ssh credential + ntnx_karbon_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: "cluster-name" + fetch_ssh_credentials: true + register: result + +- name: Get clusters with kubeconfig + ntnx_karbon_clusters_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: "cluster-name" + fetch_kubeconfig: true + register: result """ RETURN = r""" cni_config: @@ -158,8 +158,8 @@ sample: admin """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from 
..module_utils.karbon.clusters import Cluster # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.karbon.clusters import Cluster # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_karbon_clusters_node_pools.py b/plugins/modules/ntnx_karbon_clusters_node_pools.py index bb51d6ba3..86e458d50 100644 --- a/plugins/modules/ntnx_karbon_clusters_node_pools.py +++ b/plugins/modules/ntnx_karbon_clusters_node_pools.py @@ -98,8 +98,8 @@ ntnx_karbon_clusters_node_pools: node_subnet: uuid: "" - node_pool_name: "node_pool_name" - cluster_name: "cluster_name" + node_pool_name: "node_pool_name" + cluster_name: "cluster_name" pool_config: num_instances: 2 cpu: 4 @@ -110,14 +110,14 @@ - name: update pool by increasing cpu,memory_gb,num_instances and add labels ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "node_name" - cluster_name: "cluster_name" + wait: true + node_pool_name: "node_name" + cluster_name: "cluster_name" pool_config: - cpu: 6 - memory_gb: 10 - disk_gb: 150 - num_instances: 4 + cpu: 6 + memory_gb: 10 + disk_gb: 150 + num_instances: 4 add_labels: property1: "test-property1" register: result @@ -168,8 +168,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.karbon.node_pools import NodePool # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.karbon.node_pools import NodePool # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_karbon_registries.py b/plugins/modules/ntnx_karbon_registries.py index 9be9f76cd..bc387ba5e 100644 --- a/plugins/modules/ntnx_karbon_registries.py +++ b/plugins/modules/ntnx_karbon_registries.py @@ -44,26 +44,26 @@ """ EXAMPLES = r""" - - name: create registry - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - port: 
"{{port_number}}" - register: result - - - name: delete registry - ntnx_karbon_registries: - name: "{{registry_name}}" - state: absent - register: result - - - name: create registry with username and password - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - username: "{{username}}" - password: "{{password}}" - register: result +- name: create registry + ntnx_karbon_registries: + name: '{{registry_name}}' + url: '{{url}}' + port: '{{port_number}}' + register: result + +- name: delete registry + ntnx_karbon_registries: + name: '{{registry_name}}' + state: absent + register: result + +- name: create registry with username and password + ntnx_karbon_registries: + name: '{{registry_name}}' + url: '{{url}}' + username: '{{username}}' + password: '{{password}}' + register: result """ RETURN = r""" @@ -86,7 +86,7 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.karbon.registries import Registry # noqa: E402 +from ..module_utils.v3.karbon.registries import Registry # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_karbon_registries_info.py b/plugins/modules/ntnx_karbon_registries_info.py index 6f11e446d..3a0c6b2f9 100644 --- a/plugins/modules/ntnx_karbon_registries_info.py +++ b/plugins/modules/ntnx_karbon_registries_info.py @@ -26,23 +26,22 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List registries - ntnx_karbon_registries_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - register: result - - - name: Get registries using name - ntnx_registries_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - registry_name: "registry-name" - register: result - +- name: List registries + ntnx_karbon_registries_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + 
nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: Get registries using name + ntnx_registries_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + registry_name: "registry-name" + register: result """ RETURN = r""" name: @@ -67,8 +66,8 @@ sample: "xxx.xxx.xxx.xxx:5000" """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.karbon.registries import Registry # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.karbon.registries import Registry # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_authorize_db_server_vms.py b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py index 17b47061a..dae525c3c 100644 --- a/plugins/modules/ntnx_ndb_authorize_db_server_vms.py +++ b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py @@ -92,9 +92,9 @@ """ -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_clones_info.py b/plugins/modules/ntnx_ndb_clones_info.py index 770173442..b44d2a8ff 100644 --- a/plugins/modules/ntnx_ndb_clones_info.py +++ b/plugins/modules/ntnx_ndb_clones_info.py @@ -55,7 +55,6 @@ validate_certs: false uuid: "" register: result - """ RETURN = r""" response: @@ -301,8 +300,8 @@ """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from 
..module_utils.v3.ndb.database_clones import DatabaseClone # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_clusters.py b/plugins/modules/ntnx_ndb_clusters.py index aa8b5565f..576aa748e 100644 --- a/plugins/modules/ntnx_ndb_clusters.py +++ b/plugins/modules/ntnx_ndb_clusters.py @@ -139,53 +139,53 @@ """ EXAMPLES = r""" - - name: Register Cluster with prisim_vlan - ntnx_ndb_clusters: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - name: "cluster_name" - desc: "cluster_desc" - name_prefix: "cluster_name_prefix" - cluster_ip: "cluster_ip" - cluster_credentials: - username: "{{cluster_info.cluster_credentials.username}}" - password: "{{cluster_info.cluster_credentials.password}}" - agent_network: - dns_servers: - - "{{cluster_info.agent_network.dns_servers[0]}}" - - "{{cluster_info.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster_info.agent_network.ntp_servers[0]}}" - - "{{cluster_info.agent_network.ntp_servers[1]}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster_info.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster_info.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster_info.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster_info.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster_info.vlan_access.prism_vlan.subnet_mask}}" - storage_container: "{{cluster_info.storage_container}}" - - - name: update cluster name , desc - ntnx_ndb_clusters: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - uuid: "cluster_uuid" - name: newname - desc: newdesc - - - name: delete cluster - ntnx_ndb_clusters: - nutanix_host: "" - nutanix_username: "" - nutanix_password: "" - validate_certs: false - uuid: "cluster_uuid" - state: absent +- name: Register Cluster with prisim_vlan + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + name: "cluster_name" + desc: "cluster_desc" 
+ name_prefix: "cluster_name_prefix" + cluster_ip: "cluster_ip" + cluster_credentials: + username: "{{cluster_info.cluster_credentials.username}}" + password: "{{cluster_info.cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{cluster_info.agent_network.dns_servers[0]}}" + - "{{cluster_info.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster_info.agent_network.ntp_servers[0]}}" + - "{{cluster_info.agent_network.ntp_servers[1]}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster_info.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster_info.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster_info.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster_info.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster_info.vlan_access.prism_vlan.subnet_mask}}" + storage_container: "{{cluster_info.storage_container}}" + +- name: update cluster name , desc + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "cluster_uuid" + name: newname + desc: newdesc + +- name: delete cluster + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "cluster_uuid" + state: absent """ RETURN = r""" @@ -299,9 +299,9 @@ import time # noqa: E402 from ..module_utils import utils # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.clusters import Cluster # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.clusters import Cluster # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_clusters_info.py b/plugins/modules/ntnx_ndb_clusters_info.py index d27527d67..8f69f307e 100644 --- a/plugins/modules/ntnx_ndb_clusters_info.py +++ 
b/plugins/modules/ntnx_ndb_clusters_info.py @@ -66,7 +66,6 @@ validate_certs: false uuid: "" register: result - """ RETURN = r""" response: @@ -173,8 +172,8 @@ """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.clusters import Cluster # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.clusters import Cluster # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_clone_refresh.py b/plugins/modules/ntnx_ndb_database_clone_refresh.py index e1e488b08..bc114a0fe 100644 --- a/plugins/modules/ntnx_ndb_database_clone_refresh.py +++ b/plugins/modules/ntnx_ndb_database_clone_refresh.py @@ -45,7 +45,7 @@ """ EXAMPLES = r""" - name: create spec for refresh clone to a pitr timestamp - check_mode: yes + check_mode: true ntnx_ndb_database_clone_refresh: uuid: "{{clone_uuid}}" pitr_timestamp: "2023-02-04 07:29:36" @@ -57,7 +57,6 @@ uuid: "{{clone_uuid}}" snapshot_uuid: "{{snapshot_uuid}}" register: result - """ RETURN = r""" response: @@ -292,10 +291,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_clones import DatabaseClone # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py index d8ada7059..113377f9d 100644 --- a/plugins/modules/ntnx_ndb_database_clones.py +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -311,7 +311,7 @@ days: 2 timezone: "Asia/Calcutta" remind_before_in_days: 1 - 
delete_database: True + delete_database: true refresh_schedule: days: 2 @@ -355,7 +355,7 @@ days: 2 timezone: "Asia/Calcutta" remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 @@ -365,7 +365,6 @@ tags: ansible-clones: ansible-test-db-clones register: result - """ RETURN = r""" @@ -602,13 +601,13 @@ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_clones import DatabaseClone # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_log_catchup.py b/plugins/modules/ntnx_ndb_database_log_catchup.py index 122c1414b..ec7eebac4 100644 --- a/plugins/modules/ntnx_ndb_database_log_catchup.py +++ b/plugins/modules/ntnx_ndb_database_log_catchup.py @@ -102,10 +102,10 @@ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from 
..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_restore.py b/plugins/modules/ntnx_ndb_database_restore.py index 1660d2328..ef690f4bc 100644 --- a/plugins/modules/ntnx_ndb_database_restore.py +++ b/plugins/modules/ntnx_ndb_database_restore.py @@ -120,10 +120,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_scale.py b/plugins/modules/ntnx_ndb_database_scale.py index 98dc2aa0b..b298f5b98 100644 --- a/plugins/modules/ntnx_ndb_database_scale.py +++ b/plugins/modules/ntnx_ndb_database_scale.py @@ -107,10 +107,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py index b4720f672..a53c8e0cf 100644 --- 
a/plugins/modules/ntnx_ndb_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -233,10 +233,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.snapshots import Snapshot # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index 6a7d76cdd..f1d8611a1 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -1038,18 +1038,18 @@ import time # noqa: E402 from copy import deepcopy # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 -from ..module_utils.ndb.db_server_cluster import DBServerCluster # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 -from ..module_utils.ndb.maintenance_window import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.v3.ndb.db_server_cluster import DBServerCluster # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import ( # noqa: E402 AutomatedPatchingSpec, MaintenanceWindow, ) -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 -from ..module_utils.ndb.time_machines import 
TimeMachine # noqa: E402 -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_databases_info.py b/plugins/modules/ntnx_ndb_databases_info.py index a9e5430b7..8fc2f8c6c 100644 --- a/plugins/modules/ntnx_ndb_databases_info.py +++ b/plugins/modules/ntnx_ndb_databases_info.py @@ -98,7 +98,7 @@ - name: Get era databases using its id and detailed response ntnx_ndb_databases_info: filters: - detailed: True + detailed: true uuid: "" register: result no_log: true @@ -703,9 +703,9 @@ """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 from ..module_utils.utils import format_filters_map # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_db_server_vms.py b/plugins/modules/ntnx_ndb_db_server_vms.py index d8a57c320..057ac9d2e 100644 --- a/plugins/modules/ntnx_ndb_db_server_vms.py +++ b/plugins/modules/ntnx_ndb_db_server_vms.py @@ -452,15 +452,15 @@ import time # noqa: E402 from copy import deepcopy # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 -from ..module_utils.ndb.maintenance_window import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import ( # noqa: E402 
AutomatedPatchingSpec, MaintenanceWindow, ) -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_db_servers_info.py b/plugins/modules/ntnx_ndb_db_servers_info.py index fd18f8f20..e0f644094 100644 --- a/plugins/modules/ntnx_ndb_db_servers_info.py +++ b/plugins/modules/ntnx_ndb_db_servers_info.py @@ -382,9 +382,9 @@ ] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 from ..module_utils.utils import format_filters_map # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_linked_databases.py b/plugins/modules/ntnx_ndb_linked_databases.py index 5dbabbaa6..2d307d0bb 100644 --- a/plugins/modules/ntnx_ndb_linked_databases.py +++ b/plugins/modules/ntnx_ndb_linked_databases.py @@ -229,10 +229,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_maintenance_tasks.py b/plugins/modules/ntnx_ndb_maintenance_tasks.py index 
1b6411f0a..9b2c17726 100644 --- a/plugins/modules/ntnx_ndb_maintenance_tasks.py +++ b/plugins/modules/ntnx_ndb_maintenance_tasks.py @@ -313,12 +313,12 @@ from copy import deepcopy # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.maintenance_window import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import ( # noqa: E402 AutomatedPatchingSpec, MaintenanceWindow, ) -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py b/plugins/modules/ntnx_ndb_maintenance_window.py index f90e8fb96..64c4d01ed 100644 --- a/plugins/modules/ntnx_ndb_maintenance_window.py +++ b/plugins/modules/ntnx_ndb_maintenance_window.py @@ -93,7 +93,6 @@ timezone: "UTC" register: result - """ RETURN = r""" response: @@ -134,9 +133,9 @@ """ -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.maintenance_window import MaintenanceWindow # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import MaintenanceWindow # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_maintenance_windows_info.py b/plugins/modules/ntnx_ndb_maintenance_windows_info.py index b2d0c6b61..492fa012e 100644 --- a/plugins/modules/ntnx_ndb_maintenance_windows_info.py +++ b/plugins/modules/ntnx_ndb_maintenance_windows_info.py @@ -235,8 +235,8 @@ sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.maintenance_window import MaintenanceWindow # noqa: E402 +from 
..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import MaintenanceWindow # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index 3c342679b..638632c63 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -369,7 +369,7 @@ autovacuum_vacuum_scale_factor: "{{autovacuum_vacuum_scale_factor}}" autovacuum_work_mem: "{{autovacuum_work_mem}}" autovacuum_max_workers: "{{autovacuum_max_workers}}" - autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" + autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" wal_buffers: "{{wal_buffers}}" synchronous_commit: "{{synchronous_commit}}" random_page_cost: "{{random_page_cost}}" @@ -415,9 +415,9 @@ desc: "testdesc" type: compute compute: - vcpus: 2 - cores_per_cpu: 4 - memory: 8 + vcpus: 2 + cores_per_cpu: 4 + memory: 8 register: result - name: create software profile with base version and cluster instance topology. 
Replicated to multiple clusters @@ -472,9 +472,8 @@ profile_uuid: "{{profile_uuid}}" software: version_uuid: "{{version_uuid}}" - publish: True + publish: true register: result - """ RETURN = r""" @@ -788,11 +787,13 @@ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.profiles.profile_types import get_profile_type_obj # noqa: E402 -from ..module_utils.ndb.profiles.profiles import Profile # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.profiles.profile_types import ( # noqa: E402 + get_profile_type_obj, +) +from ..module_utils.v3.ndb.profiles.profiles import Profile # noqa: E402 profile_types_with_version_support = ["software"] profile_types_with_wait_support = ["software"] diff --git a/plugins/modules/ntnx_ndb_profiles_info.py b/plugins/modules/ntnx_ndb_profiles_info.py index f3a354dc3..73423dafc 100644 --- a/plugins/modules/ntnx_ndb_profiles_info.py +++ b/plugins/modules/ntnx_ndb_profiles_info.py @@ -124,7 +124,6 @@ uuid: "" latest_version: true register: result - """ RETURN = r""" response: @@ -184,10 +183,10 @@ ] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.profiles.profile_types import NetworkProfile # noqa: E402 -from ..module_utils.ndb.profiles.profiles import Profile # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.profiles.profile_types import NetworkProfile # noqa: E402 +from ..module_utils.v3.ndb.profiles.profiles import Profile # noqa: E402 def get_module_spec(): diff --git 
a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py index 503e23bfb..b88c64870 100644 --- a/plugins/modules/ntnx_ndb_register_database.py +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -326,7 +326,7 @@ username: "{{vm_username}}" password: "{{vm_password}}" desc: "vm-desc-updated" - reset_desc_in_ntnx_cluster: True + reset_desc_in_ntnx_cluster: true cluster: name: "{{cluster.cluster1.name}}" @@ -608,17 +608,17 @@ import time # noqa: E402 from copy import deepcopy # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 -from ..module_utils.ndb.maintenance_window import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import ( # noqa: E402 AutomatedPatchingSpec, MaintenanceWindow, ) -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_register_db_server_vm.py b/plugins/modules/ntnx_ndb_register_db_server_vm.py index 3c5a9c240..0041607e1 100644 --- a/plugins/modules/ntnx_ndb_register_db_server_vm.py +++ b/plugins/modules/ntnx_ndb_register_db_server_vm.py @@ -141,7 
+141,6 @@ username: "{{vm_username}}" password: "{{vm_password}}" register: result - """ RETURN = r""" response: @@ -348,14 +347,14 @@ import time # noqa: E402 from copy import deepcopy # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 -from ..module_utils.ndb.maintenance_window import ( # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.v3.ndb.maintenance_window import ( # noqa: E402 AutomatedPatchingSpec, MaintenanceWindow, ) -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_replicate_database_snapshots.py b/plugins/modules/ntnx_ndb_replicate_database_snapshots.py index b3f8203e5..369c1453f 100644 --- a/plugins/modules/ntnx_ndb_replicate_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_replicate_database_snapshots.py @@ -147,10 +147,10 @@ """ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.snapshots import Snapshot # noqa: E402 # Notes: # 1. 
Snapshot replication to one cluster at a time is supported currently diff --git a/plugins/modules/ntnx_ndb_slas.py b/plugins/modules/ntnx_ndb_slas.py index eb04fa925..c0b5dae68 100644 --- a/plugins/modules/ntnx_ndb_slas.py +++ b/plugins/modules/ntnx_ndb_slas.py @@ -111,12 +111,12 @@ sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" """ -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.slas import SLA # noqa: E402 from ..module_utils.utils import ( # noqa: E402 remove_param_with_none_value, strip_extra_attrs, ) +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.slas import SLA # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_slas_info.py b/plugins/modules/ntnx_ndb_slas_info.py index b16f6b43c..5ac76b239 100644 --- a/plugins/modules/ntnx_ndb_slas_info.py +++ b/plugins/modules/ntnx_ndb_slas_info.py @@ -55,7 +55,6 @@ validate_certs: false uuid: "" register: result - """ RETURN = r""" response: @@ -104,8 +103,8 @@ ] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.slas import SLA # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.slas import SLA # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_snapshots_info.py b/plugins/modules/ntnx_ndb_snapshots_info.py index bd34bded9..adc591b79 100644 --- a/plugins/modules/ntnx_ndb_snapshots_info.py +++ b/plugins/modules/ntnx_ndb_snapshots_info.py @@ -83,7 +83,6 @@ uuid: "" get_files: true register: result - """ RETURN = r""" response: @@ -228,9 +227,9 @@ ] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 from ..module_utils.utils import format_filters_map # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from 
..module_utils.v3.ndb.snapshots import Snapshot # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_stretched_vlans.py b/plugins/modules/ntnx_ndb_stretched_vlans.py index bebead6d0..a117428f6 100644 --- a/plugins/modules/ntnx_ndb_stretched_vlans.py +++ b/plugins/modules/ntnx_ndb_stretched_vlans.py @@ -50,40 +50,39 @@ """ EXAMPLES = r""" - - name: create ndb stretched vlan - ntnx_ndb_stretched_vlans: - nutanix_host: - nutanix_username: - nutanix_password: - validate_certs: false - name: test-vlan-name - vlans: - - "00000000-0000-0000-0000-000000000000" - - "00000000-0000-0000-0000-000000000000" - register: result - - - name: update ndb stretched vlan - ntnx_ndb_stretched_vlans: - nutanix_host: - nutanix_username: - nutanix_password: - validate_certs: false - stretched_vlan_uuid: "" - vlans: - - "00000000-0000-0000-0000-000000000000" - - "00000000-0000-0000-0000-000000000000" - register: result - - - name: Delete stretched vlan - ntnx_ndb_stretched_vlans: - nutanix_host: "" - nutanix_username: - nutanix_password: - validate_certs: false - state: absent - stretched_vlan_uuid: "" - register: result - +- name: create ndb stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + name: test-vlan-name + vlans: + - "00000000-0000-0000-0000-000000000000" + - "00000000-0000-0000-0000-000000000000" + register: result + +- name: update ndb stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + stretched_vlan_uuid: "" + vlans: + - "00000000-0000-0000-0000-000000000000" + - "00000000-0000-0000-0000-000000000000" + register: result + +- name: Delete stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: "" + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + stretched_vlan_uuid: "" + register: result """ RETURN = r""" @@ -104,9 +103,9 @@ sample: "test-name" """ -from 
..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.stretched_vlans import StretchedVLAN # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.stretched_vlans import StretchedVLAN # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_tags.py b/plugins/modules/ntnx_ndb_tags.py index 4c473805c..70dbecd48 100644 --- a/plugins/modules/ntnx_ndb_tags.py +++ b/plugins/modules/ntnx_ndb_tags.py @@ -58,7 +58,7 @@ ntnx_ndb_tags: name: "{{tag_name}}-clone" desc: tag-created-by-ansible - tag_value_required: False + tag_value_required: false entity_type: CLONE register: result @@ -66,7 +66,7 @@ ntnx_ndb_tags: name: "{{tag_name}}-tm" desc: tag-created-by-ansible - tag_value_required: True + tag_value_required: true entity_type: TIME_MACHINE register: result @@ -100,9 +100,9 @@ """ -from ..module_utils.ndb.base_info_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 from ..module_utils.utils import strip_extra_attrs # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_tags_info.py b/plugins/modules/ntnx_ndb_tags_info.py index acedab100..4956e240c 100644 --- a/plugins/modules/ntnx_ndb_tags_info.py +++ b/plugins/modules/ntnx_ndb_tags_info.py @@ -102,8 +102,8 @@ ] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.tags import Tag # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.tags import Tag # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_time_machine_clusters.py b/plugins/modules/ntnx_ndb_time_machine_clusters.py index 
576abf9f5..a6329cb11 100644 --- a/plugins/modules/ntnx_ndb_time_machine_clusters.py +++ b/plugins/modules/ntnx_ndb_time_machine_clusters.py @@ -86,29 +86,28 @@ - name: update data access instance with new sla name ntnx_ndb_time_machine_clusters: - nutanix_host: - nutanix_username: - nutanix_password: - validate_certs: false - time_machine_uuid: "" - cluster: - name: "" - sla: - name: "" + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + time_machine_uuid: "" + cluster: + name: "" + sla: + name: "" register: result - name: delete time machine ntnx_ndb_time_machine_clusters: - nutanix_host: - nutanix_username: - nutanix_password: - validate_certs: false - state: absent - time_machine_uuid: "" - cluster: - uuid: "" + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + time_machine_uuid: "" + cluster: + uuid: "" register: result - """ RETURN = r""" @@ -154,9 +153,12 @@ import time # noqa: E402 -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.operations import Operation # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine, get_cluster_uuid # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.operations import Operation # noqa: E402 +from ..module_utils.v3.ndb.time_machines import ( # noqa: E402 + TimeMachine, + get_cluster_uuid, +) def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_time_machines_info.py b/plugins/modules/ntnx_ndb_time_machines_info.py index 8fce6fdae..3fc8f7344 100644 --- a/plugins/modules/ntnx_ndb_time_machines_info.py +++ b/plugins/modules/ntnx_ndb_time_machines_info.py @@ -260,9 +260,9 @@ """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 from ..module_utils.utils import format_filters_map # noqa: E402 +from 
..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.time_machines import TimeMachine # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py index bd978ce57..52c18aa20 100644 --- a/plugins/modules/ntnx_ndb_vlans.py +++ b/plugins/modules/ntnx_ndb_vlans.py @@ -107,7 +107,7 @@ nutanix_username: nutanix_password: validate_certs: false - name: test-vlan-name + name: test-vlan-name vlan_type: DHCP cluster: uuid: "" @@ -119,7 +119,7 @@ nutanix_username: nutanix_password: validate_certs: false - name: test-vlan-name + name: test-vlan-name vlan_type: Static cluster: uuid: "" @@ -156,7 +156,6 @@ state: absent vlan_uuid: "" register: result - """ RETURN = r""" @@ -263,9 +262,9 @@ } ] """ -from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.vlans import VLAN # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.v3.ndb.vlans import VLAN # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_ndb_vlans_info.py b/plugins/modules/ntnx_ndb_vlans_info.py index 8b34f07db..3a82d38e9 100644 --- a/plugins/modules/ntnx_ndb_vlans_info.py +++ b/plugins/modules/ntnx_ndb_vlans_info.py @@ -55,7 +55,6 @@ validate_certs: false uuid: "" register: result - """ RETURN = r""" response: @@ -65,8 +64,8 @@ sample: [] """ -from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.vlans import VLAN # noqa: E402 +from ..module_utils.v3.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.v3.ndb.vlans import VLAN # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_nodes_network_info_v2.py b/plugins/modules/ntnx_nodes_network_info_v2.py new file mode 100644 index 000000000..a8bc90f33 --- /dev/null +++ 
b/plugins/modules/ntnx_nodes_network_info_v2.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_nodes_network_info_v2 +short_description: Get network information for unconfigured cluster nodes +description: + - This module allows you to get network information for unconfigured cluster nodes. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + cluster_ext_id: + description: + - The external ID of the cluster. + type: str + required: true + request_type: + description: + - Type of request, either it can be expand_cluster or npe. + type: str + node_list: + description: + - List of nodes to be added or removed. + type: list + elements: dict + required: true + suboptions: + node_uuid: + description: UUID of the node. + type: str + required: false + block_id: + description: ID of the block to which the node belongs. + type: str + required: false + node_position: + description: Position of the node. + type: str + required: false + hypervisor_type: + description: Type of the hypervisor. + type: str + choices: ['AHV', 'ESX', 'HYPERV', 'XEN', 'NATIVEHOST'] + required: false + is_robo_mixed_hypervisor: + description: Whether the node is a mixed hypervisor in a ROBO deployment. + type: bool + required: false + hypervisor_version: + description: Version of the hypervisor. + type: str + required: false + nos_version: + description: Version of the Nutanix Operating System (NOS). + type: str + required: false + is_light_compute: + description: Whether the node is a light compute node. + type: bool + required: false + ipmi_ip: + description: List of IP addresses for IPMI. + type: dict + suboptions: + ipv4: + description: IPv4 address.
+ type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + digital_certificate_map_list: + description: List of digital certificates. + type: list + elements: dict + suboptions: + key: + description: Key of the digital certificate. + type: str + value: + description: Value of the digital certificate. + type: str + required: false + cvm_ip: + description: List of IP addresses for CVM. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + hypervisor_ip: + description: List of IP addresses for the hypervisor. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: IP address. + type: str + required: true + prefix_length: + description: Prefix length of the IP address. + type: int + required: false + required: false + model: + description: Model of the node. + type: str + required: false + current_network_interface: + description: Current network interface of the node. 
+ type: str + required: false + is_compute_only: + description: Indicates whether the node is compute only or not. + type: bool + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Get network information for unconfigured cluster nodes + nutanix.ncp.ntnx_nodes_network_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + cluster_ext_id: 00061de6-1234-4321-1122-ac1f6b6f97e2 + node_list: + - cvm_ip: + ipv4: + value: "10.0.0.1" + hypervisor_ip: + ipv4: + value: "10.0.0.2" + request_type: "expand_cluster" +""" + +RETURN = r""" +response: + description: + - Response for getting network information for unconfigured cluster nodes. + type: dict + returned: always + sample: + { + "ext_id": "54fbdaf3-972d-4d1c-4413-005a9fe1fc1d", + "links": null, + "response": { + "network_info": { + "hci": [ + { + "hypervisor_type": "AHV", + "name": "br0", + "networks": [ + "Management" + ] + } + ], + "so": [ + { + "hypervisor_type": "AHV", + "name": "br0", + "networks": [ + "Management" + ] + } + ] + }, + "uplinks": [ + { + "cvm_ip": { + "ipv4": { + "prefix_length": 32, + "value": "10.39.6.77" + }, + "ipv6": null + }, + "uplink_list": [ + { + "mac": "00:e0:ed:36:41:a8", + "name": "eth2" + }, + { + "mac": "0c:c4:7a:c7:c2:0b", + "name": "eth1" + }, + { + "mac": "00:e0:ed:36:41:a9", + "name": "eth3" + }, + { + "mac": "0c:c4:7a:c7:c2:0a", + "name": "eth0" + } + ] + } + ], + "warnings": null + }, + "task_response_type": "NETWORKING_DETAILS", + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs + +cluster_ext_id: + description: The external ID of the cluster. + type: str + returned: always + +task_ext_id: + description: The external ID of the task. 
+ type: str + returned: always + +""" + + +import traceback # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_clusters_api_instance, +) +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_clustermgmt_py_client as clustermgmt_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as clustermgmt_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + ipv4_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ipv6_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ip_address_spec = dict( + ipv4=dict(type="dict", options=ipv4_spec, obj=clustermgmt_sdk.IPv4Address), + ipv6=dict(type="dict", options=ipv6_spec, obj=clustermgmt_sdk.IPv6Address), + ) + + digital_certificate_map_list_spec = dict( + key=dict(type="str", required=False, no_log=False), + value=dict(type="str", required=False), + ) + + node_spec = dict( + node_uuid=dict(type="str", required=False), + block_id=dict(type="str", required=False), + node_position=dict(type="str", required=False), + cvm_ip=dict( + type="dict", options=ip_address_spec, obj=clustermgmt_sdk.IPAddress + ), + hypervisor_ip=dict( + type="dict", options=ip_address_spec, obj=clustermgmt_sdk.IPAddress + ), + ipmi_ip=dict( + type="dict", options=ip_address_spec, obj=clustermgmt_sdk.IPAddress + ), + digital_certificate_map_list=dict( + type="list", + elements="dict", + 
options=digital_certificate_map_list_spec, + obj=clustermgmt_sdk.DigitalCertificateMapReference, + ), + model=dict(type="str", required=False), + is_compute_only=dict(type="bool", required=False), + is_light_compute=dict(type="bool", required=False), + hypervisor_type=dict( + type="str", + choices=["AHV", "ESX", "HYPERV", "XEN", "NATIVEHOST"], + obj=clustermgmt_sdk.HypervisorType, + required=False, + ), + hypervisor_version=dict(type="str", required=False), + nos_version=dict(type="str", required=False), + current_network_interface=dict(type="str", required=False), + is_robo_mixed_hypervisor=dict(type="bool", required=False), + ) + module_args = dict( + node_list=dict( + type="list", + elements="dict", + options=node_spec, + obj=clustermgmt_sdk.NodeListNetworkingDetails, + required=True, + ), + cluster_ext_id=dict(type="str", required=True), + request_type=dict(type="str"), + ) + return module_args + + +def get_nodes_network_information(module, cluster_node_api, result): + sg = SpecGenerator(module) + default_spec = clustermgmt_sdk.NodeDetails() + spec, err = sg.generate_spec(default_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for getting network information for cluster nodes", + **result, + ) + cluster_ext_id = module.params.get("cluster_ext_id") + result["cluster_ext_id"] = cluster_ext_id + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + resp = None + try: + resp = cluster_node_api.fetch_node_networking_details( + clusterExtId=cluster_ext_id, body=spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while getting network information for cluster nodes", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + if ":" in 
task_ext_id: + task_ext_id = task_ext_id.split(":")[1] + task_status = cluster_node_api.fetch_task_response( + extId=task_ext_id, taskResponseType="NETWORKING_DETAILS" + ) + result["response"] = strip_internal_attributes(task_status.data.to_dict()) + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + } + cluster_node_api = get_clusters_api_instance(module) + get_nodes_network_information(module, cluster_node_api, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_operations_info_v2.py b/plugins/modules/ntnx_operations_info_v2.py new file mode 100644 index 000000000..a25cf4a89 --- /dev/null +++ b/plugins/modules/ntnx_operations_info_v2.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_operations_info_v2 +short_description: Module to fetch IAM operations info (previously `permissions`) +version_added: 2.0.0 +description: + - This module is used to get operations info + - It can be used to get all operations info or specific permission info using external id + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Permission external id + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + 
+EXAMPLES = r""" +- name: List all operations + nutanix.ncp.ntnx_operations_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: Fetch permission info using external id + nutanix.ncp.ntnx_operations_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "{{ permission_ext_id }}" + register: result + +- name: List operations using filter criteria + nutanix.ncp.ntnx_operations_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "displayName eq 'Display_Name_Test'" +""" +RETURN = r""" +response: + description: + - Response for fetching permissions info + - Response will have list or single operation info as per the spec provided + type: dict + returned: always + sample: + { + "associated_endpoint_list": [ + { + "api_version": "V4", + "endpoint_url": "/config/file-servers/{extId}/$actions/search-user-mapping", + "http_method": "POST" + } + ], + "client_name": "FilesManagerService", + "created_time": "2024-05-28T09:24:56.559305+00:00", + "description": "Allows to search file server user mapping", + "display_name": "Search_File_Server_User_Mapping", + "entity_type": "files", + "ext_id": "251d4a4f-244f-4c84-70a9-8c8f68f9dff0", + "last_updated_time": "2024-05-28T09:25:09.249611+00:00", + "links": null, + "operation_type": "EXTERNAL", + "related_operation_list": [ + "ad8a998d-06ca-404e-5e15-e91e2de3a783" + ], + "tenant_id": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically 
holds information about if the task have failed + returned: always + type: bool + sample: false + +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import get_permission_api_instance # noqa: E402 +from ..module_utils.v4.iam.helpers import get_permission # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_operation_by_ext_id(module, operations, result): + ext_id = module.params.get("ext_id") + resp = get_permission(module, operations, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_operations(module, operations, result): + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating operations info Spec", **result) + + try: + resp = operations.list_operations(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching permission info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + operations = get_permission_api_instance(module) + if 
module.params.get("ext_id"): + get_operation_by_ext_id(module, operations, result) + else: + get_operations(module, operations, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_pbrs.py b/plugins/modules/ntnx_pbrs.py index 497d0ac20..e620b3eb0 100644 --- a/plugins/modules/ntnx_pbrs.py +++ b/plugins/modules/ntnx_pbrs.py @@ -183,7 +183,7 @@ - name: create PBR with vpc name with any source or destination or protocol with deny action ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -192,17 +192,17 @@ vpc: name: "{{ vpc.name }}" source: - any: True + any: true destination: - any: True + any: true action: - deny: True + deny: true protocol: - any: True + any: true - name: create PBR with vpc uuid with source Any and destination external and allow action with protocol number ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -211,18 +211,18 @@ vpc: uuid: "{{ vpc.uuid }}" source: - any: True + any: true destination: - external: True + external: true action: - allow: True + allow: true type: bool protocol: number: "{{ protocol.number }}" - name: create PBR with vpc name with source external and destination network with reroute action and tcp port rangelist ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -231,7 +231,7 @@ vpc: name: "{{ vpc.name }}" source: - external: True + external: true destination: network: ip: "{{ network.ip }}" @@ -245,7 +245,7 @@ - name: create PBR with vpc name with source network and destination external with reroute action and udp port rangelist ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -258,7 +258,7 @@ 
ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - any: True + any: true action: reroute: "{{reroute_ip}}" protocol: @@ -268,7 +268,7 @@ - name: create PBR with vpc name with source network and destination external with reroute action and icmp ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -281,7 +281,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{ reroute_ip }}" protocol: @@ -398,9 +398,9 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.pbrs import Pbr # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.pbrs import Pbr # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_pbrs_info.py b/plugins/modules/ntnx_pbrs_info.py index 082a46e70..a49b26087 100644 --- a/plugins/modules/ntnx_pbrs_info.py +++ b/plugins/modules/ntnx_pbrs_info.py @@ -33,29 +33,28 @@ - Dina AbuHijleh (@dina-abuhijleh) """ EXAMPLES = r""" - - name: List pbrs using priority filter criteria - ntnx_pbrs_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - priority: "2" - kind: routing_policy - register: result - - - name: List pbrs using length, offset, sort order and priority sort attribute - ntnx_pbrs_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 2 - offset: 0 - sort_order: "ASCENDING" - sort_attribute: "priority" - register: result - +- name: List pbrs using priority filter criteria + ntnx_pbrs_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ 
password }}" + validate_certs: false + filter: + priority: "2" + kind: routing_policy + register: result + +- name: List pbrs using length, offset, sort order and priority sort attribute + ntnx_pbrs_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 2 + offset: 0 + sort_order: "ASCENDING" + sort_attribute: "priority" + register: result """ RETURN = r""" api_version: @@ -149,9 +148,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.pbrs import Pbr # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.pbrs import Pbr # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_pbrs_info_v2.py b/plugins/modules/ntnx_pbrs_info_v2.py new file mode 100644 index 000000000..82fa5b646 --- /dev/null +++ b/plugins/modules/ntnx_pbrs_info_v2.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_pbrs_info_v2 +short_description: Routing Policies info module +version_added: 2.0.0 +description: + - Fetch a single or list of all routing policies + - if ext_id is provided, it will return the routing policy info + - if ext_id is not provided, it will return the list of all routing policies + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Routing policy external ID + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: List 
all pbrs + nutanix.ncp.ntnx_pbrs_info_v2: + register: result + ignore_errors: true + +- name: List pbrs using name filter criteria + nutanix.ncp.ntnx_pbrs_info_v2: + filter: "name eq 'test_policy_name'" + register: result + ignore_errors: true + +- name: List pbrs using ext_id + nutanix.ncp.ntnx_pbrs_info_v2: + ext_id: '47ca25c3-9d27-4b94-b6b1-dfa5b25660b4' + register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - The response from the routing policy v4 API. + - it can be routing policy or list of routing policies as per spec. + returned: always + type: dict + sample: + { + "description": null, + "ext_id": "44ad2150-b103-4346-a26b-4d0ad858cddf", + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": "admin", + "project_name": null, + "project_reference_id": null + }, + "name": "virtual-network-deny-all", + "policies": [ + { + "is_bidirectional": false, + "policy_action": { + "action_type": "DENY", + "nexthop_ip_address": null, + "reroute_params": null + }, + "policy_match": { + "destination": { + "address_type": "ANY", + "subnet_prefix": null + }, + "protocol_parameters": null, + "protocol_type": "ANY", + "source": { + "address_type": "ANY", + "subnet_prefix": null + } + } + } + ], + "priority": 1, + "tenant_id": null, + "vpc": null, + "vpc_ext_id": "69665951-76db-401c-8b92-9a60af7d024e", + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 
+ get_routing_policies_api_instance, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_pbr(module, result): + pbrs = get_routing_policies_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = pbrs.get_routing_policy_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching routing policies info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_pbrs(module, result): + pbrs = get_routing_policies_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating pbrs info Spec", **result) + + try: + resp = pbrs.list_routing_policies(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching pbrs info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_pbr(module, result) + else: + get_pbrs(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_pbrs_v2.py 
b/plugins/modules/ntnx_pbrs_v2.py new file mode 100644 index 000000000..5f7d4e978 --- /dev/null +++ b/plugins/modules/ntnx_pbrs_v2.py @@ -0,0 +1,1008 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_pbrs_v2 +short_description: Module for create, update and delete of Policy based routing. +version_added: 2.0.0 +description: + - Create, Update, Delete Routing Policies + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - if C(state) is present, it will create or update the routing policy. + - If C(state) is set to C(present) and ext_id is not provided then the operation will be create the routing policy + - If C(state) is set to C(present) and ext_id is provided then the operation will be update the routing policy + - If C(state) is set to C(absent) and ext_id is provided , then operation will be delete the routing policy + type: str + choices: ['present', 'absent'] + wait: + description: + - Wait for the task to complete + type: bool + default: true + description: + description: A description of the routing policy. + type: str + ext_id: + description: + - external ID of the routing policy. + - Required for updating or deleting the routing policy. + type: str + metadata: + description: Metadata associated with this resource. + suboptions: + category_ids: + description: A list of globally unique identifiers that represent all the categories the resource is associated with. + elements: str + type: list + owner_reference_id: + description: A globally unique identifier that represents the owner of this resource. + type: str + project_reference_id: + description: A globally unique identifier that represents the project this resource belongs to. 
+ type: str + type: dict + name: + description: Name of the routing policy. + type: str + policies: + description: List of routing policy rules. + elements: dict + suboptions: + is_bidirectional: + description: If True, policies in the reverse direction will be installed with the same action but source and destination will be swapped. + type: bool + policy_action: + description: The action to be taken on the traffic matching the routing policy. + type: dict + suboptions: + action_type: + description: Routing policy action type. + choices: + - PERMIT + - DENY + - REROUTE + - FORWARD + type: str + reroute_params: + description: Parameters for rerouting action. + elements: dict + type: list + suboptions: + reroute_fallback_action: + description: Type of fallback action in reroute case when service VM is down. + choices: + - ALLOW + - DROP + - PASSTHROUGH + - NO_ACTION + type: str + service_ip: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ingress_service_ip: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. 
+ type: str + egress_service_ip: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + nexthop_ip_address: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + + policy_match: + description: Match condition for the traffic that is entering the VPC. + type: dict + suboptions: + source: + description: Address Type like "EXTERNAL" or "ANY". + type: dict + suboptions: + address_type: + description: Address type. + choices: + - ANY + - EXTERNAL + - SUBNET + type: str + subnet_prefix: + description: Subnet prefix specification. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + ip: + description: IPv4 address specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + prefix_length: + description: Prefix length. + type: int + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + ip: + description: IPv6 address specification. 
+ type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + prefix_length: + description: Prefix length. + type: int + destination: + description: Address Type like "EXTERNAL" or "ANY". + type: dict + suboptions: + address_type: + description: Address type. + choices: + - ANY + - EXTERNAL + - SUBNET + type: str + subnet_prefix: + description: Subnet prefix specification. + type: dict + suboptions: + ipv4: + description: IPv4 subnet specification. + type: dict + suboptions: + ip: + description: IPv4 address specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + prefix_length: + description: Prefix length. + type: int + ipv6: + description: IPv6 subnet specification. + type: dict + suboptions: + ip: + description: IPv6 address specification. + type: dict + suboptions: + prefix_length: + description: Prefix length. + type: int + value: + description: IP address value. + type: str + prefix_length: + description: Prefix length. + type: int + protocol_parameters: + description: Protocol parameters. + type: dict + suboptions: + icmp: + description: ICMP parameters. + type: dict + suboptions: + icmp_code: + description: ICMP code. + type: int + icmp_type: + description: ICMP type. + type: int + protocol_number: + description: Protocol number. + type: dict + suboptions: + protocol_number: + description: Protocol number. + type: int + tcp: + description: TCP parameters. + type: dict + suboptions: + destination_port_ranges: + description: Destination port ranges. + elements: dict + type: list + suboptions: + end_port: + description: End port of the range. + type: int + start_port: + description: Start port of the range. + type: int + source_port_ranges: + description: Source port ranges. + elements: dict + type: list + suboptions: + end_port: + description: End port of the range. 
+ type: int + start_port: + description: Start port of the range. + type: int + udp: + description: UDP parameters. + type: dict + suboptions: + destination_port_ranges: + description: Destination port ranges. + elements: dict + type: list + suboptions: + end_port: + description: End port of the range. + type: int + start_port: + description: Start port of the range. + type: int + source_port_ranges: + description: Source port ranges. + elements: dict + type: list + suboptions: + end_port: + description: End port of the range. + type: int + start_port: + description: Start port of the range. + type: int + protocol_type: + description: Type of protocol. + choices: + - ANY + - ICMP + - TCP + - UDP + - PROTOCOL_NUMBER + type: str + type: list + priority: + description: Priority of the routing policy. + type: int + vpc_ext_id: + description: ExtId of the VPC extId to which the routing policy belongs. + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create PBR with vpc, custom source network, external destination, reroute action and udp port rangelist + nutanix.ncp.ntnx_pbrs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vpc_ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + state: present + priority: 207 + name: "test_name" + policies: + - + policy_action: + action_type: REROUTE + reroute_params: + - + reroute_fallback_action: NO_ACTION + service_ip: + ipv4: + value: 10.0.0.16 + prefix_length: 32 + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: 192.168.1.0 + prefix_length: 24 + destination: + address_type: ANY + protocol_type: UDP + protocol_parameters: + udp: + source_port_ranges: + - start_port: 10 + end_port: 20 + destination_port_ranges: + - start_port: 30 + 
end_port: 40 + register: result + ignore_errors: true + +- name: Create a routing policy for a VPC to permit certain source for certain destination + nutanix.ncp.ntnx_pbrs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: test + priority: 2 + vpc_ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + policies: + - + policy_action: + action_type: PERMIT + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: 192.168.1.0 + prefix_length: 24 + destination: + address_type: EXTERNAL + protocol_type: ICMP + protocol_parameters: + icmp: + icmp_type: 25 + icmp_code: 1 + register: result + + +- name: Create PBR with vpc, any source, any destination, any protocol and deny action + nutanix.ncp.ntnx_pbrs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vpc_ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + state: present + priority: 3 + name: test2 + policies: + - + policy_action: + action_type: DENY + policy_match: + source: + address_type: ANY + destination: + address_type: ANY + protocol_type: ANY + register: result + +- name: Update PBR name ,description, priority + nutanix.ncp.ntnx_pbrs_v2: + state: present + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + priority: "156" + name: "new_name" + description: "Updated Test desc" + register: result + ignore_errors: true + +- name: Delete created pbr + nutanix.ncp.ntnx_pbrs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + register: result +""" + +RETURN = r""" +response: + description: + - Response for the routing policy operations. + - routing policy details if C(wait) is true. + - Task details if C(wait) is false. 
+ returned: always + type: dict + sample: + { + "description": null, + "ext_id": "d58da731-6baa-4d36-8afd-f400619012f1", + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": null, + "project_name": null, + "project_reference_id": null + }, + "name": "HBLzQdIgfKUoansible-pbr-4", + "policies": [ + { + "is_bidirectional": false, + "policy_action": { + "action_type": "REROUTE", + "nexthop_ip_address": null, + "reroute_params": [ + { + "egress_service_ip": { + "ipv4": { + "prefix_length": 32, + "value": "10.0.0.15" + }, + "ipv6": null + }, + "ingress_service_ip": { + "ipv4": { + "prefix_length": 32, + "value": "10.0.0.15" + }, + "ipv6": null + }, + "reroute_fallback_action": "NO_ACTION", + "service_ip": { + "ipv4": { + "prefix_length": 32, + "value": "10.0.0.15" + }, + "ipv6": null + } + } + ] + }, + "policy_match": { + "destination": { + "address_type": "SUBNET", + "subnet_prefix": { + "ipv4": { + "ip": { + "prefix_length": 32, + "value": "192.168.2.0" + }, + "prefix_length": null + }, + "ipv6": null + } + }, + "protocol_parameters": { + "destination_port_ranges": [ + { + "end_port": 120, + "start_port": 100 + } + ], + "source_port_ranges": [ + { + "end_port": 80, + "start_port": 80 + } + ] + }, + "protocol_type": "TCP", + "source": { + "address_type": "EXTERNAL", + "subnet_prefix": null + } + } + } + ], + "priority": 208, + "tenant_id": null, + "vpc": null, + "vpc_ext_id": "41a565cc-f669-4ce1-807f-56a71ae946ad" + } + +changed: + description: + - Whether the routing policy is changed or not. + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + + +ext_id: + description: + - External ID of the routing policy. 
+ returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + +skipped: + description: + - Whether the operation is skipped or not. + - Will be returned if operation is skipped. + type: bool + returned: always +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_etag, + get_routing_policies_api_instance, +) +from ..module_utils.v4.network.helpers import get_routing_policy # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client as net_sdk # noqa: E402 +except ImportError: + from ..module_utils.v4.sdk_mock import mock_sdk as net_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + protocol_param_obj_map = { + "icmp": net_sdk.ICMPObject, + "tcp": net_sdk.LayerFourProtocolObject, + "udp": net_sdk.LayerFourProtocolObject, + "protocol_number": net_sdk.ProtocolNumberObject, + } + + ip_address_sub_spec = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + ip_address_spec = dict( + ipv4=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + ipv6=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + ) + + ipv4_subnet_spec = dict( + ip=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + 
prefix_length=dict(type="int"), + ) + + ipv6_subnet_spec = dict( + ip=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + prefix_length=dict(type="int"), + ) + + ip_subnet_spec = dict( + ipv4=dict(type="dict", options=ipv4_subnet_spec, obj=net_sdk.IPv4Subnet), + ipv6=dict(type="dict", options=ipv6_subnet_spec, obj=net_sdk.IPv6Subnet), + ) + + address_type_object_spec = dict( + address_type=dict(type="str", choices=["ANY", "EXTERNAL", "SUBNET"]), + subnet_prefix=dict(type="dict", options=ip_subnet_spec, obj=net_sdk.IPSubnet), + ) + + icmp_spec = dict( + icmp_type=dict(type="int"), + icmp_code=dict(type="int"), + ) + + port_range_spec = dict( + start_port=dict(type="int"), + end_port=dict(type="int"), + ) + + tcp_udp_spec = dict( + source_port_ranges=dict( + type="list", elements="dict", options=port_range_spec, obj=net_sdk.PortRange + ), + destination_port_ranges=dict( + type="list", elements="dict", options=port_range_spec, obj=net_sdk.PortRange + ), + ) + + protocol_number_spec = dict(protocol_number=dict(type="int")) + + protocol_params_spec = dict( + icmp=dict(type="dict", options=icmp_spec), + tcp=dict(type="dict", options=tcp_udp_spec), + udp=dict(type="dict", options=tcp_udp_spec), + protocol_number=dict(type="dict", options=protocol_number_spec), + ) + + rpmc_spec = dict( + source=dict( + type="dict", options=address_type_object_spec, obj=net_sdk.AddressTypeObject + ), + destination=dict( + type="dict", options=address_type_object_spec, obj=net_sdk.AddressTypeObject + ), + protocol_type=dict( + type="str", choices=["ANY", "ICMP", "TCP", "UDP", "PROTOCOL_NUMBER"] + ), + protocol_parameters=dict( + type="dict", + options=protocol_params_spec, + obj=protocol_param_obj_map, + mutually_exclusive=[("icmp", "tcp", "udp", "protocol_number")], + ), + ) + + reroute_param_spec = dict( + service_ip=dict(type="dict", options=ip_address_spec, obj=net_sdk.IPAddress), + reroute_fallback_action=dict( + type="str", choices=["ALLOW", "DROP", 
"PASSTHROUGH", "NO_ACTION"] + ), + ingress_service_ip=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + egress_service_ip=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + ) + + rpa_spec = dict( + action_type=dict(type="str", choices=["PERMIT", "DENY", "REROUTE", "FORWARD"]), + reroute_params=dict( + type="list", + elements="dict", + options=reroute_param_spec, + obj=net_sdk.RerouteParam, + ), + nexthop_ip_address=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + ) + + rpr_spec = dict( + policy_match=dict( + type="dict", options=rpmc_spec, obj=net_sdk.RoutingPolicyMatchCondition + ), + policy_action=dict( + type="dict", options=rpa_spec, obj=net_sdk.RoutingPolicyAction + ), + is_bidirectional=dict(type="bool"), + ) + + metadata_spec = dict( + owner_reference_id=dict(type="str"), + project_reference_id=dict(type="str"), + category_ids=dict(type="list", elements="str"), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + priority=dict(type="int"), + vpc_ext_id=dict(type="str"), + policies=dict( + type="list", + elements="dict", + options=rpr_spec, + obj=net_sdk.RoutingPolicyRule, + ), + metadata=dict(type="dict", options=metadata_spec, obj=net_sdk.Metadata), + ) + + return module_args + + +def get_routing_policy_ext_id(module, result, api_instance, vpc_ext_id, priority): + params = { + "filter": "priority eq {0} and vpcExtId eq '{1}'".format(priority, vpc_ext_id) + } + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=params) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for fetching routing policy using priority and vpc_ext_id", + **result, + ) + + try: + resp = api_instance.list_routing_policies(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching routing policies info", + ) + + if resp and 
getattr(resp, "data", []): + return resp.data[0].ext_id + else: + return None + + +def create_pbr(module, result): + if not module.params.get("vpc_ext_id") and module.params.get("priority"): + module.fail_json( + msg="vpc_ext_id and priority are required for creating routing policy", + **result, + ) + + pbrs = get_routing_policies_api_instance(module) + + sg = SpecGenerator(module) + default_spec = net_sdk.RoutingPolicy() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create pbrs Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = pbrs.create_routing_policy(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating pbr", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_routing_policy_ext_id( + module, + result, + pbrs, + module.params.get("vpc_ext_id"), + module.params.get("priority"), + ) + if ext_id: + resp = get_routing_policy(module, pbrs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_pbrs_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + return True + + +def update_pbr(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + pbrs = get_routing_policies_api_instance(module) + current_spec = get_routing_policy(module, pbrs, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + 
module.fail_json(msg="Failed generating pbrs update spec", **result) + + # check for idempotency + if check_pbrs_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = pbrs.update_routing_policy_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating pbr", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_routing_policy(module, pbrs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_pbr(module, result): + pbrs = get_routing_policies_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_routing_policy(module, pbrs, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for deleting pbr", **result) + + kwargs = {"if_match": etag} + + try: + resp = pbrs.delete_routing_policy_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting pbr", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + 
argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id", "priority", "vpc_ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_pbr(module, result) + else: + create_pbr(module, result) + else: + delete_pbr(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_pc_registration_v2.py b/plugins/modules/ntnx_pc_registration_v2.py new file mode 100644 index 000000000..27760c7ef --- /dev/null +++ b/plugins/modules/ntnx_pc_registration_v2.py @@ -0,0 +1,530 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_pc_registration_v2 +short_description: Registers a domain manager (Prism Central) instance to other entities like PE and PC +version_added: 2.0.0 +description: + - Registers a domain manager (Prism Central) instance to other entities like PE and PC + - Unregisteration of a domain manager (Prism Central) instance is not supported + - This module uses PC v4 APIs based SDKs +options: + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the prism central cluster. + - Required for registering prism central to a remote cluster. 
+ type: str + required: true + remote_cluster: + description: + - Description of the remote cluster. + type: dict + required: true + suboptions: + domain_manager_remote_cluster: + description: + - Domain manager (Prism Central) remote cluster details. + type: dict + suboptions: + remote_cluster: + description: + - The remote cluster details. + type: dict + required: true + suboptions: + address: + description: + - The address of the remote cluster. + type: dict + required: true + suboptions: + ipv4: + description: + - The IPv4 address of the remote cluster. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The IPv4 address prefix length. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the remote cluster. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The IPv6 address prefix length. + type: int + required: false + default: 128 + fqdn: + description: + - The FQDN of the remote cluster. + type: dict + suboptions: + value: + description: + - The FQDN value. + type: str + required: true + credentials: + description: + - The credentials of the remote cluster. + type: dict + required: true + suboptions: + authentication: + description: + - The authentication details. + type: dict + required: true + suboptions: + username: + description: + - The username of the remote cluster. + type: str + required: true + password: + description: + - The password of the remote cluster. + type: str + required: true + cloud_type: + description: + - The cloud type of the remote cluster. + type: str + choices: + - NUTANIX_HOSTED_CLOUD + - ONPREM_CLOUD + required: true + aos_remote_cluster: + description: + - The AOS remote cluster details. 
+ - Register a Prism Element to current Prism Central + type: dict + suboptions: + remote_cluster: + description: + - The remote cluster details. + type: dict + required: true + suboptions: + address: + description: + - The address of the remote cluster. + type: dict + required: true + suboptions: + ipv4: + description: + - The IPv4 address of the remote cluster. + type: dict + suboptions: + value: + description: + - The IPv4 address value. + type: str + required: true + prefix_length: + description: + - The IPv4 address prefix length. + type: int + required: false + default: 32 + ipv6: + description: + - The IPv6 address of the remote cluster. + type: dict + suboptions: + value: + description: + - The IPv6 address value. + type: str + required: true + prefix_length: + description: + - The IPv6 address prefix length. + type: int + required: false + default: 128 + fqdn: + description: + - The FQDN of the remote cluster. + type: dict + suboptions: + value: + description: + - The FQDN value. + type: str + required: true + credentials: + description: + - The credentials of the remote cluster. + type: dict + required: true + suboptions: + authentication: + description: + - The authentication details. + type: dict + required: true + suboptions: + username: + description: + - The username of the remote cluster. + type: str + required: true + password: + description: + - The password of the remote cluster. + type: str + required: true + cluster_reference: + description: + - The cluster reference details. + type: dict + suboptions: + ext_id: + description: + - The external ID of the cluster. 
+ type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: PE PC registration + nutanix.ncp.ntnx_pc_registration_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: "00000000-0000-0000-0000-000000000000" + remote_cluster: + aos_remote_cluster: + remote_cluster: + address: + ipv4: + value: "10.0.0.1" + credentials: + authentication: + username: "admin" + password: "password" + register: result + ignore_errors: true +""" + +RETURN = r""" +response: + description: + - Response for prism central registration operation. + - This field typically holds the task details. + returned: always + type: dict + sample: + { + "cluster_ext_ids": null, + "completed_time": "2024-10-15T07:16:04.131903+00:00", + "completion_details": null, + "created_time": "2024-10-15T07:15:25.618518+00:00", + "entities_affected": [ + { + "ext_id": "00062458-703d-3e3f-0992-ff4d2894511e", + "name": "00062458-703d-3e3f-0992-ff4d2894511e", + "rel": "clustermgmt:config:cluster" + }, + { + "ext_id": "d2f9994f-44fb-4d4c-ad3c-92055316444f", + "name": "PC_10.44.76.49", + "rel": "prism:management:domain_manager" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:1dd2b6d5-595d-5c4e-918b-b2e312141ac0", + "is_background_task": false, + "is_cancelable": false, + "last_updated_time": "2024-10-15T07:16:04.131902+00:00", + "legacy_error_message": null, + "number_of_entities_affected": 2, + "number_of_subtasks": 0, + "operation": "RegisterAOS", + "operation_description": "Register Prism Element", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "root_task": null, + "started_time": "2024-10-15T07:15:27.717634+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": null, + "warnings": null + } + +changed: + description: This indicates 
whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: The external ID of the prism central cluster. + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + +task_ext_id: + description: Task external ID. + type: str + returned: always +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.pc_api_client import ( # noqa: E402 + get_domain_manager_api_instance, +) +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_prism_py_client as prism_sdk # noqa: E402 +except ImportError: + from ..module_utils.v4.sdk_mock import mock_sdk as prism_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + ipv4_address = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False, default=32), + ) + + ipv6_address = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False, default=128), + ) + + fqdn = dict(value=dict(type="str", required=True)) + + address_spec = dict( + ipv4=dict( + type="dict", + options=ipv4_address, + obj=prism_sdk.IPv4Address, + required=False, + ), + ipv6=dict( + 
type="dict", + options=ipv6_address, + obj=prism_sdk.IPv6Address, + required=False, + ), + fqdn=dict(type="dict", options=fqdn, obj=prism_sdk.FQDN, required=False), + ) + credentials_spec = dict( + authentication=dict( + type="dict", + obj=prism_sdk.BasicAuth, + options=dict( + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + ), + required=True, + ) + ) + + remote_cluster_spec = dict( + address=dict( + type="dict", + options=address_spec, + obj=prism_sdk.IPAddressOrFQDN, + required=True, + mutually_exclusive=[("ipv4", "ipv6", "fqdn")], + ), + credentials=dict( + type="dict", + options=credentials_spec, + obj=prism_sdk.Credentials, + required=True, + ), + ) + domain_manager_remote_cluster_spec = dict( + remote_cluster=dict( + type="dict", + options=remote_cluster_spec, + obj=prism_sdk.RemoteClusterSpec, + required=True, + ), + cloud_type=dict( + type="str", + choices=["NUTANIX_HOSTED_CLOUD", "ONPREM_CLOUD"], + required=True, + ), + ) + + aos_remote_cluster_spec = dict( + remote_cluster=dict( + type="dict", + options=remote_cluster_spec, + obj=prism_sdk.RemoteClusterSpec, + required=True, + ), + ) + + cluster_reference_spec = dict( + ext_id=dict(type="str", required=True), + ) + remote_cluster_allowed_types = { + "domain_manager_remote_cluster": prism_sdk.DomainManagerRemoteClusterSpec, + "aos_remote_cluster": prism_sdk.AOSRemoteClusterSpec, + "cluster_reference": prism_sdk.ClusterReference, + } + module_args = dict( + ext_id=dict(type="str", required=True), + remote_cluster=dict( + type="dict", + obj=remote_cluster_allowed_types, + options=dict( + domain_manager_remote_cluster=dict( + type="dict", + options=domain_manager_remote_cluster_spec, + required=False, + ), + aos_remote_cluster=dict( + type="dict", + options=aos_remote_cluster_spec, + required=False, + ), + cluster_reference=dict( + type="dict", options=cluster_reference_spec, required=False + ), + ), + mutually_exclusive=[ + ( + 
"domain_manager_remote_cluster", + "aos_remote_cluster", + "cluster_reference", + ) + ], + required=True, + ), + ) + return module_args + + +def register_pc(module, domain_manager, result): + sg = SpecGenerator(module) + default_spec = prism_sdk.ClusterRegistrationSpec() + spec, err = sg.generate_spec(obj=default_spec) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + if err: + result["error"] = err + module.fail_json(msg="Failed generating PC registration Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = domain_manager.register(body=spec, extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while PC registration", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_prism_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + domain_manager = get_domain_manager_api_instance(module) + register_pc(module, domain_manager, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_permissions_info.py b/plugins/modules/ntnx_permissions_info.py index b8703af88..dd1e7e4f9 100644 --- a/plugins/modules/ntnx_permissions_info.py +++ b/plugins/modules/ntnx_permissions_info.py @@ -124,9 +124,9 @@ """ -from 
..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.permissions import Permission # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.permissions import Permission # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_projects.py b/plugins/modules/ntnx_projects.py index 67afb5d8c..9386d2ac9 100644 --- a/plugins/modules/ntnx_projects.py +++ b/plugins/modules/ntnx_projects.py @@ -238,7 +238,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: "test-ansible-project-1" desc: desc-123 subnets: @@ -357,17 +357,17 @@ from copy import deepcopy # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.idempotence_identifiers import ( # noqa: E402 - IdempotenceIdenitifiers, -) -from ..module_utils.prism.projects import Project # noqa: E402 -from ..module_utils.prism.projects_internal import ProjectsInternal # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import ( # noqa: E402 extract_uuids_from_references_list, remove_param_with_none_value, strip_extra_attrs, ) +from ..module_utils.v3.prism.idempotence_identifiers import ( # noqa: E402 + IdempotenceIdenitifiers, +) +from ..module_utils.v3.prism.projects import Project # noqa: E402 +from ..module_utils.v3.prism.projects_internal import ProjectsInternal # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_projects_info.py b/plugins/modules/ntnx_projects_info.py index 17502b965..fc9a1ca45 100644 --- a/plugins/modules/ntnx_projects_info.py +++ b/plugins/modules/ntnx_projects_info.py @@ -41,7 +41,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" 
nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false filter: name: "test-ansible-project-7" @@ -50,7 +50,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false register: result - name: List project using project uuid criteria @@ -58,10 +58,9 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false project_uuid: "" register: result - """ RETURN = r""" api_version: @@ -153,10 +152,10 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.projects import Project # noqa: E402 -from ..module_utils.prism.projects_internal import ProjectsInternal # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.projects import Project # noqa: E402 +from ..module_utils.v3.prism.projects_internal import ProjectsInternal # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_protection_rules.py b/plugins/modules/ntnx_protection_rules.py index a27b7d6c6..71a640e5e 100644 --- a/plugins/modules/ntnx_protection_rules.py +++ b/plugins/modules/ntnx_protection_rules.py @@ -237,7 +237,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" - wait: True + wait: true name: test-ansible desc: test-ansible-desc protected_categories: @@ -268,7 +268,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" - wait: True + wait: true name: test-ansible desc: test-ansible-desc protected_categories: @@ -279,7 +279,7 @@ availability_zone_url: "{{primary_az_url}}" schedules: - source: - availability_zone_url: "{{primary_az_url}}" + availability_zone_url: "{{primary_az_url}}" 
destination: availability_zone_url: "{{recovery_az_url}}" protection_type: ASYNC @@ -314,7 +314,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" - wait: True + wait: true rule_uuid: "{{rule_uuid}}" name: test-ansible-updated desc: test-ansible-desc-updated @@ -325,7 +325,7 @@ availability_zone_url: "{{primary_az_url}}" schedules: - source: - availability_zone_url: "{{primary_az_url}}" + availability_zone_url: "{{primary_az_url}}" destination: availability_zone_url: "{{recovery_az_url}}" protection_type: ASYNC @@ -360,10 +360,9 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" - wait: True + wait: true rule_uuid: "{{ rule_uuid }}" register: result - """ RETURN = r""" @@ -541,8 +540,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.protection_rules import ProtectionRule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.protection_rules import ProtectionRule # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_protection_rules_info.py b/plugins/modules/ntnx_protection_rules_info.py index 218581dd9..0f69d5ec7 100644 --- a/plugins/modules/ntnx_protection_rules_info.py +++ b/plugins/modules/ntnx_protection_rules_info.py @@ -37,7 +37,6 @@ - Pradeepsingh Bhati (@bhati-pradeep) """ EXAMPLES = r""" - - name: List all Protection rules ntnx_protection_rules_info: nutanix_host: "{{ ip }}" @@ -45,7 +44,7 @@ nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" register: result - ignore_errors: True + ignore_errors: true - name: List protection rule using uuid criteria ntnx_protection_rules_info: @@ -55,7 +54,6 @@ validate_certs: "{{ validate_certs }}" rule_uuid: "{{ test_rule_uuid }}" register: result - """ RETURN = 
r""" rule_affected_entities: @@ -229,9 +227,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.protection_rules import ProtectionRule # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.protection_rules import ProtectionRule # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_recovery_plan_jobs.py b/plugins/modules/ntnx_recovery_plan_jobs.py index a1acfb586..d5ff3f694 100644 --- a/plugins/modules/ntnx_recovery_plan_jobs.py +++ b/plugins/modules/ntnx_recovery_plan_jobs.py @@ -166,7 +166,6 @@ state: "present" action: CLEANUP register: result - """ RETURN = r""" @@ -419,8 +418,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.recovery_plan_jobs import RecoveryPlanJob # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.recovery_plan_jobs import RecoveryPlanJob # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 # TO-DO: Add floating IP assignment spec diff --git a/plugins/modules/ntnx_recovery_plan_jobs_info.py b/plugins/modules/ntnx_recovery_plan_jobs_info.py index c12374c4d..60f8ac221 100644 --- a/plugins/modules/ntnx_recovery_plan_jobs_info.py +++ b/plugins/modules/ntnx_recovery_plan_jobs_info.py @@ -45,7 +45,7 @@ nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" register: result - ignore_errors: True + ignore_errors: true - name: List recovery plan job using uuid criteria ntnx_recovery_plan_jobs_info: @@ -55,7 +55,6 @@ validate_certs: "{{ validate_certs }}" job_uuid: "{{ job_uuid }}" register: result - """ RETURN = r""" api_version: @@ -305,9 +304,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from 
..module_utils.prism.recovery_plan_jobs import RecoveryPlanJob # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.recovery_plan_jobs import RecoveryPlanJob # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index 777ce9f2b..e18c8331e 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -473,7 +473,7 @@ name: "{{dr.recovery_site_network}}" - name: Update stage categories - check_mode: yes + check_mode: true ntnx_recovery_plans: nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -503,10 +503,9 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" - wait: True + wait: true plan_uuid: "{{ recovery_plan2.plan_uuid }}" register: result - """ RETURN = r""" @@ -874,8 +873,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.recovery_plans import RecoveryPlan # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.recovery_plans import RecoveryPlan # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 # TO-DO: Test floating IP assignments diff --git a/plugins/modules/ntnx_recovery_plans_info.py b/plugins/modules/ntnx_recovery_plans_info.py index 6ec1e4222..c6bb6373e 100644 --- a/plugins/modules/ntnx_recovery_plans_info.py +++ b/plugins/modules/ntnx_recovery_plans_info.py @@ -43,7 +43,7 @@ nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" register: result - ignore_errors: True + ignore_errors: true - name: List recovery plans using uuid criteria ntnx_recovery_plans_info: @@ -339,9 +339,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 
-from ..module_utils.prism.recovery_plans import RecoveryPlan # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.recovery_plans import RecoveryPlan # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_recovery_point_replicate_v2.py b/plugins/modules/ntnx_recovery_point_replicate_v2.py new file mode 100644 index 000000000..a9608b738 --- /dev/null +++ b/plugins/modules/ntnx_recovery_point_replicate_v2.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_recovery_point_replicate_v2 +short_description: Replicate recovery points +version_added: 2.0.0 +description: + - Replicate recovery points using external ID + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Recovery point external ID + type: str + required: true + pc_ext_id: + description: + - External ID of the target Prism Central(PC) + - Notes Use remote cluster uuid from availability zone info + type: str + cluster_ext_id: + description: + - External ID of the target cluster + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Replicate a Recovery Point + nutanix.ncp.ntnx_recovery_point_replicate_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" + pc_ext_id: "63bebabf-744c-48ff-a6d7-cb028707f972" + cluster_ext_id: "000620a9-8183-2553-1fc3-ac1f6b6029c1" + 
register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - Response for replicating recovery points + - Task details + returned: always + type: dict + sample: + { + "cluster_ext_ids": [ + "00061fa4-ef93-7dd8-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-09-04T10:18:39.599580+00:00", + "completion_details": [ + { + "name": "recoveryPointExtId", + "value": "6ec8e20d-5662-404f-a475-4ac569521f82" + } + ], + "created_time": "2024-09-04T10:17:47.283752+00:00", + "entities_affected": [ + { + "ext_id": "1ca2963d-77b6-453a-ae23-2c19e7a954a3", + "rel": "dataprotection:config:recovery-point" + }, + { + "ext_id": "522670d7-e92d-45c5-9139-76ccff6813c2", + "rel": "dataprotection:config:vm-recovery-point" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:c3f6cc70-fda6-4133-a97c-58802d58186a", + "is_cancelable": false, + "last_updated_time": "2024-09-04T10:18:39.599579+00:00", + "legacy_error_message": null, + "operation": "EntitySnapshotReplicate", + "operation_description": "Replicate Recovery Point", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-09-04T10:17:47.300538+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": null, + "warnings": null + } +changed: + description: Indicates if any change is made + returned: always + type: bool + sample: true +error: + description: Error message if any + returned: when an error occurs + type: str + sample: null +task_ext_id: + description: The external ID of the task + returned: always + type: str + sample: "ZXJnb24=:c3f6cc70-fda6-4133-a97c-58802d58186a" +ext_id: + description: External ID of the recovery point + returned: always + type: str + sample: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" +failed: + description: Indicates if the task failed + returned: always + type: bool + sample: false +""" +import traceback # noqa: E402 +import warnings # noqa: E402 + +from 
ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.data_protection.api_client import ( # noqa: E402 + get_etag, + get_recovery_point_api_instance, +) +from ..module_utils.v4.data_protection.helpers import get_recovery_point # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_dataprotection_py_client as data_protection_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as data_protection_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + pc_ext_id=dict(type="str"), + cluster_ext_id=dict(type="str"), + ) + return module_args + + +def replicate_recovery_point_with_ext_id(module, result): + recovery_points = get_recovery_point_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + sg = SpecGenerator(module) + default_spec = data_protection_sdk.RecoveryPointReplicationSpec() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating replicate recovery point Spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + current_spec = get_recovery_point(module, recovery_points, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + module.fail_json(msg="Failed to get etag for recovery point", 
**result) + + kwargs = {"if_match": etag} + + resp = None + try: + resp = recovery_points.replicate_recovery_point( + extId=ext_id, body=spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while replicating recovery point", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_dataprotection_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + replicate_recovery_point_with_ext_id(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_recovery_point_restore_v2.py b/plugins/modules/ntnx_recovery_point_restore_v2.py new file mode 100644 index 000000000..07075ecf9 --- /dev/null +++ b/plugins/modules/ntnx_recovery_point_restore_v2.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_recovery_point_restore_v2 +short_description: Restore recovery points, Creates a clone of the VM/VG from the selected recovery point +version_added: 2.0.0 +description: + - Restore recovery points using external ID + - This module uses PC v4 APIs based SDKs 
+options: + ext_id: + description: + - External ID to restore recovery point + type: str + required: true + cluster_ext_id: + description: + - By default, recovery points are restored to their associated location reference. + - For cloud-based recovery points without a location reference, the client must specify the cluster's external identifier for restoration. + type: str + vm_recovery_point_restore_overrides: + description: + - List of specifications to restore a specific VM recovery point(s) that are a part of the top-level recovery point. + - A specific VM recovery point can be selected for restore by specifying its external identifier along with override specification (if any). + type: list + elements: dict + suboptions: + vm_recovery_point_ext_id: + description: + - External identifier of a VM recovery point, that is a part of the top-level recovery point. + type: str + volume_group_recovery_point_restore_overrides: + description: + - List of specifications to restore a specific volume group recovery point(s) that are a part of the top-level recovery point. + - A specific volume group recovery point can be selected for restore by specifying its external identifier along with override specification\ + (if any). + type: list + elements: dict + suboptions: + volume_group_recovery_point_ext_id: + description: + - External identifier of a volume group recovery point, that is a part of the top-level recovery point. + type: str + volume_group_override_spec: + description: + - Protected resource/recovery point restore that overrides the volume group configuration. + - The specified properties will be overridden for the restored volume group. + type: dict + suboptions: + name: + description: + - The name of the restored volume group. 
+ type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Restore one of the VM recovery point from a recovery point + nutanix.ncp.ntnx_recovery_point_restore_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "6f4ffcee-1dc4-4982-9401-aa1f65dd7177" + cluster_ext_id: "00061fa4-ef93-7dd8-185b-ac1f6b6f97e2" + vm_recovery_point_restore_overrides: + - vm_recovery_point_ext_id: "c92aa134-5586-4cb1-b731-1ecafda83c12" + register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - Response for restoring recovery point + - Task details + returned: always + type: dict + sample: { + "cluster_ext_ids": null, + "completed_time": "2024-09-03T06:23:43.729898+00:00", + "completion_details": [ + { + "name": "vmExtIds", + "value": "e44621b1-da4e-40d1-87b1-cbb640001347" + } + ], + "created_time": "2024-09-03T06:23:39.359388+00:00", + "entities_affected": [ + { + "ext_id": "6f4ffcee-1dc4-4982-9401-aa1f65dd7177", + "rel": "dataprotection:config:recovery-point" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:958ee0db-18f3-45c3-9b5f-2cecb9a8819e", + "is_cancelable": false, + "last_updated_time": "2024-09-03T06:23:43.729897+00:00", + "legacy_error_message": null, + "operation": "RestoreRecoveryPoint", + "operation_description": "Restore Recovery Point", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-09-03T06:23:39.373563+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:db656984-57cb-4aec-a36a-4d2d4fbd9dda", + "href": "https://10.44.76.48:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:db656984-57cb-4aec-a36a-4d2d4fbd9dda", + "rel": 
"subtask" + } + ], + "warnings": null + } +task_ext_id: + description: The external ID of the task + returned: always + type: str + sample: "ZXJnb24=:958ee0db-18f3-45c3-9b5f-2cecb9a8819e" +ext_id: + description: The external ID of the recovery point + returned: always + type: str + sample: "6f4ffcee-1dc4-4982-9401-aa1f65dd7177" +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + type: str + returned: when an error occurs + sample: "Failed to get etag for restoring recovery point" +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +vms_ext_ids: + description: List of VM external IDs + returned: when recovery point is associated with VMs + type: list + sample: ["522670d7-e92d-45c5-9139-76ccff6813c2", "522670d7-e92d-45c5-9139-76ccff6813c3"] +vgs_ext_ids: + description: List of Volume Group external IDs + returned: when recovery point is associated with Volume Groups + type: list + sample: ["322770d0-b67d-78d2-4963-96cvff6313q9", "322770d0-b67d-78d2-4963-96cvff6313q8"] +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.data_protection.api_client import ( # noqa: E402 + get_etag, + get_recovery_point_api_instance, +) +from ..module_utils.v4.data_protection.helpers import get_recovery_point # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_ext_id_from_task_completion_details, + wait_for_completion, +) +from 
..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_dataprotection_py_client as data_protection_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as data_protection_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + vm_recovery_point_restore_overrides_sub_spec = dict( + vm_recovery_point_ext_id=dict(type="str"), + ) + + volume_group_recovery_point_restore_overrides_sub_spec = dict( + volume_group_recovery_point_ext_id=dict(type="str"), + volume_group_override_spec=dict( + type="dict", + options=dict( + name=dict(type="str"), + ), + obj=data_protection_sdk.VolumeGroupOverrideSpec, + ), + ) + + module_args = dict( + ext_id=dict(type="str", required=True), + cluster_ext_id=dict(type="str"), + vm_recovery_point_restore_overrides=dict( + type="list", + elements="dict", + options=vm_recovery_point_restore_overrides_sub_spec, + obj=data_protection_sdk.VmRecoveryPointRestoreOverride, + ), + volume_group_recovery_point_restore_overrides=dict( + type="list", + elements="dict", + options=volume_group_recovery_point_restore_overrides_sub_spec, + obj=data_protection_sdk.VolumeGroupRecoveryPointRestoreOverride, + ), + ) + return module_args + + +def restore_recovery_points(module, result): + recovery_points = get_recovery_point_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + sg = SpecGenerator(module) + default_spec = data_protection_sdk.RecoveryPointRestorationSpec() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating restore recovery point Spec", **result) + + if module.check_mode: + 
result["response"] = strip_internal_attributes(spec.to_dict()) + return + + current_spec = get_recovery_point(module, recovery_points, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + module.fail_json( + msg="Failed to get etag for restoring recovery point", **result + ) + + kwargs = {"if_match": etag} + + resp = None + try: + resp = recovery_points.restore_recovery_point(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while restoring recovery point", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + vms_ext_ids = get_ext_id_from_task_completion_details( + resp, name=TASK_CONSTANTS.CompletetionDetailsName.VM_EXT_IDS + ) + if vms_ext_ids: + result["vms_ext_ids"] = vms_ext_ids + vgs_ext_ids = get_ext_id_from_task_completion_details( + resp, name=TASK_CONSTANTS.CompletetionDetailsName.VG_EXT_IDS + ) + if vgs_ext_ids: + result["vgs_ext_ids"] = vgs_ext_ids + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_dataprotection_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + restore_recovery_points(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_recovery_points_info_v2.py b/plugins/modules/ntnx_recovery_points_info_v2.py new file mode 100644 index 000000000..4ceddfd49 --- /dev/null +++ 
b/plugins/modules/ntnx_recovery_points_info_v2.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_recovery_points_info_v2 +short_description: Get recovery points info +version_added: 2.0.0 +description: + - Fetch specific recovery point info using external ID + - Fetch list of multiple recovery points info if external ID is not provided with optional filters + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External ID to fetch specific recovery point info + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Fetch recovery point using external id + nutanix.ncp.ntnx_recovery_points_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" + register: result + +- name: List all recovery points + nutanix.ncp.ntnx_recovery_points_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + register: result + +- name: Fetch details for a Recovery Point using Filters + nutanix.ncp.ntnx_recovery_points_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + filter: extId eq '840c6c3f-2b01-47d5-81fb-b285e45e89ba' + register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - Response for fetching recovery points info + - One recovery point info if External ID is provided + - List of multiple recovery points info if External ID is not provided + returned: always + type: 
dict + sample: + { + "creation_time": "2024-08-22T03:23:36.701254+00:00", + "expiration_time": "2092-09-09T06:37:42+00:00", + "ext_id": "840c6c3f-2b01-47d5-81fb-b285e45e89ba", + "links": null, + "location_agnostic_id": "e5e1a71b-4911-480d-a4d4-3a175797c401", + "location_references": [ + { + "location_ext_id": "00061fa4-ef93-7dd8-185b-ac1f6b6f97e2" + } + ], + "name": "test_abhi_RP_8:53:16 am, Aug 22", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "recovery_point_type": "CRASH_CONSISTENT", + "status": "COMPLETE", + "tenant_id": null, + "vm_recovery_points": [ + { + "application_consistent_properties": null, + "consistency_group_ext_id": null, + "disk_recovery_points": [ + { + "disk_ext_id": "839feff9-bac0-4a70-9523-82ea9e431517", + "disk_recovery_point_ext_id": "21d467f0-ccef-4733-91cc-f04db58a92eb" + }, + { + "disk_ext_id": null, + "disk_recovery_point_ext_id": "91aedb3c-39c9-4750-b553-6e8360d7c1ff" + } + ], + "ext_id": "b387359d-fa5c-4d58-9eb2-3af1a4976319", + "links": null, + "location_agnostic_id": "51264897-07a8-4292-831b-ae28a37135e5", + "tenant_id": null, + "vm_categories": null, + "vm_ext_id": "2e572ceb-d955-4ed7-956f-1c90acf5b5ad" + } + ], + "volume_group_recovery_points": null + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: false + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + type: str + returned: when an error occurs + sample: null + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false + +ext_id: + description: External ID of the recovery point + type: str + returned: when external ID of top level recovery point is provided + sample: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from 
..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.data_protection.api_client import ( # noqa: E402 + get_recovery_point_api_instance, +) +from ..module_utils.v4.data_protection.helpers import get_recovery_point # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_recovery_point_using_ext_id(module, recovery_points, result): + ext_id = module.params.get("ext_id") + resp = get_recovery_point(module, recovery_points, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_recovery_points(module, recovery_points, result): + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating recovery points info Spec", **result) + + try: + resp = recovery_points.list_recovery_points(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching recovery points info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + recovery_points = get_recovery_point_api_instance(module) + if module.params.get("ext_id"): + get_recovery_point_using_ext_id(module, recovery_points, result) + else: + 
get_recovery_points(module, recovery_points, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_recovery_points_v2.py b/plugins/modules/ntnx_recovery_points_v2.py new file mode 100644 index 000000000..4f30c436e --- /dev/null +++ b/plugins/modules/ntnx_recovery_points_v2.py @@ -0,0 +1,520 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_recovery_points_v2 +short_description: Create, Update, Delete recovery points +version_added: 2.0.0 +description: + - Create, Update Expiry Date, Delete recovery points + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - The state of the recovery point, whether to create, update or delete. + - present -> Create recovery point if external ID is not provided, Update recovery point if external ID is provided. + - absent -> Delete recovery point using the provided recovery point external ID + type: str + choices: ["present", "absent"] + + ext_id: + description: + - External ID of the top level recovery point. + - Required for updating expiry date and deleting recovery point. + type: str + + name: + description: + - Name of the recovery point. + type: str + + expiration_time: + description: + - The UTC date and time in ISO-8601 format when the current Recovery point expires. + type: str + + recovery_point_type: + description: + - Type of the recovery point. + choices: ["CRASH_CONSISTENT", "APPLICATION_CONSISTENT"] + type: str + + vm_recovery_points: + description: + - List of VM recovery point that are a part of the specified top-level recovery point. + - Note that a recovery point can contain a maximum number of 30 entities. 
These entities can be a combination of VM(s) and volume group(s). + type: list + elements: dict + suboptions: + vm_ext_id: + description: + - VM external identifier which is captured as a part of this recovery point. + type: str + application_consistent_properties: + description: + - User-defined application-consistent properties for the recovery point. + type: dict + suboptions: + application_consistent_properties_spec: + description: + - Application consistent properties spec. + type: dict + suboptions: + backup_type: + description: + - The backup type defines the criteria for selecting files for application-consistent recovery points on Windows VMs/agents. + - FULL_BACKUP, Backs up all files, updating their backup history. + - COPY_BACKUP, Backs up all files without updating their backup history. + type: str + choices: ["FULL_BACKUP", "COPY_BACKUP"] + required: true + should_include_writers: + description: + - Indicates whether the given set of VSS writers' UUIDs should be included or excluded from the \ + application consistent recovery point. + - By default, the value is set to false, indicating that all listed VSS writers' UUIDs will be excluded. + type: bool + default: false + writers: + description: + - List of VSS writer UUIDs that are used in an application consistent recovery point. + - The default values are the system and the registry writer UUIDs. + type: list + elements: str + should_store_vss_metadata: + description: + - Specifies whether to store VSS metadata for application-specific backup/restore. + - VSS metadata, including writer and requester details, is compressed into a .cab file \ + during backup and must be saved for restoration. + type: bool + default: false + + volume_group_recovery_points: + description: + - List of volume group recovery point that are a part of the specified top-level recovery point. + - Note that a recovery point can contain a maximum number of 30 entities. 
These entities can be a combination of VM(s) and volume group(s). + type: list + elements: dict + suboptions: + volume_group_ext_id: + description: + - Volume Group external identifier which is captured as part of this top level recovery point. + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: Create Recovery Point for 2 VMs and 2 Volume Groups + nutanix.ncp.ntnx_recovery_points_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + name: "Recovery Point 1" + expiration_time: "2024-09-30T14:15:22Z" + status: "COMPLETE" + recovery_point_type: "CRASH_CONSISTENT" + vm_recovery_points: + - vm_ext_id: "ac5aff0c-6c68-4948-9088-b903e2be0ce7" + - vm_ext_id: "3f50a1b2-4c3d-4e6a-9b8e-1a2b3c4d5e6f" + volume_group_recovery_points: + - volume_group_ext_id: "9b8a7c6d-5e4f-3a2b-1c0d-9e8f7a6b5c4d" + - volume_group_ext_id: "2d3e4f5a-6b7c-8d9e-0f1a-2b3c4d5e6f7g" + +- name: Update a Recovery Point + nutanix.ncp.ntnx_recovery_points_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + ext_id: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" + expiration_time: "2024-11-30T14:15:22Z" + +- name: Delete a Recovery Point + nutanix.ncp.ntnx_recovery_points_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: absent + ext_id: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" +""" +RETURN = r""" +response: + description: + - Response for the recovery point operation + - Recovery point details if C(wait) is True + - Task details if C(wait) is False + returned: always + type: dict + sample: { + "creation_time": "2024-09-03T06:15:25.920305+00:00", + "expiration_time": "2024-09-30T14:15:22+00:00", + "ext_id": 
"d492e754-1792-41a5-8960-e2e87c8fea7d", + "links": null, + "location_agnostic_id": "86275d66-80e0-4744-90e9-b5ed3ff573bf", + "location_references": [ + { + "location_ext_id": "00061fa4-ef93-7dd8-185b-ac1f6b6f97e2" + } + ], + "name": "Recovery Point 1", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "recovery_point_type": "CRASH_CONSISTENT", + "status": "COMPLETE", + "tenant_id": null, + "vm_recovery_points": [ + { + "application_consistent_properties": null, + "consistency_group_ext_id": null, + "disk_recovery_points": [ + { + "disk_ext_id": "14caad84-02e8-4425-8604-4e492ad89fa3", + "disk_recovery_point_ext_id": "94e61902-1954-4d54-917a-a28205454fce" + }, + { + "disk_ext_id": "870fa4d7-0999-4d39-91b7-c51640e3704c", + "disk_recovery_point_ext_id": "7e96075e-4786-4409-82bd-764e23d877a6" + } + ], + "ext_id": "d06acb73-b057-4f47-85b9-43ddaf8726a5", + "links": null, + "location_agnostic_id": "a71ac990-70f4-4552-96c3-6031215d4bbb", + "tenant_id": null, + "vm_categories": null, + "vm_ext_id": "ac5aff0c-6c68-4948-9088-b903e2be0ce7" + } + ], + "volume_group_recovery_points": null + } +task_ext_id: + description: The task external ID for the operation. + returned: always + type: str + sample: "ZXJnb24=:7cdb5481-dade-44a9-8239-7afbde1c1b82" +ext_id: + description: The external ID of the top level recovery point. + returned: always + type: str + sample: "d492e754-1792-41a5-8960-e2e87c8fea7d" +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs + sample: "Error occurred while creating recovery point" +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +warning: + description: Warning message if any + type: str + returned: when a warning occurs + sample: "Only Expiration time Updation is allowed. Can't update other fields." 
+""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 +from datetime import datetime # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.data_protection.api_client import ( # noqa: E402 + get_etag, + get_recovery_point_api_instance, +) +from ..module_utils.v4.data_protection.helpers import get_recovery_point # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_ext_id_from_task_completion_details, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_dataprotection_py_client as data_protection_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as data_protection_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + application_consistent_properties_obj_map = { + "application_consistent_properties_spec": data_protection_sdk.VssProperties, + } + application_consistent_properties_sub_spec = dict( + backup_type=dict( + type="str", choices=["FULL_BACKUP", "COPY_BACKUP"], required=True + ), + should_include_writers=dict(type="bool", default=False), + writers=dict(type="list", elements="str"), + should_store_vss_metadata=dict(type="bool", default=False), + ) + + vm_recovery_points_sub_spec = dict( + vm_ext_id=dict(type="str"), + application_consistent_properties=dict( + type="dict", + options=dict( + 
application_consistent_properties_spec=dict( + type="dict", options=application_consistent_properties_sub_spec + ) + ), + obj=application_consistent_properties_obj_map, + ), + ) + + volume_group_recovery_points_sub_spec = dict( + volume_group_ext_id=dict(type="str", required=True), + ) + + module_args = dict( + ext_id=dict(type="str"), + state=dict(type="str", choices=["present", "absent"], default="present"), + name=dict(type="str"), + expiration_time=dict(type="str"), + recovery_point_type=dict( + type="str", + choices=[ + "CRASH_CONSISTENT", + "APPLICATION_CONSISTENT", + ], + ), + vm_recovery_points=dict( + type="list", + elements="dict", + options=vm_recovery_points_sub_spec, + obj=data_protection_sdk.VmRecoveryPoint, + ), + volume_group_recovery_points=dict( + type="list", + elements="dict", + options=volume_group_recovery_points_sub_spec, + obj=data_protection_sdk.VolumeGroupRecoveryPoint, + ), + ) + return module_args + + +def create_recovery_point(module, result): + recovery_points = get_recovery_point_api_instance(module) + sg = SpecGenerator(module) + default_spec = data_protection_sdk.RecoveryPoint() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating create recovery point Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + try: + resp = recovery_points.create_recovery_point(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating recovery point", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_ext_id_from_task_completion_details( + task_status, 
name=TASK_CONSTANTS.CompletetionDetailsName.RECOVERY_POINT + ) + if ext_id: + resp = get_recovery_point(module, recovery_points, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def check_recovery_point_idempotency_without_expiration(old_spec, update_spec): + old_spec.pop("expiration_time") + update_spec.pop("expiration_time") + return old_spec == update_spec + + +def update_expiry_date_recovery_point(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + recovery_points = get_recovery_point_api_instance(module) + old_spec = get_recovery_point(module, recovery_points, ext_id) + + etag_value = get_etag(data=old_spec) + if not etag_value: + return module.fail_json( + "Unable to fetch etag for Updating Expiry Date", **result + ) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(old_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating update recovery point Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + old_expiration_time = old_spec.to_dict().get("expiration_time") + new_expiration_time = module.params.get("expiration_time") + + if new_expiration_time is None: + result[ + "error" + ] = "Expiration time is required for updating recovery point and other fields can't be updated." + module.fail_json(msg="Expiration time is required", **result) + + if int(old_expiration_time.timestamp()) == int( + datetime.fromisoformat( + new_expiration_time + ).timestamp() # Converted time to epochs + ): + if not check_recovery_point_idempotency_without_expiration( + old_spec.to_dict(), update_spec.to_dict() + ): + result["skipped"] = True + module.exit_json( + msg="Update of other operations is not supported. 
Only updation of Expiration time is allowed.", + **result, + ) + else: + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + elif not check_recovery_point_idempotency_without_expiration( + old_spec.to_dict(), update_spec.to_dict() + ): + result[ + "warning" + ] = "Only Expiration time Updation is allowed. Can't update other fields." + + expirationTimeSpec = data_protection_sdk.ExpirationTimeSpec() + expirationTimeSpec.expiration_time = new_expiration_time + + resp = None + try: + resp = recovery_points.set_recovery_point_expiration_time( + extId=ext_id, body=expirationTimeSpec, if_match=etag_value + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating recovery point", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(task_status.to_dict()) + resp = get_recovery_point(module, recovery_points, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def delete_recovery_point(module, result): + recovery_points = get_recovery_point_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + old_spec = get_recovery_point(module, recovery_points, ext_id) + + etag = get_etag(data=old_spec) + if not etag: + return module.fail_json("Unable to fetch etag for Deletion", **result) + + kwargs = {"if_match": etag} + + try: + resp = recovery_points.delete_recovery_point_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting recovery point", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + 
result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ( + "state", + "absent", + ("ext_id",), + ), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_dataprotection_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_expiry_date_recovery_point(module, result) + else: + create_recovery_point(module, result) + else: + delete_recovery_point(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_roles.py b/plugins/modules/ntnx_roles.py index 55d6a2559..385afc4ac 100644 --- a/plugins/modules/ntnx_roles.py +++ b/plugins/modules/ntnx_roles.py @@ -180,8 +180,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.roles import Role # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.roles import Role # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_roles_info.py b/plugins/modules/ntnx_roles_info.py index 29761c8c6..53e5e79ab 100644 --- a/plugins/modules/ntnx_roles_info.py +++ b/plugins/modules/ntnx_roles_info.py @@ -145,9 +145,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.roles import Role # 
noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.roles import Role # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_roles_info_v2.py b/plugins/modules/ntnx_roles_info_v2.py new file mode 100644 index 000000000..66d81d46c --- /dev/null +++ b/plugins/modules/ntnx_roles_info_v2.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_roles_info_v2 +short_description: Get roles info +version_added: 2.0.0 +description: + - Get roles info using roles external ID or list all roles + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - This module is used to get roles info + - It can be used to get roles info using roles external ID or list all roles + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List roles + nutanix.ncp.ntnx_roles_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: Fetch role using uuid criteria + nutanix.ncp.ntnx_roles_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + register: result + +- name: List roles using filter + nutanix.ncp.ntnx_roles_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + 
nutanix_password: "{{ password }}" + validate_certs: false + filter: "displayName eq 'Display_Name_Test'" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching roles info + - Returns roles info using roles external ID or list all roles + type: dict + returned: always + sample: + { + "accessible_clients": [ + "Networking", + "License Manager", + "AIOps", + ], + "accessible_entity_types": [ + "Remote Syslog Server", + "Disk", + "Rack", + "Rackable Unit", + "Host", + ], + "assigned_user_groups_count": 0, + "assigned_users_count": 1, + "client_name": "Prism", + "created_by": "", + "created_time": "2024-05-28T09:24:59.584696+00:00", + "description": "View-only admin of a Nutanix deployment. Has access to all infrastructure and platform features, but cannot make any changes.", + "display_name": "Prism Viewer", + "ext_id": "5171e1de-ac44-422b-8d40-dd0ff2bc2b5a", + "is_system_defined": true, + "last_updated_time": "2024-05-28T09:25:06.642563+00:00", + "links": null, + "operations": [ + "10245831-6013-48de-94c7-dd2e9e298b91", + "443c7424-86de-4ea4-8fa8-cc5fe894066b", + ], + "tenant_id": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import get_role_api_instance # noqa: E402 +from ..module_utils.v4.iam.helpers import get_role # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # 
noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_role_by_ext_id(module, roles, result): + ext_id = module.params.get("ext_id") + resp = get_role(module, roles, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_roles(module, roles, result): + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating roles info Spec", **result) + + try: + resp = roles.list_roles(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching roles info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + roles = get_role_api_instance(module) + if module.params.get("ext_id"): + get_role_by_ext_id(module, roles, result) + else: + get_roles(module, roles, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_roles_v2.py b/plugins/modules/ntnx_roles_v2.py new file mode 100644 index 000000000..250555c96 --- /dev/null +++ b/plugins/modules/ntnx_roles_v2.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + 
+from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_roles_v2 +short_description: Create, update, and delete roles. +description: + - This module allows you to create, update, and delete roles. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + state: + description: + - State of the role. Whether to create, update, or delete. + - If C(state) is C(present) and C(ext_id) is not provided, create a new role. + - If C(state) is C(present) and C(ext_id) is provided, update the role. + - If C(state) is C(absent), it will delete the role with the given External ID. + type: str + choices: ['present', 'absent'] + ext_id: + description: + - Role External ID. + - Required for updating or deleting the role. + type: str + required: false + display_name: + description: + - The display name for the Role. + type: str + required: false + description: + description: + - Description of the Role. + type: str + required: false + client_name: + description: + - Client that created the entity. + type: str + required: false + operations: + description: + - List of Operations external IDs for the Role. + - During update operation, all given operations will override the existing operations. + type: list + elements: str + required: false + wait: + description: + - Wait for the task to complete. 
+ type: bool + required: false + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create roles with operations + nutanix.ncp.ntnx_roles_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + display_name: "Display_Name_Test" + description: test-ansible-role-1-desc + operations: + - "251d4a4f-244f-4c84-70a9-8c8f68f9dff0" + - "0194fbfd-a5d1-49f8-46f4-e4b01d0abe47" + +- name: Update all fields + nutanix.ncp.ntnx_roles_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + display_name: "Display_Name_Test_Updated" + description: test-ansible-role-3-desc-updated + operations: + - "0194fbfd-a5d1-49f8-46f4-e4b01d0abe47" + +- name: delete role + nutanix.ncp.ntnx_roles_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" +""" + +RETURN = r""" +response: + description: + - Response of roles operations. 
+ - Roles details if C(wait) is True + - Task details if C(wait) is False + returned: always + type: dict + sample: + { + "accessible_clients": [ + "FilesManagerService" + ], + "accessible_entity_types": [ + "Files" + ], + "assigned_user_groups_count": 0, + "assigned_users_count": 0, + "client_name": "IAM", + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-06-24T12:59:39.966377+00:00", + "description": "test-ansible-role-1-desc", + "display_name": "role_name_iUPLDmxsGfOr", + "ext_id": "13a6657d-fa96-49e3-7307-87e93a1fec3d", + "is_system_defined": false, + "last_updated_time": "2024-06-24T12:59:39.966377+00:00", + "links": null, + "operations": [ + "251d4a4f-244f-4c84-70a9-8c8f68f9dff0", + "0194fbfd-a5d1-49f8-46f4-e4b01d0abe47" + ], + "tenant_id": null + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: The created role's External ID + returned: always + type: str + sample: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + +failed: + description: This indicates whether the task failed + returned: always + type: bool + sample: false + +""" + + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_etag, + get_role_api_instance, +) +from ..module_utils.v4.iam.helpers import get_role # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, 
+ strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + display_name=dict(type="str"), + description=dict(type="str"), + client_name=dict(type="str"), + operations=dict(type="list", elements="str"), + ) + return module_args + + +def create_role(module, result): + roles = get_role_api_instance(module) + sg = SpecGenerator(module) + default_spec = iam_sdk.Role() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create Roles Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = roles.create_role(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating role", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def check_roles_idempotency(old_spec, update_spec): + old_operations = old_spec.pop("operations") + new_operations = update_spec.pop("operations") + if old_spec != update_spec: + return False + if set(old_operations) != set(new_operations): + return False + return True + + +def update_role(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + roles = get_role_api_instance(module) + current_spec = get_role(module, roles, ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating 
roles update spec", **result) + + # check for idempotency + if check_roles_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = roles.update_role_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating role", + ) + + resp = get_role(module, roles, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def delete_role(module, result): + roles = get_role_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_role(module, roles, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for deleting role", **result) + + kwargs = {"if_match": etag} + + try: + resp = roles.delete_role_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting role", + ) + + result["changed"] = True + result["response"] = resp + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("display_name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_role(module, result) + else: + create_role(module, result) + else: + delete_role(module, result) + 
module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_route_tables_info_v2.py b/plugins/modules/ntnx_route_tables_info_v2.py new file mode 100644 index 000000000..3c3e39b7b --- /dev/null +++ b/plugins/modules/ntnx_route_tables_info_v2.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_route_tables_info_v2 +short_description: Route tables info module +version_added: 2.0.0 +description: + - This module fetches route tables information + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Route table external ID + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all route tables + nutanix.ncp.ntnx_route_tables_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: route_tables + +- name: Fetch route table by external_id + nutanix.ncp.ntnx_route_tables_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "251d4a4f-244f-4c84-70a9-8c8f68f9dff0" + register: route_table + +- name: List all route tables with filter + nutanix.ncp.ntnx_route_tables_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: vpcReference eq '251d4a4f-244f-4c84-70a9-8c8f68f9dff0' + register: route_tables_filter +""" +RETURN = r""" +response: + 
description: + - Response for fetching route table info + - Returns route table or list of multiple route tables details. + type: dict + returned: always + sample: + { + "extId": "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201", + "metadata": { + "ownerReferenceId": "00000000-0000-0000-0000-000000000000", + "ownerUserName": "admin" + }, + "vpcReference": "66a926de-c188-4121-8456-0b97cdf0f807" + } + +ext_id: + description: route table external ID + type: str + returned: always + sample: "63acaca5-ed45-415e-bfbd-19c9d9fd3dfd" + +failed: + description: Indicates if the request failed + type: bool + returned: always + +error: + description: Error message + type: str + returned: always + +changed: + description: Indicates if any changes were made during the operation + type: bool + returned: always + sample: False +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_route_tables_api_instance, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_route_table_using_ext_id(module, route_table_api_instance, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + try: + resp = route_table_api_instance.get_route_table_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching route table info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_route_tables(module, 
route_table_api_instance, result): + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params, extra_params=["expand"]) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating route table info Spec", **result) + + try: + resp = route_table_api_instance.list_route_tables(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching route tables info", + ) + if (resp is None) or (resp.to_dict().get("data") is None): + result["response"] = [] + else: + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + route_table_api_instance = get_route_tables_api_instance(module) + if module.params.get("ext_id"): + get_route_table_using_ext_id(module, route_table_api_instance, result) + else: + get_route_tables(module, route_table_api_instance, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_routes_info_v2.py b/plugins/modules/ntnx_routes_info_v2.py new file mode 100644 index 000000000..21c43c03b --- /dev/null +++ b/plugins/modules/ntnx_routes_info_v2.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_routes_info_v2 +short_description: Routes info module +version_added: 2.0.0 +description: + - This module fetches routes information + - This module uses PC v4 APIs based SDKs +options: + ext_id: + 
description: + - Route external ID + type: str + route_table_ext_id: + description: + - Route table external ID + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all routes + nutanix.ncp.ntnx_routes_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + route_table_ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + register: result + +- name: Fetch route by external ID + nutanix.ncp.ntnx_routes_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 5d4f9c3b-7ea1-4a92-bfae-9d3e7c2a1b45 + route_table_ext_id: 82b71a8c-2d9f-4f7c-ae65-2b4c2c3e3fbd + register: result + ignore_errors: true + +- name: List all routes with filter + nutanix.ncp.ntnx_routes_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + route_table_ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + filter: name eq 'route_name' + register: result + +- name: List all routes with limit + nutanix.ncp.ntnx_routes_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + route_table_ext_id: "13a6657d-fa96-49e3-7307-87e93a1fec3d" + limit: 1 + register: result +""" +RETURN = r""" +response: + description: + - Response for fetching routes info + - Returns route or list of multiple routes details. 
+ type: dict + returned: always + sample: + { + "extId": "ed3cf052-a96a-4222-8d60-143a33c77e9f", + "isActive": true, + "priority": 32768, + "metadata": { "ownerReferenceId": "00000000-0000-0000-0000-000000000000" }, + "name": "route_test", + "destination": + { "ipv4": { "ip": { "value": "10.0.0.1" }, "prefixLength": 32 } }, + "nexthop": + { + "nexthopName": "integration_test_Ext-Nat1", + "nexthopType": "EXTERNAL_SUBNET", + "nexthopReference": "5e98d574-c54c-4775-9f7a-8ebb2bc77d2c", + "nexthopIpAddress": + { "ipv4": { "value": "10.44.3.193", "prefixLength": 32 } }, + }, + "routeTableReference": "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201", + "vpcReference": "66a926de-c188-4121-8456-0b97cdf0f807", + "routeType": "STATIC", + } + +ext_id: + description: route external ID + type: str + returned: always + sample: "63acaca5-ed45-415e-bfbd-19c9d9fd3dfd" + +route_table_ext_id: + description: route table external ID + type: str + returned: always + sample: "7f9a76b3-922b-4aba-9d79-e7eb5cdab236" + +failed: + description: Indicates if the request failed + type: bool + returned: always + +error: + description: Error message + type: str + returned: always + +changed: + description: Indicates if any changes were made during the operation + type: bool + returned: always + sample: False +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import get_routes_api_instance # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + route_table_ext_id=dict(type="str", 
required=True), + ) + return module_args + + +def get_route(module, route_api_instance, result): + ext_id = module.params.get("ext_id") + route_table_ext_id = module.params.get("route_table_ext_id") + result["ext_id"] = ext_id + result["route_table_ext_id"] = route_table_ext_id + try: + resp = route_api_instance.get_route_for_route_table_by_id( + extId=ext_id, routeTableExtId=route_table_ext_id + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching route info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_routes(module, route_api_instance, result): + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params, extra_params=["expand"]) + route_table_ext_id = module.params.get("route_table_ext_id") + result["route_table_ext_id"] = route_table_ext_id + if err: + result["error"] = err + module.fail_json(msg="Failed generating route info Spec", **result) + + try: + resp = route_api_instance.list_routes_by_route_table_id( + routeTableExtId=route_table_ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching routes info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + route_api_instance = get_routes_api_instance(module) + if module.params.get("ext_id") and module.params.get("route_table_ext_id"): + get_route(module, route_api_instance, result) + else: + get_routes(module, route_api_instance, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git 
a/plugins/modules/ntnx_routes_v2.py b/plugins/modules/ntnx_routes_v2.py new file mode 100644 index 000000000..e93445fbe --- /dev/null +++ b/plugins/modules/ntnx_routes_v2.py @@ -0,0 +1,593 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: ntnx_routes_v2 +short_description: Module to create, update, and delete routes in route table in VPC +version_added: 2.0.0 +description: + - Create, Update, Delete routes in route table in VPC + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - State of the route. + - if C(state) is C(present) and route exteral ID is given, then route will be updated. + - if C(state) is C(present) and route external ID is absent, then route will be created. + - if C(state) is C(absent) and route external ID is given, then route will be deleted. + choices: ["present", "absent"] + type: str + ext_id: + description: + - External ID of the route. + type: str + vpc_reference: + description: + - Reference to the VPC where the route table is located. + type: str + metadata: + description: + - Metadata for the route. + required: false + type: dict + suboptions: + owner_reference_id: + description: + - Reference ID of the owner. + required: false + type: str + project_reference_id: + description: + - Reference ID of the project. + required: false + type: str + category_ids: + description: + - List of category IDs. + required: false + type: list + elements: str + name: + description: + - Name of the route. + required: false + type: str + description: + description: + - Description of the route. + required: false + type: str + destination: + description: + - Destination IP subnet for the route. 
+ required: false + type: dict + suboptions: + ipv4: + description: + - IPv4 destination IP subnet. + required: false + type: dict + suboptions: + ip: + description: + - IPv4 address. + type: dict + required: true + suboptions: + value: + description: + - IPv4 address. + required: true + type: str + prefix_length: + description: + - Prefix length of the subnet. + type: int + prefix_length: + description: + - Prefix length of the subnet. + type: int + required: true + ipv6: + description: + - IPv6 destination IP subnet. + required: false + type: dict + suboptions: + ip: + description: + - IPv6 address. + type: dict + required: true + suboptions: + value: + description: + - IPv4 address. + required: true + type: str + prefix_length: + description: + - Prefix length of the subnet. + type: int + prefix_length: + description: + - Prefix length of the subnet. + type: int + required: true + route_table_ext_id: + description: + - External ID of the route table. + type: str + required: true + external_routing_domain_reference: + description: + - Reference to the external routing domain. + required: false + type: str + route_type: + description: + - Type of the route. + type: str + choices: + - LOCAL + - STATIC + wait: + description: + - Wait for the task to complete + type: bool + default: true + nexthop: + description: + - Nexthop information for the route. + type: dict + suboptions: + nexthop_type: + description: + - Type of the nexthop. + type: str + required: true + choices: + - VPN_CONNECTION + - EXTERNAL_SUBNET + nexthop_reference: + description: + - Reference to the nexthop. + type: str + nexthop_ip_address: + description: + - IP address of the nexthop. + type: dict + suboptions: + ipv4: + description: + - IPv4 address. + type: dict + suboptions: + value: + description: + - IPv4 address. + type: str + required: true + prefix_length: + description: + - Prefix length of the subnet. + type: int + ipv6: + description: + - IPv6 address. 
+ type: dict + suboptions: + value: + description: + - IPv4 address. + type: str + required: true + prefix_length: + description: + - Prefix length of the subnet. + type: int +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create route + nutanix.ncp.ntnx_routes_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: "route_test" + description: "Route for testing" + vpc_reference: "c9a4b37d-5f8d-4a2a-b639-2d8e1f5a0c67" + route_table_ext_id: "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.1" + prefix_length: 32 + nexthop: + nexthop_type: "EXTERNAL_SUBNET" + nexthop_reference: "5e98d574-c54c-4775-9f7a-8ebb2bc77d2c" + metadata: + owner_reference_id: "a1f7c8d4-3b9e-4891-b7ae-6c2d4e5f9b21" + project_reference_id: "d7f1b9c3-6a5e-40d2-a1c4-e3f8b6a4d9f0" + register: result + +- name: Update route + nutanix.ncp.ntnx_routes_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: "route_test" + description: "Route for testing updated" + vpc_reference: "c9a4b37d-5f8d-4a2a-b639-2d8e1f5a0c67" + route_table_ext_id: "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201" + ext_id: "ed3cf052-a96a-4222-8d60-143a33c77e9f" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.2" + prefix_length: 32 + register: result + +- name: Delete route + nutanix.ncp.ntnx_routes_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "ed3cf052-a96a-4222-8d60-143a33c77e9f" + route_table_ext_id: "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201" + register: 
result +""" + +RETURN = r""" +response: + description: + - Response for creating, updating, or deleting routes. + - Route details if C(wait) is true and the operation is create or update. + - Task details if C(wait) is false or the operation is delete. + type: dict + returned: always + sample: + { + "extId": "ed3cf052-a96a-4222-8d60-143a33c77e9f", + "isActive": true, + "priority": 32768, + "metadata": + { "ownerReferenceId": "00000000-0000-0000-0000-000000000000" }, + "name": "route_test", + "destination": + { "ipv4": { "ip": { "value": "10.0.0.1" }, "prefixLength": 32 } }, + "nexthop": + { + "nexthopName": "integration_test_Ext-Nat1", + "nexthopType": "EXTERNAL_SUBNET", + "nexthopReference": "5e98d574-c54c-4775-9f7a-8ebb2bc77d2c", + "nexthopIpAddress": + { "ipv4": { "value": "10.44.3.193", "prefixLength": 32 } }, + }, + "routeTableReference": "7f9a76a3-922b-4aba-8d79-e7eb5cdaf201", + "vpcReference": "66a926de-c188-4121-8456-0b97cdf0f807", + "routeType": "STATIC", + } + +task_ext_id: + description: The task ext_id of the operation + type: str + returned: always + sample: "ZXJnb24=:3a2267ad-5e17-4813-b474-b5c7ea0aa848" + +ext_id: + description: The external ID of the route + type: str + returned: always + +route_table_ext_id: + description: The external ID of the route table + type: str + returned: always + +changed: + description: Indicates if any changes were made during the operation + type: bool + returned: always + +skipped: + description: Indicates if the operation was skipped + type: bool + returned: always + +failed: + description: Indicates if the operation failed + type: bool + returned: always + +error: + description: Error message if any + type: str + returned: always +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import 
remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_etag, + get_routes_api_instance, +) +from ..module_utils.v4.network.helpers import get_route # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client as net_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as net_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + ip_address_sub_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int"), + ) + + ipv4_subnet_spec = dict( + ip=dict( + type="dict", + options=ip_address_sub_spec, + obj=net_sdk.IPv4Address, + required=True, + ), + prefix_length=dict(type="int", required=True), + ) + + ipv6_subnet_spec = dict( + ip=dict( + type="dict", + options=ip_address_sub_spec, + obj=net_sdk.IPv6Address, + required=True, + ), + prefix_length=dict(type="int", required=True), + ) + + ip_address_spec = dict( + ipv4=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + ipv6=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + ) + + ip_subnet_spec = dict( + ipv4=dict(type="dict", options=ipv4_subnet_spec, obj=net_sdk.IPv4Subnet), + ipv6=dict(type="dict", options=ipv6_subnet_spec, obj=net_sdk.IPv6Subnet), + ) + + metadata_spec = dict( + owner_reference_id=dict(type="str"), + project_reference_id=dict(type="str"), + category_ids=dict(type="list", elements="str"), + ) + + 
nexthop_spec = dict( + nexthop_type=dict( + type="str", + choices=[ + "VPN_CONNECTION", + "EXTERNAL_SUBNET", + ], + obj=net_sdk.NexthopType, + required=True, + ), + nexthop_reference=dict(type="str"), + nexthop_ip_address=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + ) + + module_args = dict( + ext_id=dict(type="str"), + vpc_reference=dict(type="str"), + metadata=dict(type="dict", options=metadata_spec, obj=net_sdk.Metadata), + name=dict(type="str"), + description=dict(type="str"), + destination=dict(type="dict", options=ip_subnet_spec, obj=net_sdk.IPSubnet), + nexthop=dict(type="dict", options=nexthop_spec, obj=net_sdk.Nexthop), + route_table_ext_id=dict(type="str", required=True), + external_routing_domain_reference=dict(type="str"), + route_type=dict( + type="str", + choices=["LOCAL", "STATIC"], + obj=net_sdk.RouteType, + ), + ) + + return module_args + + +def create_route_table(module, route_api_instance, result): + sg = SpecGenerator(module) + default_spec = net_sdk.Route() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating create route spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + route_table_ext_id = module.params.get("route_table_ext_id") + try: + resp = route_api_instance.create_route_for_route_table( + routeTableExtId=route_table_ext_id, body=spec + ) + except Exception as e: + raise_api_exception( + module=module, exception=e, msg="API exception while creating route" + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.ROUTE + ) + if ext_id: + result["ext_id"] = ext_id + route = 
get_route(module, route_api_instance, ext_id, route_table_ext_id) + result["response"] = strip_internal_attributes(route.to_dict()) + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + return current_spec == update_spec + + +def update_route_table(module, route_api_instance, result): + ext_id = module.params.get("ext_id") + route_table_ext_id = module.params.get("route_table_ext_id") + result["ext_id"] = ext_id + result["route_table_ext_id"] = route_table_ext_id + current_spec = get_route(module, route_api_instance, ext_id, route_table_ext_id) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating update route spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + resp = None + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for updating route", **result) + + kwargs = {"if_match": etag} + try: + resp = route_api_instance.update_route_for_route_table_by_id( + routeTableExtId=route_table_ext_id, extId=ext_id, body=update_spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, exception=e, msg="API exception while updating route" + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + route = get_route(module, route_api_instance, ext_id, route_table_ext_id) + result["response"] = strip_internal_attributes(route.to_dict()) + result["changed"] = True + + +def delete_route_table(module, route_api_instance, result): + ext_id = module.params.get("ext_id") + route_table_ext_id = 
module.params.get("route_table_ext_id") + result["ext_id"] = ext_id + result["route_table_ext_id"] = route_table_ext_id + resp = None + try: + resp = route_api_instance.delete_route_for_route_table_by_id( + routeTableExtId=route_table_ext_id, extId=ext_id + ) + except Exception as e: + raise_api_exception( + module=module, exception=e, msg="API exception while deleting route" + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[["state", "absent", ["ext_id"]]], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "skipped": False, + } + state = module.params.get("state") + route_api_instance = get_routes_api_instance(module) + if state == "present": + if not module.params.get("ext_id"): + create_route_table(module, route_api_instance, result) + else: + update_route_table(module, route_api_instance, result) + elif state == "absent": + delete_route_table(module, route_api_instance, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_saml_identity_providers_info_v2.py b/plugins/modules/ntnx_saml_identity_providers_info_v2.py new file mode 100644 index 000000000..5b683c0db --- /dev/null +++ b/plugins/modules/ntnx_saml_identity_providers_info_v2.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_saml_identity_providers_info_v2 +short_description: Fetch SAML identity providers from Nutanix PC +version_added: 2.0.0 +description: + - Fetch a single or list of multiple identity providers + - if external id is provided, it will return the identity provider info + - if external id is not provided, it will return multiple identity providers + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Identity provider external ID + required: false + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: List identity providers + nutanix.ncp.ntnx_saml_identity_providers_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: result + +- name: List identity provider using name filter criteria + nutanix.ncp.ntnx_saml_identity_providers_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "name eq 'test_idp'" + register: result + +- name: List identity provider using ext_id + nutanix.ncp.ntnx_saml_identity_providers_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + register: result +""" +RETURN = r""" +response: + description: + - The response from the identity provider v4 API. + - it can be identity provider or multiple identity providers as per spec. 
+ returned: always + type: dict + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-07-01T05:20:48.365380+00:00", + "custom_attributes": null, + "email_attribute": "email", + "entity_issuer": "https://000.000.000.000:9440/api/iam/authn", + "ext_id": "5fa927db-dcf1-5fee-ad3f-dc2ee9e80915", + "groups_attribute": "groups", + "groups_delim": ",", + "idp_metadata": { + "certificate": null, + "entity_id": "http://test.test.com/adfs/services/trust", + "error_url": null, + "login_url": "https://test.test.com/adfs/ls/", + "logout_url": "https://test.test.com/adfs/ls/IdpInitiatedSignOn.asp", + "name_id_policy_format": "emailAddress" + }, + "idp_metadata_url": null, + "idp_metadata_xml": null, + "is_signed_authn_req_enabled": false, + "last_updated_time": "2024-07-01T05:20:48.365380+00:00", + "links": null, + "name": "ansible-saml", + "tenant_id": "59d5de78-a964-5746-8c6e-677c4c7a79df", + "username_attribute": "DibnCPQWWtZtansible-agvm1" + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_identity_provider_api_instance, +) +from ..module_utils.v4.iam.helpers import get_identity_provider # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# 
Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_identity_provider_by_ext_id(module, identity_providers, result): + ext_id = module.params.get("ext_id") + resp = get_identity_provider(module, identity_providers, ext_id=ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_identity_providers(module, identity_providers, result): + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating identity providers info Spec", **result) + + try: + resp = identity_providers.list_saml_identity_providers(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching identity providers info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + identity_providers = get_identity_provider_api_instance(module) + if module.params.get("ext_id"): + get_identity_provider_by_ext_id(module, identity_providers, result) + else: + get_identity_providers(module, identity_providers, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_saml_identity_providers_v2.py b/plugins/modules/ntnx_saml_identity_providers_v2.py new file mode 100644 index 000000000..aafaee323 --- /dev/null +++ b/plugins/modules/ntnx_saml_identity_providers_v2.py @@ -0,0 
+1,454 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_saml_identity_providers_v2 +short_description: Manage SAML identity providers in Nutanix PC +version_added: "2.0.0" +description: + - Create, Update, Delete SAML identity providers in Nutanix PC + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External ID of the Identity provider. + - Required for updating or deleting the Identity provider. + type: str + name: + description: + - Unique name of the IDP. + required: false + type: str + username_attribute: + description: + - SAML assertion Username attribute element. + required: false + type: str + email_attribute: + description: + - SAML assertion email attribute element. + required: false + type: str + groups_attribute: + description: + - SAML assertion groups attribute element. + required: false + type: str + groups_delim: + description: + - Delimiter is used to split the value of attribute into multiple groups. + required: false + type: str + idp_metadata_url: + description: + - Metadata url that provides IDP details. + required: false + type: str + idp_metadata_xml: + description: + - Base64 encoded metadata in XML format with IDP details. + required: false + type: str + idp_metadata: + description: + - Information of the IDP. + required: false + type: dict + suboptions: + entity_id: + description: + - Entity Identifier of Identity provider. + required: false + type: str + login_url: + description: + - Login URL of the Identity provider. + required: false + type: str + logout_url: + description: + - Logout URL of the Identity provider. + required: false + type: str + error_url: + description: + - Error URL of the Identity provider. 
+ required: false + type: str + certificate: + description: + - Certificate for verification. + required: false + type: str + name_id_policy_format: + description: + - Name ID Policy format. + required: false + type: str + choices: ["EMAILADDRESS","UNSPECIFIED","X509SUBJECTNAME","WINDOWSDOMAINQUALIFIEDNAME","ENCRYPTED","ENTITY","KERBEROS","PERSISTENT","TRANSIENT",] + custom_attributes: + description: + - SAML assertions for list of custom attribute elements. + required: false + type: list + elements: str + entity_issuer: + description: + - It will be used as Issuer in SAML authnRequest. + required: false + type: str + is_signed_authn_req_enabled: + description: + - Flag indicating signing of SAML authnRequests. + required: false + type: bool + state: + description: + - Specify state + - If C(state) is set to C(present) then module will create Identity provider. + - if C(state) is set to C(present) and C(ext_id) is given, then module will update Identity provider. + - If C(state) is set to C(absent) with C(ext_id), then module will delete Identity provider. 
+ choices: + - present + - absent +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + + +EXAMPLES = r""" +- name: Create identity provider + nutanix.ncp.ntnx_saml_identity_providers_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: "ansible-saml" + username_attribute: "test_name" + email_attribute: "email" + groups_attribute: "groups" + groups_delim: "," + idp_metadata_xml: "https://samltest.id/saml/idp" + is_signed_authn_req_enabled: true + state: present + register: result + ignore_errors: true +- name: Update identity provider + nutanix.ncp.ntnx_saml_identity_providers_v2: + ext_id: "59d5de78-a964-5746-8c6e-677c4c7a79df" + name: "ansible-saml" + username_attribute: "new_name2" + email_attribute: "email" + groups_attribute: "groups" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present +- name: Delete identity provider + nutanix.ncp.ntnx_saml_identity_providers_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "59d5de78-a964-5746-8c6e-677c4c7a79df" + register: result +""" + +RETURN = r""" +response: + description: + - Response for the Identity provider operations. + - Identity provider details if C(wait) is true. + - Task details if C(wait) is false. 
+ returned: always + type: dict + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-07-01T05:20:48.365380+00:00", + "custom_attributes": null, + "email_attribute": "email", + "entity_issuer": "https://000.000.000.000:9440/api/iam/authn", + "ext_id": "5fa927db-dcf1-5fee-ad3f-dc2ee9e80915", + "groups_attribute": "groups", + "groups_delim": ",", + "idp_metadata": { + "certificate": null, + "entity_id": "http://test.test.com/adfs/services/trust", + "error_url": null, + "login_url": "https://test.test.com/adfs/ls/", + "logout_url": "https://test.test.com/adfs/ls/IdpInitiatedSignOn.asp", + "name_id_policy_format": "emailAddress" + }, + "idp_metadata_url": null, + "idp_metadata_xml": null, + "is_signed_authn_req_enabled": false, + "last_updated_time": "2024-07-01T05:20:48.365380+00:00", + "links": null, + "name": "ansible-saml", + "tenant_id": "59d5de78-a964-5746-8c6e-677c4c7a79df", + "username_attribute": "DibnCPQWWtZtansible-agvm1" + } +changed: + description: + - Whether the identity provider is changed or not. + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +ext_id: + description: + - External ID of the Identity provider. + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +skipped: + description: + - Whether the operation is skipped or not. + - Will be returned if operation is skipped. 
+ type: bool + returned: always +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_etag, + get_identity_provider_api_instance, +) +from ..module_utils.v4.iam.helpers import get_identity_provider # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + idp_metadata_spec = dict( + entity_id=dict(type="str"), + login_url=dict(type="str"), + logout_url=dict(type="str"), + error_url=dict(type="str"), + certificate=dict(type="str"), + name_id_policy_format=dict( + type="str", + choices=[ + "EMAILADDRESS", + "UNSPECIFIED", + "X509SUBJECTNAME", + "WINDOWSDOMAINQUALIFIEDNAME", + "ENCRYPTED", + "ENTITY", + "KERBEROS", + "PERSISTENT", + "TRANSIENT", + ], + ), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + username_attribute=dict(type="str"), + email_attribute=dict(type="str"), + groups_attribute=dict(type="str"), + groups_delim=dict(type="str"), + idp_metadata_url=dict(type="str"), + idp_metadata_xml=dict(type="str"), + idp_metadata=dict( + type="dict", 
options=idp_metadata_spec, obj=iam_sdk.IdpMetadata + ), + custom_attributes=dict(type="list", elements="str"), + entity_issuer=dict(type="str"), + is_signed_authn_req_enabled=dict(type="bool"), + ) + return module_args + + +def create_identity_provider(module, identity_providers, result): + sg = SpecGenerator(module) + default_spec = iam_sdk.SamlIdentityProvider() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create identity providers spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = identity_providers.create_saml_identity_provider(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating identity provider", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def check_identity_providers_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + + return True + + +def update_identity_provider(module, identity_providers, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_identity_provider(module, identity_providers, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating identity providers update spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if check_identity_providers_idempotency( + current_spec.to_dict(), update_spec.to_dict() + ): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + resp = None + try: + resp = identity_providers.update_saml_identity_provider_by_id( + 
extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating identity provider", + ) + + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def delete_identity_provider(module, identity_providers, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_identity_provider(module, identity_providers, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for deleting identity provider", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = identity_providers.delete_saml_identity_provider_by_id( + extId=ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting identity provider", + ) + result["changed"] = True + if resp is None: + result["msg"] = "Identity Provider with ext_id: {} deleted successfully".format( + ext_id + ) + else: + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + identity_providers = get_identity_provider_api_instance(module) + if state == "present": + if module.params.get("ext_id"): + update_identity_provider(module, identity_providers, result) + else: + create_identity_provider(module, identity_providers, result) + else: + delete_identity_provider(module, identity_providers, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git 
a/plugins/modules/ntnx_security_rules.py b/plugins/modules/ntnx_security_rules.py index a95be3ab6..3eec9a5ba 100644 --- a/plugins/modules/ntnx_security_rules.py +++ b/plugins/modules/ntnx_security_rules.py @@ -885,29 +885,29 @@ policy_mode: MONITOR target_group: categories: - apptype: Apache_Spark + apptype: Apache_Spark default_internal_policy: DENY_ALL inbounds: - categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps protocol: icmp: - code: 1 type: 1 - categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps protocol: tcp: - start_port: 22 end_port: 80 - categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps protocol: udp: - start_port: 82 @@ -918,8 +918,8 @@ prefix_length: 24 outbounds: - categories: - AppFamily: - - Databases + AppFamily: + - Databases register: result - name: update app security rule with outbound list ntnx_security_rules: @@ -931,9 +931,9 @@ - code: 1 type: 1 categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps register: result - name: update quarantine_rule by adding inbound and outbound list ntnx_security_rules: @@ -941,14 +941,14 @@ quarantine_rule: inbound: - categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps outbound: - categories: - AppFamily: - - Databases - - DevOps + AppFamily: + - Databases + - DevOps policy_mode: MONITOR allow_ipv6_traffic: true policy_hitlog:: true @@ -1099,9 +1099,10 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.security_rules import SecurityRule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.security_rules import SecurityRule # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.utils 
import check_for_idempotency # noqa: E402 def get_module_spec(): @@ -1259,7 +1260,7 @@ def update_security_rule(module, result): result["response"] = spec return - if utils.check_for_idempotency(spec, resp, state=state): + if check_for_idempotency(spec, resp, state=state): result["skipped"] = True module.exit_json(msg="Nothing to change") diff --git a/plugins/modules/ntnx_security_rules_info.py b/plugins/modules/ntnx_security_rules_info.py index 30ac5fec5..23bd2d02f 100644 --- a/plugins/modules/ntnx_security_rules_info.py +++ b/plugins/modules/ntnx_security_rules_info.py @@ -39,28 +39,28 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List security_rule using name filter criteria - ntnx_security_rules_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: "{{ security_rule.name }}" - kind: security_rule - register: result - - - name: List security_rule using length, offset, sort order and name sort attribute - ntnx_security_rules_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 1 - offset: 1 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result +- name: List security_rule using name filter criteria + ntnx_security_rules_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + name: "{{ security_rule.name }}" + kind: security_rule + register: result + +- name: List security_rule using length, offset, sort order and name sort attribute + ntnx_security_rules_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 1 + offset: 1 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result """ RETURN = r""" api_version: @@ -176,9 +176,9 @@ } """ -from ..module_utils.base_info_module 
import BaseInfoModule # noqa: E402 -from ..module_utils.prism.security_rules import SecurityRule # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.security_rules import SecurityRule # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_security_rules_info_v2.py b/plugins/modules/ntnx_security_rules_info_v2.py new file mode 100644 index 000000000..e4434de3b --- /dev/null +++ b/plugins/modules/ntnx_security_rules_info_v2.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_security_rules_info_v2 +short_description: Fetch network security policies info from Nutanix PC. +version_added: 2.0.0 +description: + - Fetch list of multiple network security policies info. + - Fetch specific network security policy info by ext_id. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External id to fetch specific network security policy info. 
+ type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: Get all policies + nutanix.ncp.ntnx_security_rules_info_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + register: result + +- name: Get particular policy + nutanix.ncp.ntnx_security_rules_info_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + ext_id: "569a018e-18ac-4813-b00f-2aa0d0005042" + register: result + +- name: Fetch certain policy using filters + nutanix.ncp.ntnx_security_rules_info_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + filter: "name eq 'rule1'" + register: result + +- name: Fetch only 5 policies using limit + nutanix.ncp.ntnx_security_rules_info_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + limit: 5 + register: result +""" +RETURN = r""" +response: + description: + - Network security policy info if ext_id is provided + - List of network security policies if ext_id is not provided + returned: always + type: dict + sample: { + "created_by": "00000000-0000-0000-0000-000000000000", + "creation_time": "2024-07-19T12:55:54.945000+00:00", + "description": "Ansible created rule updated", + "ext_id": "e8347a03-28a0-4eaa-9f43-64fd74cdee9e", + "is_hitlog_enabled": false, + "is_ipv6_traffic_allowed": false, + "is_system_defined": false, + "last_update_time": "2024-07-19T12:56:21.167000+00:00", + "links": null, + "name": "ansible-nsr-HqsWGHjQBsok2-updated", + "rules": [ + { + "description": "inbound1", + "ext_id": "81ae70d1-d010-4c70-999f-bbeba03ce64e", + "links": null, + "spec": { + "secured_group_action": "ALLOW", + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ] + }, + "tenant_id": null, + "type": "INTRA_GROUP" + }, + { + "description": "inbound4_updated", + "ext_id": "fb6860d5-bb3e-45ba-871f-4870474b5430", + 
"links": null, + "spec": { + "dest_address_group_references": null, + "dest_allow_spec": null, + "dest_category_references": null, + "dest_subnet": null, + "icmp_services": null, + "is_all_protocol_allowed": null, + "network_function_chain_reference": null, + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ], + "service_group_references": [ + "f77c1342-95e4-411e-9281-42ef2123d5b1" + ], + "src_address_group_references": null, + "src_allow_spec": null, + "src_category_references": null, + "src_subnet": { + "prefix_length": 24, + "value": "10.0.1.0" + }, + "tcp_services": null, + "udp_services": null + }, + "tenant_id": null, + "type": "APPLICATION" + }, + { + "description": "outbound1", + "ext_id": "fc9ad075-24a0-42a7-9977-2d9df462227d", + "links": null, + "spec": { + "dest_address_group_references": null, + "dest_allow_spec": null, + "dest_category_references": null, + "dest_subnet": { + "prefix_length": 24, + "value": "10.0.1.0" + }, + "icmp_services": null, + "is_all_protocol_allowed": true, + "network_function_chain_reference": null, + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ], + "service_group_references": null, + "src_address_group_references": null, + "src_allow_spec": null, + "src_category_references": null, + "src_subnet": null, + "tcp_services": null, + "udp_services": null + }, + "tenant_id": null, + "type": "APPLICATION" + } + ], + "scope": "ALL_VLAN", + "secured_groups": null, + "state": "MONITOR", + "tenant_id": null, + "type": "APPLICATION", + "vpc_references": null + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +ext_id: + description: External id of the network security policy if fetched by ext_id + returned: always 
+ type: str + sample: "e8347a03-28a0-4eaa-9f43-64fd74cdee9e" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_network_security_policy_api_instance, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_network_security_policy(module, result): + network_security_policies = get_network_security_policy_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = network_security_policies.get_network_security_policy_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching network security policy info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_network_security_policies(module, result): + network_security_policies = get_network_security_policy_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating network security policies info Spec", **result + ) + + try: + resp = network_security_policies.list_network_security_policies(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching network security policies info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + 
result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_network_security_policy(module, result) + else: + get_network_security_policies(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_security_rules_v2.py b/plugins/modules/ntnx_security_rules_v2.py new file mode 100644 index 000000000..09bc98cec --- /dev/null +++ b/plugins/modules/ntnx_security_rules_v2.py @@ -0,0 +1,968 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_security_rules_v2 +short_description: Manage network security policies in Nutanix Prism Central +version_added: "2.0.0" +description: + - This module allows you to create, update, and delete network security policies in Nutanix Prism Central. + - During update, the rules provided under C(rules) will replace existing rules. + - This module uses PC v4 APIs based SDKs +options: + wait: + description: + - Wait for the task to complete. + type: bool + default: true + state: + description: + - The state of the network security policy. + - If C(present) and C(ext_id) is not provided, the network security policy will be created. + - If C(present) and C(ext_id) is provided, the network security policy will be updated. + - If C(absent) and C(ext_id) is provided, the network security policy will be deleted. + type: str + ext_id: + description: + - External ID of the Flow Network Security Policy. 
+ type: str + name: + description: + - Name of the Flow Network Security Policy. + required: false + type: str + is_ipv6_traffic_allowed: + description: + - If Ipv6 Traffic needs to be allowed. + type: bool + is_hitlog_enabled: + description: + - If Hitlog needs to be enabled. + type: bool + description: + description: + - Description of policy + required: false + type: str + vpc_references: + description: + - A list of external ids for VPCs, used only when the scope of policy is a list of VPCs. + required: false + type: list + elements: str + type: + description: + - Defines the type of rules that can be used in a policy. + required: false + type: str + choices: + - QUARANTINE + - ISOLATION + - APPLICATION + policy_state: + description: + - Whether the policy is just to be saved, applied, monitored. + required: false + type: str + choices: + - SAVE + - MONITOR + - ENFORCE + scope: + description: + - Defines the scope of the policy. Currently, only ALL_VLAN and VPC_LIST are supported. + If scope is not provided, the default is set based on whether vpcReferences field is provided or not. + required: false + type: str + choices: + - ALL_VLAN + - ALL_VPC + - VPC_LIST + rules: + description: + - A list of rules that form a policy. For isolation policies, use isolation rules + - For application or quarantine policies, use application rules. + required: false + type: list + elements: dict + suboptions: + ext_id: + description: + - External ID of the rule. + required: false + type: str + description: + description: + - Description of rule + required: false + type: str + type: + description: + - The type for a rule - the value chosen here restricts which specification can be chosen. + type: str + choices: + - QUARANTINE + - TWO_ENV_ISOLATION + - APPLICATION + - INTRA_GROUP + - MULTI_ENV_ISOLATION + spec: + description: + - The specification of the rule. 
+ type: dict + suboptions: + two_env_isolation_rule_spec: + description: + - The specification of the two environment isolation rule. + required: false + type: dict + suboptions: + first_isolation_group: + description: + - Denotes the first group of category uuids that will be used in an isolation policy. + type: list + elements: str + second_isolation_group: + description: + - Denotes the second group of category uuids that will be used in an isolation policy. + type: list + elements: str + application_rule_spec: + description: + - The specification of the application rule. + - This can be used for application or quarantine policies. + required: false + type: dict + suboptions: + secured_group_category_references: + description: + - A set of categories of vms which is protected by a Network Security Policy and defined as a list of categories. + type: list + elements: str + src_allow_spec: + description: + - A specification to how allow mode traffic should be applied, either ALL or NONE. + type: str + choices: + - ALL + - NONE + dest_allow_spec: + description: + - A specification to how allow mode traffic should be applied, either ALL or NONE. + type: str + choices: + - ALL + - NONE + src_category_references: + description: + - List of categories that define a set of network endpoints as inbound. + type: list + elements: str + dest_category_references: + description: + - List of categories that define a set of network endpoints as outbound. + type: list + elements: str + src_subnet: + description: + - The source subnet/IP specification. + type: dict + suboptions: + value: + description: + - The value of the source subnet. + type: str + prefix_length: + description: + - The prefix length of the source subnet. + type: int + dest_subnet: + description: + - The destination subnet/IP specification. + type: dict + suboptions: + value: + description: + - The value of the destination subnet. 
+ type: str + prefix_length: + description: + - The prefix length of the destination subnet. + type: int + src_address_group_references: + description: + - A list of address group references. + type: list + elements: str + dest_address_group_references: + description: + - A list of address group references. + type: list + elements: str + service_group_references: + description: + - The list of service group references. + type: list + elements: str + is_all_protocol_allowed: + description: + - Denotes whether the rule allows traffic for all protocols. + - If set to true, the rule allows traffic for all protocols. + - If set to false or not specified, specifying at least one protocol service or service group is mandatory. + type: bool + tcp_services: + description: + - The list of TCP services. + type: list + elements: dict + suboptions: + start_port: + description: + - The start port of the TCP service. + type: int + end_port: + description: + - The end port of the TCP service. + type: int + udp_services: + description: + - The list of UDP services. + type: list + elements: dict + suboptions: + start_port: + description: + - The start port of the UDP service. + type: int + end_port: + description: + - The end port of the UDP service. + type: int + icmp_services: + description: + - Icmp Type Code List. + type: list + elements: dict + suboptions: + is_all_allowed: + description: + - Icmp service All Allowed. + type: bool + type: + description: + - Icmp service Type. Ignore this field if Type has to be ANY. + type: int + code: + description: + - Icmp service Code. Ignore this field if Code has to be ANY. + type: int + network_function_chain_reference: + description: + - A reference to the network function chain in the rule. + type: str + intra_entity_group_rule_spec: + description: + - The specification of the intra entity group rule. 
+ required: false + type: dict + suboptions: + secured_group_category_references: + description: + - The list of secured group category references. + type: list + elements: str + secured_group_action: + description: + - A specification to whether traffic between intra secured group entities should be allowed or denied. + type: str + choices: + - ALLOW + - DENY + multi_env_isolation_rule_spec: + description: + - The specification of the multi environment isolation rule. + required: false + type: dict + suboptions: + spec: + description: + - The specification of the multi environment isolation rule. + type: dict + suboptions: + all_to_all_isolation_group: + description: + - The specification of the all to all isolation group. + type: dict + suboptions: + isolation_groups: + description: + - The list of isolation groups. + type: list + elements: dict + suboptions: + group_category_references: + description: + - The list of group category references. + type: list + elements: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - Pradeepsingh Bhati (@bhati-pradeep) + +""" + +EXAMPLES = r""" +- name: Create policy with all vlan scope and using rules on sources and destinations + nutanix.ncp.ntnx_security_rules_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + name: "rule1" + description: "Ansible created rule" + type: "APPLICATION" + policy_state: "ENFORCE" + scope: "ALL_VLAN" + is_hitlog_enabled: true + is_ipv6_traffic_allowed: true + rules: + - description: "Intra group rule" + type: "INTRA_GROUP" + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + secured_group_action: "DENY" + - description: "Inbound rule for certain category withing service group rule" + type: "APPLICATION" + spec: + application_rule_spec: + 
secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + src_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d035" + service_group_references: + - "f83d766b-f3e8-42f0-a32f-223983848d03d" + - description: "Inbound rule from certain address group with some protocol rules" + type: "APPLICATION" + spec: + application_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + src_address_group_references: + - "2123123d-f3e8-42f0-a32f-24983848d03d" + tcp_services: + - start_port: 80 + end_port: 90 + - start_port: 100 + end_port: 110 + udp_services: + - start_port: 120 + end_port: 130 + - start_port: 140 + end_port: 150 + icmp_services: + - type: 3 + code: 1 + - type: 4 + code: 2 + - description: "Inbound rule from certain subnet with service group rule" + type: "APPLICATION" + spec: + application_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + src_subnet: + value: "10.0.0.0" + prefix_length: 24 + service_group_references: + - "f83d766b-f3e8-ansk2-a32f-24983848d03d" + - description: "Outbound rule to certain category with all traffic allowed" + type: "APPLICATION" + spec: + application_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + dest_category_references: + - "f83d766b-f3e8-42f0-a32f-kjasdjbjsdb" + is_all_protocol_allowed: true + - description: "Outbound rule to certain IP with all traffic allowed" + type: "APPLICATION" + spec: + application_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d03d" + dest_subnet: + value: "10.0.1.1" + prefix_length: 32 + is_all_protocol_allowed: true + - description: "Outbound rule to certain address group with all traffic allowed" + type: "APPLICATION" + spec: + application_rule_spec: + secured_group_category_references: + - "f83d766b-f3e8-42f0-a32f-24983848d032" + dest_address_group_references: + - "f83d766b-bwh2d-42f0-a32f-24983848d032" + 
is_all_protocol_allowed: true + register: result + +- name: Update the policy state + nutanix.ncp.ntnx_security_rules_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + ext_id: "f83d766b-f3e8-42f0-a32f-24983h3tr8d032" + policy_state: "ENFORCE" + register: result + +- name: Delete policy + nutanix.ncp.ntnx_security_rules_v2: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + ext_id: "f83d766b-f3e8-42f0-a32f-24983h3tr8d032" + register: result +""" + + +RETURN = r""" +response: + description: + - If C(wait) is set to C(true), the response will contain the security policy details. + - If C(wait) is set to C(false), the response will contain the task details. + - For delete operation, the response will contain the task details. + returned: always + type: dict + sample: { + "created_by": "00000000-0000-0000-0000-000000000000", + "creation_time": "2024-07-19T12:55:54.945000+00:00", + "description": "Ansible created rule updated", + "ext_id": "e8347a03-28a0-4eaa-9f43-64fd74cdee9e", + "is_hitlog_enabled": false, + "is_ipv6_traffic_allowed": false, + "is_system_defined": false, + "last_update_time": "2024-07-19T12:56:21.167000+00:00", + "links": null, + "name": "ansible-nsr-HqsWGHjQBsok2-updated", + "rules": [ + { + "description": "inbound1", + "ext_id": "81ae70d1-d010-4c70-999f-bbeba03ce64e", + "links": null, + "spec": { + "secured_group_action": "ALLOW", + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ] + }, + "tenant_id": null, + "type": "INTRA_GROUP" + }, + { + "description": "inbound4_updated", + "ext_id": "fb6860d5-bb3e-45ba-871f-4870474b5430", + "links": null, + "spec": { + "dest_address_group_references": null, + "dest_allow_spec": null, + "dest_category_references": null, + "dest_subnet": null, + "icmp_services": null, + "is_all_protocol_allowed": null, + "network_function_chain_reference": null, + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ], + 
"service_group_references": [ + "f77c1342-95e4-411e-9281-42ef2123d5b1" + ], + "src_address_group_references": null, + "src_allow_spec": null, + "src_category_references": null, + "src_subnet": { + "prefix_length": 24, + "value": "10.0.1.0" + }, + "tcp_services": null, + "udp_services": null + }, + "tenant_id": null, + "type": "APPLICATION" + }, + { + "description": "outbound1", + "ext_id": "fc9ad075-24a0-42a7-9977-2d9df462227d", + "links": null, + "spec": { + "dest_address_group_references": null, + "dest_allow_spec": null, + "dest_category_references": null, + "dest_subnet": { + "prefix_length": 24, + "value": "10.0.1.0" + }, + "icmp_services": null, + "is_all_protocol_allowed": true, + "network_function_chain_reference": null, + "secured_group_category_references": [ + "569a018e-18ac-4813-b00f-2aa0d0005042" + ], + "service_group_references": null, + "src_address_group_references": null, + "src_allow_spec": null, + "src_category_references": null, + "src_subnet": null, + "tcp_services": null, + "udp_services": null + }, + "tenant_id": null, + "type": "APPLICATION" + } + ], + "scope": "ALL_VLAN", + "secured_groups": null, + "state": "MONITOR", + "tenant_id": null, + "type": "APPLICATION", + "vpc_references": null + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +skipped: + description: Flag is module operation is skipped due to no state changes + returned: always + type: bool + sample: false +ext_id: + description: The created security policy ext_id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: 
E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_etag, + get_network_security_policy_api_instance, +) +from ..module_utils.v4.flow.helpers import get_network_security_policy # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_microseg_py_client as mic_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as mic_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + rule_spec_obj_map = { + "two_env_isolation_rule_spec": mic_sdk.TwoEnvIsolationRuleSpec, + "application_rule_spec": mic_sdk.ApplicationRuleSpec, + "intra_entity_group_rule_spec": mic_sdk.IntraEntityGroupRuleSpec, + "multi_env_isolation_rule_spec": mic_sdk.MultiEnvIsolationRuleSpec, + } + multi_env_isolation_rule_spec_obj_map = { + "all_to_all_isolation_group": mic_sdk.AllToAllIsolationGroup, + } + + icmp_service_spec = dict( + is_all_allowed=dict(type="bool"), + type=dict(type="int"), + code=dict(type="int"), + ) + + range_spec = dict( + start_port=dict(type="int"), + end_port=dict(type="int"), + ) + + ip_address_sub_spec = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + isolation_rule_spec = dict( + first_isolation_group=dict(type="list", elements="str"), + second_isolation_group=dict(type="list", elements="str"), + ) + application_rule_spec = dict( + 
secured_group_category_references=dict(type="list", elements="str"), + src_allow_spec=dict(type="str", choices=["ALL", "NONE"]), + dest_allow_spec=dict(type="str", choices=["ALL", "NONE"]), + src_category_references=dict(type="list", elements="str"), + dest_category_references=dict(type="list", elements="str"), + src_subnet=dict( + type="dict", options=ip_address_sub_spec, obj=mic_sdk.IPv4Address + ), + dest_subnet=dict( + type="dict", options=ip_address_sub_spec, obj=mic_sdk.IPv4Address + ), + src_address_group_references=dict(type="list", elements="str"), + dest_address_group_references=dict(type="list", elements="str"), + service_group_references=dict(type="list", elements="str"), + is_all_protocol_allowed=dict(type="bool"), + tcp_services=dict( + type="list", + elements="dict", + options=range_spec, + obj=mic_sdk.TcpPortRangeSpec, + ), + udp_services=dict( + type="list", + elements="dict", + options=range_spec, + obj=mic_sdk.UdpPortRangeSpec, + ), + icmp_services=dict( + type="list", + elements="dict", + options=icmp_service_spec, + obj=mic_sdk.IcmpTypeCodeSpec, + ), + network_function_chain_reference=dict(type="str"), + ) + entity_group_rule_spec = dict( + secured_group_category_references=dict(type="list", elements="str"), + secured_group_action=dict(type="str", choices=["ALLOW", "DENY"]), + ) + + isolation_groups_spec = dict( + group_category_references=dict(type="list", elements="str") + ) + + all_to_all_spec = dict( + isolation_groups=dict( + type="list", + elements="dict", + obj=mic_sdk.IsolationGroup, + options=isolation_groups_spec, + ) + ) + + all_to_all_isolation_group_spec = dict( + all_to_all_isolation_group=dict( + type="dict", + obj=mic_sdk.AllToAllIsolationGroup, + options=all_to_all_spec, + ) + ) + + multi_env_isolation_rule_spec = dict( + spec=dict( + type="dict", + options=all_to_all_isolation_group_spec, + obj=multi_env_isolation_rule_spec_obj_map, + ) + ) + + rule_spec = dict( + two_env_isolation_rule_spec=dict(type="dict", 
options=isolation_rule_spec), + application_rule_spec=dict(type="dict", options=application_rule_spec), + intra_entity_group_rule_spec=dict(type="dict", options=entity_group_rule_spec), + multi_env_isolation_rule_spec=dict( + type="dict", options=multi_env_isolation_rule_spec + ), + ) + + policy_rule = dict( + ext_id=dict(type="str"), + description=dict(type="str"), + type=dict( + type="str", + choices=[ + "QUARANTINE", + "TWO_ENV_ISOLATION", + "APPLICATION", + "INTRA_GROUP", + "MULTI_ENV_ISOLATION", + ], + ), + spec=dict( + type="dict", + options=rule_spec, + obj=rule_spec_obj_map, + mutually_exclusive=[ + ( + "two_env_isolation_rule_spec", + "application_rule_spec", + "intra_entity_group_rule_spec", + "multi_env_isolation_rule_spec", + ) + ], + ), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + type=dict(type="str", choices=["QUARANTINE", "ISOLATION", "APPLICATION"]), + policy_state=dict(type="str", choices=["SAVE", "MONITOR", "ENFORCE"]), + rules=dict( + type="list", + elements="dict", + options=policy_rule, + obj=mic_sdk.NetworkSecurityPolicyRule, + ), + scope=dict(type="str", choices=["ALL_VLAN", "ALL_VPC", "VPC_LIST"]), + vpc_references=dict(type="list", elements="str"), + is_ipv6_traffic_allowed=dict(type="bool"), + is_hitlog_enabled=dict(type="bool"), + ) + + return module_args + + +def create_network_security_policy(module, result): + network_security_policies = get_network_security_policy_api_instance(module) + + sg = SpecGenerator(module) + default_spec = mic_sdk.NetworkSecurityPolicy() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create network security policies Spec", **result + ) + + # since we have kept rule's state as `policy_state` in module spec + # due to conflict with `state` of default module spec + # we need to set `state` in create spec from explicitly + if module.params.get("policy_state"): + 
spec.state = module.params.get("policy_state") + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = network_security_policies.create_network_security_policy(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating network security policy", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.SECURITY_POLICY + ) + if ext_id: + resp = get_network_security_policy( + module, network_security_policies, ext_id + ) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_network_security_policies_idempotency(old_spec, update_spec): + + # check if numbers of rules are same + if len(old_spec.get("rules", [])) != len(update_spec.get("rules", [])): + return False + + # remove external ID from older spec's each rule. 
+ # since update will overlap all existing rules + for rule in old_spec.get("rules", []): + rule["ext_id"] = None + + # compare rules from old and new spec + old_rules = old_spec.pop("rules") + update_rules = update_spec.pop("rules") + for rule in update_rules: + if rule not in old_rules: + return False + + if old_spec != update_spec: + return False + return True + + +def update_network_security_policy(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + network_security_policies = get_network_security_policy_api_instance(module) + + current_spec = get_network_security_policy( + module, network_security_policies, ext_id=ext_id + ) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating network_security_policies update spec", **result + ) + + # due to conflict of spec.state with module state + if module.params.get("policy_state"): + update_spec.state = module.params.get("policy_state") + else: + update_spec.state = current_spec.state + + # check for idempotency + if check_network_security_policies_idempotency( + current_spec.to_dict(), update_spec.to_dict() + ): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + network_security_policies = get_network_security_policy_api_instance(module) + try: + resp = network_security_policies.update_network_security_policy_by_id( + extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating network_security_policy", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + 
wait_for_completion(module, task_ext_id, True) + resp = get_network_security_policy(module, network_security_policies, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_network_security_policy(module, result): + network_security_policies = get_network_security_policy_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_network_security_policy( + module, network_security_policies, ext_id=ext_id + ) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for deleting network security policy", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = network_security_policies.delete_network_security_policy_by_id( + extId=ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting network_security_policy", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_microseg_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_network_security_policy(module, result) + else: + create_network_security_policy(module, 
result) + else: + delete_network_security_policy(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_service_groups.py b/plugins/modules/ntnx_service_groups.py index 14d01d454..7459e27e2 100644 --- a/plugins/modules/ntnx_service_groups.py +++ b/plugins/modules/ntnx_service_groups.py @@ -83,7 +83,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: app_srvive_group desc: desc service_details: @@ -93,7 +93,7 @@ - "10-50" - "60-90" - "99" - any_icmp: True + any_icmp: true register: result - name: create service group with icmp @@ -101,7 +101,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false name: icmp_srvive_group desc: desc service_details: @@ -117,7 +117,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false service_group_uuid: "{{service_group_uuid}}" name: updated_name desc: updated_desc @@ -144,8 +144,8 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.service_groups import ServiceGroup # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.service_groups import ServiceGroup # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_service_groups_info.py b/plugins/modules/ntnx_service_groups_info.py index 7fe0c0a6d..844563b4d 100644 --- a/plugins/modules/ntnx_service_groups_info.py +++ b/plugins/modules/ntnx_service_groups_info.py @@ -39,28 +39,28 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List service_group using name filter criteria - ntnx_service_groups_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - 
nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: "{{ service_group.name }}" - kind: service_group - register: result - - - name: List service_group using length, offset, sort order and name sort attribute - ntnx_service_groups_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 1 - offset: 1 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result +- name: List service_group using name filter criteria + ntnx_service_groups_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + name: "{{ service_group.name }}" + kind: service_group + register: result + +- name: List service_group using length, offset, sort order and name sort attribute + ntnx_service_groups_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 1 + offset: 1 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result """ RETURN = r""" api_version: @@ -141,9 +141,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.service_groups import ServiceGroup # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.service_groups import ServiceGroup # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_service_groups_info_v2.py b/plugins/modules/ntnx_service_groups_info_v2.py new file mode 100644 index 000000000..135148eb1 --- /dev/null +++ b/plugins/modules/ntnx_service_groups_info_v2.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_service_groups_info_v2 +short_description: service_group info module +version_added: 2.0.0 +description: + - This module is used to get service groups info. + - It can be used to get all service groups or a particular service group using ext_id. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - Service group external id. + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: test getting particular service_group using external id + nutanix.ncp.ntnx_service_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: '{{ result.response.0.ext_id }}' + +- name: test getting all service groups + nutanix.ncp.ntnx_service_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" +RETURN = r""" +response: + description: + - Response for fetching service groups info. + - One service group info if External ID is provided. + - List of multiple service groups info if External ID is not provided. 
+ returned: always + type: dict + sample: { + "created_by": null, + "description": "IPv6 Behind NAT44 CPEs", + "ext_id": "8f6351f3-ccf2-4e05-ac11-79daa1ad8158", + "icmp_services": null, + "is_system_defined": true, + "links": null, + "name": "6a44", + "policy_references": null, + "tcp_services": [ + { + "end_port": 1027, + "start_port": 1027 + } + ], + "tenant_id": null, + "udp_services": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false + +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_service_groups_api_instance, +) +from ..module_utils.v4.flow.helpers import get_service_group # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_service_group_using_ext_id(module, result): + ext_id = module.params.get("ext_id") + service_groups = get_service_groups_api_instance(module) + resp = get_service_group(module, service_groups, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_service_groups(module, result): + 
service_groups = get_service_groups_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating service groups info Spec", **result) + + try: + resp = service_groups.list_service_groups(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching service groups info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_service_group_using_ext_id(module, result) + else: + get_service_groups(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_service_groups_v2.py b/plugins/modules/ntnx_service_groups_v2.py new file mode 100644 index 000000000..32dd78296 --- /dev/null +++ b/plugins/modules/ntnx_service_groups_v2.py @@ -0,0 +1,480 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_service_groups_v2 +short_description: "Create, Update, Delete service groups" +version_added: 2.0.0 +description: "Create, Update, Delete service groups" +options: + state: + description: + - State of the service group, whether to create, update or delete. 
+ - present -> Create service group if external ID is not provided, Update service group if external ID is provided. + - absent -> Delete service group with the given external ID. + - This module uses PC v4 APIs based SDKs + type: str + required: false + choices: ['present', 'absent'] + + ext_id: + description: + - Service group External ID. + - Required for updating or deleting service group. + type: str + + name: + description: + - Service group name. + type: str + + description: + description: + - Service group description. + type: str + + tcp_services: + description: + - List of TCP services. + type: list + elements: dict + suboptions: + start_port: + description: Starting port of the range. + type: int + end_port: + description: Ending port of the range. + type: int + + udp_services: + description: + - List of UDP services. + type: list + elements: dict + suboptions: + start_port: + description: Starting port of the range. + type: int + end_port: + description: Ending port of the range. + type: int + + icmp_services: + description: + - List of ICMP services. + type: list + elements: dict + suboptions: + is_all_allowed: + description: Indicates if all code types are allowed. + type: bool + type: + description: ICMP message type. + type: int + code: + description: ICMP message code. 
+ type: int + wait: + description: + - Wait for the task to complete + type: bool + default: true + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: create tcp service group + nutanix.ncp.ntnx_service_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: tcp_service_group + description: desc + tcp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 98 + end_port: 98 + - start_port: 99 + end_port: 99 + +- name: create udp service group + nutanix.ncp.ntnx_service_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: udp_service_group + description: desc + udp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 98 + end_port: 98 + - start_port: 99 + end_port: 99 + +- name: create icmp service group + nutanix.ncp.ntnx_service_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: icmp_service_group + description: desc + icmp_services: + - code: 10 + type: 1 + - code: 3 + type: 2 + +- name: Delete all created service groups + nutanix.ncp.ntnx_service_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "{{ item }}" +""" + +RETURN = r""" +response: + description: + - Response for service groups operations. + - Service group details if C(wait) is True. + - Task details if C(wait) is False. 
+ returned: always + type: dict + sample: { + "created_by": "00000000-0000-0000-0000-000000000000", + "description": "desc", + "ext_id": "24107d61-2b08-470f-afda-cd0350182b3b", + "icmp_services": null, + "is_system_defined": false, + "links": null, + "name": "service_group_sekEkbklgvOJ_2", + "policy_references": null, + "tcp_services": null, + "tenant_id": null, + "udp_services": [ + { + "end_port": 50, + "start_port": 10 + }, + { + "end_port": 90, + "start_port": 60 + }, + { + "end_port": 98, + "start_port": 98 + }, + { + "end_port": 99, + "start_port": 99 + } + ] + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: The created service group ext_id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + +failed: + description: This indicates whether the task failed + returned: always + type: bool + sample: false + +skipped: + description: This indicates whether the task was skipped due to idempotency checks + returned: always + type: bool + sample: false +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.flow.api_client import ( # noqa: E402 + get_etag, + get_service_groups_api_instance, +) +from ..module_utils.v4.flow.helpers import ( # noqa: E402 + get_service_group, + strip_service_group_extra_attributes, +) +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + 
get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_microseg_py_client as mic_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as mic_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + icmp_service_spec = dict( + is_all_allowed=dict(type="bool"), + type=dict(type="int"), + code=dict(type="int"), + ) + + range_spec = dict( + start_port=dict(type="int"), + end_port=dict(type="int"), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + tcp_services=dict( + type="list", + elements="dict", + options=range_spec, + obj=mic_sdk.TcpPortRangeSpec, + ), + udp_services=dict( + type="list", + elements="dict", + options=range_spec, + obj=mic_sdk.UdpPortRangeSpec, + ), + icmp_services=dict( + type="list", + elements="dict", + options=icmp_service_spec, + obj=mic_sdk.IcmpTypeCodeSpec, + ), + ) + + return module_args + + +def create_service_group(module, result): + service_groups = get_service_groups_api_instance(module) + + sg = SpecGenerator(module) + default_spec = mic_sdk.ServiceGroup() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create service groups Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = service_groups.create_service_group(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating service group", + ) + + task_ext_id = 
resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.SERVICE_GROUP + ) + if ext_id: + resp = get_service_group(module, service_groups, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_service_groups_idempotency(old_spec, update_spec): + strip_internal_attributes(old_spec) + strip_internal_attributes(update_spec) + if old_spec != update_spec: + return False + + return True + + +def update_service_group(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + service_groups = get_service_groups_api_instance(module) + current_spec = get_service_group(module, service_groups, ext_id) + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating service_groups update spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if check_service_groups_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + strip_service_group_extra_attributes(update_spec) + + resp = None + service_groups = get_service_groups_api_instance(module) + try: + resp = service_groups.update_service_group_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating service_group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + 
result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id, True) + resp = get_service_group(module, service_groups, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_service_group(module, result): + service_groups = get_service_groups_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_service_group(module, service_groups, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for deleting service group", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = service_groups.delete_service_group_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting service_group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_microseg_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_service_group(module, result) + else: + create_service_group(module, result) 
+ else: + delete_service_group(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_static_routes.py b/plugins/modules/ntnx_static_routes.py index edc2e0504..0d423aedd 100644 --- a/plugins/modules/ntnx_static_routes.py +++ b/plugins/modules/ntnx_static_routes.py @@ -104,7 +104,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false vpc_uuid: "{{ vpc.uuid }}" static_routes: - destination: "0.0.0.0/0" @@ -130,7 +130,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false vpc_uuid: "{{ vpc.uuid }}" remove_all_routes: true register: result @@ -271,12 +271,12 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.static_routes import StaticRoute # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import ( # noqa: E402 remove_param_with_none_value, strip_extra_attrs, ) +from ..module_utils.v3.prism.static_routes import StaticRoute # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_static_routes_info.py b/plugins/modules/ntnx_static_routes_info.py index 4e825e5d8..db917c488 100644 --- a/plugins/modules/ntnx_static_routes_info.py +++ b/plugins/modules/ntnx_static_routes_info.py @@ -167,7 +167,7 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.static_routes import StaticRoute # noqa: E402 +from ..module_utils.v3.prism.static_routes import StaticRoute # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_storage_containers_info_v2.py b/plugins/modules/ntnx_storage_containers_info_v2.py new file mode 100644 index 000000000..67effe500 --- /dev/null +++ 
b/plugins/modules/ntnx_storage_containers_info_v2.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_storage_containers_info_v2 +short_description: Retrieve information about Nutanix storage container from PC +version_added: 2.0.0 +description: + - This module retrieves information about Nutanix storage container from PC. + - Fetch particular storage container info using external ID + - Fetch multiple storage containers info with/without using filters, limit, etc. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the storage container. + - If not provided, multiple storage container info will be fetched. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: fetch storage container info using external ID + nutanix.ncp.ntnx_storage_containers_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + register: result + +- name: fetch all storage container info + nutanix.ncp.ntnx_storage_containers_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + register: result + +- name: fetch all storage container info with filter + nutanix.ncp.ntnx_storage_containers_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + filter: "name eq 'storage_container_name'" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching storage container info. + - Returns storage container info if ext_id is provided or list of multiple storage containers. 
+ type: dict + returned: always + sample: + { + "affinity_host_ext_id": null, + "cache_deduplication": "OFF", + "cluster_ext_id": "0006197f-3d06-ce49-1fc3-ac1f6b6029c1", + "cluster_name": "auto-cluster-prod-f30accd2eec1", + "compression_delay_secs": 0, + "container_ext_id": "547c01c4-19c2-4293-8a9c-43441c18d0c7", + "erasure_code": "OFF", + "erasure_code_delay_secs": null, + "ext_id": null, + "has_higher_ec_fault_domain_preference": false, + "is_compression_enabled": false, + "is_encrypted": null, + "is_inline_ec_enabled": false, + "is_internal": false, + "is_marked_for_removal": false, + "is_nfs_whitelist_inherited": true, + "is_software_encryption_enabled": false, + "links": [ + { + "href": "https://000.000.000.000:9440/api/clustermgmt/v4.0.b2/config/storage-containers/547c01c4-19c2-4293-8a9c-43441c18d0c7", + "rel": "storage-container" + }, + { + "href": "https://000.000.000.000:9440/api/clustermgmt/v4.0.b2/stats/storage-containers/547c01c4-19c2-4293-8a9c-43441c18d0c7", + "rel": "storage-container-stats" + } + ], + "logical_advertised_capacity_bytes": null, + "logical_explicit_reserved_capacity_bytes": 0, + "logical_implicit_reserved_capacity_bytes": 0, + "max_capacity_bytes": 4365702025514, + "name": "SelfServiceContainer", + "nfs_whitelist_address": null, + "on_disk_dedup": "OFF", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "replication_factor": 1, + "storage_pool_ext_id": "487c142e-6c41-4b10-9585-4feac6bd3c68", + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the storage container if given in input. 
+ type: str + returned: always + sample: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_storage_containers_api_instance, +) +from ..module_utils.v4.clusters_mgmt.helpers import get_storage_container # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_storage_container_by_ext_id(module, result): + ext_id = module.params.get("ext_id") + storage_containers = get_storage_containers_api_instance(module) + resp = get_storage_container(module, storage_containers, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_storage_containers(module, result): + storage_containers = get_storage_containers_api_instance(module) + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(module.params) + if err: + module.fail_json( + "Failed creating query parameters for fetching storage containers info" + ) + resp = None + try: + resp = storage_containers.list_storage_containers(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching storage containers info", + ) + + if getattr(resp, "data", None): + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + else: + result["response"] = [] + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + 
supports_check_mode=False,
+        skip_info_args=False,
+        mutually_exclusive=[("ext_id", "filter")],
+    )
+
+    remove_param_with_none_value(module.params)
+    result = {"changed": False, "error": None, "response": None}
+    if module.params.get("ext_id"):
+        get_storage_container_by_ext_id(module, result)
+    else:
+        get_storage_containers(module, result)
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ntnx_storage_containers_stats_v2.py b/plugins/modules/ntnx_storage_containers_stats_v2.py
new file mode 100644
index 000000000..054fd721d
--- /dev/null
+++ b/plugins/modules/ntnx_storage_containers_stats_v2.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Nutanix
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ntnx_storage_containers_stats_v2
+short_description: Retrieve stats about Nutanix storage container from PC
+version_added: 2.0.0
+description:
+  - Get Stats for a Storage Container
+  - This module uses PC v4 APIs based SDKs
+options:
+  ext_id:
+    description:
+      - The external ID of the storage container.
+    type: str
+    required: true
+  start_time:
+    description:
+      - The start time of the period for which stats should be reported.
+      - The value should be in extended ISO-8601 format.
+      - sample input time is 2024-07-31T12:41:56.955Z
+    type: str
+    required: true
+  end_time:
+    description:
+      - The end time of the period for which stats should be reported.
+      - The value should be in extended ISO-8601 format.
+      - sample input time is 2025-07-31T12:41:56.955Z
+    type: str
+    required: true
+  sampling_interval:
+    description:
+      - The sampling interval in seconds at which statistical data should be collected.
+ For example, if you want performance statistics every 30 seconds, then provide the value as 30. + type: int + required: false + stat_type: + description: + - The type of stats. + type: str + required: false + choices: + - SUM + - AVG + - MIN + - MAX + - COUNT + - LAST +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Fetch storage container stats during time interval + nutanix.ncp.ntnx_storage_containers_stats_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + start_time: 2024-07-31T12:41:56.955Z + end_time: 2025-07-31T12:41:56.955Z + register: result + +- name: Fetch storage container stats with all attributes + nutanix.ncp.ntnx_storage_containers_stats_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + start_time: 2024-07-31T12:41:56.955Z + end_time: 2025-07-31T12:41:56.955Z + sampling_interval: 30 + stat_type: "SUM" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching storage container stats. 
+ type: dict + returned: always + sample: + { + "container_ext_id": "547c01c4-19c2-4293-8a9c-43441c18d0c7", + "controller_avg_io_latencyu_secs": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 947 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 1061 + } + ], + "controller_avg_read_io_latencyu_secs": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 797 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 832 + } + ], + "controller_avg_write_io_latencyu_secs": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 2175 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 3182 + } + ], + "controller_io_bandwidthk_bps": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 53450 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 51019 + } + ], + "controller_num_iops": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 1247 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 1203 + } + ], + "controller_num_read_iops": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 1110 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 1086 + } + ], + "controller_num_write_iops": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 136 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 117 + } + ], + "controller_read_io_bandwidthk_bps": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 52171 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 49956 + } + ], + "controller_read_io_ratio_ppm": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 890744 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 902745 + } + ], + "controller_write_io_bandwidthk_bps": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 1278 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 1062 + } + ], + "controller_write_io_ratio_ppm": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + 
"value": 109255 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 97254 + } + ], + "data_reduction_clone_saving_ratio_ppm": null, + "data_reduction_compression_saving_ratio_ppm": null, + "data_reduction_dedup_saving_ratio_ppm": null, + "data_reduction_erasure_coding_saving_ratio_ppm": null, + "data_reduction_overall_post_reduction_bytes": null, + "data_reduction_overall_pre_reduction_bytes": null, + "data_reduction_saved_bytes": null, + "data_reduction_saving_ratio_ppm": null, + "data_reduction_snapshot_saving_ratio_ppm": null, + "data_reduction_thin_provision_saving_ratio_ppm": null, + "data_reduction_total_saving_ratio_ppm": null, + "data_reduction_zero_write_savings_bytes": null, + "ext_id": null, + "health": null, + "links": null, + "storage_actual_physical_usage_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 139663605760 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 139659902976 + } + ], + "storage_capacity_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 4138110191211 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 4138122024555 + } + ], + "storage_free_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 3998446585451 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 3998462121579 + } + ], + "storage_replication_factor": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 1 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 1 + } + ], + "storage_reserved_capacity_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 0 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 0 + } + ], + "storage_tier_das_sata_usage_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 0 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 0 + } + ], + "storage_tier_ssd_usage_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 139663605760 + }, + { + "timestamp": 
"2024-07-31T11:28:30+00:00", + "value": 139659902976 + } + ], + "storage_usage_bytes": [ + { + "timestamp": "2024-07-31T11:29:00+00:00", + "value": 139663605760 + }, + { + "timestamp": "2024-07-31T11:28:30+00:00", + "value": 139659902976 + } + ] + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the storage container . + type: str + returned: always + sample: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_storage_containers_api_instance, +) +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str", required=True), + start_time=dict(type="str", required=True), + end_time=dict(type="str", required=True), + sampling_interval=dict(type="int"), + stat_type=dict( + type="str", + choices=[ + "SUM", + "AVG", + "MIN", + "MAX", + "COUNT", + "LAST", + ], + ), + ) + + return module_args + + +def get_storage_container_stats(module, result): + storage_container = get_storage_containers_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + start_time = module.params.get("start_time") + end_time = module.params.get("end_time") + sampling_interval = module.params.get("sampling_interval") + stat_type = module.params.get("stat_type") + resp = None + try: + resp = storage_container.get_storage_container_stats( + extId=ext_id, + _startTime=start_time, + _endTime=end_time, + _samplingInterval=sampling_interval, + _statType=stat_type, + ) + 
except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching storage containers stats", + ) + if getattr(resp, "data", None): + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + else: + module.fail_json(msg="Failed fetching storage container stats", **result) + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + skip_info_args=True, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + get_storage_container_stats(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_storage_containers_v2.py b/plugins/modules/ntnx_storage_containers_v2.py new file mode 100644 index 000000000..04e85f217 --- /dev/null +++ b/plugins/modules/ntnx_storage_containers_v2.py @@ -0,0 +1,567 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_storage_containers_v2 +short_description: Manage storage containers in Nutanix Prism Central +description: + - This module allows you to create, update, and delete storage containers in Nutanix Prism Central. + - This module uses PC v4 APIs based SDKs +version_added: 2.0.0 +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will create storage container. + - if C(state) is set to C(present) and C(ext_id) is given, then module will update storage container. + - If C(state) is set to C(absent) with C(ext_id), then module will delete storage container. 
+ choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + owner_ext_id: + description: + - The external ID of the owner of the storage container. + required: false + type: str + ext_id: + description: + - The external ID of the storage container. + required: false + type: str + name: + description: + - The name of the storage container. + required: false + type: str + cluster_ext_id: + description: + - The external ID of the cluster where the storage container belongs. + required: false + type: str + logical_explicit_reserved_capacity_bytes: + description: + - The logical explicit reserved capacity of the storage container in bytes. + required: false + type: int + logical_advertised_capacity_bytes: + description: + - The logical advertised capacity of the storage container in bytes. + required: false + type: int + replication_factor: + description: + - The replication factor of the storage container. + required: false + type: int + nfs_whitelist_address: + description: + - The NFS whitelist addresses of the storage container. + required: false + type: list + elements: dict + suboptions: + ipv4: + description: + - The IPv4 address. + required: false + type: dict + suboptions: + value: + description: + - The value of the IPv4 address. + required: true + type: str + prefix_length: + description: + - The prefix length of the IPv4 address. + required: false + type: int + ipv6: + description: + - The IPv6 address. + required: false + type: dict + suboptions: + value: + description: + - The value of the IPv6 address. + required: true + type: str + prefix_length: + description: + - The prefix length of the IPv6 address. + required: false + type: int + fqdn: + description: + - The fully qualified domain name. + required: false + type: dict + suboptions: + value: + description: + - The value of the fully qualified domain name. 
+ required: true + type: str + erasure_code: + description: + - The erasure code setting. + required: false + type: str + choices: ['NONE', 'OFF', 'ON'] + is_inline_ec_enabled: + description: + - Whether inline erasure coding is enabled. + required: false + type: bool + has_higher_ec_fault_domain_preference: + description: + - Whether the storage container has higher erasure coding fault domain preference. + required: false + type: bool + erasure_code_delay_secs: + description: + - The delay in seconds for erasure coding. + required: false + type: int + cache_deduplication: + description: + - The cache deduplication setting. + required: false + type: str + choices: ['NONE', 'OFF', 'ON'] + on_disk_dedup: + description: + - The on-disk deduplication setting. + required: false + type: str + choices: ['NONE', 'OFF', 'POST_PROCESS'] + is_compression_enabled: + description: + - Whether compression is enabled. + required: false + type: bool + compression_delay_secs: + description: + - The delay in seconds for compression. + required: false + type: int + is_internal: + description: + - Whether the storage container is internal. + required: false + type: bool + is_software_encryption_enabled: + description: + - Whether software encryption is enabled. + required: false + type: bool + affinity_host_ext_id: + description: + - The external ID of the affinity host. + required: false + type: str + ignore_small_files: + description: + - Whether to ignore small files during delete operation. 
+ required: false + type: bool +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Alaa Bishtawi (@alaabishtawi) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create storage container with minimal spec + nutanix.ncp.ntnx_storage_containers_v2: + nutanix_host: + nutanix_username: + nutanix_password: + name: storage_container_name + cluster_ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + +- name: Create storage container + nutanix.ncp.ntnx_storage_containers_v2: + nutanix_host: + nutanix_username: + nutanix_password: + name: storage_container_name + cluster_ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + owner_ext_id: 12345678-1234-1234-1324-123456789012 + logical_explicit_reserved_capacity_bytes: 20 + logical_implicit_reserved_capacity_bytes: 100 + logical_advertised_capacity_bytes: 1073741824000 + on_disk_dedup: "OFF" + is_compression_enabled: true + compression_delay_secs: 3600 + is_internal: false + is_software_encryption_enabled: false + is_encrypted: false + is_nfs_whitelist_inherited: false + is_inline_ec_enabled: false + has_higher_ec_fault_domain_preference: true + +- name: Update storage container + nutanix.ncp.ntnx_storage_containers_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + name: storage_container_name + cluster_ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + owner_ext_id: 12345678-1234-1234-1324-123456789012 + logical_explicit_reserved_capacity_bytes: 20 + +- name: Delete storage container + nutanix.ncp.ntnx_storage_containers_v2: + nutanix_host: + nutanix_username: + nutanix_password: + ext_id: 00061de6-4a87-6b06-185b-ac1f6b6f97e2 + state: absent +""" + +RETURN = r""" +response: + description: + - Response for the storage container operation. + - Storage container details if C(wait) is true. + - Task details if C(wait) is false. 
+ type: dict + returned: always + sample: + { + "affinity_host_ext_id": null, + "cache_deduplication": "OFF", + "cluster_ext_id": "0006197f-3d06-ce49-1fc3-ac1f6b6029c1", + "cluster_name": "auto-cluster-prod-f30accd2eec1", + "compression_delay_secs": 0, + "container_ext_id": "57516342-7d8e-470f-91b8-ae310737ff8c", + "erasure_code": "OFF", + "erasure_code_delay_secs": null, + "ext_id": null, + "has_higher_ec_fault_domain_preference": false, + "is_compression_enabled": false, + "is_encrypted": null, + "is_inline_ec_enabled": false, + "is_internal": false, + "is_marked_for_removal": false, + "is_nfs_whitelist_inherited": true, + "is_software_encryption_enabled": false, + "links": null, + "logical_advertised_capacity_bytes": null, + "logical_explicit_reserved_capacity_bytes": 0, + "logical_implicit_reserved_capacity_bytes": 0, + "max_capacity_bytes": 4291605771923, + "name": "dIJXzxaJJkVFansible-ag1", + "nfs_whitelist_address": null, + "on_disk_dedup": "OFF", + "owner_ext_id": "00000000-0000-0000-0000-000000000000", + "replication_factor": null, + "storage_pool_ext_id": "487c142e-6c41-4b10-9585-4feac6bd3c68", + "tenant_id": null + } +task_ext_id: + description: + - Task external ID. + type: str + returned: always + sample: ZXJnb24=:d0fe946a-83b7-464d-bafb-4826282a75b1 +ext_id: + description: + - External ID of the storage container. + type: str + returned: always + sample: 4bc0962b-8fc1-4d04-b188-0a183c158e67 +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false +skipped: + description: This field indicates whether the task was skipped. For example during idempotency checks. 
+ returned: always + type: bool + sample: true +""" + +import traceback # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.clusters_mgmt.api_client import ( # noqa: E402 + get_etag, + get_storage_containers_api_instance, +) +from ..module_utils.v4.clusters_mgmt.helpers import get_storage_container # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_clustermgmt_py_client as clustermgmt_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as clustermgmt_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + ipv4_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + ipv6_spec = dict( + value=dict(type="str", required=True), + prefix_length=dict(type="int", required=False), + ) + fqdn_spec = dict( + value=dict(type="str", required=True), + ) + + nfs_whitelist_address_spec = dict( + ipv4=dict(type="dict", options=ipv4_spec, required=False), + ipv6=dict(type="dict", options=ipv6_spec, required=False), + fqdn=dict(type="dict", options=fqdn_spec, required=False), + ) + module_args = dict( + owner_ext_id=dict(type="str", required=False), + name=dict(type="str", required=False), + cluster_ext_id=dict(type="str", required=False), + logical_explicit_reserved_capacity_bytes=dict(type="int", required=False), + 
logical_advertised_capacity_bytes=dict(type="int", required=False), + replication_factor=dict(type="int", required=False), + nfs_whitelist_address=dict( + type="list", + elements="dict", + options=nfs_whitelist_address_spec, + required=False, + ), + erasure_code=dict(type="str", choices=["NONE", "OFF", "ON"]), + is_inline_ec_enabled=dict(type="bool", required=False), + has_higher_ec_fault_domain_preference=dict(type="bool", required=False), + erasure_code_delay_secs=dict(type="int", required=False), + cache_deduplication=dict(type="str", choices=["NONE", "OFF", "ON"]), + on_disk_dedup=dict(type="str", choices=["NONE", "OFF", "POST_PROCESS"]), + is_compression_enabled=dict(type="bool", required=False), + compression_delay_secs=dict(type="int", required=False), + is_internal=dict(type="bool", required=False), + is_software_encryption_enabled=dict(type="bool", required=False), + affinity_host_ext_id=dict(type="str", required=False), + ext_id=dict(type="str", required=False), + ignore_small_files=dict(type="bool", required=False), + ) + return module_args + + +def create_storage_container(module, storage_container_api, result): + sg = SpecGenerator(module) + default_spec = clustermgmt_sdk.StorageContainer() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create storage container spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + resp = None + cluster_ext_id = module.params.get("cluster_ext_id") + if not cluster_ext_id: + return module.fail_json( + "cluster_ext_id is required in case of creating storage container", **result + ) + try: + resp = storage_container_api.create_storage_container( + body=spec, X_Cluster_Id=cluster_ext_id + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while creating storage container", + ) + + task_ext_id = resp.data.ext_id + 
result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.STORAGE_CONTAINER + ) + if ext_id: + resp = get_storage_container(module, storage_container_api, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(old_spec, update_spec): + if old_spec == update_spec: + return True + return False + + +def update_storage_container(module, storage_container_api, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + current_spec = get_storage_container(module, storage_container_api, ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating update storage container spec", **result + ) + # Setting external ID to None in update spec as 'container_ext_id' is being used instead + if hasattr(current_spec, "ext_id"): + update_spec.ext_id = None + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + resp = None + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for updating storage container", **result + ) + + kwargs = {"if_match": etag} + try: + resp = storage_container_api.update_storage_container_by_id( + ext_id, update_spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while updating storage container", + ) + + 
task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_storage_container(module, storage_container_api, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_storage_container(module, storage_container_api, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + ignore_small_files = module.params.get("ignore_small_files") + try: + resp = storage_container_api.delete_storage_container_by_id( + extId=ext_id, ignore_small_files=ignore_small_files + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="API Exception while deleting storage container", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "present", ("cluster_ext_id",)), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_clustermgmt_py_client"), + exception=SDK_IMP_ERROR, + ) + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + storage_container_api = get_storage_containers_api_instance(module) + if state == "present": + if module.params.get("ext_id"): + update_storage_container(module, storage_container_api, result) + 
else: + create_storage_container(module, storage_container_api, result) + elif state == "absent": + delete_storage_container(module, storage_container_api, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_subnets.py b/plugins/modules/ntnx_subnets.py index 1be8363c0..63f7f3643 100644 --- a/plugins/modules/ntnx_subnets.py +++ b/plugins/modules/ntnx_subnets.py @@ -291,91 +291,91 @@ """ EXAMPLES = r""" - - name: VLAN subnet with IPAM IP pools and DHCP - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: VLAN subnet with IPAM IP pools and DHCP - vlan_subnet: - vlan_id: "{{vlan_subnets_ids.3}}" - virtual_switch: - name: "{{ virtual_switch.name }}" - cluster: - name: "{{ cluster.name }}" - ipam: - network_ip: "{{ network_ip }}" - network_prefix: "{{ network_prefix }}" - gateway_ip: "{{ gateway_ip_address }}" - ip_pools: - - start_ip: "{{ start_address }}" - end_ip: "{{ end_address }}" - dhcp: - dns_servers: "{{ dns_servers }}" - domain_search: "{{ domain_search }}" - domain_name: "{{ domain_name }}" - tftp_server_name: "{{ tftp_server_name }}" - boot_file: "{{ boot_file }}" - dhcp_server_ip: "{{ dhcp_server_address }}" - - - name: External subnet with NAT - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: " External subnet with NAT " - external_subnet: - vlan_id: "{{ vlan_id }}" - enable_nat: True - cluster: - name: "{{ cluster_name }}" - ipam: - network_ip: "{{ network_ip }}" - network_prefix: "{{ network_prefix }}" - gateway_ip: "{{ gateway_ip_address }}" - ip_pools: - - start_ip: "{{ dhcp.start_address }}" - end_ip: "{{ dhcp.end_address }}" - - start_ip: "{{ static.start_address }}" - end_ip: "{{ static.end_address }}" - - - name: Overlay Subnet with 
IP_pools and DHCP - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: Overlay Subnet with IP_pools and DHCP - overlay_subnet: - vpc: - name: "{{ vpc_name }}" - ipam: - network_ip: "{{ network_ip }}" - network_prefix: "{{ network_prefix }}" - gateway_ip: "{{ gateway_ip_address }}" - ip_pools: - - start_ip: "{{ start_address }}" - end_ip: "{{ end_address }}" - dhcp: - dns_servers: "{{ dns_servers }}" - domain_search: "{{ domain_search }}" - domain_name: "{{ domain_name }}" - tftp_server_name: "{{ tftp_server_name }}" - boot_file: "{{ boot_file }}" - - - name: Delete subnets - ntnx_subnets: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - subnet_uuid: "{{ subnet_uuid }}" +- name: VLAN subnet with IPAM IP pools and DHCP + ntnx_subnets: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: VLAN subnet with IPAM IP pools and DHCP + vlan_subnet: + vlan_id: "{{vlan_subnets_ids.3}}" + virtual_switch: + name: "{{ virtual_switch.name }}" + cluster: + name: "{{ cluster.name }}" + ipam: + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" + ip_pools: + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" + dhcp: + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file: "{{ boot_file }}" + dhcp_server_ip: "{{ dhcp_server_address }}" + +- name: External subnet with NAT + ntnx_subnets: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: " External subnet with NAT " + external_subnet: + vlan_id: "{{ vlan_id 
}}" + enable_nat: true + cluster: + name: "{{ cluster_name }}" + ipam: + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" + ip_pools: + - start_ip: "{{ dhcp.start_address }}" + end_ip: "{{ dhcp.end_address }}" + - start_ip: "{{ static.start_address }}" + end_ip: "{{ static.end_address }}" + +- name: Overlay Subnet with IP_pools and DHCP + ntnx_subnets: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: Overlay Subnet with IP_pools and DHCP + overlay_subnet: + vpc: + name: "{{ vpc_name }}" + ipam: + network_ip: "{{ network_ip }}" + network_prefix: "{{ network_prefix }}" + gateway_ip: "{{ gateway_ip_address }}" + ip_pools: + - start_ip: "{{ start_address }}" + end_ip: "{{ end_address }}" + dhcp: + dns_servers: "{{ dns_servers }}" + domain_search: "{{ domain_search }}" + domain_name: "{{ domain_name }}" + tftp_server_name: "{{ tftp_server_name }}" + boot_file: "{{ boot_file }}" + +- name: Delete subnets + ntnx_subnets: + state: absent + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + subnet_uuid: "{{ subnet_uuid }}" """ RETURN = r""" @@ -457,9 +457,9 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.subnets import Subnet # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.subnets import Subnet # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_subnets_info.py b/plugins/modules/ntnx_subnets_info.py index 99e7fcab5..4002f4427 100644 --- a/plugins/modules/ntnx_subnets_info.py +++ b/plugins/modules/ntnx_subnets_info.py @@ -33,29 +33,28 @@ - Dina AbuHijleh (@dina-abuhijleh) """ EXAMPLES = 
r""" - - name: List subnets using type filter criteria - ntnx_subnets_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - subnet_type: "{{ subnet.type }}" - kind: subnet - register: result - - - name: List subnets using length, offset, sort order and sort attribute - ntnx_subnets_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 2 - offset: 1 - sort_order: "DESCENDING" - sort_attribute: "vlan_id" - register: result - +- name: List subnets using type filter criteria + ntnx_subnets_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + subnet_type: "{{ subnet.type }}" + kind: subnet + register: result + +- name: List subnets using length, offset, sort order and sort attribute + ntnx_subnets_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 2 + offset: 1 + sort_order: "DESCENDING" + sort_attribute: "vlan_id" + register: result """ RETURN = r""" api_version: @@ -184,9 +183,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.subnets import Subnet # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.subnets import Subnet # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_subnets_info_v2.py b/plugins/modules/ntnx_subnets_info_v2.py new file mode 100644 index 000000000..80e77ef97 --- /dev/null +++ b/plugins/modules/ntnx_subnets_info_v2.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_subnets_info_v2 +short_description: subnet info module +version_added: 2.0.0 +description: + - Fetch list of subnets or subnet info using subnet external ID + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - subnet external ID + type: str + expand: + description: + - Expand the response with additional information + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all subnets + nutanix.ncp.ntnx_subnets_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + register: result + +- name: List subnet using uuid criteria + nutanix.ncp.ntnx_subnets_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + ext_id: "{{ subnet_ext_id }}" + register: result + +- name: List subnets using filter criteria and filter for subnet name + nutanix.ncp.ntnx_subnets_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + filter: "name eq '{{ test_subnet_name }}'" + +- name: List subnet using filter criteria and filter for cluster uuid + nutanix.ncp.ntnx_subnets_info_v2: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + filter: "clusterReference eq '{{ cluster_uuid }}'" +""" +RETURN = r""" +response: + description: + - Response for fetching subnets + - Returns subnet details + type: dict + returned: always + sample: + { + "bridge_name": "br0", + "cluster_name": null, + "cluster_reference": "0006197f-3d06-ce49-1fc3-ac1f6b6029c1", + "description": null, + "dhcp_options": { + "boot_file_name": "pxelinux.0", + "domain_name": "nutanix.com", + 
"domain_name_servers": [ + { + "ipv4": { + "prefix_length": 32, + "value": "8.8.8.8" + }, + "ipv6": null + }, + { + "ipv4": { + "prefix_length": 32, + "value": "8.8.4.4" + }, + "ipv6": null + } + ], + "ntp_servers": null, + "search_domains": [ + "calm.nutanix.com", + "eng.nutanix.com" + ], + "tftp_server_name": "['10.5.0.10']" + }, + "dynamic_ip_addresses": null, + "ext_id": "9cc4abba-f27d-40db-90ba-c1592dccaedf", + "hypervisor_type": "acropolis", + "ip_config": [ + { + "ipv4": { + "default_gateway_ip": { + "prefix_length": 32, + "value": "192.168.0.254" + }, + "dhcp_server_address": { + "prefix_length": 32, + "value": "192.168.0.253" + }, + "ip_subnet": { + "ip": { + "prefix_length": 32, + "value": "192.168.0.0" + }, + "prefix_length": 24 + }, + "pool_list": [ + { + "end_ip": { + "prefix_length": 32, + "value": "192.168.0.30" + }, + "start_ip": { + "prefix_length": 32, + "value": "192.168.0.20" + } + } + ] + }, + "ipv6": null + } + ], + "ip_prefix": null, + "ip_usage": null, + "is_advanced_networking": null, + "is_external": false, + "is_nat_enabled": null, + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": null, + "project_name": null, + "project_reference_id": null + }, + "migration_state": null, + "name": "KTTRilWFptZc_subnet_test_3", + "network_function_chain_reference": null, + "network_id": 226, + "reserved_ip_addresses": null, + "subnet_type": "VLAN", + "tenant_id": null, + "virtual_switch": null, + "virtual_switch_reference": "3a9be61d-f5c1-4fe0-8a5a-9832d747b4f8", + "vpc": null, + "vpc_reference": null + } + +failed: + description: Indicates if the request failed + type: bool + returned: always + +error: + description: Error message + type: str + returned: always + +changed: + description: Indicates if any changes were made during the operation + type: bool + returned: always + sample: False +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import 
remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import get_subnet_api_instance # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + expand=dict(type="str"), + ) + + return module_args + + +def get_subnet(module, result): + subnets = get_subnet_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = subnets.get_subnet_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching subnet info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_subnets(module, result): + subnets = get_subnet_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params, extra_params=["expand"]) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating subnets info Spec", **result) + + try: + resp = subnets.list_subnets(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching subnets info", + ) + if not resp or not resp.to_dict().get("data"): + result["response"] = [] + else: + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if 
module.params.get("ext_id"): + get_subnet(module, result) + else: + get_subnets(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_subnets_v2.py b/plugins/modules/ntnx_subnets_v2.py new file mode 100644 index 000000000..9ab6903ce --- /dev/null +++ b/plugins/modules/ntnx_subnets_v2.py @@ -0,0 +1,876 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_subnets_v2 +short_description: subnets module which supports Create, Update, Delete subnets +version_added: 2.0.0 +description: + - Create, Update, Delete subnets + - This module uses PC v4 APIs based SDKs +notes: + - For updating IPAM config using C(ip_config), provide all details again. Module will not fetch existing IPAM config. + - For subnet create and delete, module will return tasks status in response after operation. + - For subnet update, module will return subnet info if C(wait) is true, else task status. +options: + state: + description: + - if C(state) is present, it will create or update the subnet. + - If C(state) is set to C(present) and ext_id is not provided then the operation will be create the subnet + - If C(state) is set to C(present) and ext_id is provided then the operation will be update the subnet + - If C(state) is set to C(absent) and ext_id is provided , then operation will be delete the subnet + type: str + choices: ['present', 'absent'] + name: + description: + - Subnet name + type: str + ext_id: + description: + - Subnet external ID + - Required only for updating or deleting the subnet. 
+ type: str + subnet_type: + description: + - Type of the subnet + type: str + choices: ['OVERLAY', 'VLAN'] + network_id: + description: + - Network ID + type: int + dhcp_options: + description: + - DHCP options + type: dict + suboptions: + domain_name_servers: + description: + - Domain name servers + type: list + elements: dict + suboptions: + ipv4: + description: + - IPv4 address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + ipv6: + description: + - IPv6 address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + domain_name: + description: + - Domain name + type: str + tftp_server_name: + description: + - TFTP server name + type: str + boot_file_name: + description: + - Boot file name + type: str + ntp_servers: + description: + - NTP servers + type: list + elements: dict + suboptions: + ipv4: + description: + - IPv4 address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + ipv6: + description: + - IPv6 address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + search_domains: + description: + - Search domains + type: list + elements: str + ip_config: + description: + - IPAM configuration + type: list + elements: dict + suboptions: + ipv4: + description: + - IPv4 configuration + type: dict + suboptions: + ip_subnet: + description: + - IP subnet + type: dict + suboptions: + ip: + description: + - IP address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + prefix_length: + description: + - Prefix length + - Required field + type: int + default_gateway_ip: + description: + - Default gateway IP + type: dict + 
suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + dhcp_server_address: + description: + - DHCP server address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + pool_list: + description: + - Pool list + type: list + elements: dict + suboptions: + start_ip: + description: + - Start IP + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + end_ip: + description: + - End IP + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + ipv6: + description: + - IPv6 configuration + type: dict + suboptions: + ip_subnet: + description: + - IP subnet + type: dict + suboptions: + ip: + description: + - IP address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + prefix_length: + description: + - Prefix length + type: int + default_gateway_ip: + description: + - Default gateway IP + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + dhcp_server_address: + description: + - DHCP server address + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + pool_list: + description: + - Pool list + type: list + elements: dict + suboptions: + start_ip: + description: + - Start IP + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + end_ip: + description: + - End IP + type: dict + suboptions: + value: + description: + - IP address + type: str + prefix_length: + description: + - Prefix length + type: int + 
cluster_reference: + description: + - Cluster external ID + - Required for VLAN subnet type + type: str + virtual_switch_reference: + description: + - Virtual switch external ID + type: str + vpc_reference: + description: + - VPC external ID + - Required for OVERLAY subnet type + type: str + is_nat_enabled: + description: + - flag to enable NAT + type: bool + is_external: + description: + - flag to mark the subnet as external + type: bool + network_function_chain_reference: + description: + - Network function chain external ID + type: str + is_advanced_networking: + description: + - flag to enable advanced networking + type: bool + ip_prefix: + description: + - IP prefix + type: str + wait: + description: + - Wait for the task to complete + type: bool + default: true + metadata: + description: Metadata associated with this resource. + type: dict + suboptions: + owner_reference_id: + description: owner external_id + type: str + project_reference_id: + description: project external id + type: str + category_ids: + description: A list of globally unique identifiers that represent all the categories the resource will be associated with. + type: list + elements: str + hypervisor_type: + description: Hypervisor type + type: str + description: + type: str + description: Description of the subnet. 
+extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: VLAN subnet with IPAM IP pools + nutanix.ncp.ntnx_subnets_v2: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: VLAN subnet with IPAM IP pools + subnet_type: VLAN + cluster_reference: 00061663-9fa0-28ca-185b-ac1f6b6f97e2 + virtual_switch_reference: 18dbfce0-f7e1-4b19-a9e6-43b0be8c2507 + network_id: 226 + ip_config: + - ipv4: + ip_subnet: + ip: + value: 192.168.0.0 + prefix_length: 24 + default_gateway_ip: + value: 192.168.0.254 + prefix_length: 24 + pool_list: + - start_ip: + value: 192.168.0.20 + prefix_length: 24 + end_ip: + value: 192.168.0.30 + prefix_length: 24 + +- name: External subnet with NAT + nutanix.ncp.ntnx_subnets_v2: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: "External subnet with NAT" + subnet_type: VLAN + cluster_reference: 00061663-9fa0-28ca-185b-ac1f6b6f97e2 + network_id: 103 + is_external: true + ip_config: + - ipv4: + ip_subnet: + ip: + value: 10.44.3.192 + prefix_length: 27 + default_gateway_ip: + value: 10.44.3.193 + prefix_length: 27 + pool_list: + - start_ip: + value: 10.44.3.198 + prefix_length: 27 + end_ip: + value: 10.44.3.207 + prefix_length: 27 + - start_ip: + value: 10.44.3.208 + prefix_length: 27 + end_ip: + value: 10.44.3.217 + prefix_length: 27 + +- name: Overlay Subnet with IP_pools + nutanix.ncp.ntnx_subnets_v2: + state: present + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: Overlay Subnet with IP_pools and DHCP + subnet_type: OVERLAY + vpc_reference: 4c92c01e-2eb7-4a50-bda3-09729b62b634 
+ ip_config: + - ipv4: + ip_subnet: + ip: + value: 192.168.0.0 + prefix_length: 24 + default_gateway_ip: + value: 192.168.0.254 + prefix_length: 24 + pool_list: + - start_ip: + value: 192.168.0.20 + prefix_length: 24 + end_ip: + value: 192.168.0.30 + prefix_length: 24 + +- name: Delete subnets + nutanix.ncp.ntnx_subnets_v2: + state: absent + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + subnet_uuid: a3265671-de53-41be-af9b-f06241b95356 +""" + +RETURN = r""" +response: + description: + - Response for the subnet operations. + - Subnet details if C(wait) is true. + - Task details if C(wait) is false. + type: dict + returned: always + sample: { + "bridge_name": "br0", + "cluster_name": null, + "cluster_reference": "00061663-9fa0-28ca-185b-ac1f6b6f97e2", + "description": null, + "dhcp_options": { + "boot_file_name": null, + "domain_name": null, + "domain_name_servers": null, + "ntp_servers": null, + "search_domains": null, + "tftp_server_name": null + }, + "dynamic_ip_addresses": null, + "ext_id": "1d42d222-a065-4ed8-9f74-dc5818dfab41", + "hypervisor_type": "acropolis", + "ip_config": [ + { + "ipv4": { + "default_gateway_ip": null, + "dhcp_server_address": null, + "ip_subnet": null, + "pool_list": null + }, + "ipv6": null + } + ], + "ip_prefix": null, + "is_advanced_networking": null, + "is_external": false, + "is_nat_enabled": null, + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": null, + "project_name": null, + "project_reference_id": null + }, + "name": "VLAN subnet without IPAM", + "network_function_chain_reference": null, + "network_id": 221, + "reserved_ip_addresses": null, + "subnet_type": "VLAN", + "tenant_id": null, + "virtual_switch": null, + "virtual_switch_reference": "18dbfce0-f7e1-4b19-a9e6-43b0be8c2507", + "vpc": null, + "vpc_reference": null + } +ext_id: + description: + - External ID 
of the subnet. + type: str + returned: always +task_ext_id: + description: + - Task external ID. + type: str + returned: always +changed: + description: + - Whether the subnet is changed or not. + type: bool + returned: always +skipped: + description: + - Whether the operation is skipped or not. + - Will be returned if operation is skipped. + type: bool + returned: always +error: + description: + - Error message if an error occurs. + type: str + returned: when an error occurs +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_etag, + get_subnet_api_instance, +) +from ..module_utils.v4.network.helpers import get_subnet # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + remove_empty_ip_config, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client as net_sdk # noqa: E402 +except ImportError: + from ..module_utils.v4.sdk_mock import mock_sdk as net_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + ip = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + ipv4_pool_spec = dict( + start_ip=dict(type="dict", options=ip, obj=net_sdk.IPv4Address), + end_ip=dict(type="dict", options=ip, 
obj=net_sdk.IPv4Address), + ) + + ipv4_subnet_spec = dict( + ip=dict(type="dict", options=ip, obj=net_sdk.IPv4Address), + prefix_length=dict(type="int"), + ) + + ipv4_config_sub_spec = dict( + ip_subnet=dict(type="dict", options=ipv4_subnet_spec, obj=net_sdk.IPv4Subnet), + default_gateway_ip=dict(type="dict", options=ip, obj=net_sdk.IPv4Address), + dhcp_server_address=dict(type="dict", options=ip, obj=net_sdk.IPv4Address), + pool_list=dict( + type="list", elements="dict", options=ipv4_pool_spec, obj=net_sdk.IPv4Pool + ), + ) + + ipv6_subnet_spec = dict( + ip=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + prefix_length=dict(type="int"), + ) + + ipv6_pool_spec = dict( + start_ip=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + end_ip=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + ) + + ipv6_config_sub_spec = dict( + ip_subnet=dict(type="dict", options=ipv6_subnet_spec, obj=net_sdk.IPv6Subnet), + default_gateway_ip=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + dhcp_server_address=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + pool_list=dict( + type="list", elements="dict", options=ipv6_pool_spec, obj=net_sdk.IPv6Pool + ), + ) + + ip_config_spec = dict( + ipv4=dict(type="dict", options=ipv4_config_sub_spec, obj=net_sdk.IPv4Config), + ipv6=dict(type="dict", options=ipv6_config_sub_spec, obj=net_sdk.IPv6Config), + ) + + ip_address = dict( + ipv4=dict(type="dict", options=ip, obj=net_sdk.IPv4Address), + ipv6=dict(type="dict", options=ip, obj=net_sdk.IPv6Address), + ) + + dhcp_spec = dict( + domain_name_servers=dict( + type="list", elements="dict", options=ip_address, obj=net_sdk.IPAddress + ), + domain_name=dict(type="str"), + tftp_server_name=dict(type="str"), + boot_file_name=dict(type="str"), + ntp_servers=dict( + type="list", elements="dict", options=ip_address, obj=net_sdk.IPAddress + ), + search_domains=dict(type="list", elements="str"), + ) + + metadata_spec = dict( + owner_reference_id=dict(type="str"), + 
project_reference_id=dict(type="str"), + category_ids=dict(type="list", elements="str"), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + subnet_type=dict(type="str", choices=["OVERLAY", "VLAN"]), + network_id=dict(type="int"), + dhcp_options=dict(type="dict", options=dhcp_spec, obj=net_sdk.DhcpOptions), + ip_config=dict( + type="list", elements="dict", options=ip_config_spec, obj=net_sdk.IPConfig + ), + cluster_reference=dict(type="str"), + virtual_switch_reference=dict(type="str"), + vpc_reference=dict(type="str"), + is_nat_enabled=dict(type="bool"), + is_external=dict(type="bool"), + network_function_chain_reference=dict(type="str"), + is_advanced_networking=dict(type="bool"), + hypervisor_type=dict(type="str"), + ip_prefix=dict(type="str"), + metadata=dict(type="dict", options=metadata_spec, obj=net_sdk.Metadata), + ) + + return module_args + + +def create_subnet(module, result): + subnets = get_subnet_api_instance(module) + + sg = SpecGenerator(module) + default_spec = net_sdk.Subnet() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create subnets Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = subnets.create_subnet(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating subnet", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + ext_id = get_entity_ext_id_from_task( + resp, rel=TASK_CONSTANTS.RelEntityType.SUBNET + ) + if ext_id: + resp = get_subnet(module, subnets, ext_id) + 
result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_subnets_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + return True + + +def update_subnet(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + subnets = get_subnet_api_instance(module) + current_spec = get_subnet(module, subnets, ext_id=ext_id) + remove_empty_ip_config(current_spec) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating subnets update spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + # check for idempotency + if check_subnets_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + resp = None + subnets = get_subnet_api_instance(module) + try: + resp = subnets.update_subnet_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating subnet", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_subnet(module, subnets, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_subnet(module, result): + subnets = get_subnet_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_subnet(module, subnets, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for deleting subnet", **result) + + kwargs = 
{"if_match": etag} + + try: + resp = subnets.delete_subnet_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting subnet", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ( + "state", + "present", + ("ext_id", "cluster_reference", "vpc_reference"), + True, + ), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_subnet(module, result) + else: + create_subnet(module, result) + else: + delete_subnet(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_templates_deploy_v2.py b/plugins/modules/ntnx_templates_deploy_v2.py new file mode 100644 index 000000000..a02855623 --- /dev/null +++ b/plugins/modules/ntnx_templates_deploy_v2.py @@ -0,0 +1,531 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: 
ntnx_templates_deploy_v2 +short_description: Deploy Nutanix templates +description: + - This module allows you to deploy Nutanix templates. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + ext_id: + description: + - The external ID of the template to deploy. + required: true + type: str + version_id: + description: + - The identifier of a Template Version. + type: str + number_of_vms: + description: + - Number of VMs to be deployed. + type: int + override_vms_config: + description: + - A list specifying the VM configuration overrides for each of the VMs to be created. + Each element in the list corresponds to a VM and includes the override configurations + such as VM Name, Configuration, and Guest Customization. The position of the element + in the list defines the index of the VM to which the override configuration will be applied. + type: list + elements: dict + suboptions: + name: + description: Name of the virtual machine + type: str + num_sockets: + description: Number of vCPU sockets. + type: int + num_cores_per_socket: + description: Number of cores per socket. + type: int + num_threads_per_core: + description: Number of threads per core. + type: int + memory_size_bytes: + description: Memory size in bytes. + type: int + nics: + description: NICs attached to the VM. + type: list + elements: dict + suboptions: + backing_info: + description: Defines a NIC emulated by the hypervisor + type: dict + suboptions: + model: + description: Model of the NIC + type: str + choices: ["VIRTIO", "E1000"] + mac_address: + description: MAC address of the emulated NIC. + type: str + is_connected: + description: Indicates whether the NIC is connected or not. Default is True. + type: bool + num_queues: + description: The number of Tx/Rx queue pairs for this NIC. + type: int + network_info: + description: Network information for a NIC. 
+ type: dict + suboptions: + nic_type: + description: Type of the NIC + type: str + choices: ["NORMAL_NIC", "DIRECT_NIC", "NETWORK_FUNCTION_NIC", "SPAN_DESTINATION_NIC"] + network_function_chain: + description: The network function chain associates with the NIC. Only valid if nic_type is NORMAL_NIC. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a network function chain. It should be of type UUID. + required: true + type: str + network_function_nic_type: + description: The type of this Network function NIC. Defaults to INGRESS. + type: str + choices: ["INGRESS", "EGRESS", "TAP"] + subnet: + description: Network identifier for this adapter. Only valid if nic_type is NORMAL_NIC or DIRECT_NIC. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a subnet. It should be of type UUID. + required: true + type: str + vlan_mode: + description: + - By default, all the virtual NICs are created in ACCESS mode, which permits only one VLAN per virtual network. + TRUNKED mode allows multiple VLANs on a single VM NIC for network-aware user VMs. + type: str + choices: ["ACCESS", "TRUNK"] + trunked_vlans: + description: + - List of networks to trunk if VLAN mode is marked as TRUNKED. + If empty and VLAN mode is set to TRUNKED, all the VLANs are trunked. + type: list + elements: int + should_allow_unknown_macs: + description: + - Indicates whether an unknown unicast traffic is forwarded to this NIC or not. + This is applicable only for the NICs on the overlay subnets. + type: bool + ipv4_config: + description: The IP address configurations. + type: dict + suboptions: + should_assign_ip: + description: + - If set to true (default value), an IP address must be assigned to the VM NIC + either the one explicitly specified by the user or allocated automatically by + the IPAM service by not specifying the IP address. + If false, then no IP assignment is required for this VM NIC. 
+ type: bool + ip_address: + description: Primary IP address configuration + type: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + secondary_ip_address_list: + description: List of secondary IP addresses + type: list + elements: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + type: dict + suboptions: + config: + type: dict + description: The Nutanix Guest Tools customization settings. + suboptions: + sysprep: + description: Sysprep configuration for Windows guests + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly installed using this unattend configuration, + or this unattend configuration will be applied to a pre-prepared image. Default is 'PREPARED'. + type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Parameters for the sysprep script + type: dict + suboptions: + unattendxml: + description: unattend.xml settings + type: dict + suboptions: + value: + description: XML content for unattend.xml + type: str + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. 
+ type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: The value associated with the key for this key-value pair + type: raw + + cloudinit: + description: Cloud-init configuration for Linux guests + type: dict + suboptions: + datasource_type: + description: Type of cloud-init datasource + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: + - The contents of the meta_data configuration for cloud-init. + This can be formatted as YAML or JSON. The value must be base64 encoded. + type: str + cloud_init_script: + description: The script to use for cloud-init. + type: dict + suboptions: + user_data: + description: User data script + type: dict + suboptions: + value: + description: The actual user data script content + type: str + required: True + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: The value associated with the key for this key-value pair + type: raw + cluster_reference: + description: + - The identifier of the Cluster where the VM(s) will be created using a Template. + type: str + wait: + description: + - Whether to wait for the template deployment to complete. 
+ type: bool + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Deploy VM + nutanix.ncp.ntnx_templates_deploy_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "{{ template_ext_id }}" + version_id: "{{version_ext_id}}" + cluster_reference: "{{cluster.uuid}}" + +- name: Deploy vm and override config + nutanix.ncp.ntnx_templates_deploy_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "{{ template_ext_id }}" + version_id: "{{version_ext_id}}" + cluster_reference: "{{cluster.uuid}}" + override_vms_config: + - name: vm_template_override + num_sockets: 4 + num_cores_per_socket: 4 + num_threads_per_core: 2 + memory_size_bytes: 4294967296 +""" + +RETURN = r""" +ext_id: + description: The external ID of the deployed template. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +task_ext_id: + description: The external ID of the deployment task. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +response: + description: The response from the template deployment API which always includes the task details. 
+ type: dict + returned: always + sample: + { + "cluster_ext_ids": null, + "completed_time": "2024-05-20T08:13:07.945618+00:00", + "completion_details": null, + "created_time": "2024-05-20T08:12:59.209878+00:00", + "entities_affected": [ + { + "ext_id": "fa236286-b965-4125-8367-672e6597a2f8", + "rel": "vmm:content:templates" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:f27e2a60-a171-48e5-a7ab-8872c8560984", + "is_cancelable": false, + "last_updated_time": "2024-05-20T08:13:07.945617+00:00", + "legacy_error_message": null, + "operation": "kVmTemplateDeploy", + "operation_description": null, + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-05-20T08:12:59.226669+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:1089a445-8d35-47ce-b059-2fad04432017", + "href": "https://000.000.000.000:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:1089a445-8d35-47ce-b059-2fad04432017", + "rel": "subtask" + }, + { + "ext_id": "ZXJnb24=:e5f36bac-751d-4e69-b72c-aebb32b6cfea", + "href": "https://000.000.000.000:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:e5f36bac-751d-4e69-b72c-aebb32b6cfea", + "rel": "subtask" + }, + { + "ext_id": "ZXJnb24=:a2bca528-3455-4115-9da0-79ed7c0ace96", + "href": "https://000.000.000.000:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:a2bca528-3455-4115-9da0-79ed7c0ace96", + "rel": "subtask" + } + ], + "warnings": null + } +changed: + description: Indicates whether the template deployment changed the system. + type: bool + returned: always + sample: true +error: + description: The error message if the template deployment failed. 
+ type: str + returned: when error occurs + sample: "Failed to deploy template" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import ( # noqa: E402 + get_etag, + get_templates_api_instance, +) +from ..module_utils.v4.vmm.helpers import get_template # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_override_vm_config_schema(): + override_config_schema = dict( + name=dict(type="str"), + num_sockets=dict(type="int"), + num_cores_per_socket=dict(type="int"), + num_threads_per_core=dict(type="int"), + memory_size_bytes=dict(type="int"), + nics=dict( + type="list", + elements="dict", + options=vm_specs.get_nic_spec(), + obj=vmm_sdk.AhvConfigNic, + ), + guest_customization=dict( + type="dict", + options=vm_specs.get_gc_spec(), + obj=vmm_sdk.GuestCustomizationParams, + ), + ) + return override_config_schema + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + version_id=dict(type="str"), + number_of_vms=dict(type="int"), + override_vms_config=dict( + type="list", elements="dict", 
options=get_override_vm_config_schema() + ), + cluster_reference=dict(type="str"), + ) + + return module_args + + +def deploy_template(module, result): + templates = get_templates_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_template(module, templates, ext_id=ext_id) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.TemplateDeployment() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating create templates deploy spec", **result) + + # Generate override vm config map for each VM + override_vms_config = module.params.get("override_vms_config", []) + if override_vms_config: + override_vm_config_map = {} + vm_index = 0 + override_vms_config_schema = get_override_vm_config_schema() + kwargs = {"module_args": override_vms_config_schema} + for vm_config in override_vms_config: + s = vmm_sdk.VmConfigOverride() + s, err = sg.generate_spec(obj=s, attr=vm_config, **kwargs) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating vm config override spec", **result + ) + override_vm_config_map[str(vm_index)] = s + vm_index += 1 + + spec.override_vm_config_map = override_vm_config_map + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create deploy template spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + version_ext_id = module.params.get("version_id") + result[ + "msg" + ] = f"Template ({ext_id}) with given version ({version_ext_id}) will be deployed." 
+ return + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("Unable to fetch etag for deploying template", **result) + + kwargs = {"if_match": etag} + + try: + resp = templates.deploy_template(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deploying template", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + deploy_template(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_templates_guest_os_v2.py b/plugins/modules/ntnx_templates_guest_os_v2.py new file mode 100644 index 000000000..3fbd8db26 --- /dev/null +++ b/plugins/modules/ntnx_templates_guest_os_v2.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_templates_guest_os_v2 +short_description: Manage guest OS updates for Nutanix AHV templates. +description: + - This module allows you to initiate, complete, or cancel guest OS updates for Nutanix AHV templates. 
+ - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + state: + description: + - The state of the guest OS update. + type: str + choices: ['start', 'cancel', 'finish'] + default: start + template_ext_id: + description: + - The identifier of a Template. + - required for all states. + type: str + required: true + version_id: + description: + - The identifier of a Template Version. + type: str + required: true + version_name: + description: + - The user defined name of a Template Version. + - required for finish state to complete guest OS update. + type: str + version_description: + description: + - The user defined description of a Template Version. + - required for finish state to complete guest OS update. + type: str + is_active_version: + description: + - Specify whether to mark the Template Version as active or not. + The newly created Version during Template Creation, updating + or Guest OS updating is set to Active by default unless specified otherwise. + type: bool +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: initiate_guest_os_update + nutanix.ncp.ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{version1_ext_id}}" + state: start + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: cancel guest_os_update + nutanix.ncp.ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{version1_ext_id}}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: cancel + +- name: finish guest_os_update + nutanix.ncp.ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{version1_ext_id}}" + state: finish + 
nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" + +RETURN = r""" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import ( # noqa: E402 + get_etag, + get_templates_api_instance, +) +from ..module_utils.v4.vmm.helpers import get_template # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + state=dict(type="str", choices=["start", "cancel", "finish"], default="start"), + template_ext_id=dict(type="str", required=True), + version_id=dict(type="str", required=True), + version_name=dict(type="str"), + version_description=dict(type="str"), + is_active_version=dict(type="bool"), + ) + + return module_args + + +def initiate_guest_os_update(module, result): + templates = get_templates_api_instance(module) + ext_id = module.params.get("template_ext_id") + result["template_ext_id"] = ext_id + + current_spec = get_template(module, templates, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for initiate guest os update", **result + ) + + sg = 
SpecGenerator(module) + default_spec = vmm_sdk.InitiateGuestUpdateSpec() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating initiate guest os update spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + result["msg"] = "Guest OS update will be initiated" + return + + kwargs = {"if_match": etag} + + try: + resp = templates.initiate_guest_update(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while initiating guest os update", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + + result["changed"] = True + + +def complete_guest_os_update(module, result): + templates = get_templates_api_instance(module) + ext_id = module.params.get("template_ext_id") + result["template_ext_id"] = ext_id + + current_spec = get_template(module, templates, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for complete guest os update", **result + ) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.CompleteGuestUpdateSpec() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating complete guest os update spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + result["msg"] = "Guest OS update will be completed" + return + + kwargs = {"if_match": etag} + + try: + resp = templates.complete_guest_update(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + 
exception=e, + msg="Api Exception raised while completing guest os update", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + + result["changed"] = True + + +def cancel_guest_os_update(module, result): + templates = get_templates_api_instance(module) + ext_id = module.params.get("template_ext_id") + result["template_ext_id"] = ext_id + + current_spec = get_template(module, templates, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for initiate guest os update", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = templates.cancel_guest_update(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while initiating guest os update", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "start": + initiate_guest_os_update(module, result) + elif state == "finish": + complete_guest_os_update(module, result) + else: + cancel_guest_os_update(module, result) + 
module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_templates_info_v2.py b/plugins/modules/ntnx_templates_info_v2.py new file mode 100644 index 000000000..0f9c9ffb2 --- /dev/null +++ b/plugins/modules/ntnx_templates_info_v2.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_templates_info_v2 +short_description: template info module +version_added: 2.0.0 +description: + - Get templates info + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - template UUID + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: Fetch template info using ext id + nutanix.ncp.ntnx_templates_info_v2: + ext_id: "{{ template1_ext_id }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: List all templates + nutanix.ncp.ntnx_templates_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" +RETURN = r""" +response: + description: + - it contains template information + - Type can be list or dict; a response could be a list of templates or a template itself. + By default, the response will be a list. 
and dict if the ext_id is supplied in order to retrieve the template specifically + type: dict + returned: always + sample: + { + "create_time": "2024-05-16T04:41:12.501912+00:00", + "created_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + }, + "ext_id": "a13033a1-dbca-4712-aa33-54ab0ee86a94", + "guest_update_status": null, + "links": null, + "template_description": "ansible test", + "template_name": "SNfCOFKPcllbansible-agtemplate", + "template_version_spec": { + "create_time": "2024-05-16T04:41:12.458682+00:00", + "created_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + }, + "ext_id": "8cbc63a1-1219-4e75-b728-9086d3f7d13e", + "is_active_version": true, + "is_gc_override_enabled": true, + "links": null, + "tenant_id": null, + "version_description": "Created from VM: MinReqVMalaa2", + "version_name": "Initial Version", + "version_source": null, + "version_source_discriminator": null, + "vm_spec": { + "apc_config": null, + "availability_zone": null, + "bios_uuid": null, 
+ "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", + "NETWORK" + ] + }, + "categories": [ + { + "ext_id": "eb8b4155-b3d1-5772-8d2f-d566d43d8e46" + } + ], + "cd_roms": null, + "cluster": { + "ext_id": "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + }, + "create_time": null, + "description": null, + "disks": null, + "enabled_cpu_features": null, + "ext_id": null, + "generation_uuid": null, + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": null, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + "memory_size_bytes": 4294967296, + "name": "MinReqVMalaa2", + "nics": null, + "num_cores_per_socket": 1, + "num_numa_nodes": 0, + "num_sockets": 1, + "num_threads_per_core": 1, + "ownership_info": null, + "power_state": "ON", + "protection_policy_state": null, + "protection_type": null, + "serial_ports": null, + "source": null, + "storage_config": null, + "tenant_id": null, + "update_time": null, + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } + }, + "tenant_id": null, + "update_time": "2024-05-16T04:41:12.501912+00:00", + "updated_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": 
null, + "username": "admin" + } + } + +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the template version when is fetched. + type: str + returned: always + sample: "00000-00000-000000-000000" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_templates_api_instance # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_template(module, result): + templates = get_templates_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = templates.get_template_by_id(ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching template info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_templates(module, result): + templates = get_templates_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating templates info Spec", **result) + + try: + resp = templates.list_templates(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching templates info", + ) + if not getattr(resp, "data", None): + result["response"] = [] + return + result["response"] = 
strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_template(module, result) + else: + get_templates(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_templates_v2.py b/plugins/modules/ntnx_templates_v2.py new file mode 100644 index 000000000..4e155d09d --- /dev/null +++ b/plugins/modules/ntnx_templates_v2.py @@ -0,0 +1,1663 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_templates_v2 +short_description: Manage Nutanix AHV template resources +description: + - This module allows you to create, update, and delete Nutanix AHV templates. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + state: + description: + - The desired state of the template. + choices: ['present', 'absent'] + default: 'present' + ext_id: + description: + - A globally unique identifier of an instance that is suitable for external consumption. + type: str + template_name: + description: + - The user defined name of a Template. + type: str + template_description: + description: + - The description of the template. + type: str + template_version_spec: + description: + - Used for creating new versions of templates, allowing specification of version details such as name, + description, `extId`, and the source of the new version. 
+ - Each instance includes an `extId` field, which holds a globally unique identifier for the instance. + type: dict + suboptions: + version_name: + description: + - The user defined name of a Template Version. + type: str + version_description: + description: + - The user defined description of a Template Version. + type: str + version_source: + description: + - Source of the created Template Version. + The source can either be a VM when creating a new Template Version or + an existing Version within a Template when creating a new Version. + type: dict + suboptions: + template_vm_reference: + description: + - The reference to a template VM. + type: dict + suboptions: + ext_id: + description: + - The identifier of a VM. + type: str + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + type: dict + suboptions: + config: + type: dict + description: The Nutanix Guest Tools customization settings. + suboptions: + sysprep: + description: Sysprep configuration for Windows guests + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly + installed using this unattend configuration, + or this unattend configuration will be applied to a pre-prepared image. + Default is 'PREPARED'. + type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Parameters for the sysprep script + type: dict + suboptions: + unattendxml: + description: unattend.xml settings + type: dict + suboptions: + value: + description: XML content for unattend.xml + type: str + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. 
+ type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + + cloudinit: + description: Cloud-init configuration for Linux guests + type: dict + suboptions: + datasource_type: + description: Type of cloud-init datasource + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: + - The contents of the meta_data configuration for cloud-init. + This can be formatted as YAML or JSON. The value must be base64 encoded. + type: str + cloud_init_script: + description: The script to use for cloud-init. + type: dict + suboptions: + user_data: + description: User data script + type: dict + suboptions: + value: + description: The actual user data script content + type: str + required: True + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + + template_version_reference: + description: + - The reference to a template version. + type: dict + suboptions: + version_id: + description: + - The identifier of a Template Version. + type: str + override_vm_config: + description: + - Overrides specification for VM create from a Template. + type: dict + suboptions: + name: + description: VM name. + type: str + num_sockets: + description: Number of vCPU sockets. + type: int + num_cores_per_socket: + description: Number of cores per socket. + type: int + num_threads_per_core: + description: Number of threads per core. + type: int + memory_size_bytes: + description: Memory size in bytes. + type: int + nics: + description: NICs attached to the VM. 
+ type: list + elements: dict + suboptions: + backing_info: + description: Defines a NIC emulated by the hypervisor + type: dict + suboptions: + model: + description: Model of the NIC + type: str + choices: ["VIRTIO", "E1000"] + mac_address: + description: MAC address of the emulated NIC. + type: str + is_connected: + description: Indicates whether the NIC is connected or not. Default is True. + type: bool + num_queues: + description: The number of Tx/Rx queue pairs for this NIC. + type: int + network_info: + description: Network information for a NIC. + type: dict + suboptions: + nic_type: + description: Type of the NIC + type: str + choices: ["NORMAL_NIC", "DIRECT_NIC", "NETWORK_FUNCTION_NIC", "SPAN_DESTINATION_NIC"] + network_function_chain: + description: + - The network function chain associates with the NIC. + Only valid if nic_type is NORMAL_NIC. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a network function chain. + It should be of type UUID. + required: true + type: str + network_function_nic_type: + description: The type of this Network function NIC. Defaults to INGRESS. + type: str + choices: ["INGRESS", "EGRESS", "TAP"] + subnet: + description: + - Network identifier for this adapter. + Only valid if nic_type is NORMAL_NIC or DIRECT_NIC. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a subnet. It should be of type UUID. + required: true + type: str + vlan_mode: + description: + - By default, all the virtual NICs are created in ACCESS mode, + which permits only one VLAN per virtual network. + TRUNKED mode allows multiple VLANs on a single VM NIC for network-aware user VMs. + type: str + choices: ["ACCESS", "TRUNK"] + trunked_vlans: + description: + - List of networks to trunk if VLAN mode is marked as TRUNKED. + If empty and VLAN mode is set to TRUNKED, all the VLANs are trunked. 
+ type: list + elements: int + should_allow_unknown_macs: + description: + - Indicates whether an unknown unicast traffic is forwarded to this NIC or not. + This is applicable only for the NICs on the overlay subnets. + type: bool + ipv4_config: + description: The IP address configurations. + type: dict + suboptions: + should_assign_ip: + description: + - If set to true (default value), an IP address must be assigned to the VM NIC + either the one explicitly specified by the user or allocated automatically + by the IPAM service by not specifying the IP address. + If false, then no IP assignment is required for this VM NIC. + type: bool + ip_address: + description: Primary IP address configuration + type: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + secondary_ip_address_list: + description: List of secondary IP addresses + type: list + elements: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + type: dict + suboptions: + config: + type: dict + description: The Nutanix Guest Tools customization settings. + suboptions: + sysprep: + description: Sysprep configuration for Windows guests + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly installed + using this unattend configuration, + or this unattend configuration will be applied to a pre-prepared image. + Default is 'PREPARED'. 
+ type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Parameters for the sysprep script + type: dict + suboptions: + unattendxml: + description: unattend.xml settings + type: dict + suboptions: + value: + description: XML content for unattend.xml + type: str + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + + cloudinit: + description: Cloud-init configuration for Linux guests + type: dict + suboptions: + datasource_type: + description: Type of cloud-init datasource + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: + - The contents of the meta_data configuration for cloud-init. + This can be formatted as YAML or JSON. The value must be base64 encoded. + type: str + cloud_init_script: + description: The script to use for cloud-init. + type: dict + suboptions: + user_data: + description: User data script + type: dict + suboptions: + value: + description: The actual user data script content + type: str + required: True + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + vm_spec: + description: + - VM configuration. + type: dict + suboptions: + guest_tools: + description: + - The guest tools for the VM. + required: false + type: dict + suboptions: + is_enabled: + description: + - Whether the guest tools are enabled for the VM. 
+ type: bool + capabilities: + description: + - The list of capabilities for the guest tools. + type: list + elements: str + choices: ["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"] + + ext_id: + description: A globally unique identifier of an instance that is suitable for external consumption. + type: str + name: + description: VM name. + type: str + description: + description: VM description. + type: str + num_sockets: + description: Number of VCPU sockets + type: int + num_cores_per_socket: + description: Number of CPU cores per socket + type: int + num_threads_per_core: + description: Number of threads per core + type: int + num_numa_nodes: + description: Number of NUMA nodes. 0 means NUMA is disabled. + type: int + memory_size_bytes: + description: Size of memory in bytes + type: int + is_vcpu_hard_pinning_enabled: + description: Indicates whether the vCPUs should be hard pinned to specific pCPUs or not. + type: bool + is_cpu_passthrough_enabled: + description: + - Indicates whether to passthrough the host CPU features to the guest or not. + Enabling this will make VM incapable of live migration. + type: bool + enabled_cpu_features: + description: + - The list of additional CPU features to be enabled. + HardwareVirtualization Indicates whether hardware assisted virtualization + should be enabled for the Guest OS or not. Once enabled, the Guest OS can deploy a nested hypervisor. + type: list + elements: str + choices: ["HARDWARE_VIRTUALIZATION"] + is_memory_overcommit_enabled: + description: + - Indicates whether the memory overcommit feature should be enabled for the VM or not. + If enabled, parts of the VM memory may reside outside of the hypervisor physical memory. + Once enabled, it should be expected that the VM may suffer performance degradation. + type: bool + is_gpu_console_enabled: + description: Indicates whether the vGPU console is enabled or not. + type: bool + categories: + description: Categories for the VM. 
+ type: list + elements: dict + suboptions: + ext_id: + description: The globally unique identifier of a VM category. It should be of type UUID. + type: str + required: True + cluster: + description: Reference to a cluster. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a cluster. It should be of type UUID. + type: str + required: True + availability_zone: + description: Reference to an availability zone. + type: dict + suboptions: + ext_id: + description: External identifier of the availability zone reference + type: str + required: True + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + type: dict + suboptions: + config: + type: dict + description: The Nutanix Guest Tools customization settings. + suboptions: + sysprep: + description: Sysprep configuration for Windows guests + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly installed + using this unattend configuration, or this unattend + configuration will be applied to a pre-prepared image. + Default is 'PREPARED'. + type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Parameters for the sysprep script + type: dict + suboptions: + unattendxml: + description: unattend.xml settings + type: dict + suboptions: + value: + description: XML content for unattend.xml + type: str + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. 
+ type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + + cloudinit: + description: Cloud-init configuration for Linux guests + type: dict + suboptions: + datasource_type: + description: Type of cloud-init datasource + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: + - The contents of the meta_data configuration for cloud-init. + This can be formatted as YAML or JSON. The value must be base64 encoded. + type: str + cloud_init_script: + description: The script to use for cloud-init. + type: dict + suboptions: + user_data: + description: User data script + type: dict + suboptions: + value: + description: The actual user data script content + type: str + required: True + custom_key_values: + description: Custom key-value pairs + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key + for this key-value pair + type: raw + hardware_clock_timezone: + description: VM hardware clock timezone in IANA TZDB format (America/Los_Angeles). + type: str + is_branding_enabled: + description: Indicates whether to remove AHV branding from VM firmware tables or not. + type: bool + boot_config: + description: + - Indicates the order of device types in which the VM should try to boot from. + If the boot device order is not provided the system will decide an appropriate boot device order. 
+ type: dict + suboptions: + legacy_boot: + description: Legacy boot configuration + type: dict + suboptions: + boot_device: + description: Boot device settings for legacy boot + type: dict + suboptions: + boot_device_disk: + description: Boot device from disk + type: dict + suboptions: + disk_address: + description: Disk address for boot device + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. + The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: True + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + boot_device_nic: + description: Boot device from NIC + type: dict + suboptions: + mac_address: + description: MAC address of the NIC + type: str + boot_order: + description: + - Indicates the order of device types in which the VM should try to boot from. + If the boot device order is not provided the system will + decide an appropriate boot device order. + type: list + elements: str + choices: ["CDROM", "NETWORK", "DISK"] + uefi_boot: + description: UEFI boot configuration + type: dict + suboptions: + is_secure_boot_enabled: + description: Indicate whether to enable secure boot or not. + type: bool + nvram_device: + description: Configuration for NVRAM to be presented to the VM. + type: dict + suboptions: + backing_storage_info: + description: Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: Size of the disk in Bytes + type: int + storage_container: + description: + - This reference is for disk level storage container preference. + This preference specifies the storage container to which this disk belongs. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a VM disk container. It should be of type UUID. 
+ type: str + required: True + storage_config: + description: Storage configuration for VM disks. + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: A reference to a disk or image that contains the contents of a disk. + type: dict + suboptions: + reference: + description: Reference to the data source + type: dict + suboptions: + image_reference: + description: Image reference for the data source + type: dict + suboptions: + image_ext_id: + description: The globally unique identifier of an image. It should be of type UUID. + type: str + vm_disk_reference: + description: VM disk reference for the data source + type: dict + suboptions: + disk_ext_id: + description: The globally unique identifier of a VM disk. It should be of type UUID. + type: str + disk_address: + description: Disk address. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. + The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: True + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + vm_reference: + description: This is a reference to a VM. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM. It should be of type UUID. + type: str + required: True + + is_vga_console_enabled: + description: Indicates whether the VGA console should be disabled or not. + type: bool + machine_type: + description: Machine type for the VM. Machine type Q35 is required for secure boot and does not support IDE disks + type: str + choices: ["PC", "PSERIES", "Q35"] + vtpm_config: + description: Indicates how the vTPM for the VM should be configured. 
+ type: dict + suboptions: + is_vtpm_enabled: + description: Indicates whether the virtual trusted platform module is enabled for the Guest OS or not. + type: bool + version: + description: Version of the vTPM + type: str + is_agent_vm: + description: + - Indicates whether the VM is an agent VM or not. + When their host enters maintenance mode, once the normal VMs are evacuated, + the agent VMs are powered off. + When the host is restored, agent VMs are powered on before the normal VMs are restored. + In other words, agent VMs cannot be HA-protected or live migrated. + type: bool + apc_config: + description: + - Advanced Processor Compatibility configuration for the VM. + Enabling this retains the CPU model for the VM across power cycles and migrations. + type: dict + suboptions: + is_apc_enabled: + description: If enabled, the selected CPU model will be retained across live and cold migrations of the VM. + type: bool + cpu_model: + description: + - CPU model associated with the VM if Advanced Processor Compatibility(APC) is enabled. + If APC is enabled and no CPU model is explicitly set, a default baseline CPU model is picked by the system. + See the APC documentation for more information + type: dict + suboptions: + ext_id: + description: The globally unique identifier of the CPU model associated with the VM. + type: str + name: + description: Name of the CPU model associated with the VM. + type: str + storage_config: + description: Storage configuration for VM. + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + qos_config: + description: QoS parameters to be enforced. + type: dict + suboptions: + throttled_iops: + description: Throttled IOPS for the governed entities. The block size for the I/O is 32 kB. + type: int + disks: + description: Disks attached to the VM. 
+ type: list + elements: dict + suboptions: + backing_info: + description: Supporting storage to create virtual disk on. + type: dict + suboptions: + vm_disk: + description: VM disk information + type: dict + suboptions: + disk_size_bytes: + description: Size of the disk in bytes + type: int + storage_container: + description: + - This reference is for disk level storage container preference. + This preference specifies the storage container to which this disk belongs. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a VM disk container. It should be of type UUID. + type: str + required: True + storage_config: + description: Storage configuration for VM disks. + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: A reference to a disk or image that contains the contents of a disk. + type: dict + suboptions: + reference: + description: Reference to the data source + type: dict + suboptions: + image_reference: + description: Image reference for the data source + type: dict + suboptions: + image_ext_id: + description: The globally unique identifier of an image. It should be of type UUID. + type: str + vm_disk_reference: + description: VM disk reference for the data source + type: dict + suboptions: + disk_ext_id: + description: The globally unique identifier of a VM disk. It should be of type UUID. + type: str + disk_address: + description: Disk address. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. + The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: True + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + vm_reference: + description: This is a reference to a VM. 
+ type: dict + suboptions: + ext_id: + description: The globally unique identifier of a VM. It should be of type UUID. + type: str + required: True + adsf_volume_group: + description: write + type: dict + suboptions: + volume_group_ext_id: + description: + - The globally unique identifier of an ADSF volume group. It should be of type UUID. + type: str + disk_address: + description: Address information for the disk + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. + The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: True + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + + cd_roms: + description: CD-ROMs attached to the VM. + type: list + elements: dict + suboptions: + backing_info: + description: Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: Size of the disk in bytes + type: int + storage_container: + description: + - This reference is for disk level storage container preference. + This preference specifies the storage container to which this disk belongs. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM disk container. It should be of type UUID. + type: str + required: True + storage_config: + description: Storage configuration for the disk + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: A reference to a disk or image that contains the contents of a disk. + type: dict + suboptions: + reference: + description: Reference to the data source + type: dict + suboptions: + image_reference: + description: Image reference for the data source + type: dict + suboptions: + image_ext_id: + description: The globally unique identifier of an image. 
It should be of type UUID. + type: str + vm_disk_reference: + description: VM disk reference for the data source + type: dict + suboptions: + disk_ext_id: + description: The globally unique identifier of a VM disk. It should be of type UUID. + type: str + disk_address: + description: Disk address. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. + The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: True + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + vm_reference: + description: This is a reference to a VM. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a VM. It should be of type UUID. + type: str + required: True + disk_address: + description: Virtual Machine disk (VM disk). + type: dict + suboptions: + bus_type: + description: Bus type for the device. The acceptable values are IDE, SATA. + type: str + choices: ["IDE", "SATA"] + index: + description: Device index on the bus. This field is ignored unless the bus details are specified. + type: int + nics: + description: NICs attached to the VM. + type: list + elements: dict + suboptions: + backing_info: + description: Defines a NIC emulated by the hypervisor + type: dict + suboptions: + model: + description: Model of the NIC + type: str + choices: ["VIRTIO", "E1000"] + mac_address: + description: MAC address of the emulated NIC. + type: str + is_connected: + description: Indicates whether the NIC is connected or not. Default is True. + type: bool + num_queues: + description: The number of Tx/Rx queue pairs for this NIC. + type: int + network_info: + description: Network information for a NIC. 
+ type: dict + suboptions: + nic_type: + description: Type of the NIC + type: str + choices: ["NORMAL_NIC", "DIRECT_NIC", "NETWORK_FUNCTION_NIC", "SPAN_DESTINATION_NIC"] + network_function_chain: + description: The network function chain associates with the NIC. Only valid if nic_type is NORMAL_NIC. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a network function chain. It should be of type UUID. + required: true + type: str + network_function_nic_type: + description: The type of this Network function NIC. Defaults to INGRESS. + type: str + choices: ["INGRESS", "EGRESS", "TAP"] + subnet: + description: Network identifier for this adapter. Only valid if nic_type is NORMAL_NIC or DIRECT_NIC. + type: dict + suboptions: + ext_id: + description: The globally unique identifier of a subnet. It should be of type UUID. + required: true + type: str + vlan_mode: + description: + - By default, all the virtual NICs are created in ACCESS mode, + which permits only one VLAN per virtual network. + TRUNKED mode allows multiple VLANs on a single + VM NIC for network-aware user VMs. + type: str + choices: ["ACCESS", "TRUNK"] + trunked_vlans: + description: + - List of networks to trunk if VLAN mode is marked as TRUNKED. + If empty and VLAN mode is set to TRUNKED, all the VLANs are trunked. + type: list + elements: int + should_allow_unknown_macs: + description: + - Indicates whether an unknown unicast traffic is forwarded to this NIC or not. + This is applicable only for the NICs on the overlay subnets. + type: bool + ipv4_config: + description: The IP address configurations. + type: dict + suboptions: + should_assign_ip: + description: + - If set to true (default value), an IP address must be assigned to the VM NIC + either the one explicitly specified by the user + or allocated automatically by the IPAM service by not specifying the IP address. + If false, then no IP assignment is required for this VM NIC. 
+ type: bool + ip_address: + description: Primary IP address configuration + type: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + secondary_ip_address_list: + description: List of secondary IP addresses + type: list + elements: dict + suboptions: + value: + description: IP address + type: str + required: True + prefix_length: + description: Prefix length of the IP address + type: int + gpus: + description: GPUs attached to the VM. + type: list + elements: dict + suboptions: + name: + description: Name of the GPU + type: str + mode: + description: Mode of the GPU + type: str + choices: ["PASSTHROUGH_GRAPHICS", "PASSTHROUGH_COMPUTE", "VIRTUAL"] + device_id: + description: Device ID of the GPU + type: int + vendor: + description: Vendor of the GPU + type: str + choices: ["NVIDIA", "INTEL", "AMD"] + pci_address: + description: PCI address of the GPU + type: dict + suboptions: + segment: + description: Segment number of the PCI address + type: int + bus: + description: Bus number of the PCI address + type: int + device: + description: Device number of the PCI address + type: int + func: + description: Function number of the PCI address + type: int + serial_ports: + description: Serial ports configured on the VM. + type: list + elements: dict + suboptions: + is_connected: + description: Indicates whether the serial port is connected or not. + type: bool + ext_id: + description: A globally unique identifier of an instance that is suitable for external consumption. + type: str + index: + description: Index of the serial port. + type: int + is_active_version: + description: + - Specify whether to mark the Template Version as active or not. + The newly created Version during Template Creation, + updating or Guest OS updating is set to Active by default unless specified otherwise. 
+ type: bool + is_gc_override_enabled: + description: + - Allow or disallow override of the Guest Customization during Template deployment. + type: bool + guest_update_status: + description: + - The status of the guest update. + type: dict + suboptions: + deployed_vm_reference: + description: + - The identifier of the temporary VM created on initiating Guest OS Update. + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create new template from a vm + nutanix.ncp.ntnx_templates_v2: + ext_id: "{{ template1_ext_id }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + template_name: "{{ template_name }}" + template_description: "ansible test" + template_version_spec: + version_source: + template_vm_reference: + ext_id: "{{ vm_uuid }}" + +- name: Update template description & name + nutanix.ncp.ntnx_templates_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "{{ template1_ext_id }}" + template_version_spec: + version_name: "{{ version_2_name }}" + version_description: "ansible_template_version_description New" + version_source: + template_version_reference: + version_id: "{{version1_ext_id}}" + override_vm_config: + num_sockets: 4 + num_cores_per_socket: 4 + num_threads_per_core: 4 + name: "new_vm_name" + +- name: Delete Template + nutanix.ncp.ntnx_templates_v2: + ext_id: "{{ template1_ext_id }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent +""" + +RETURN = r""" +response: + description: The response from the Nutanix API. 
+ type: dict + returned: always + sample: + { + "create_time": "2024-05-20T08:02:06.806063+00:00", + "created_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + }, + "ext_id": "5448bd78-5343-4e1c-8f30-9246d88c1147", + "guest_update_status": null, + "links": null, + "template_description": "ansible test", + "template_name": "rDkjscJgMJBoansible-agtemplate", + "template_version_spec": { + "create_time": "2024-05-20T08:02:06.776259+00:00", + "created_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + }, + "ext_id": "148038b3-6e68-48d9-ba29-4c8f36798be5", + "is_active_version": true, + "is_gc_override_enabled": true, + "links": null, + "tenant_id": null, + "version_description": "Created from VM: MinReqVMalaa2", + "version_name": "Initial Version", + "version_source": null, + "version_source_discriminator": null, + "vm_spec": { + "apc_config": null, + "availability_zone": null, + "bios_uuid": null, + "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", 
+ "NETWORK" + ] + }, + "categories": [ + { + "ext_id": "eb8b4155-b3d1-5772-8d2f-d566d43d8e46" + } + ], + "cd_roms": null, + "cluster": { + "ext_id": "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + }, + "create_time": null, + "description": null, + "disks": null, + "enabled_cpu_features": null, + "ext_id": null, + "generation_uuid": null, + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": null, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + "memory_size_bytes": 4294967296, + "name": "MinReqVMalaa2", + "nics": null, + "num_cores_per_socket": 1, + "num_numa_nodes": 0, + "num_sockets": 1, + "num_threads_per_core": 1, + "ownership_info": null, + "power_state": "ON", + "protection_policy_state": null, + "protection_type": null, + "serial_ports": null, + "source": null, + "storage_config": null, + "tenant_id": null, + "update_time": null, + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } + }, + "tenant_id": null, + "update_time": "2024-05-20T08:02:06.806063+00:00", + "updated_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + } + } +task_ext_id: + description: The unique 
import traceback  # noqa: E402
import warnings  # noqa: E402
from copy import deepcopy  # noqa: E402

from ansible.module_utils.basic import missing_required_lib  # noqa: E402

from ..module_utils.base_module import BaseModule  # noqa: E402
from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS  # noqa: E402
from ..module_utils.v4.prism.tasks import (  # noqa: E402
    get_entity_ext_id_from_task,
    wait_for_completion,
)
from ..module_utils.v4.spec_generator import SpecGenerator  # noqa: E402
from ..module_utils.v4.utils import (  # noqa: E402
    raise_api_exception,
    strip_internal_attributes,
)
from ..module_utils.v4.vmm.api_client import (  # noqa: E402
    get_etag,
    get_templates_api_instance,
)
from ..module_utils.v4.vmm.helpers import get_template  # noqa: E402
from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs  # noqa: E402

SDK_IMP_ERROR = None
try:
    import ntnx_vmm_py_client as vmm_sdk  # noqa: E402
except ImportError:

    from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk  # noqa: E402

    SDK_IMP_ERROR = traceback.format_exc()

# Suppress the InsecureRequestWarning
warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made")


def get_module_spec():
    """Build the Ansible argument spec for template create/update/delete."""
    version_source_map = {
        "template_vm_reference": vmm_sdk.TemplateVmReference,
        "template_version_reference": vmm_sdk.TemplateVersionReference,
    }

    template_vm_reference_spec = dict(
        ext_id=dict(type="str"),
        guest_customization=dict(
            type="dict",
            options=vm_specs.get_gc_spec(),
            obj=vmm_sdk.GuestCustomizationParams,
        ),
    )

    override_config_spec = dict(
        name=dict(type="str"),
        num_sockets=dict(type="int"),
        num_cores_per_socket=dict(type="int"),
        num_threads_per_core=dict(type="int"),
        memory_size_bytes=dict(type="int"),
        nics=dict(
            type="list",
            elements="dict",
            options=vm_specs.get_nic_spec(),
            obj=vmm_sdk.AhvConfigNic,
        ),
        guest_customization=dict(
            type="dict",
            options=vm_specs.get_gc_spec(),
            obj=vmm_sdk.GuestCustomizationParams,
        ),
    )

    template_version_reference_spec = dict(
        version_id=dict(type="str"),
        override_vm_config=dict(
            type="dict", options=override_config_spec, obj=vmm_sdk.VmConfigOverride
        ),
    )

    version_source_spec = dict(
        template_vm_reference=dict(
            type="dict",
            options=template_vm_reference_spec,
            obj=vmm_sdk.TemplateVmReference,
        ),
        template_version_reference=dict(
            type="dict",
            options=template_version_reference_spec,
            obj=vmm_sdk.TemplateVersionReference,
        ),
    )

    version_spec = dict(
        version_name=dict(type="str"),
        version_description=dict(type="str"),
        version_source=dict(
            type="dict", options=version_source_spec, obj=version_source_map
        ),
        vm_spec=dict(
            type="dict", options=vm_specs.get_vm_spec(), obj=vmm_sdk.AhvConfigVm
        ),
        is_active_version=dict(type="bool"),
        is_gc_override_enabled=dict(type="bool"),
    )

    guest_update_status_spec = dict(
        deployed_vm_reference=dict(type="str"),
    )

    module_args = dict(
        ext_id=dict(type="str"),
        template_name=dict(type="str"),
        template_description=dict(type="str"),
        template_version_spec=dict(
            type="dict", options=version_spec, obj=vmm_sdk.TemplateVersionSpec
        ),
        guest_update_status=dict(
            type="dict", options=guest_update_status_spec, obj=vmm_sdk.GuestUpdateStatus
        ),
    )

    return module_args


def create_template(module, result):
    """Create a template and optionally wait for the create task to finish.

    Populates result["task_ext_id"], result["response"], result["ext_id"]
    (when the task resolves the new template) and result["changed"].
    """
    templates = get_templates_api_instance(module)

    sg = SpecGenerator(module)
    default_spec = vmm_sdk.Template()
    spec, err = sg.generate_spec(obj=default_spec)

    if err:
        result["error"] = err
        module.fail_json(msg="Failed generating create templates Spec", **result)

    # In check mode only report the spec that would have been sent.
    if module.check_mode:
        result["response"] = spec.to_dict()
        return

    resp = None
    try:
        resp = templates.create_template(body=spec)
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while creating template",
        )

    task_ext_id = resp.data.ext_id
    result["task_ext_id"] = task_ext_id
    result["response"] = strip_internal_attributes(resp.data.to_dict())
    if task_ext_id and module.params.get("wait"):
        task_status = wait_for_completion(module, task_ext_id)
        result["response"] = strip_internal_attributes(task_status.to_dict())
        # Resolve the created template's ext_id from the completed task so the
        # module can return the full template instead of the raw task payload.
        ext_id = get_entity_ext_id_from_task(
            task_status, rel=TASK_CONSTANTS.RelEntityType.TEMPLATES
        )
        if ext_id:
            resp = get_template(module, templates, ext_id)
            result["ext_id"] = ext_id
            result["response"] = strip_internal_attributes(resp.to_dict())

    result["changed"] = True


def update_template(module, result):
    """Update an existing template identified by module.params["ext_id"]."""
    templates = get_templates_api_instance(module)
    ext_id = module.params.get("ext_id")
    result["ext_id"] = ext_id

    current_spec = get_template(module, templates, ext_id)
    sg = SpecGenerator(module)
    update_spec, err = sg.generate_spec(obj=deepcopy(current_spec))

    # BUGFIX: fail on a spec-generation error BEFORE touching the generated
    # spec. Previously the attribute mutations below ran first, so a failed
    # generation raised an AttributeError that masked the real error message.
    if err:
        result["error"] = err
        module.fail_json(msg="Failed generating templates update spec", **result)

    # Normalize fields on the copied spec before sending it back
    # (mirrors existing behavior; presumably required by the update API —
    # confirm against the service contract).
    if not update_spec.created_by.user_type:
        update_spec.created_by.user_type = "LOCAL"
    if not update_spec.template_version_spec.created_by.user_type:
        update_spec.template_version_spec.created_by.user_type = "LOCAL"
    if update_spec.template_version_spec.ext_id:
        update_spec.template_version_spec.ext_id = None
    if not update_spec.updated_by.user_type:
        update_spec.updated_by.user_type = "LOCAL"

    if module.check_mode:
        result["response"] = strip_internal_attributes(update_spec.to_dict())
        return

    resp = None
    try:
        resp = templates.update_template_by_id(extId=ext_id, body=update_spec)
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while updating template",
        )

    task_ext_id = resp.data.ext_id
    result["task_ext_id"] = task_ext_id
    result["response"] = strip_internal_attributes(resp.data.to_dict())

    if task_ext_id and module.params.get("wait"):
        wait_for_completion(module, task_ext_id)
        resp = get_template(module, templates, ext_id)
        result["response"] = strip_internal_attributes(resp.to_dict())

    result["changed"] = True


def delete_template(module, result):
    """Delete a template by ext_id, using its etag for optimistic locking."""
    templates = get_templates_api_instance(module)
    ext_id = module.params.get("ext_id")
    result["ext_id"] = ext_id

    current_spec = get_template(module, templates, ext_id)

    etag = get_etag(data=current_spec)
    if not etag:
        return module.fail_json("Unable to fetch etag for deleting template", **result)

    kwargs = {"if_match": etag}

    try:
        resp = templates.delete_template_by_id(extId=ext_id, **kwargs)
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while deleting template",
        )

    task_ext_id = resp.data.ext_id
    result["task_ext_id"] = task_ext_id
    result["response"] = strip_internal_attributes(resp.data.to_dict())

    if task_ext_id and module.params.get("wait"):
        task = wait_for_completion(module, task_ext_id)
        result["response"] = strip_internal_attributes(task.to_dict())

    result["changed"] = True


def run_module():
    """Module entry point: create/update on state=present, delete otherwise."""
    module = BaseModule(
        argument_spec=get_module_spec(),
        supports_check_mode=True,
        required_if=[
            # state=present needs either template_name (create) or ext_id (update).
            ("state", "present", ("template_name", "ext_id"), True),
        ],
    )
    if SDK_IMP_ERROR:
        module.fail_json(
            msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR
        )

    remove_param_with_none_value(module.params)
    result = {
        "changed": False,
        "error": None,
        "response": None,
        "ext_id": None,
    }
    state = module.params["state"]
    if state == "present":
        # ext_id present => update an existing template, else create a new one.
        if module.params.get("ext_id"):
            update_template(module, result)
        else:
            create_template(module, result)
    else:
        delete_template(module, result)
    module.exit_json(**result)


def main():
    run_module()


if __name__ == "__main__":
    main()
+ required: false + type: bool + default: True +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Set the Active Version + nutanix.ncp.ntnx_templates_version_v2: + template_ext_id: "f3ae7dfe-9f7f-4085-8619-5d93ad9c4e64" + version_id: "5fbfc4d6-7736-49e4-97e7-eb55b061f16f" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: Delete Template Version + nutanix.ncp.ntnx_templates_version_v2: + state: absent + template_ext_id: "f3ae7dfe-9f7f-4085-8619-5d93ad9c4e64" + version_id: "5fbfc4d6-7736-49e4-97e7-eb55b061f16f" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false +""" + +RETURN = r""" +response: + description: The response from the Nutanix API. + type: dict + returned: always + sample: { + "ext_id": "task-789", + "status": "Succeeded" + } +task_ext_id: + description: The unique identifier of the task. + type: str + returned: always + sample: "task-789" +changed: + description: Indicates whether the state of the template version was changed. + type: bool + returned: always + sample: true +error: + description: The error message, if any. + type: str + returned: on failure + sample: "Failed to publish template version" +template_ext_id: + description: The unique identifier of the template. + type: str + returned: always + sample: "f3ae7dfe-9f7f-4085-8619-5d93ad9c4e64" +ext_id: + description: The unique identifier of the template version. 
import traceback  # noqa: E402
import warnings  # noqa: E402

from ansible.module_utils.basic import missing_required_lib  # noqa: E402

from ..module_utils.base_module import BaseModule  # noqa: E402
from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
from ..module_utils.v4.prism.tasks import wait_for_completion  # noqa: E402
from ..module_utils.v4.spec_generator import SpecGenerator  # noqa: E402
from ..module_utils.v4.utils import (  # noqa: E402
    raise_api_exception,
    strip_internal_attributes,
)
from ..module_utils.v4.vmm.api_client import (  # noqa: E402
    get_etag,
    get_templates_api_instance,
)
from ..module_utils.v4.vmm.helpers import get_template  # noqa: E402

SDK_IMP_ERROR = None
try:
    import ntnx_vmm_py_client as vmm_sdk  # noqa: E402
except ImportError:

    from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk  # noqa: E402

    SDK_IMP_ERROR = traceback.format_exc()

# Suppress the InsecureRequestWarning
warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made")


def get_module_spec():
    """Argument spec: the template and the version to act on, both required."""
    return dict(
        template_ext_id=dict(type="str", required=True),
        version_id=dict(type="str", required=True),
    )


def publish_template(module, result):
    """Publish the given template version and record the task outcome."""
    templates_api = get_templates_api_instance(module)
    template_ext_id = module.params.get("template_ext_id")
    result["template_ext_id"] = template_ext_id

    template = get_template(module, templates_api, template_ext_id)

    etag = get_etag(data=template)
    if not etag:
        return module.fail_json(
            "Unable to fetch etag for publish template version", **result
        )

    publish_spec, err = SpecGenerator(module).generate_spec(
        obj=vmm_sdk.TemplatePublishSpec()
    )
    if err:
        result["error"] = err
        module.fail_json(
            msg="Failed generating publish template version spec", **result
        )

    # Check mode: only report the spec that would have been sent.
    if module.check_mode:
        result["response"] = strip_internal_attributes(publish_spec.to_dict())
        return

    try:
        resp = templates_api.publish_template(
            extId=template_ext_id, body=publish_spec, if_match=etag
        )
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while publishing template version",
        )

    task_ext_id = resp.data.ext_id
    result["task_ext_id"] = task_ext_id
    result["response"] = strip_internal_attributes(resp.data.to_dict())

    if task_ext_id and module.params.get("wait"):
        wait_for_completion(module, task_ext_id)
        refreshed = get_template(module, templates_api, template_ext_id)
        result["response"] = strip_internal_attributes(refreshed.to_dict())
    result["changed"] = True


def delete_template_version(module, result):
    """Delete the given template version and record the task outcome."""
    templates_api = get_templates_api_instance(module)
    template_ext_id = module.params.get("template_ext_id")
    version_id = module.params.get("version_id")
    result["template_ext_id"] = template_ext_id
    result["ext_id"] = version_id

    template = get_template(module, templates_api, template_ext_id)

    etag = get_etag(data=template)
    if not etag:
        return module.fail_json(
            "Unable to fetch etag for deleting template version", **result
        )

    try:
        resp = templates_api.delete_template_version_by_id(
            templateExtId=template_ext_id, extId=version_id, if_match=etag
        )
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while deleting template version",
        )

    task_ext_id = resp.data.ext_id
    result["task_ext_id"] = task_ext_id
    result["response"] = strip_internal_attributes(resp.data.to_dict())

    if task_ext_id and module.params.get("wait"):
        task = wait_for_completion(module, task_ext_id)
        result["response"] = strip_internal_attributes(task.to_dict())

    result["changed"] = True


def run_module():
    """Module entry point: publish on state=present, delete otherwise."""
    module = BaseModule(
        argument_spec=get_module_spec(),
        supports_check_mode=True,
    )
    if SDK_IMP_ERROR:
        module.fail_json(
            msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR
        )

    remove_param_with_none_value(module.params)
    result = {
        "changed": False,
        "error": None,
        "response": None,
        "ext_id": None,
    }
    if module.params["state"] == "present":
        publish_template(module, result)
    else:
        delete_template_version(module, result)
    module.exit_json(**result)


def main():
    run_module()


if __name__ == "__main__":
    main()
+ nutanix.ncp.ntnx_templates_versions_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "{{ version_ext_id }}" + template_ext_id: "{{ template_ext_id }}" +""" + +RETURN = r""" +response: + description: + - The response from the Nutanix PC template version v4 API. + - Type can be list or dict; a response could be a list of templates version or a template version. + By default, the response will be a list. and dict if the ext_id is supplied in order to retrieve the template version specifically + type: dict + returned: always + sample: + { + "create_time": "2024-05-16T04:41:24.102680+00:00", + "created_by": { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": null, + "created_time": null, + "display_name": null, + "email_id": null, + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": null, + "idp_id": null, + "is_force_reset_password_enabled": null, + "last_login_time": null, + "last_name": null, + "last_updated_time": null, + "links": null, + "locale": null, + "middle_initial": null, + "password": null, + "region": null, + "status": null, + "tenant_id": null, + "user_type": null, + "username": "admin" + }, + "ext_id": "3a6a4105-0e90-4114-a9b6-53bb7203c357", + "is_active_version": true, + "is_gc_override_enabled": true, + "links": null, + "tenant_id": null, + "version_description": "ansible_template_version_description New", + "version_name": "SNfCOFKPcllbansible-agversion2", + "version_source": null, + "version_source_discriminator": null, + "vm_spec": { + "apc_config": null, + "availability_zone": null, + "bios_uuid": null, + "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", + "NETWORK" + ] + }, + "categories": [ + { + "ext_id": "eb8b4155-b3d1-5772-8d2f-d566d43d8e46" + } + ], + "cd_roms": null, + "cluster": { + "ext_id": "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + }, + "create_time": null, + 
"description": null, + "disks": null, + "enabled_cpu_features": null, + "ext_id": null, + "generation_uuid": null, + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": null, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + "memory_size_bytes": 4294967296, + "name": "new_vm_name", + "nics": null, + "num_cores_per_socket": 4, + "num_numa_nodes": 0, + "num_sockets": 4, + "num_threads_per_core": 4, + "ownership_info": null, + "power_state": "ON", + "protection_policy_state": null, + "protection_type": null, + "serial_ports": null, + "source": null, + "storage_config": null, + "tenant_id": null, + "update_time": null, + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the template version when is fetched. 
import warnings  # noqa: E402

from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
from ..module_utils.v4.base_info_module import BaseInfoModule  # noqa: E402
from ..module_utils.v4.spec_generator import SpecGenerator  # noqa: E402
from ..module_utils.v4.utils import (  # noqa: E402
    raise_api_exception,
    strip_internal_attributes,
)
from ..module_utils.v4.vmm.api_client import get_templates_api_instance  # noqa: E402

# Suppress the InsecureRequestWarning
warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made")


def get_module_spec():
    """Argument spec: required template id, optional version id."""
    return dict(
        template_ext_id=dict(type="str", required=True),
        ext_id=dict(type="str"),
    )


def get_template_version(module, result):
    """Fetch a single template version by its external ID."""
    templates_api = get_templates_api_instance(module)
    template_ext_id = module.params.get("template_ext_id")
    version_ext_id = module.params.get("ext_id")

    try:
        resp = templates_api.get_template_version_by_id(
            template_ext_id, version_ext_id
        )
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while fetching template version info",
        )

    result["ext_id"] = version_ext_id
    result["response"] = strip_internal_attributes(resp.to_dict()).get("data")


def get_template_versions(module, result):
    """List versions of the template, honoring the info-module query options."""
    templates_api = get_templates_api_instance(module)
    template_ext_id = module.params.get("template_ext_id")

    query_kwargs, err = SpecGenerator(module).get_info_spec(attr=module.params)
    if err:
        result["error"] = err
        module.fail_json(msg="Failed generating template versions info Spec", **result)

    try:
        resp = templates_api.list_template_versions(template_ext_id, **query_kwargs)
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while fetching template versions info",
        )

    # An empty listing comes back without data; normalize to an empty list.
    if not getattr(resp, "data", None):
        result["response"] = []
        return
    result["response"] = strip_internal_attributes(resp.to_dict()).get("data")


def run_module():
    """Module entry point: fetch one version when ext_id is given, else list."""
    module = BaseInfoModule(
        argument_spec=get_module_spec(),
        supports_check_mode=False,
        mutually_exclusive=[
            ("ext_id", "filter"),
        ],
    )
    remove_param_with_none_value(module.params)
    result = {"changed": False, "error": None, "response": None}
    if module.params.get("ext_id"):
        get_template_version(module, result)
    else:
        get_template_versions(module, result)

    module.exit_json(**result)


def main():
    run_module()


if __name__ == "__main__":
    main()
ntnx_user_groups_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - group_name: "{{ name }}" - register: result - - - name: List user groups using length, offset, sort order and sort attribute - ntnx_user_groups_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 2 - offset: 1 - sort_order: "DESCENDING" - sort_attribute: "group_name" - register: result - - - name: test getting particular user group using uuid - ntnx_user_groups_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - usergroup_uuid: '{{ uuid }}' - register: result +- name: List user groups using name filter criteria + ntnx_user_groups_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + group_name: "{{ name }}" + register: result + +- name: List user groups using length, offset, sort order and sort attribute + ntnx_user_groups_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 2 + offset: 1 + sort_order: "DESCENDING" + sort_attribute: "group_name" + register: result + +- name: test getting particular user group using uuid + ntnx_user_groups_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + usergroup_uuid: '{{ uuid }}' + register: result """ RETURN = r""" api_version: @@ -167,9 +167,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.user_groups import UserGroup # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from 
..module_utils.v3.prism.user_groups import UserGroup # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_user_groups_info_v2.py b/plugins/modules/ntnx_user_groups_info_v2.py new file mode 100644 index 000000000..fc8b27d32 --- /dev/null +++ b/plugins/modules/ntnx_user_groups_info_v2.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_user_groups_info_v2 +short_description: Fetch user groups +version_added: 2.0.0 +description: + - This module is used to get user_group information. + - Fetch a specific user group using ext_id or multiple user groups + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - user_group external ID + - If used, specific user group information will be fetched. Else, all user groups will be fetched. 
+ type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all user groups + nutanix.ncp.ntnx_user_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: user_groups + +- name: List user_groups using user_group uuid criteria + nutanix.ncp.ntnx_user_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "119e6031-93e3-40b8-bd2e-2537522d629f" + register: result + +- name: List user_groups using filter criteria + nutanix.ncp.ntnx_user_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "name eq 'test_user_group_name'" + register: result +""" + +RETURN = r""" +response: + description: + - Response for fetching user group information. + - User group info if ext_id is provided. + - List of user groups if ext_id is not provided. 
import warnings  # noqa: E402

from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
from ..module_utils.v4.base_info_module import BaseInfoModule  # noqa: E402
from ..module_utils.v4.iam.api_client import get_user_group_api_instance  # noqa: E402
from ..module_utils.v4.iam.helpers import get_user_group  # noqa: E402
from ..module_utils.v4.spec_generator import SpecGenerator  # noqa: E402
from ..module_utils.v4.utils import (  # noqa: E402
    raise_api_exception,
    strip_internal_attributes,
)

# Suppress the InsecureRequestWarning
warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made")


def get_module_spec():
    """Argument spec: optional user-group external ID."""
    return dict(
        ext_id=dict(type="str"),
    )


def get_user_group_by_ext_id(module, user_groups, result):
    """Fetch a single user group by its external ID."""
    ext_id = module.params.get("ext_id")
    group = get_user_group(module, user_groups, ext_id)
    result["ext_id"] = ext_id
    result["response"] = strip_internal_attributes(group.to_dict())


def get_user_groups(module, user_groups, result):
    """List user groups, honoring the standard info-module query options."""
    query_kwargs, err = SpecGenerator(module).get_info_spec(attr=module.params)
    if err:
        result["error"] = err
        module.fail_json(msg="Failed generating user groups info Spec", **result)

    try:
        resp = user_groups.list_user_groups(**query_kwargs)
    except Exception as e:
        raise_api_exception(
            module=module,
            exception=e,
            msg="Api Exception raised while fetching user groups info",
        )

    # An empty listing comes back with data=None; normalize to [].
    result["response"] = strip_internal_attributes(resp.to_dict()).get("data") or []


def run_module():
    """Module entry point: fetch one group when ext_id is given, else list."""
    module = BaseInfoModule(
        argument_spec=get_module_spec(),
        supports_check_mode=False,
        mutually_exclusive=[
            ("ext_id", "filter"),
        ],
    )
    remove_param_with_none_value(module.params)
    result = {"changed": False, "error": None, "response": None}
    user_groups = get_user_group_api_instance(module)
    if module.params.get("ext_id"):
        get_user_group_by_ext_id(module, user_groups, result)
    else:
        get_user_groups(module, user_groups, result)

    module.exit_json(**result)


def main():
    run_module()


if __name__ == "__main__":
    main()
state: + description: + - State of the user group, whether to create or delete. + - When C(state) is present, it will create user group. + - When C(state) is absent, it will delete user group. + type: str + choices: ['present', 'absent'] + ext_id: + description: + - User Group External ID. + - Required for deleting a user group. + type: str + name: + description: + - Common Name of the User Group. + - Mandatory for SAML User Group. + type: str + distinguished_name: + description: + - Identifier for the User Group in the form of a distinguished name. + - Mandatory for LDAP User Group. + type: str + idp_id: + description: + - Identifier of the IDP for the User Group. + type: str + group_type: + description: + - Type of the User Group. + type: str + choices: ['SAML', 'LDAP'] + wait: + description: + - Wait for the task to complete. + type: bool + required: false + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + +EXAMPLES = r""" +- name: Create LDAP user group + nutanix.ncp.ntnx_user_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + group_type: "LDAP" + distinguished_name: "test_distinguished_name" + idp_id: "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a" + +- name: Create SAML user group + nutanix.ncp.ntnx_user_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + group_type: "SAML" + idp_id: "6863c60b-ae9d-5c32-b8c1-2d45b9ba343a" + name: group_name_test + +- name: Delete user group + nutanix.ncp.ntnx_user_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "ext_id" +""" + +RETURN = r""" +response: + 
description: + - Response of user group operation. + - User group details if C(wait) is True. + - Task details if C(wait) is False. + type: dict + returned: always + sample: + { + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-06-26T22:56:55.763219-07:00", + "distinguished_name": null, + "ext_id": "b94694bd-cd7f-5ba1-88f7-ca96306da024", + "group_type": "SAML", + "idp_id": "40fe7aeb-f420-5aee-ba42-cfc2369bc1ec", + "last_updated_time": "2024-06-26T22:56:55.763219-07:00", + "links": null, + "name": "group_name2_wtkhkeqsivby", + "tenant_id": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: The created user group external ID + returned: always + type: str + sample: "b94694bd-cd7f-5ba1-88f7-ca96306da024" + +failed: + description: This indicates whether the task failed + returned: always + type: bool + sample: false + +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import ( # noqa: E402 + get_etag, + get_user_group_api_instance, +) +from ..module_utils.v4.iam.helpers import get_user_group # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = 
traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + distinguished_name=dict(type="str"), + idp_id=dict(type="str"), + group_type=dict(type="str", choices=["SAML", "LDAP"]), + ) + return module_args + + +def create_user_group(module, user_groups, result): + sg = SpecGenerator(module) + default_spec = iam_sdk.UserGroup() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create user groups spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = user_groups.create_user_group(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating user group", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def delete_user_group(module, user_groups, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_user_group(module, user_groups, ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "unable to fetch etag for deleting user groups", **result + ) + + kwargs = {"if_match": etag} + + try: + resp = user_groups.delete_user_group_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting user group", + ) + + result["changed"] = True + if resp is None: + result["msg"] = "User group with ext_id: {} deleted successfully".format(ext_id) + else: + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseModule( + 
argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "distinguished_name", "ext_id"), True), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + user_groups = get_user_group_api_instance(module) + if state == "present": + create_user_group(module, user_groups, result) + else: + delete_user_group(module, user_groups, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_users.py b/plugins/modules/ntnx_users.py index 2c06d8efd..0d5211b61 100644 --- a/plugins/modules/ntnx_users.py +++ b/plugins/modules/ntnx_users.py @@ -88,9 +88,9 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" project: uuid: "{{project_uuid}}" categories: @@ -105,7 +105,7 @@ nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - validate_certs: False + validate_certs: false identity_provider_uuid: "{{identity_provider_uuid}}" username: "{{username}}" register: result @@ -206,8 +206,8 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.users import User # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.users import User # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_users_info.py 
b/plugins/modules/ntnx_users_info.py index 160685af4..5013d4e4e 100644 --- a/plugins/modules/ntnx_users_info.py +++ b/plugins/modules/ntnx_users_info.py @@ -33,36 +33,36 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List users using name filter criteria - ntnx_users_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - username: "{{ name }}" - register: result - - - name: List users using length, offset, sort order and sort attribute - ntnx_users_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 2 - offset: 1 - sort_order: "DESCENDING" - sort_attribute: "username" - register: result - - - name: test getting particular user using uuid - ntnx_users_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - user_uuid: '{{ uuid }}' - register: result +- name: List users using name filter criteria + ntnx_users_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + username: "{{ name }}" + register: result + +- name: List users using length, offset, sort order and sort attribute + ntnx_users_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 2 + offset: 1 + sort_order: "DESCENDING" + sort_attribute: "username" + register: result + +- name: test getting particular user using uuid + ntnx_users_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + user_uuid: '{{ uuid }}' + register: result """ RETURN = r""" api_version: @@ -152,9 +152,9 @@ """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.users import 
User # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.users import User # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_users_info_v2.py b/plugins/modules/ntnx_users_info_v2.py new file mode 100644 index 000000000..1cfa8ba83 --- /dev/null +++ b/plugins/modules/ntnx_users_info_v2.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_users_info_v2 +short_description: Get users info +version_added: 2.0.0 +description: + - Get users info using user external ID or list multiple users + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - External ID of the user + - It can be used to get specific user info + required: false + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" +- name: List all users + nutanix.ncp.ntnx_users_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: users + +- name: List users using user ext_id criteria + nutanix.ncp.ntnx_users_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "04e7b47e-a861-5b57-a494-10ca57e6ec4a" + register: result + +- name: List users using filter + nutanix.ncp.ntnx_users_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + 
validate_certs: false + filter: "username eq 'test_user'" +""" +RETURN = r""" +response: + description: + - Response for fetching users info + - Returns users info using users external ID or list multiple users + type: dict + returned: always + sample: + { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-05-28T09:03:05.778320+00:00", + "display_name": "admin", + "email_id": "", + "ext_id": "00000000-0000-0000-0000-000000000000", + "first_name": "admin", + "idp_id": "37f30135-455b-5ebd-995f-b47e817a59f2", + "is_force_reset_password_enabled": false, + "last_login_time": "2024-06-25T07:33:22.803659+00:00", + "last_name": "", + "last_updated_by": "00000000-0000-0000-0000-000000000000", + "last_updated_time": "2024-05-28T09:28:51.567099+00:00", + "links": null, + "locale": "en-US", + "middle_initial": "", + "password": null, + "region": "en-US", + "status": "ACTIVE", + "tenant_id": null, + "user_type": "LOCAL", + "username": "admin" + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false + +ext_id: + description: External ID of the user + returned: always + type: str + sample: 04e7b47e-a861-5b57-a494-10ca57e6ec4a + +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.iam.api_client import get_user_api_instance # noqa: E402 +from ..module_utils.v4.iam.helpers import get_user # noqa: E402 +from 
..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_user_by_ext_id(module, users, result): + ext_id = module.params.get("ext_id") + resp = get_user(module, users, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_users(module, users, result): + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + if err: + result["error"] = err + module.fail_json(msg="Failed generating users info Spec", **result) + + try: + resp = users.list_users(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching users info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + users = get_user_api_instance(module) + if module.params.get("ext_id"): + get_user_by_ext_id(module, users, result) + else: + get_users(module, users, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_users_v2.py b/plugins/modules/ntnx_users_v2.py new file mode 100644 index 000000000..9b9c850fd --- /dev/null +++ b/plugins/modules/ntnx_users_v2.py @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_users_v2 +short_description: Module to create and update users from Nutanix PC. +version_added: "2.0.0" +description: + - This module allows you to create and update users. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - State of the user. Whether to create, update, or delete. + - If C(state) is C(present) and C(ext_id) is not provided, create a new user. + - If C(state) is C(present) and C(ext_id) is provided, update the user. + type: str + choices: ['present'] + ext_id: + description: + - External ID of the User. + - Required for updating or deleting a User. + required: false + type: str + username: + description: + - Identifier for the User in the form an email address. + - Required for creating a User. + required: false + type: str + user_type: + description: + - Type of the User. + required: false + type: str + choices: ['LOCAL', 'SAML', 'LDAP', 'EXTERNAL'] + display_name: + description: + - Display name for the User. + required: false + type: str + first_name: + description: + - First name for the User. + required: false + type: str + middle_initial: + description: + - Middle name for the User. + required: false + type: str + last_name: + description: + - Last name for the User. + required: false + type: str + email_id: + description: + - Email Id for the User. + required: false + type: str + locale: + description: + - Default locale for the User. + required: false + type: str + region: + description: + - Default Region for the User. + required: false + type: str + password: + description: + - Password for the User. + - Use this for local user only. + required: false + type: str + idp_id: + description: + - Identifier of the IDP for the User. + - Mandatory for creating LDAP and SAML users. 
+ required: false + type: str + is_force_reset_password_enabled: + description: + - Flag to force the User to reset password. + - Supported for local users only. + required: false + type: bool + default: false + additional_attributes: + description: + - Any additional attribute for the User. + required: false + type: list + elements: dict + suboptions: + name: + description: The key of this key-value pair + type: str + value: + description: + - The value associated with the key for this key-value pair + - Supported for creating users only. + type: str + status: + description: + - Status of the User. + - Supported for creating users only. + required: false + type: str + choices: ['ACTIVE', 'INACTIVE'] + wait: + description: + - Wait for the task to complete. + type: bool + required: false + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - George Ghawali (@george-ghawali) +""" + + +EXAMPLES = r""" +- name: create local user + nutanix.ncp.ntnx_users_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + idp_id: "40fe7aeb-f420-5aee-ba42-cfc2369bc1ec" + user_type: LOCAL + username: "ssptest111@qa.nucalm.io" + first_name: firstName + last_name: lastName + password: test.Password.123 + is_force_reset_password_enabled: true + register: result + +- name: update local user + nutanix.ncp.ntnx_users_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "27892065-1d1b-5d66-ab17-a26038088b17" + first_name: "firstNameUpdated" + last_name: "lastNameUpdated" + register: result + +- name: Create SAML user + nutanix.ncp.ntnx_users_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + user_type: 
SAML + username: "user_test" + idp_id: "40fe7aeb-f420-5aee-ba42-cfc2369bc1ec" + +- name: Create LDAP user + nutanix.ncp.ntnx_users_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + user_type: LDAP + username: "user_test" + idp_id: "40fe7aeb-f420-5aee-ba42-cfc2369bc1ec" +""" + +RETURN = r""" +response: + description: + - Response of users operations. + - Users details if C(wait) is True + - Task details if C(wait) is False + type: dict + returned: always + sample: + { + "additional_attributes": null, + "buckets_access_keys": null, + "created_by": "00000000-0000-0000-0000-000000000000", + "created_time": "2024-06-25T01:31:51.963601-07:00", + "display_name": "", + "email_id": "", + "ext_id": "27892065-1d1b-5d66-ab17-a26038088b17", + "first_name": "firstName", + "idp_id": "37f30135-455b-5ebd-995f-b47e817a59f2", + "is_force_reset_password_enabled": true, + "last_login_time": "2024-06-25T01:31:51.863120-07:00", + "last_name": "lastName", + "last_updated_by": "00000000-0000-0000-0000-000000000000", + "last_updated_time": "2024-06-25T01:31:51.963601-07:00", + "links": null, + "locale": "en-US", + "middle_initial": "", + "password": null, + "region": "en-US", + "status": "ACTIVE", + "tenant_id": null, + "user_type": "LOCAL", + "username": "ssptest111@qa.nucalm.io" + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: The created user's External ID + returned: always + type: str + sample: "27892065-1d1b-5d66-ab17-a26038088b17" + +failed: + description: This indicates whether the task failed + returned: always + type: bool + sample: false + +msg: + description: This field typically holds a message that is 
displayed to the user in case of delete + returned: always + type: bool + sample: "User with ext_id: 27892065-1d1b-5d66-ab17-a26038088b17 deleted successfully" +""" +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.iam.api_client import get_user_api_instance # noqa: E402 +from ..module_utils.v4.iam.helpers import get_user # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, + strip_users_empty_attributes, +) + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_iam_py_client as iam_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as iam_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + kvp_spec = dict( + name=dict(type="str"), + value=dict(type="str"), + ) + + module_args = dict( + state=dict(type="str", choices=["present"], default="present"), + ext_id=dict(type="str"), + username=dict(type="str"), + user_type=dict(type="str", choices=["LOCAL", "SAML", "LDAP", "EXTERNAL"]), + display_name=dict(type="str"), + first_name=dict(type="str"), + middle_initial=dict(type="str"), + last_name=dict(type="str"), + email_id=dict(type="str"), + locale=dict(type="str"), + region=dict(type="str"), + password=dict(type="str", no_log=True), + idp_id=dict(type="str"), + is_force_reset_password_enabled=dict(type="bool", default=False), + additional_attributes=dict( + type="list", elements="dict", options=kvp_spec, obj=iam_sdk.KVPair + ), + status=dict(type="str", 
choices=["ACTIVE", "INACTIVE"]), + ) + return module_args + + +def create_user(module, users, result): + sg = SpecGenerator(module) + default_spec = iam_sdk.User() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create users spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = users.create_user(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating user", + ) + + result["ext_id"] = resp.data.ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + result["changed"] = True + + +def check_users_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + return True + + +def update_user(module, users, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_user(module, users, ext_id=ext_id) + + strip_users_empty_attributes(current_spec) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating users update spec", **result) + + # check for idempotency + if not module.params.get("password"): + if check_users_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = users.update_user_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating user", + ) + + resp = get_user(module, users, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def 
run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + mutually_exclusive=[ + ("ext_id", "username"), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_iam_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + users = get_user_api_instance(module) + if state == "present": + if module.params.get("ext_id"): + update_user(module, users, result) + else: + create_user(module, users, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vm_recovery_point_info_v2.py b/plugins/modules/ntnx_vm_recovery_point_info_v2.py new file mode 100644 index 000000000..859903694 --- /dev/null +++ b/plugins/modules/ntnx_vm_recovery_point_info_v2.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vm_recovery_point_info_v2 +short_description: Get VM recovery point info +version_added: 2.0.0 +description: + - Fetch specific VM recovery point info which is part of top level recovery point + - This module uses PC v4 APIs based SDKs +options: + recovery_point_ext_id: + description: + - Top level Recovery point external ID + type: str + required: true + vm_recovery_point_ext_id: + description: + - VM recovery point external ID + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Fetch specific VM recovery point 
info which is part of top level recovery point + nutanix.ncp.ntnx_vm_recovery_point_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + recovery_point_ext_id: "1ca2963d-77b6-453a-ae23-2c19e7a954a3" + vm_recovery_point_ext_id: "522670d7-e92d-45c5-9139-76ccff6813c2" + register: result +""" +RETURN = r""" +response: + description: + - Response for fetching VM recovery point info + returned: always + type: dict + sample: + { + "application_consistent_properties": null, + "consistency_group_ext_id": null, + "disk_recovery_points": [ + { + "disk_ext_id": "839feff9-bac0-4a70-9523-82ea9e431517", + "disk_recovery_point_ext_id": "21d467f0-ccef-4733-91cc-f04db58a92eb" + }, + { + "disk_ext_id": null, + "disk_recovery_point_ext_id": "91aedb3c-39c9-4750-b553-6e8360d7c1ff" + } + ], + "ext_id": "b387359d-fa5c-4d58-9eb2-3af1a4976319", + "links": null, + "location_agnostic_id": "51264897-07a8-4292-831b-ae28a37135e5", + "tenant_id": null, + "vm_categories": null, + "vm_ext_id": "2e572ceb-d955-4ed7-956f-1c90acf5b5ad" + } +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: false + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + type: str + returned: always + sample: null + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.data_protection.api_client import ( # noqa: E402 + get_recovery_point_api_instance, +) +from ..module_utils.v4.data_protection.helpers import ( # noqa: E402 + get_vm_recovery_point, +) +from ..module_utils.v4.utils import strip_internal_attributes # noqa: 
E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + recovery_point_ext_id=dict(type="str", required=True), + vm_recovery_point_ext_id=dict(type="str", required=True), + ) + + return module_args + + +def get_vm_recovery_point_using_vm_rp_ext_id(module, recovery_points, result): + recovery_point_ext_id = module.params.get("recovery_point_ext_id") + vm_recovery_point_ext_id = module.params.get("vm_recovery_point_ext_id") + resp = get_vm_recovery_point( + module, recovery_points, recovery_point_ext_id, vm_recovery_point_ext_id + ) + result["ext_id"] = vm_recovery_point_ext_id + result["recovery_point_ext_id"] = recovery_point_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + recovery_points = get_recovery_point_api_instance(module) + get_vm_recovery_point_using_vm_rp_ext_id(module, recovery_points, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vm_revert_v2.py b/plugins/modules/ntnx_vm_revert_v2.py new file mode 100644 index 000000000..b88bf10fe --- /dev/null +++ b/plugins/modules/ntnx_vm_revert_v2.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vm_revert_v2 +short_description: Revert VM from recovery point +version_added: 2.0.0 +description: + - Revert VM from recovery point using VM external ID + - This module uses PC v4 
APIs based SDKs +options: + ext_id: + description: + - External ID of the VM + type: str + vm_recovery_point_ext_id: + description: + - VM recovery point external ID + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Abhinav Bansal (@abhinavbansal29) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Revert a VM to a Recovery Point + nutanix.ncp.ntnx_vm_revert_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "ac5aff0c-6c68-4948-9088-b903e2be0ce7" + vm_recovery_point_ext_id: "522670d7-e92d-45c5-9139-76ccff6813c2" + register: result + ignore_errors: true +""" +RETURN = r""" +response: + description: + - Response for reverting VM from recovery point + - VM details if C(wait) is true. + - Task details if C(wait) is false. + returned: always + type: dict + sample: + { + "cluster_ext_ids": [ + "00061fa4-ef93-7dd8-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-09-04T06:26:51.524581+00:00", + "completion_details": [ + { + "name": "VM Recovery Point UUID", + "value": "055d8419-6e9f-4552-8977-6dc92d14702e" + } + ], + "created_time": "2024-09-04T06:26:47.167906+00:00", + "entities_affected": [ + { + "ext_id": "ac5aff0c-6c68-4948-9088-b903e2be0ce7", + "rel": "vmm:ahv:config:vm" + }, + { + "ext_id": "055d8419-6e9f-4552-8977-6dc92d14702e", + "rel": "dataprotection:config:vm-recovery-point" + }, + { + "ext_id": "0f34a2a7-6068-48ba-859d-1ced14d7f5da", + "rel": "vmm:ahv:config:vm:disk" + }, + { + "ext_id": "160f1e37-4d35-45de-b280-393a91803dfd", + "rel": "vmm:ahv:config:vm:disk" + }, + { + "ext_id": "7439fc19-1733-42c8-aa86-01b08fe84a06", + "rel": "vmm:ahv:config:vm:cdrom" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:0e040d14-5dcf-5302-8b48-d3c6cf115cd1", + "is_cancelable": false, + "last_updated_time": "2024-09-04T06:26:51.524581+00:00", + "legacy_error_message": 
null, + "operation": "RevertVm", + "operation_description": "Revert VM", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-09-04T06:26:47.185754+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": null, + "warnings": null + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: when an error occurs + type: str + sample: "Failed to get etag for VM" + +failed: + description: This field typically holds information about if the task have failed + returned: always + type: bool + sample: false + +task_ext_id: + description: The external ID of the task + returned: always + type: str + sample: "ZXJnb24=:0e040d14-5dcf-5302-8b48-d3c6cf115cd1" + +ext_id: + description: The external ID of the VM + returned: always + type: str + sample: "ac5aff0c-6c68-4948-9088-b903e2be0ce7" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: 
E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), # external id of the VM + vm_recovery_point_ext_id=dict(type="str", required=True), + ) + + return module_args + + +def revert_vm_from_recovery_point(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + vms = get_vm_api_instance(module) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AhvConfigRevertParams() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating spec for revert VM", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + vm = get_vm(module, vms, ext_id) + etag = get_etag(vm) + if not etag: + module.fail_json(msg="Failed to get etag for VM", **result) + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.revert_vm(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while reverting vm", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + revert_vm_from_recovery_point(module, 
result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 7d1d329d3..d92d67522 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -136,322 +136,323 @@ """ EXAMPLES = r""" - - name: VM with CentOS-7-cloud-init image - ntnx_vms: - state: present - name: VM with CentOS-7-cloud-init image - timezone: "UTC" - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster: - name: "{{ cluster_name }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - - - name: VM with Cluster, Network, Universal time zone, one Disk - ntnx_vms: - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - name: "VM with Cluster Network and Disk" - timezone: "Universal" - cluster: - name: "{{ cluster_name }}" - networks: - - is_connected: True - subnet: - name: "{{ network_name }}" - disks: - - type: "DISK" - size_gb: 10 - bus: "PCI" - - - name: VM with Cluster, different CDROMs - ntnx_vms: - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - name: "VM with multiple CDROMs" - cluster: - name: "{{ cluster_name }}" - disks: - - type: "CDROM" - bus: "SATA" - empty_cdrom: True - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - cores_per_vcpu: 1 - - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type - ntnx_vms: - state: present - name: VM with UEFI boot type - timezone: GMT - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - cluster: - name: "{{ cluster_name 
}}" - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 1 - bus: SCSI - - type: DISK - size_gb: 2 - bus: PCI - storage_container: - name: "{{ storage_container_name }}" - - type: DISK - size_gb: 3 - bus: SATA - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 2 - cores_per_vcpu: 1 - memory_gb: 1 - - - name: VM with managed and unmanaged network - ntnx_vms: - state: present - name: VM_NIC - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - timezone: UTC - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network.dhcp.uuid }}" - - is_connected: true - subnet: - uuid: "{{ network.static.uuid }}" - disks: - - type: DISK - size_gb: 1 - bus: SCSI - - type: DISK - size_gb: 3 - bus: PCI - - type: CDROM - bus: SATA - empty_cdrom: True - - type: CDROM - bus: IDE - empty_cdrom: True - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 2 - cores_per_vcpu: 2 - memory_gb: 2 - - - name: Delete VM - ntnx_vms: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - vm_uuid: '{{ vm_uuid }}' - - - name: update vm by values for memory, vcpus and cores_per_vcpu, timezone - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - timezone: UTC - - - name: Update VM by adding all type of disks - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 1 - bus: PCI - - type: "DISK" - size_gb: 1 - bus: "SATA" - - type: "DISK" - size_gb: 1 - bus: "SCSI" - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container.uuid }}" - - type: "DISK" - bus: "IDE" - 
size_gb: 1 - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - - - name: Update VM by increasing the size of the disks - ntnx_vms: - vm_uuid: "{{ result.vm_uuid }}" - disks: - - type: "DISK" - uuid: "{{ result.response.spec.resources.disk_list[0].uuid }}" - size_gb: 22 - - type: DISK - uuid: "{{ result.response.spec.resources.disk_list[1].uuid }}" - size_gb: 2 - - type: "DISK" - uuid: "{{ result.response.spec.resources.disk_list[2].uuid }}" - size_gb: 2 - - type: "DISK" - size_gb: 2 - uuid: "{{ result.response.spec.resources.disk_list[3].uuid }}" - - type: DISK - size_gb: 2 - uuid: "{{ result.response.spec.resources.disk_list[4].uuid }}" - - type: "DISK" - uuid: "{{ result.response.spec.resources.disk_list[5].uuid }}" - size_gb: 1 - - - name: Update VM by removing all type of disks - ntnx_vms: - vm_uuid: "{{ result.vm_uuid }}" - disks: - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[0].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[1].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[2].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[3].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[4].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[5].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.disk_list[6].uuid }}" - - - name: Update VM by adding subnets - ntnx_vms: - vm_uuid: "{{ result.vm_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network.dhcp.uuid }}" - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network.static.ip }}" - - - name: Update VM by editing a subnet is_connected - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - desc: disconnect and connects nic's - networks: - - is_connected: true - uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" - - is_connected: false - uuid: "{{ 
result.response.spec.resources.nic_list[0].uuid }}" - - - name: Update VM by change the private ip for subnet - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - name: updated - desc: change ip - networks: - - is_connected: true - private_ip: "10.30.30.79" - uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" - - - name: Update VM by change vlan subnet - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - name: updated - desc: change vlan - categories: - AppType: - - Apache_Spark - networks: - - is_connected: false - subnet: - name: vlan1211 - uuid: "{{ result.response.spec.resources.nic_list[0].uuid }}" - - - name: Update VM by deleting a subnet - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - networks: - - state: absent - uuid: "{{ result.response.spec.resources.nic_list[0].uuid }}" - - state: absent - uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" - - - name: hard power off the vm - ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - state: hard_poweroff - register: result - ignore_errors: true - - name: power on the vm - ntnx_vms: - state: power_on - vm_uuid: "{{ vm.vm_uuid }}" - - - name: soft shut down the vm - ntnx_vms: - state: soft_shutdown - vm_uuid: "{{ vm.vm_uuid }}" - - - name: Create VM with minimum requiremnts with hard_poweroff opperation - ntnx_vms: - state: hard_poweroff - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" - - - name: Create VM with minimum requiremnts with poweroff opperation - ntnx_vms: - state: power_off - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" +- name: VM with CentOS-7-cloud-init image + ntnx_vms: + state: present + name: VM with CentOS-7-cloud-init image + timezone: "UTC" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster: + name: "{{ cluster_name }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + guest_customization: + type: "cloud_init" + 
script_path: "./cloud_init.yml" + is_overridable: true + +- name: VM with Cluster, Network, Universal time zone, one Disk + ntnx_vms: + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: "VM with Cluster Network and Disk" + timezone: "Universal" + cluster: + name: "{{ cluster_name }}" + networks: + - is_connected: true + subnet: + name: "{{ network_name }}" + disks: + - type: "DISK" + size_gb: 10 + bus: "PCI" + +- name: VM with Cluster, different CDROMs + ntnx_vms: + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + name: "VM with multiple CDROMs" + cluster: + name: "{{ cluster_name }}" + disks: + - type: "CDROM" + bus: "SATA" + empty_cdrom: true + - type: "CDROM" + bus: "IDE" + empty_cdrom: true + cores_per_vcpu: 1 + +- name: VM with diffrent disk types and diffrent sizes with UEFI boot type + ntnx_vms: + state: present + name: VM with UEFI boot type + timezone: GMT + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster: + name: "{{ cluster_name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 1 + bus: SCSI + - type: DISK + size_gb: 2 + bus: PCI + storage_container: + name: "{{ storage_container_name }}" + - type: DISK + size_gb: 3 + bus: SATA + boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 2 + cores_per_vcpu: 1 + memory_gb: 1 + +- name: VM with managed and unmanaged network + ntnx_vms: + state: present + name: VM_NIC + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + timezone: UTC + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + uuid: 
"{{ network.dhcp.uuid }}" + - is_connected: true + subnet: + uuid: "{{ network.static.uuid }}" + disks: + - type: DISK + size_gb: 1 + bus: SCSI + - type: DISK + size_gb: 3 + bus: PCI + - type: CDROM + bus: SATA + empty_cdrom: true + - type: CDROM + bus: IDE + empty_cdrom: true + boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 2 + cores_per_vcpu: 2 + memory_gb: 2 + +- name: Delete VM + ntnx_vms: + state: absent + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_uuid: '{{ vm_uuid }}' + +- name: update vm by values for memory, vcpus and cores_per_vcpu, timezone + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + timezone: UTC + +- name: Update VM by adding all type of disks + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 1 + bus: PCI + - type: "DISK" + size_gb: 1 + bus: "SATA" + - type: "DISK" + size_gb: 1 + bus: "SCSI" + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + uuid: "{{ storage_container.uuid }}" + - type: "DISK" + bus: "IDE" + size_gb: 1 + - type: "CDROM" + bus: "IDE" + empty_cdrom: true + +- name: Update VM by increasing the size of the disks + ntnx_vms: + vm_uuid: "{{ result.vm_uuid }}" + disks: + - type: "DISK" + uuid: "{{ result.response.spec.resources.disk_list[0].uuid }}" + size_gb: 22 + - type: DISK + uuid: "{{ result.response.spec.resources.disk_list[1].uuid }}" + size_gb: 2 + - type: "DISK" + uuid: "{{ result.response.spec.resources.disk_list[2].uuid }}" + size_gb: 2 + - type: "DISK" + size_gb: 2 + uuid: "{{ result.response.spec.resources.disk_list[3].uuid }}" + - type: DISK + size_gb: 2 + uuid: "{{ result.response.spec.resources.disk_list[4].uuid }}" + - type: "DISK" + uuid: "{{ result.response.spec.resources.disk_list[5].uuid }}" + size_gb: 1 + +- name: Update VM by 
removing all type of disks + ntnx_vms: + vm_uuid: "{{ result.vm_uuid }}" + disks: + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[0].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[1].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[2].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[3].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[4].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[5].uuid }}" + - state: absent + uuid: "{{ result.response.spec.resources.disk_list[6].uuid }}" + +- name: Update VM by adding subnets + ntnx_vms: + vm_uuid: "{{ result.vm_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network.dhcp.uuid }}" + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ network.static.ip }}" + +- name: Update VM by editing a subnet is_connected + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + desc: disconnect and connects nic's + networks: + - is_connected: true + uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" + - is_connected: false + uuid: "{{ result.response.spec.resources.nic_list[0].uuid }}" + +- name: Update VM by change the private ip for subnet + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + name: updated + desc: change ip + networks: + - is_connected: true + private_ip: "10.30.30.79" + uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" + +- name: Update VM by change vlan subnet + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + name: updated + desc: change vlan + categories: + AppType: + - Apache_Spark + networks: + - is_connected: false + subnet: + name: vlan1211 + uuid: "{{ result.response.spec.resources.nic_list[0].uuid }}" + +- name: Update VM by deleting a subnet + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + networks: + - state: absent + uuid: "{{ result.response.spec.resources.nic_list[0].uuid }}" + - state: absent + 
uuid: "{{ result.response.spec.resources.nic_list[1].uuid }}" + +- name: hard power off the vm + ntnx_vms: + vm_uuid: "{{ vm.vm_uuid }}" + state: hard_poweroff + register: result + ignore_errors: true + +- name: power on the vm + ntnx_vms: + state: power_on + vm_uuid: "{{ vm.vm_uuid }}" + +- name: soft shut down the vm + ntnx_vms: + state: soft_shutdown + vm_uuid: "{{ vm.vm_uuid }}" + +- name: Create VM with minimum requiremnts with hard_poweroff opperation + ntnx_vms: + state: hard_poweroff + name: integration_test_opperations_vm + cluster: + name: "{{ cluster.name }}" + +- name: Create VM with minimum requiremnts with poweroff opperation + ntnx_vms: + state: power_off + name: integration_test_opperations_vm + cluster: + name: "{{ cluster.name }}" """ RETURN = r""" @@ -744,9 +745,10 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.spec.vms import DefaultVMSpec # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.vms import VM # noqa: E402 +from ..module_utils.v3.prism.spec.vms import DefaultVMSpec # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.vms import VM # noqa: E402 +from ..module_utils.v3.utils import check_for_idempotency # noqa: E402 def get_module_spec(): @@ -869,7 +871,7 @@ def update_vm(module, result): result["response"] = spec return - if utils.check_for_idempotency(spec, resp, state=state): + if check_for_idempotency(spec, resp, state=state): result["skipped"] = True module.exit_json(msg="Nothing to change") diff --git a/plugins/modules/ntnx_vms_categories_v2.py b/plugins/modules/ntnx_vms_categories_v2.py new file mode 100644 index 000000000..daf4f150e --- /dev/null +++ b/plugins/modules/ntnx_vms_categories_v2.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_categories_v2 +short_description: Associate or disassociate categories to a VM in AHV Nutanix. +description: + - This module allows you to associate or disassociate categories to a AHV VM in Nutanix. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + categories: + description: + - List of categories to associate or disassociate with the VM. + - For associating, module will only send categories which are not already associated with the VM. + - For disassociating, module will only send categories which are already associated with the VM. + type: list + elements: dict + suboptions: + ext_id: + description: + - The external ID of the category. + type: str + required: true + required: true + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + state: + description: + - If set to "present", the categories will be associated with the VM. + - If set to "absent", the categories will be disassociated from the VM. + type: str + choices: [present, absent] + default: present + wait: + description: + - Whether to wait for the operation to complete before returning. 
+ type: bool + default: true +author: + - Pradeepsingh Bhati (@bhati-pradeep) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Associate categories with a VM + nutanix.ncp.ntnx_vms_categories_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + categories: + - ext_id: bbc3555a-133b-5348-9764-bfff196e84e4 + - ext_id: e4bda88f-e5da-5eb1-a031-2c0bb00d923d + - ext_id: 7bb4b92a-e6bd-5866-8ad4-8f3ab5886c33 + vm_ext_id: 8bb4b92a-e6bd-5866-8ad4-8f3ab5886c33 + state: present + wait: true + +- name: Disassociate categories from a VM + nutanix.ncp.ntnx_vms_categories_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + categories: + - ext_id: bbc3555a-133b-5348-9764-bfff196e84e4 + - ext_id: e4bda88f-e5da-5eb1-a031-2c0bb00d923d + vm_ext_id: 8bb4b92a-e6bd-5866-8ad4-8f3ab5886c33 + state: absent + wait: true +""" + +RETURNS = r""" +response: + description: + - For wait=true, the response will be the list of categories associated with the VM. + - For wait=false, the response will be the task response when triggered. + type: list + elements: str + returned: always + sample:[ + "eb8b4155-b3d1-5772-8d2f-d566d43d8e46", + "4d552748-e119-540a-b06c-3c6f0d213fa2", + "46f433d5-016d-5b11-a75f-5d0f44da7fd5", + "cee7a9cc-3032-54bb-9eaf-a8205af52b7c" + ], +error: + description: The error message if an error occurred. + type: str + returned: on error + sample: "failed to associate categories with vm" +changed: + description: Whether the state of the vm has changed. + type: bool + returned: always + sample: true +skipped: + description: Whether the operation is skipped due to no state change + type: bool + returned: on skipping + sample: true +task_ext_id: + description: The external ID of the task. 
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + reference_spec = dict( + ext_id=dict(type="str", required=True), + ) + module_args = dict( + categories=dict( + type="list", + elements="dict", + options=reference_spec, + obj=vmm_sdk.AhvConfigCategoryReference, + required=True, + ), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_vm_categories_uuid_list(module, api_instance, vm_ext_id): + vm = get_vm(module, api_instance, vm_ext_id) + categories = [] + if vm.categories is not None: + for item in vm.categories: + categories.append(item.ext_id) + + return categories + + +def associate_categories(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + # Remove already existing categories from list and create specs + 
# create list of categories not associated in vm + current_categories = get_vm_categories_uuid_list(module, vmm, vm_ext_id) + add_categories = [] + for category in module.params.get("categories", []): + ext_id = category.get("ext_id") + if ext_id and ext_id not in current_categories: + add_categories.append(vmm_sdk.AhvConfigCategoryReference(ext_id=ext_id)) + + spec = vmm_sdk.AhvConfigAssociateVmCategoriesParams(categories=add_categories) + + if not spec.categories: + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vmm, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.associate_categories(extId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while associating vm disk", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + vm = get_vm(module, vmm, vm_ext_id) + categories = vm.categories + categories = [item.ext_id for item in categories] + result["response"] = categories + result["changed"] = True + + +def disassociate_categories(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + # Send categories from list which are actually associated to vm + # this is to avoid failures from API due to non associated categories + current_categories = get_vm_categories_uuid_list(module, vmm, vm_ext_id) + remove_categories = [] + for category in module.params.get("categories", []): + ext_id = category.get("ext_id") + if ext_id and ext_id in current_categories: + 
remove_categories.append(vmm_sdk.AhvConfigCategoryReference(ext_id=ext_id)) + + spec = vmm_sdk.AhvConfigDisassociateVmCategoriesParams(categories=remove_categories) + + if not spec.categories: + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vmm, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.disassociate_categories(extId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while disassociating vm disk", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + vm = get_vm(module, vmm, vm_ext_id) + categories = vm.categories + if categories is not None: + categories = [item.ext_id for item in categories] + result["response"] = categories + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + associate_categories(module, result) + else: + disassociate_categories(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_cd_rom_info_v2.py b/plugins/modules/ntnx_vms_cd_rom_info_v2.py new file mode 100644 index 000000000..becc59a78 --- /dev/null +++ 
b/plugins/modules/ntnx_vms_cd_rom_info_v2.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_cd_rom_info_v2 +short_description: Fetch information about Nutanix VM's CD ROM +description: + - This module fetches information about Nutanix VM's CD ROM. + - The module can fetch information about all CD ROMs or a specific CD ROM. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the CD ROM. + type: str + required: false + vm_ext_id: + description: + - The external ID of the vm. + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all CD ROMs of a vm + nutanix.ncp.ntnx_vms_cd_rom_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + +- name: Fetch information about a specific CD ROM + nutanix.ncp.ntnx_vms_cd_rom_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" + + +RETURN = r""" +response: + description: + - The response from the Nutanix PC Disks v4 API. + - it can be single CD ROM or list of CD ROM as per spec. 
+ type: dict + returned: always + sample: { + "backing_info": { + "data_source": { + "reference": { + "image_ext_id": "b988b5ae-da2d-424c-8530-51529a0efb52" + } + }, + "disk_ext_id": "e1651169-f9df-4785-bdff-7a94b1cf04e0", + "disk_size_bytes": 48695296, + "is_migration_in_progress": false, + "storage_config": null, + "storage_container": { + "ext_id": "ab76422d-d587-46aa-b25a-b005acdcd896" + } + }, + "disk_address": { + "bus_type": "IDE", + "index": 0 + }, + "ext_id": "e1651169-f9df-4785-bdff-7a94b1cf04e0", + "iso_type": "OTHER", + "links": null, + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the CD ROM when specific is fetched. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_cd_rom # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_cd_rom_by_ext_id(module, vms, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + resp = get_cd_rom(module, vms, ext_id, vm_ext_id) + result["ext_id"] = ext_id + 
result["vm_ext_id"] = vm_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_cd_roms(module, vms, result): + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm cd roms info Spec", **result) + + try: + resp = vms.list_cd_roms_by_vm_id(vmExtId=vm_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm cd roms info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + vms = get_vm_api_instance(module) + if module.params.get("ext_id"): + get_cd_rom_by_ext_id(module, vms, result) + else: + get_cd_roms(module, vms, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_cd_rom_iso_v2.py b/plugins/modules/ntnx_vms_cd_rom_iso_v2.py new file mode 100644 index 000000000..f054d8986 --- /dev/null +++ b/plugins/modules/ntnx_vms_cd_rom_iso_v2.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vms_cd_rom_iso_v2 +short_description: Insert or Eject ISO from CD ROM of Nutanix VMs +version_added: "2.0.0" +description: + - This module can insert or eject ISO from CD ROM of Nutanix VMs + - This module uses PC 
v4 APIs based SDKs +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be insert iso in that CD ROM. + - if C(state) is set to C(absent) then it will eject ISO from that CD ROM. + choices: + - present + - absent + type: str + default: present + ext_id: + description: + - The external ID of the CD ROM. + type: str + required: false + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + backing_info: + description: + - ISO related information and storage + type: dict + suboptions: + disk_size_bytes: + description: + - The size of the CDROM in bytes. + type: int + storage_container: + description: + - The storage container reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the storage container. + type: str + required: true + storage_config: + description: + - The storage configuration. + type: dict + suboptions: + is_flash_mode_enabled: + description: + - to enable flash mode or not + type: bool + data_source: + description: + - The data source for the cd rom. + type: dict + suboptions: + reference: + description: + - The reference to the data source. + type: dict + suboptions: + image_reference: + description: + - The reference to an image. + - Mutually exclusive with C(vm_disk_reference). + type: dict + suboptions: + image_ext_id: + description: + - The external ID of the image. + type: str + vm_disk_reference: + description: + - The reference to a VM disk. + - Mutually exclusive with C(image_reference). + type: dict + suboptions: + disk_ext_id: + description: + - The external ID of the VM disk. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - "SCSI" + - "IDE" + - "PCI" + - "SATA" + - "SPAPR" + required: true + index: + description: + - The index of the disk. 
+ type: int + vm_reference: + description: + - The reference to the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + disk_address: + description: + - The address of the CD ROM. + type: dict + suboptions: + bus_type: + description: + - The bus type of the CD ROM. + type: str + choices: + - "IDE" + - "SATA" + required: false + index: + description: + - The index of the CD ROM. + type: int + wait: + description: + - Whether to wait for the task to complete. + type: bool + default: true +author: + - Pradeepsingh Bhati (@bhati-pradeep) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Inject ISO in CD ROM of a VM + nutanix.ncp.ntnx_vms_cd_rom_iso_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd6" + ext_id: "e1651169-f9df-4785-bdff-7a94b1cf04e0" + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "b988b5ae-da2d-424c-8530-51529a0efb52" + state: present + wait: true +""" + +RETURN = r""" +response: + description: + - The response from the Nutanix v4 API. 
+ - It will have task details for the operation + type: dict + returned: always + sample: { + "cluster_ext_ids": [ + "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-04-27T14:24:07.561731+00:00", + "completion_details": null, + "created_time": "2024-04-27T14:24:06.695534+00:00", + "entities_affected": [ + { + "ext_id": "521ab899-2398-4a23-62cb-8cd5e46ee5d2", + "rel": "vmm:ahv:vm" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:d0eba95b-5ac1-5564-9be7-7137a82214ab", + "is_cancelable": false, + "last_updated_time": "2024-04-27T14:24:07.561730+00:00", + "legacy_error_message": null, + "operation": "CreateCdRom", + "operation_description": null, + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-04-27T14:24:06.706222+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:82c22bf6-ca1f-5358-a13f-ec4e3c6fa077", + "href": "https://*****:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:82c22bf6-ca1f-5358-a13f-ec4e3c6fa077", + "rel": "subtask" + } + ], + "warnings": null + } +error: + description: The error message if an error occurred. + type: str + returned: on error +changed: + description: Whether the state of the CD ROM has changed. + type: bool + returned: always + sample: true +task_ext_id: + description: The external ID of the task. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the CD ROM. 
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_api_client, get_etag # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_cd_rom, get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=False), + vm_ext_id=dict(type="str", required=True), + ) + module_args.update(vm_specs.get_cd_rom_spec()) + return module_args + + +def get_vm_api_instance(module): + api_client = get_api_client(module) + return vmm_sdk.VmApi(api_client=api_client) + + +def insert_iso(module, vms, result): + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + ext_id = module.params["ext_id"] + result["ext_id"] = ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.CdRomInsertParams() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating insert iso in cd rom spec", **result) + + if module.check_mode: + 
result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.insert_cd_rom_by_id( + vmExtId=vm_ext_id, extId=ext_id, body=spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while inserting iso in cd rom", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + resp = get_cd_rom(module, vms, ext_id, vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def eject_iso(module, vms, result): + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + ext_id = module.params["ext_id"] + result["ext_id"] = ext_id + + if module.check_mode: + result[ + "response" + ] = "ISO will be ejected from CD ROM with external ID: {0}".format(ext_id) + + return + + # get etag of vm current state + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.eject_cd_rom_by_id(vmExtId=vm_ext_id, extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while ejecting iso in cd rom", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + resp = get_cd_rom(module, vms, ext_id, 
vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + vms = get_vm_api_instance(module) + if state == "present": + insert_iso(module, vms, result) + else: + eject_iso(module, vms, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_cd_rom_v2.py b/plugins/modules/ntnx_vms_cd_rom_v2.py new file mode 100644 index 000000000..927b8ba92 --- /dev/null +++ b/plugins/modules/ntnx_vms_cd_rom_v2.py @@ -0,0 +1,414 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vms_cd_rom_v2 +short_description: Manage CDROM for Nutanix AHV VMs +version_added: "2.0.0" +description: + - This module allows you to create and remove CDROM for Nutanix AHV VMs. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the CDROM. + - If C(state) is set to C(absent) then the operation will be remove the CDROM. + choices: + - present + - absent + type: str + default: present + ext_id: + description: + - The external ID of the CDROM. + - Required for deleting a disk. 
+ type: str + required: false + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + backing_info: + description: + - Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: + - The size of the CDROM in bytes. + type: int + storage_container: + description: + - The storage container reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the storage container. + type: str + required: true + storage_config: + description: + - The storage configuration. + type: dict + suboptions: + is_flash_mode_enabled: + description: + - Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: + - The data source for the disk. + type: dict + suboptions: + reference: + description: + - The reference to the data source. + type: dict + suboptions: + image_reference: + description: + - The reference to an image. + - Mutually exclusive with C(vm_disk_reference). + type: dict + suboptions: + image_ext_id: + description: + - The external ID of the image. + type: str + vm_disk_reference: + description: + - The reference to a VM disk. + - Mutually exclusive with C(image_reference). + type: dict + suboptions: + disk_ext_id: + description: + - The external ID of the VM disk. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + required: true + index: + description: + - The index of the disk. + type: int + vm_reference: + description: + - The reference to the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + disk_address: + description: + - The address of the CDROM. 
+ type: dict + suboptions: + bus_type: + description: + - Bus type for the device + type: str + choices: + - 'IDE' + - 'SATA' + required: false + index: + description: + - Device index on the bus. + - This field is ignored unless the bus details are specified. + type: int + wait: + description: + - Whether to wait for the task to complete. + type: bool + default: true +author: + - Pradeepsingh Bhati (@bhati-pradeep) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create a empty CD ROM for a VM + nutanix.ncp.ntnx_vms_cd_rom_v2: + state: "present" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "521ab899-2398-4a23-62cb-8cd5e46ee5d2" + disk_address: + bus_type: "IDE" + wait: true + +- name: Remove CD ROM from VM + nutanix.ncp.ntnx_vms_cd_rom_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd6" + ext_id: "e1651169-f9df-4785-bdff-7a94b1cf04e0" + state: absent + wait: true +""" + +RETURN = r""" +response: + description: + - The response from the Nutanix v4 API. + - It will give prism central task details. 
+    type: dict + returned: always + sample: { + "cluster_ext_ids": [ + "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-04-27T14:24:07.561731+00:00", + "completion_details": null, + "created_time": "2024-04-27T14:24:06.695534+00:00", + "entities_affected": [ + { + "ext_id": "521ab899-2398-4a23-62cb-8cd5e46ee5d2", + "rel": "vmm:ahv:vm" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:d0eba95b-5ac1-5564-9be7-7137a82214ab", + "is_cancelable": false, + "last_updated_time": "2024-04-27T14:24:07.561730+00:00", + "legacy_error_message": null, + "operation": "CreateCdRom", + "operation_description": null, + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-04-27T14:24:06.706222+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:82c22bf6-ca1f-5358-a13f-ec4e3c6fa077", + "href": "https://*****:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:82c22bf6-ca1f-5358-a13f-ec4e3c6fa077", + "rel": "subtask" + } + ], + "warnings": null + } +error: + description: The error message if an error occurred. + type: str + returned: on error +changed: + description: Whether the state of the VM has changed. + type: bool + returned: always + sample: true +task_ext_id: + description: The external ID of the task. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the CD ROM. + - It won't be returned during create due to a known issue.
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + wait_for_completion, + wait_for_entity_ext_id_in_task, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_cd_rom, get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=False), + vm_ext_id=dict(type="str", required=True), + ) + module_args.update(vm_specs.get_cd_rom_spec()) + return module_args + + +def create_cd_rom(module, result): + vms = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AhvConfigCdRom() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vm cd rom Spec", **result) + + if module.check_mode: + result["response"] = 
strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.create_cd_rom(vmExtId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vm cd rom", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id, err = wait_for_entity_ext_id_in_task( + module, task_ext_id, rel=TASK_CONSTANTS.RelEntityType.CD_ROM + ) + if err: + result["error"] = err + module.fail_json( + msg="Failed to get external ID of CD ROM from task", **result + ) + if ext_id: + resp = get_cd_rom(module, vms, ext_id, vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_cd_rom(module, result): + vms = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = ext_id + + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.delete_cd_rom_by_id(vmExtId=vm_ext_id, extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vm cd rom", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = 
strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + create_cd_rom(module, result) + else: + delete_cd_rom(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_clone.py b/plugins/modules/ntnx_vms_clone.py index 5b4d35a5f..ec0bc8bbe 100644 --- a/plugins/modules/ntnx_vms_clone.py +++ b/plugins/modules/ntnx_vms_clone.py @@ -34,10 +34,10 @@ ntnx_vms_clone: src_vm_uuid: "{{ vm.vm_uuid }}" networks: - - is_connected: false - subnet: - name: "{{ network.dhcp.name }}" - check_mode: yes + - is_connected: false + subnet: + name: "{{ network.dhcp.name }}" + check_mode: true - name: clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off ntnx_vms_clone: @@ -54,20 +54,20 @@ ntnx_vms_clone: src_vm_uuid: "{{ vm.vm_uuid }}" networks: - - is_connected: true - subnet: - uuid: "{{ network.dhcp.uuid }}" - - is_connected: true - subnet: - uuid: "{{ static.uuid }}" + - is_connected: true + subnet: + uuid: "{{ network.dhcp.uuid }}" + - is_connected: true + subnet: + uuid: "{{ static.uuid }}" - name: clone vm with script ntnx_vms_clone: src_vm_uuid: "{{ vm.vm_uuid }}" guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true """ RETURN = r""" @@ -270,9 +270,9 @@ from ..module_utils import utils # noqa: E402 from ..module_utils.base_module import BaseModule # noqa: E402 
-from ..module_utils.prism.spec.vms import DefaultVMSpec # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.vms import VM # noqa: E402 +from ..module_utils.v3.prism.spec.vms import DefaultVMSpec # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.vms import VM # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_vms_clone_v2.py b/plugins/modules/ntnx_vms_clone_v2.py new file mode 100644 index 000000000..f6cecd7f3 --- /dev/null +++ b/plugins/modules/ntnx_vms_clone_v2.py @@ -0,0 +1,694 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_clone_v2 +short_description: Clone a virtual machine in Nutanix AHV. +version_added: "2.0.0" +description: + - This module allows you to clone a virtual machine in Nutanix AHV. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the VM. + - Required for cloning VM. + required: true + type: str + name: + description: + - VM name. + required: false + type: str + num_sockets: + description: + - Number of vCPU sockets. + required: false + type: int + num_cores_per_socket: + description: + - Number of cores per socket. + required: false + type: int + num_threads_per_core: + description: + - Number of threads per core. + required: false + type: int + memory_size_bytes: + description: + - Memory size in bytes. + required: false + type: int + nics: + description: + - NICs attached to the VM. + required: false + type: list + elements: dict + suboptions: + backing_info: + description: + - Defines a NIC emulated by the hypervisor + type: dict + suboptions: + model: + description: + - Options for the NIC emulation.
+ required: false + type: str + choices: ["VIRTIO", "E1000"] + mac_address: + description: + - MAC address of the emulated NIC. + required: false + type: str + is_connected: + description: + - Indicates whether the NIC is connected or not. Default is True. + required: false + type: bool + num_queues: + description: + - The number of Tx/Rx queue pairs for this NIC. + required: false + type: int + + network_info: + description: + - Network information for a NIC. + required: false + type: dict + suboptions: + ipv4_config: + description: Configuration for IPv4 settings. + type: dict + suboptions: + should_assign_ip: + description: + - If set to true (default value), an IP address must be assigned to the VM NIC + either the one explicitly specified by the user or allocated automatically by the IPAM service + by not specifying the IP address. If false, then no IP assignment is required for this VM NIC. + type: bool + ip_address: + description: Primary IPv4 address configuration. + type: dict + suboptions: + value: + description: The IPv4 address value. + type: str + required: true + prefix_length: + description: The prefix length of the IPv4 address. + type: int + secondary_ip_address_list: + description: Secondary IP addresses for the NIC. + type: list + elements: dict + suboptions: + value: + description: The IPv4 address value. + type: str + required: true + prefix_length: + description: The prefix length of the IPv4 address. + type: int + network_function_chain: + description: + - The network function chain associates with the NIC. Only valid if nic_type is NORMAL_NIC. + required: false + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a network function chain. It should be of type UUID. + required: true + type: str + subnet: + description: + - Network identifier for this adapter. Only valid if nic_type is NORMAL_NIC or DIRECT_NIC. 
+ required: false + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a subnet. It should be of type UUID. + required: true + type: str + should_allow_unknown_macs: + description: + - Indicates whether an unknown unicast traffic is forwarded to this NIC or not. + This is applicable only for the NICs on the overlay subnets. + required: false + type: bool + trunked_vlans: + description: + - List of networks to trunk if VLAN mode is marked as TRUNKED. + If empty and VLAN mode is set to TRUNKED, all the VLANs are trunked. + required: false + type: list + elements: int + vlan_mode: + description: + - By default, all the virtual NICs are created in ACCESS mode, which permits only one VLAN per virtual network. + TRUNKED mode allows multiple VLANs on a single VM NIC for network-aware user VMs. + required: false + type: str + choices: ["ACCESS", "TRUNK"] + network_function_nic_type: + description: + - The type of this Network function NIC. Defaults to INGRESS. + required: false + type: str + choices: ["INGRESS", "EGRESS", "TAP"] + nic_type: + description: + - NIC type. + required: false + type: str + choices: ["NORMAL_NIC", "DIRECT_NIC", "NETWORK_FUNCTION_NIC", "SPAN_DESTINATION_NIC"] + boot_config: + description: + - Indicates the order of device types in which the VM should try to boot from. + If the boot device order is not provided the system will decide an appropriate boot device order. + required: false + type: dict + suboptions: + legacy_boot: + description: + - The legacy boot configuration. + required: false + type: dict + suboptions: + boot_device: + description: + - The boot device for legacy boot. + type: dict + suboptions: + boot_device_disk: + description: Specification for booting from disk. + type: dict + suboptions: + disk_address: + description: Address specification for the disk. + type: dict + suboptions: + bus_type: + description: Bus type for the device. 
The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: true + index: + description: Device index on the bus. This field is ignored unless the bus details are specified. + type: int + boot_device_nic: + description: Specification for booting from network interface controller (NIC). + type: dict + suboptions: + mac_address: + description: Mac address + type: str + boot_order: + description: + - Indicates the order of device types in which the VM should try to boot from. + If the boot device order is not provided the system will decide an appropriate boot device order. + type: list + elements: str + choices: ["CDROM", "DISK", "NETWORK"] + uefi_boot: + description: + - The UEFI boot configuration. + required: false + type: dict + suboptions: + is_secure_boot_enabled: + description: Indicate whether to enable secure boot or not. + type: bool + nvram_device: + description: Configuration for NVRAM to be presented to the VM. + type: dict + suboptions: + backing_storage_info: + description: Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: Size of the disk in Bytes + type: int + storage_container: + description: + - This reference is for disk level storage container preference. + This preference specifies the storage container to which this disk belongs. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM disk container. It should be of type UUID. + required: true + type: str + storage_config: + description: Storage configuration for VM disks. + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: A reference to a disk or image that contains the contents of a disk. 
+ type: dict + suboptions: + reference: + description: Reference to the data source, mutually exclusive with either image_reference or vm_disk_reference. + type: dict + suboptions: + image_reference: + description: Reference to an image. + type: dict + suboptions: + image_ext_id: + description: The globally unique identifier of an image. It should be of type UUID. + type: str + vm_disk_reference: + description: Reference to a virtual machine disk. + type: dict + suboptions: + disk_ext_id: + description: The globally unique identifier of a VM disk. It should be of type UUID. + type: str + disk_address: + description: The address of the disk. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. The acceptable values + are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + required: true + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + vm_reference: + description: This is a reference to a VM. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM. It should be of type UUID. + required: true + type: str + + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + required: false + type: dict + suboptions: + config: + description: + - The Nutanix Guest Tools customization settings. + required: false + type: dict + suboptions: + sysprep: + description: + - sysprep config + required: false + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly installed using this unattend configuration, + or this unattend configuration will be applied to a pre-prepared image. 
+ type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Sysprep script configuration. + type: dict + suboptions: + unattendxml: + description: Configuration for Unattend XML. + type: dict + suboptions: + value: + description: The value of the field + type: str + custom_key_values: + description: Custom key-value pairs for system preparation. + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: Key Name + type: str + value: + description: Key Value + type: raw + cloudinit: + description: + - cloudinit + required: false + type: dict + suboptions: + datasource_type: + description: + - The type of the data source. + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: Metadata configuration. + type: str + cloud_init_script: + description: Cloud init script configuration. + type: dict + suboptions: + user_data: + description: User data for cloud-init. + type: dict + required: false + suboptions: + value: + description: The value of the field + type: str + required: true + custom_key_values: + description: Custom key-value pairs for cloud-init. + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: Key Name + type: str + value: + description: Key Value + type: raw + required: false + wait: + description: + - Whether to wait for the clone operation to complete. 
+ required: false + type: bool + default: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Clone VM with same attributes values + nutanix.ncp.ntnx_vms_clone_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "de84a538-32bf-4a42-913b-340540af18fd" + name: "cloned_VM" + +- name: Clone VM with different attributes values + nutanix.ncp.ntnx_vms_clone_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "de84a538-32bf-4a42-913b-340540af18fd" + name: "cloned_VM" + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 +""" + +RETURN = r""" +response: + description: + - If C(wait) is true, then it will give cloned vm details. Else it will be task details + - It will have the new cloned VM details. 
+ type: dict + returned: always + sample: { + "apc_config": { + "cpu_model": null, + "is_apc_enabled": false + }, + "availability_zone": null, + "bios_uuid": "14e05447-fd70-4348-4a69-ec90f427f638", + "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", + "NETWORK" + ] + }, + "categories": [ + { + "ext_id": "eb8b4155-b3d1-5772-8d2f-d566d43d8e46" + } + ], + "cd_roms": null, + "cluster": { + "ext_id": "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + }, + "create_time": "2024-05-19T11:13:37.764820+00:00", + "description": null, + "disks": null, + "enabled_cpu_features": null, + "ext_id": "14e05447-fd70-4348-4a69-ec90f427f638", + "generation_uuid": "56e9e929-ae56-4ecc-a62b-cba2930ea522", + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": false, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + "memory_size_bytes": 4294967296, + "name": "PRTZlmWgHjkg_vm_test_clone2", + "nics": null, + "num_cores_per_socket": 2, + "num_numa_nodes": 0, + "num_sockets": 2, + "num_threads_per_core": 2, + "ownership_info": { + "owner": { + "ext_id": "00000000-0000-0000-0000-000000000000" + } + }, + "power_state": "OFF", + "protection_policy_state": null, + "protection_type": "UNPROTECTED", + "serial_ports": null, + "source": { + "entity_type": "VM", + "ext_id": "de84a538-32bf-4a42-913b-340540af18fd" + }, + "storage_config": null, + "tenant_id": null, + "update_time": "2024-05-19T11:13:38.529699+00:00", + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } +error: + description: The error message if an error occurred. 
+ type: str + returned: on error +changed: + description: Whether the state of the VM has changed. + type: bool + returned: always + sample: true +task_ext_id: + description: The external ID of the task. + type: str + returned: always + sample: "ZXJnb24=:ad73eb98-367b-5997-9f79-14e4b10bd1ed" +vm_ext_id: + description: The external ID of the VM. + type: str + returned: always + sample: "de84a538-32bf-4a42-913b-340540af18fd" +ext_id: + description: + - The external ID of the new cloned VM. + type: str + returned: always + sample: "14e05447-fd70-4348-4a69-ec90f427f638" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + name=dict(type="str"), + num_sockets=dict(type="int"), + num_cores_per_socket=dict(type="int"), + 
num_threads_per_core=dict(type="int"), + memory_size_bytes=dict(type="int"), + nics=dict( + type="list", + elements="dict", + options=vm_specs.get_nic_spec(), + obj=vmm_sdk.AhvConfigNic, + ), + boot_config=dict( + type="dict", + options=vm_specs.get_boot_config_spec(), + obj=vm_specs.get_boot_config_allowed_types(), + mutually_exclusive=[("legacy_boot", "uefi_boot")], + ), + guest_customization=dict( + type="dict", + options=vm_specs.get_gc_spec(), + obj=vmm_sdk.GuestCustomizationParams, + ), + ) + + return module_args + + +def clone_vm(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["ext_id"] + result["vm_ext_id"] = vm_ext_id + + current_spec = get_vm(module, vmm, vm_ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json( + "Unable to fetch etag for initiate guest os update", **result + ) + + kwargs = {"if_match": etag} + + sg = SpecGenerator(module) + default_spec = vmm_sdk.CloneOverrideParams() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vms Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vmm.clone_vm(extId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while cloning vm", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + if len(resp.entities_affected) > 0: + for item in resp.entities_affected: + if ( + item.ext_id != vm_ext_id + and item.rel == TASK_CONSTANTS.RelEntityType.VM + ): + ext_id = item.ext_id + resp = get_vm(module, vmm, ext_id) + result["ext_id"] = 
ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + clone_vm(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_disks_info_v2.py b/plugins/modules/ntnx_vms_disks_info_v2.py new file mode 100644 index 000000000..a7ce0479a --- /dev/null +++ b/plugins/modules/ntnx_vms_disks_info_v2.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_disks_info_v2 +short_description: Fetch information about Nutanix VM's disks +description: + - This module fetches information about Nutanix VM's disks. + - The module can fetch information about all disks or a specific disks. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the disk. + type: str + required: false + vm_ext_id: + description: + - The external ID of the vm. 
+ type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all disks of a vm + nutanix.ncp.ntnx_vms_disks_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + +- name: Fetch information about a specific disk + nutanix.ncp.ntnx_vms_disks_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" + + +RETURN = r""" +response: + description: + - The response from the Nutanix PC Disks v4 API. + - it can be single disk or list of disks as per spec. + type: dict + returned: always + sample: { + "backing_info": { + "data_source": null, + "disk_ext_id": "b76b2f22-aa3b-4684-aa2e-d08204f059b2", + "disk_size_bytes": 1073741824, + "is_migration_in_progress": false, + "storage_config": null, + "storage_container": { + "ext_id": "786c00c9-f8c9-4167-83c6-0a7f278a5d0f" + } + }, + "disk_address": { + "bus_type": "SCSI", + "index": 0 + }, + "ext_id": "b76b2f22-aa3b-4684-aa2e-d08204f059b2", + "links": null, + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the disk when specific disk is fetched. 
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_disk # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_disk_by_ext_id(module, vmm, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + resp = get_disk(module, vmm, ext_id, vm_ext_id) + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def get_disks(module, vmm, result): + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm disks info Spec", **result) + + try: + resp = vmm.list_disks_by_vm_id(vmExtId=vm_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm disks info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + 
remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + vmm = get_vm_api_instance(module) + if module.params.get("ext_id"): + get_disk_by_ext_id(module, vmm, result) + else: + get_disks(module, vmm, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_disks_v2.py b/plugins/modules/ntnx_vms_disks_v2.py new file mode 100644 index 000000000..557d531eb --- /dev/null +++ b/plugins/modules/ntnx_vms_disks_v2.py @@ -0,0 +1,524 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vms_disks_v2 +short_description: Manage disks for Nutanix AHV VMs +version_added: "2.0.0" +description: + - This module allows you to manage disks for Nutanix AHV VMs. + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the item. + - if C(state) is set to C(present) and C(ext_id) is given then it will update that disk. + - if C(state) is set to C(present) then C(ext_id) or C(name) needs to be set. + - >- + If C(state) is set to C(absent) and if the item exists, then + item is removed. + choices: + - present + - absent + type: str + default: present + ext_id: + description: + - The external ID of the disk. + - Required for updating or deleting a disk. + type: str + required: false + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + backing_info: + description: + - Supporting storage to create virtual disk on. + type: dict + suboptions: + vm_disk: + description: + - The VM disk information. 
+ type: dict + suboptions: + disk_size_bytes: + description: + - The size of the disk in bytes. + - Mutually exclusive with C(data_source) during update. + type: int + storage_container: + description: + - The storage container reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the storage container. + type: str + required: true + storage_config: + description: + - The storage configuration for the disk. + type: dict + suboptions: + is_flash_mode_enabled: + description: + - Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: + - The data source for the disk. + - Mutually exclusive with C(disk_size_bytes) during update. + type: dict + suboptions: + reference: + description: + - The reference to the data source. + type: dict + suboptions: + image_reference: + description: + - The reference to an image. + - Mutually exclusive with C(vm_disk_reference). + type: dict + suboptions: + image_ext_id: + description: + - The external ID of the image. + type: str + vm_disk_reference: + description: + - The reference to a VM disk. + - Mutually exclusive with C(image_reference). + type: dict + suboptions: + disk_ext_id: + description: + - The external ID of the VM disk. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + required: true + index: + description: + - The index of the disk. + type: int + vm_reference: + description: + - The reference to the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + adsf_volume_group: + description: + - The ADSF volume group reference. + type: dict + suboptions: + volume_group_ext_id: + description: + - The external ID of the volume group. 
+ type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + required: true + index: + description: + - The index of the disk. + type: int + wait: + description: + - Whether to wait for the task to complete. + type: bool + default: true +author: + - Pradeepsingh Bhati (@bhati-pradeep) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create a disk for a VM + nutanix.ncp.ntnx_vms_disks_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd6" + backing_info: + vm_disk: + disk_size_bytes: 1073741824 + storage_container: + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd2" + storage_config: + is_flash_mode_enabled: true + data_source: + reference: + image_reference: + image_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1" + disk_address: + bus_type: "SCSI" + index: 1 + state: present + wait: true + +- name: Update a disk's storage container and size for a VM + nutanix.ncp.ntnx_vms_disks_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd6" + backing_info: + vm_disk: + disk_size_bytes: 29843545600 + storage_container: + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd7" + state: present + wait: true + +- name: Delete a disk from a VM + nutanix.ncp.ntnx_vms_disks_v2: + vm_ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd6" + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd7" + state: absent + wait: true +""" + +RETURN = r""" +response: + description: + - The response from the Nutanix v4 API. + - For create/delete it will have task response depending on c(wait). 
+ - For update it will have disk latest info if c(wait) is true. + type: dict + returned: always + sample: { + "backing_info": { + "data_source": null, + "disk_ext_id": "530567f3-abda-4913-b5d0-0ab6758ec16e", + "disk_size_bytes": 29843545600, + "is_migration_in_progress": false, + "storage_config": null, + "storage_container": { + "ext_id": "78ec68c5-d9b0-4ba4-a3e9-96f90d580a0b" + } + }, + "disk_address": { + "bus_type": "PCI", + "index": 7 + }, + "ext_id": "530567f3-abda-4913-b5d0-0ab6758ec16e", + "links": null, + "tenant_id": null + } +error: + description: The error message if an error occurred. + type: str + returned: on error + sample: "Failed generating create vm disk Spec" +changed: + description: Whether the state of the disk has changed. + type: bool + returned: always + sample: true +skipped: + description: Whether the operation is skipped due to no state change + type: bool + returned: on skipping + sample: true +task_ext_id: + description: The external ID of the task. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the disk. + - It won't be returned during create due to a known issue. 
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + wait_for_completion, + wait_for_entity_ext_id_in_task, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_disk, get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=False), + vm_ext_id=dict(type="str", required=True), + ) + module_args.update(vm_specs.get_disk_spec()) + return module_args + + +def create_disk(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AhvConfigDisk() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vm disk Spec", **result) + + if module.check_mode: + 
result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vmm, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.create_disk(vmExtId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vm disk", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id, err = wait_for_entity_ext_id_in_task( + module, task_ext_id, rel=TASK_CONSTANTS.RelEntityType.VM_DISK + ) + if err: + result["error"] = err + module.fail_json( + msg="Failed to get external ID of disk from task", **result + ) + if ext_id: + resp = get_disk(module, vmm, ext_id, vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + if current_spec != update_spec: + return False + return True + + +def update_disk(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + + current_spec = get_disk( + module, api_instance=vmm, ext_id=ext_id, vm_ext_id=vm_ext_id + ) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm disk update spec", **result) + + # check for idempotency + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + # data source and 
disk_size_bytes cannot be sent together + disk_size_bytes = ( + module.params.get("backing_info", {}).get("vm_disk", {}).get("disk_size_bytes") + ) + data_source = ( + module.params.get("backing_info", {}).get("vm_disk", {}).get("data_source") + ) + if disk_size_bytes and data_source: + result["error"] = "data source and disk_size_bytes cannot be sent together" + module.exit_json(**result) + elif disk_size_bytes: + update_spec.backing_info.data_source = None + elif data_source: + update_spec.backing_info.disk_size_bytes = None + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = vmm.update_disk_by_id(vmExtId=vm_ext_id, extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating vm disk", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + # poll for the last unfinished task + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_disk(module, vmm, ext_id, vm_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_disk(module, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = ext_id + + vmm = get_vm_api_instance(module) + disk = get_disk(module, vmm, ext_id, vm_ext_id=vm_ext_id) + etag = get_etag(disk) + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.delete_disk_by_id(vmExtId=vm_ext_id, extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vm disk", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = 
strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "absent", ("ext_id",)), + ("state", "present", ("ext_id", "backing_info", "disk_address"), True), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_disk(module, result) + else: + create_disk(module, result) + else: + delete_disk(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_info.py b/plugins/modules/ntnx_vms_info.py index c11ec2904..2ecc97a7f 100644 --- a/plugins/modules/ntnx_vms_info.py +++ b/plugins/modules/ntnx_vms_info.py @@ -33,29 +33,28 @@ - Dina AbuHijleh (@dina-abuhijleh) """ EXAMPLES = r""" - - name: List VMS using name filter criteria - ntnx_vms_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - vm_name: "{{ vm.name }}" - kind: vm - register: result - - - name: List VMS using length, offset, sort order and vm_name sort attribute - ntnx_vms_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 1 - offset: 1 - sort_order: "ASCENDING" - sort_attribute: "vm_name" - register: result +- name: List VMS using name filter criteria + ntnx_vms_info: + nutanix_host: "{{ ip }}" + 
nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + vm_name: "{{ vm.name }}" + kind: vm + register: result +- name: List VMS using length, offset, sort order and vm_name sort attribute + ntnx_vms_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 1 + offset: 1 + sort_order: "ASCENDING" + sort_attribute: "vm_name" + register: result """ RETURN = r""" api_version: @@ -249,9 +248,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.vms import VM # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.vms import VM # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_vms_info_v2.py b/plugins/modules/ntnx_vms_info_v2.py new file mode 100644 index 000000000..9365aeb56 --- /dev/null +++ b/plugins/modules/ntnx_vms_info_v2.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_info_v2 +short_description: Fetch information about Nutanix AHV based PC VMs +description: + - This module fetches information about Nutanix AHV based PC VMs + - The module can fetch information about all VMs or a specific AHV based PC VMs + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the VM. 
+ type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all vms + nutanix.ncp.ntnx_vms_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: Fetch information about a specific vm + nutanix.ncp.ntnx_vms_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" +RETURN = r""" +response: + description: + - The response from the Nutanix PC vms v4 API. + - it can be single vm or list of vms as per spec. + type: dict + returned: always + sample: { + "apc_config": { + "cpu_model": null, + "is_apc_enabled": false + }, + "availability_zone": null, + "bios_uuid": "9d199d16-1c8e-4ddf-40f5-20a2d78aa918", + "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", + "NETWORK" + ] + }, + "categories": null, + "cd_roms": null, + "cluster": { + "ext_id": "0006197f-3d06-ce49-1fc3-ac1f6b6029c1" + }, + "create_time": "2024-06-24T08:01:46.269181+00:00", + "description": "ansible test", + "disks": null, + "enabled_cpu_features": null, + "ext_id": "9d199d16-1c8e-4ddf-40f5-20a2d78aa918", + "generation_uuid": "8bd335e2-f616-4806-87b3-53120c1f2acb", + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": false, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + "memory_size_bytes": 1073741824, + "name": "GFGLBElSNEGBansible-agvm", + "nics": null, + 
"num_cores_per_socket": 1, + "num_numa_nodes": 0, + "num_sockets": 1, + "num_threads_per_core": 1, + "ownership_info": { + "owner": { + "ext_id": "00000000-0000-0000-0000-000000000000" + } + }, + "power_state": "OFF", + "protection_policy_state": null, + "protection_type": "UNPROTECTED", + "serial_ports": null, + "source": null, + "storage_config": null, + "tenant_id": null, + "update_time": "2024-06-24T08:01:46.806598+00:00", + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +ext_id: + description: + - The external ID of the vm when specific vm is fetched. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_vm(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = vmm.get_vm_by_id(extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_vms(module, result): + vmm = get_vm_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vms info Spec", **result) + + try: + resp = vmm.list_vms(**kwargs) + except 
Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vms info", + ) + if resp is None or getattr(resp, "data", None) is None: + result["response"] = [] + else: + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_vm(module, result) + else: + get_vms(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_ngt_info_v2.py b/plugins/modules/ntnx_vms_ngt_info_v2.py new file mode 100644 index 000000000..84169d6fc --- /dev/null +++ b/plugins/modules/ntnx_vms_ngt_info_v2.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_ngt_info_v2 +short_description: Get Nutanix Guest Tools (NGT) current config for a virtual machine. +description: + - This module retrieves Nutanix Guest Tools (NGT) current config for a virtual machine in a Nutanix cluster. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the virtual machine for which to retrieve NGT information. 
+ type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials +""" + +EXAMPLES = r""" +- name: Get NGT info for a virtual machine + nutanix.ncp.ntnx_vms_ngt_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1" + register: result +""" + +RETURN = r""" +response: + description: The NGT information for the virtual machine. + type: dict + returned: always + sample: { + "available_version": "4.1", + "capabilities": [ + "SELF_SERVICE_RESTORE" + ], + "guest_os_version": "linux:64:CentOS Linux-7.3.1611", + "is_enabled": true, + "is_installed": true, + "is_iso_inserted": true, + "is_reachable": true, + "is_vm_mobility_drivers_installed": null, + "is_vss_snapshot_capable": null, + "version": "4.1" + } +changed: + description: Indicates whether the NGT information has changed. + type: bool + returned: always +error: + description: The error message, if any. + type: str + returned: on error +ext_id: + description: The external ID of the virtual machine. 
+  type: str
+  returned: always
+"""
+
+import warnings  # noqa: E402
+
+from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
+from ..module_utils.v4.base_info_module import BaseInfoModule  # noqa: E402
+from ..module_utils.v4.utils import (  # noqa: E402
+    raise_api_exception,
+    strip_internal_attributes,
+)
+from ..module_utils.v4.vmm.api_client import get_vm_api_instance  # noqa: E402
+
+# Suppress the InsecureRequestWarning
+warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made")
+
+
+def get_module_spec():
+    module_args = dict(ext_id=dict(type="str", required=True))
+    return module_args
+
+
+def get_ngt_config(module, result):
+    vmm = get_vm_api_instance(module)
+    ext_id = module.params.get("ext_id")
+    if not ext_id:
+        return module.fail_json(msg="ext_id is required to fetch NGT info", **result)
+
+    result["ext_id"] = ext_id
+
+    status = None
+    try:
+        status = vmm.get_guest_tools_by_id(extId=ext_id).data
+    except Exception as e:
+        raise_api_exception(
+            module=module,
+            exception=e,
+            msg="Api Exception raised while fetching NGT info for given vm",
+        )
+
+    result["response"] = strip_internal_attributes(status.to_dict())
+
+
+def run_module():
+    module = BaseInfoModule(
+        argument_spec=get_module_spec(),
+        supports_check_mode=True,
+        skip_info_args=True,
+    )
+
+    remove_param_with_none_value(module.params)
+    result = {
+        "changed": False,
+        "error": None,
+        "response": None,
+        "ext_id": None,
+    }
+    get_ngt_config(module, result)
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ntnx_vms_ngt_insert_iso_v2.py b/plugins/modules/ntnx_vms_ngt_insert_iso_v2.py
new file mode 100644
index 000000000..e92e4410b
--- /dev/null
+++ b/plugins/modules/ntnx_vms_ngt_insert_iso_v2.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Nutanix
+# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_ngt_insert_iso_v2 +short_description: Insert Nutanix Guest Tools (NGT) ISO into a virtual machine. +description: + - This module allows you to insert the Nutanix Guest Tools (NGT) ISO into a virtual machine's available CD-ROM in Nutanix PC. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the virtual machine where the NGT ISO should be inserted. + type: str + required: true + capabilities: + description: + - List of capabilities to enable for the NGT ISO. + - Valid choices are "SELF_SERVICE_RESTORE" and "VSS_SNAPSHOT". + type: list + elements: str + choices: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + is_config_only: + description: + - Indicates that the Nutanix Guest Tools are already installed on the guest VM, + and the ISO is being inserted to update the configuration of these tools. + type: bool + default: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Insert NGT ISO into virtual machine + nutanix.ncp.ntnx_vms_ngt_insert_iso_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + is_config_only: false +""" + +RETURNS = r""" +changed: + description: Indicates whether the state of the system has changed. + returned: always + type: bool +error: + description: Error message if an error occurred during the module execution. + returned: on error + type: str +response: + description: + - If C(wait) is true, It will show NGT configuration of VM after operation. 
+ - If C(wait) is false, It will show task status of NGT operation. + returned: always + type: dict + sample: { + "available_version": "4.1", + "capabilities": [ + "SELF_SERVICE_RESTORE" + ], + "guest_os_version": "linux:64:CentOS Linux-7.3.1611", + "is_enabled": true, + "is_installed": true, + "is_iso_inserted": true, + "is_reachable": true, + "is_vm_mobility_drivers_installed": null, + "is_vss_snapshot_capable": null, + "version": "4.1" + } +ext_id: + description: The external ID of the virtual machine. + returned: always + type: str +task_ext_id: + description: The external ID of the task created for inserting the NGT ISO. + returned: when the task is created + type: str +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_ngt_status # noqa: E402 + +SDK_IMP_ERROR = None +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + capabilities=dict( + type="list", + elements="str", + choices=["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"], + ), + is_config_only=dict(type="bool", default=False), + ) + return 
module_args + + +def insert_ngt_iso(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + if not ext_id: + return module.fail_json( + msg="vm ext_id is required to inserting NGT iso", **result + ) + + result["ext_id"] = ext_id + + status = vmm.get_guest_tools_by_id(extId=ext_id) + etag = get_etag(status) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.GuestToolsInsertConfig() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating insert NGT iso spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.insert_vm_guest_tools(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while inserting NGT iso in vm", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + status = get_ngt_status(module, vmm, ext_id) + result["response"] = strip_internal_attributes(status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + insert_ngt_iso(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_ngt_update_v2.py b/plugins/modules/ntnx_vms_ngt_update_v2.py new file mode 100644 index 000000000..364e8a133 --- /dev/null +++ 
b/plugins/modules/ntnx_vms_ngt_update_v2.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_ngt_update_v2 +short_description: Update Nutanix Guest Tools (NGT) configuration for a VM. +version_added: "2.0.0" +description: + - This module allows you to update the Nutanix Guest Tools (NGT) configuration for a VM. + - This module uses PC v4 APIs based SDKs +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + is_enabled: + description: + - To enable or disable NGT for the VM. + type: bool + required: false + capabilities: + description: + - The list of NGT capabilities to enable for the VM. + type: list + elements: str + choices: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + + +EXAMPLES = r""" +- name: Update NGT configuration for a VM + nutanix.ncp.ntnx_vms_ngt_update_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT +""" + +RETURNS = r""" +response: + description: + - If C(wait) is true, It will show NGT configuration of VM after operation. + - If C(wait) is false, It will show task status of NGT operation. 
+ type: dict + returned: always + sample: { + "available_version": "4.1", + "capabilities": [ + "VSS_SNAPSHOT" + ], + "guest_os_version": "linux:64:CentOS Linux-7.3.1611", + "is_enabled": true, + "is_installed": true, + "is_iso_inserted": false, + "is_reachable": true, + "is_vm_mobility_drivers_installed": null, + "is_vss_snapshot_capable": null, + "version": "4.1" + } +changed: + description: Indicates whether the NGT configuration was changed. + type: bool + returned: always + sample: false +error: + description: The error message, if any. + type: str + returned: on error + sample: "Failed to update NGT configuration." +task_ext_id: + + description: The external ID of the task, if the update operation is asynchronous. + type: str + returned: when the update operation is asynchronous +skipped: + description: Indicates whether the NGT configuration update was skipped due to idempotency. + type: bool + returned: when the NGT configuration is already up to date +""" + +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_ngt_status # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + is_enabled=dict(type="bool"), + capabilities=dict( + type="list", + elements="str", + choices=["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"], + ), + ) + return module_args + + +def check_idempotency(current_spec, update_spec): + current_capabilities = 
getattr(current_spec, "capabilities", []) + if current_capabilities is None: + current_capabilities = [] + updated_capabilities = getattr(update_spec, "capabilities", []) + if updated_capabilities is None: + updated_capabilities = [] + if ( + sorted(current_capabilities) == sorted(updated_capabilities) + and current_spec.is_enabled == update_spec.is_enabled + ): + return True + return False + + +def update_ngt_config(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + if not ext_id: + return module.fail_json(msg="vm ext_id is required to update NGT", **result) + + result["ext_id"] = ext_id + + current_spec = get_ngt_status(module, vmm, ext_id) + + spec = deepcopy(current_spec) + + if module.params.get("capabilities") is not None: + spec.capabilities = module.params.get("capabilities") + + if module.params.get("is_enabled") is not None: + spec.is_enabled = module.params.get("is_enabled") + + if check_idempotency(current_spec, spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + try: + resp = vmm.update_guest_tools_by_id(extId=ext_id, body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating NGT", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + status = get_ngt_status(module, vmm, ext_id) + result["response"] = strip_internal_attributes(status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[("state", "present", ("capabilities", "is_enabled"), True)], + ) + + remove_param_with_none_value(module.params) + result = { 
+ "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + update_ngt_config(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_ngt_upgrade_v2.py b/plugins/modules/ntnx_vms_ngt_upgrade_v2.py new file mode 100644 index 000000000..c766c2383 --- /dev/null +++ b/plugins/modules/ntnx_vms_ngt_upgrade_v2.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_ngt_upgrade_v2 +short_description: Upgrade Nutanix Guest Tools on a VM +version_added: "2.0.0" +description: + - This module upgrades Nutanix Guest Tools (NGT) on a VM in a Nutanix PC. + - This module uses PC v4 APIs based SDKs +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + reboot_preference: + description: + - The reboot preference for the VM after the NGT upgrade. + type: dict + suboptions: + schedule_type: + description: + - The type of reboot schedule. + type: str + choices: + - IMMEDIATE + - SKIP + - LATER + required: true + schedule: + description: + - The schedule for the reboot. + type: dict + suboptions: + start_time: + description: + - The start time for the reboot. + - ISO 8601 format. 
+            type: str
+            required: true
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_credentials
+  - nutanix.ncp.ntnx_operations_v2
+"""
+
+EXAMPLES = r"""
+- name: Upgrade NGT on a VM
+  nutanix.ncp.ntnx_vms_ngt_upgrade_v2:
+    nutanix_host: "{{ ip }}"
+    nutanix_username: "{{ username }}"
+    nutanix_password: "{{ password }}"
+    validate_certs: false
+    ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1"
+    reboot_preference:
+      schedule_type: "IMMEDIATE"
+      schedule:
+        start_time: "2022-01-01T00:00:00Z"
+  register: result
+"""
+
+RETURN = r"""
+response:
+  description:
+    - If C(wait) is true, It will show NGT configuration of VM after operation.
+    - If C(wait) is false, It will show task status of NGT operation.
+  type: dict
+  returned: always
+  sample: {
+        "available_version": "4.1",
+        "capabilities": [
+            "VSS_SNAPSHOT"
+        ],
+        "guest_os_version": "linux:64:CentOS Linux-7.3.1611",
+        "is_enabled": true,
+        "is_installed": true,
+        "is_iso_inserted": false,
+        "is_reachable": true,
+        "is_vm_mobility_drivers_installed": null,
+        "is_vss_snapshot_capable": null,
+        "version": "4.1"
+    }
+changed:
+  description: Indicates whether the NGT configuration was changed.
+  type: bool
+  returned: always
+  sample: false
+ext_id:
+  description: The external ID of the VM.
+  type: str
+  returned: always
+task_ext_id:
+  description: The external ID of the task associated with the NGT upgrade.
+  type: str
+  returned: when the task_ext_id is available
+error:
+  description: The error message, if any.
+ type: str + returned: when an error occurs +""" + + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_ngt_status # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + schedule = dict( + start_time=dict(type="str", required=True), + ) + reboot_preference = dict( + schedule_type=dict( + type="str", choices=["IMMEDIATE", "SKIP", "LATER"], required=True + ), + schedule=dict( + type="dict", options=schedule, obj=vmm_sdk.RebootPreferenceSchedule + ), + ) + module_args = dict( + ext_id=dict(type="str", required=True), + reboot_preference=dict( + type="dict", options=reboot_preference, obj=vmm_sdk.RebootPreference + ), + ) + return module_args + + +def upgrade_ngt(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + if not ext_id: + return module.fail_json(msg="ext_id is required to upgrade NGT", **result) + + result["ext_id"] = ext_id + + status = get_ngt_status(module, vmm, ext_id) + etag = get_etag(status) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.GuestToolsUpgradeConfig() + 
spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create VM NGT upgrade Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.upgrade_vm_guest_tools(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while installing NGT", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + status = get_ngt_status(module, vmm, ext_id) + result["response"] = strip_internal_attributes(status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + upgrade_ngt(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_ngt_v2.py b/plugins/modules/ntnx_vms_ngt_v2.py new file mode 100644 index 000000000..c1eafb31a --- /dev/null +++ b/plugins/modules/ntnx_vms_ngt_v2.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +module: ntnx_vms_ngt_v2 +short_description: Install or uninstall Nutanix Guest Tools (NGT) on a VM. 
+description: + - This module allows you to install or uninstall Nutanix Guest Tools (NGT) on a VM in a Nutanix cluster. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +notes: + - Module will skip if install/uninstall is not required checking the current installation status. +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will install NGT on the VM. + - if C(state) is set to C(absent) then module will uninstall NGT on the VM. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the VM on which to install or uninstall NGT. + type: str + required: true + capabilities: + description: + - List of NGT capabilities to enable. + type: list + elements: str + choices: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + credential: + description: + - The credentials to authenticate with the VM. + type: dict + suboptions: + username: + description: + - The username for authentication. + type: str + required: true + password: + description: + - The password for authentication. + type: str + required: true + reboot_preference: + description: + - The reboot preference for the VM after installing or uninstalling NGT. + type: dict + suboptions: + schedule_type: + description: + - The type of reboot schedule. + type: str + choices: + - IMMEDIATE + - SKIP + - LATER + required: true + schedule: + description: + - The schedule for reboot. + - required when C(schedule_type) is LATER. + type: dict + suboptions: + start_time: + description: + - The start time for the reboot schedule. + - ISO 8601 format. 
+            type: str
+            required: true
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_credentials
+  - nutanix.ncp.ntnx_operations_v2
+"""
+
+EXAMPLES = r"""
+- name: Install NGT on a VM
+  nutanix.ncp.ntnx_vms_ngt_v2:
+    nutanix_host: "{{ ip }}"
+    nutanix_username: "{{ username }}"
+    nutanix_password: "{{ password }}"
+    validate_certs: false
+    ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1"
+    capabilities:
+      - SELF_SERVICE_RESTORE
+    credential:
+      username: "admin"
+      password: "password"
+    reboot_preference:
+      schedule_type: "LATER"
+      schedule:
+        start_time: "2022-01-01T00:00:00Z"
+
+- name: Uninstall NGT from a VM
+  nutanix.ncp.ntnx_vms_ngt_v2:
+    nutanix_host: "{{ ip }}"
+    nutanix_username: "{{ username }}"
+    nutanix_password: "{{ password }}"
+    validate_certs: false
+    ext_id: "98b9dc89-be08-3c56-b554-692b8b676fd1"
+    state: "absent"
+"""
+
+RETURN = r"""
+response:
+  description:
+    - If C(wait) is true, It will show NGT configuration of VM after install or uninstall operation.
+    - If C(wait) is false, It will show task status of NGT operation.
+  type: dict
+  returned: always
+  sample:
+    {
+        "available_version": "4.1",
+        "capabilities": [
+            "VSS_SNAPSHOT"
+        ],
+        "guest_os_version": null,
+        "is_enabled": true,
+        "is_installed": false,
+        "is_iso_inserted": true,
+        "is_reachable": false,
+        "is_vm_mobility_drivers_installed": null,
+        "is_vss_snapshot_capable": null,
+        "version": null
+    }
+task_ext_id:
+  description: The external ID of the task associated with the NGT operation.
+  type: str
+  returned: when the task is created
+changed:
+  description: Indicates whether the NGT installation or uninstallation changed the state of the VM.
+  type: bool
+  returned: always
+error:
+  description: The error message, if any, encountered during the NGT operation.
+  type: str
+  returned: on error
+ext_id:
+  description: The external ID of the VM on which the NGT operation was performed.
+ type: str + returned: always +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_ngt_status # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + credential = dict( + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + ) + schedule = dict( + start_time=dict(type="str", required=True), + ) + reboot_preference = dict( + schedule_type=dict( + type="str", choices=["IMMEDIATE", "SKIP", "LATER"], required=True + ), + schedule=dict( + type="dict", options=schedule, obj=vmm_sdk.RebootPreferenceSchedule + ), + ) + module_args = dict( + ext_id=dict(type="str", required=True), + capabilities=dict( + type="list", + elements="str", + choices=["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"], + ), + credential=dict(type="dict", options=credential, obj=vmm_sdk.Credential), + reboot_preference=dict( + type="dict", options=reboot_preference, obj=vmm_sdk.RebootPreference + ), + ) + return module_args + + +def install_ngt(module, result): + vmm = get_vm_api_instance(module) + ext_id = 
module.params.get("ext_id") + if not ext_id: + return module.fail_json(msg="ext_id is required to install NGT", **result) + + result["ext_id"] = ext_id + + status = get_ngt_status(module, vmm, ext_id) + if getattr(status, "is_installed", False): + result["skipped"] = True + module.exit_json(msg="NGT is already installed in given vm.", **result) + etag = get_etag(status) + kwargs = {"if_match": etag} + + sg = SpecGenerator(module) + default_spec = vmm_sdk.GuestToolsInstallConfig() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create install Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vmm.install_vm_guest_tools(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while installing NGT", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + status = get_ngt_status(module, vmm, ext_id) + result["response"] = strip_internal_attributes(status.to_dict()) + result["changed"] = True + + +def uninstall_ngt(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + if not ext_id: + return module.fail_json( + msg="vm ext_id is required to uninstalling NGT", **result + ) + + result["ext_id"] = ext_id + + status = get_ngt_status(module, vmm, ext_id) + if not getattr(status, "is_installed", False): + result["skipped"] = True + module.exit_json(msg="NGT is already not installed in the given vm", **result) + + etag = get_etag(status) + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.uninstall_vm_guest_tools(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + 
module=module, + exception=e, + msg="Api Exception raised while uninstalling NGT", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + status = get_ngt_status(module, vmm, ext_id) + result["response"] = strip_internal_attributes(status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + install_ngt(module, result) + else: + uninstall_ngt(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_nics_info_v2.py b/plugins/modules/ntnx_vms_nics_info_v2.py new file mode 100644 index 000000000..53fe9ead0 --- /dev/null +++ b/plugins/modules/ntnx_vms_nics_info_v2.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_nics_info_v2 +short_description: Fetch information about Nutanix VM's NICs +description: + - This module fetches information about Nutanix VM's NICs. + - The module can fetch information about all NICs or a specific NICs. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + ext_id: + description: + - The external ID of the nic. 
+ type: str + required: false + vm_ext_id: + description: + - The external ID of the vm. + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: Fetch information about all nics of a vm + nutanix.ncp.ntnx_vms_nics_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + +- name: Fetch information about a specific nic + nutanix.ncp.ntnx_vms_nics_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec16e + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" + +RETURN = r""" +response: + description: + - The response from the Nutanix PC VMM Nics v4 API. + - it can be single nic or list of nics as per spec. + type: dict + returned: always + sample: { + "backing_info": { + "is_connected": true, + "mac_address": "50:6b:8d:a2:37:00", + "model": null, + "num_queues": 1 + }, + "ext_id": "4a67ce54-dd9c-4c71-9d91-2a19d512dc7d", + "links": null, + "network_info": { + "ipv4_config": null, + "ipv4_info": null, + "network_function_chain": null, + "network_function_nic_type": null, + "nic_type": "NORMAL_NIC", + "should_allow_unknown_macs": null, + "subnet": { + "ext_id": "34c596ab-37fe-4739-a961-5e5cad79bb99" + }, + "trunked_vlans": null, + "vlan_mode": "ACCESS" + }, + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +ext_id: + description: + - The external ID of the nic when specific nic is fetched. 
+ type: str + returned: always + sample: "530567f3-abda-4913-b5d0-0ab6758ec168" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_nic(module, result): + vms = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + + try: + resp = vms.get_nic_by_id(vmExtId=vm_ext_id, extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm nic info", + ) + + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_nics(module, result): + vms = get_vm_api_instance(module) + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm nics info Spec", **result) + + try: + resp = vms.list_nics_by_vm_id(vmExtId=vm_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm nics info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def run_module(): + module = BaseInfoModule( + 
argument_spec=get_module_spec(),
+        supports_check_mode=False,
+        mutually_exclusive=[
+            ("ext_id", "filter"),
+        ],
+    )
+
+    remove_param_with_none_value(module.params)
+    result = {"changed": False, "error": None, "response": None}
+    if module.params.get("ext_id"):
+        get_nic(module, result)
+    else:
+        get_nics(module, result)
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ntnx_vms_nics_ip_v2.py b/plugins/modules/ntnx_vms_nics_ip_v2.py
new file mode 100644
index 000000000..a8b8d09f2
--- /dev/null
+++ b/plugins/modules/ntnx_vms_nics_ip_v2.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Nutanix
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: ntnx_vms_nics_ip_v2
+short_description: Assign/Release IP to/from Nutanix VM NICs.
+description:
+    - This module allows you to assign or release IP from Nutanix VM NICs.
+    - This module uses PC v4 APIs based SDKs
+version_added: "2.0.0"
+author:
+    - Pradeepsingh Bhati (@bhati-pradeep)
+notes:
+    - Module will skip releasing IP from NIC, if no IP is assigned to NIC.
+    - Module will skip the assign/release operation if it is not required as per the above check.
+options:
+    state:
+        description:
+            - Specify state
+            - If C(state) is set to present then module will assign IP to NIC.
+            - if C(state) is set to absent then module will release IP from NIC.
+        choices:
+            - present
+            - absent
+        type: str
+        default: present
+    wait:
+        description: Wait for the operation to complete.
+        type: bool
+        required: false
+        default: True
+    ext_id:
+        description:
+            - The external ID of the NIC.
+        type: str
+        required: true
+    vm_ext_id:
+        description:
+            - The external ID of the VM.
+        type: str
+        required: true
+    ip_address:
+        description:
+            - The IP address for the NIC to be assigned.
+        type: dict
+        suboptions:
+            value:
+                description:
+                    - The IP address value.
+                type: str
+                required: True
+extends_documentation_fragment:
+    - nutanix.ncp.ntnx_credentials
+    - nutanix.ncp.ntnx_operations_v2
+"""
+
+EXAMPLES = r"""
+- name: Assign IP
+  nutanix.ncp.ntnx_vms_nics_ip_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    ext_id: 7147b563-7b80-4be5-96b5-d8ff63187a5c
+    vm_ext_id: "521ab899-2398-4a23-62cb-8cd5e46ee5d2"
+    ip_address:
+      value: "10.51.144.137"
+  register: result
+
+- name: Release IP
+  nutanix.ncp.ntnx_vms_nics_ip_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    state: absent
+    ext_id: 7147b563-7b80-4be5-96b5-d8ff63187a5c
+    vm_ext_id: "521ab899-2398-4a23-62cb-8cd5e46ee5d2"
+  register: result
+"""
+
+RETURN = r"""
+response:
+    description:
+        - For C(wait)=false, it will return task details
+        - Else it will return NIC info
+    type: dict
+    returned: always
+    sample: {
+            "backing_info": {
+                "is_connected": true,
+                "mac_address": "50:6b:8d:f9:de:e7",
+                "model": null,
+                "num_queues": 1
+            },
+            "ext_id": "7147b563-7b80-4be5-96b5-d8ff63187a5c",
+            "links": null,
+            "network_info": {
+                "ipv4_config": {
+                    "ip_address": {
+                        "prefix_length": 32,
+                        "value": "10.51.144.137"
+                    },
+                    "secondary_ip_address_list": null,
+                    "should_assign_ip": null
+                },
+                "ipv4_info": null,
+                "network_function_chain": null,
+                "network_function_nic_type": null,
+                "nic_type": "NORMAL_NIC",
+                "should_allow_unknown_macs": null,
+                "subnet": {
+                    "ext_id": "18f0ed6e-30c8-48be-9c8f-e7cb4153416a"
+                },
+                "trunked_vlans": null,
+                "vlan_mode": "ACCESS"
+            },
+            "tenant_id": null
+        }
+task_ext_id:
+    description: The external ID of the task associated with the operation.
+    type: str
+changed:
+    description: Indicates whether the module changed the state of the VM NIC.
+ type: bool +error: + description: The error message, if any, encountered. + type: str +ext_id: + description: + - NIC external ID + type: str +vm_ext_id: + description: VM external ID + type: str +skipped: + description: Indicates whether the operation was skipped due to no state changes (Idempotency). + type: bool +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_nic # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + ip_address = dict( + value=dict(type="str", required=True), + ) + module_args = dict( + ext_id=dict(type="str", required=True), + vm_ext_id=dict(type="str", required=True), + ip_address=dict( + type="dict", options=ip_address, obj=vmm_sdk.IPv4Address, required=False + ), + ) + return module_args + + +def assign_ip(module, result): + vms = get_vm_api_instance(module) + nic_ext_id = module.params.get("ext_id") + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = nic_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AssignIpParams() + spec, err = 
sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating assign IP to VM Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of nic current state + vm = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.assign_ip_by_id( + vmExtId=vm_ext_id, extId=nic_ext_id, body=spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while assign IP to VM", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + nic = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + result["response"] = strip_internal_attributes(nic.to_dict()) + result["changed"] = True + + +def release_ip(module, result): + vms = get_vm_api_instance(module) + nic_ext_id = module.params.get("ext_id") + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = nic_ext_id + + if module.check_mode: + result["response"] = "Release IP address from NIC with external ID: {}".format( + nic_ext_id + ) + return + + # get etag of nic current state + nic = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + + # check if IP is not assigned then no need for releasing it + ip_info = nic.network_info + if hasattr(ip_info, "ipv4_config") and getattr(ip_info, "ipv4_config") is None: + result["skipped"] = True + module.exit_json( + msg="Nothing to change. 
NIC doesn't have IP assigned", **result + ) + + etag = get_etag(nic) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.release_ip_by_id(vmExtId=vm_ext_id, extId=nic_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while releasing IP from VM", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + nic = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + result["response"] = strip_internal_attributes(nic.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("ip_address",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + assign_ip(module, result) + else: + release_ip(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_nics_migrate_v2.py b/plugins/modules/ntnx_vms_nics_migrate_v2.py new file mode 100644 index 000000000..fd42448d5 --- /dev/null +++ b/plugins/modules/ntnx_vms_nics_migrate_v2.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_nics_migrate_v2 
+short_description: Migrate a NIC to another subnet in Nutanix VM. +description: + - Migrates a network device attached to a Virtual Machine to another subnet. + - This module uses PC v4 APIs based SDKs +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the NIC. + type: str + required: true + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + migrate_type: + description: + - The type of migration to be performed. + type: str + required: true + choices: + - "ASSIGN_IP" + - "RELEASE_IP" + subnet: + description: + - Network identifier for this adapter. Only valid if nic_type is NORMAL_NIC or DIRECT_NIC. + type: dict + suboptions: + ext_id: + description: + - external ID of target subnet + type: str + required: True + required: True + ip_address: + description: + - The IP address for the NIC to be assigned. + type: dict + suboptions: + value: + description: + - The IP address value. 
+                type: str
+                required: True
+extends_documentation_fragment:
+    - nutanix.ncp.ntnx_credentials
+    - nutanix.ncp.ntnx_operations_v2
+"""
+
+EXAMPLES = r"""
+- name: Migrate subnet in NIC
+  nutanix.ncp.ntnx_vms_nics_migrate_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    ext_id: 7147b563-7b80-4be5-96b5-d8ff63187a5c
+    vm_ext_id: "521ab899-2398-4a23-62cb-8cd5e46ee5d2"
+    ip_address:
+      value: "10.51.144.137"
+    migrate_type: "ASSIGN_IP"
+    subnet:
+      ext_id: "7131f3ca-47ce-4f1d-990c-fa17800bd94d"
+  register: result
+"""
+
+RETURN = r"""
+response:
+    description:
+        - For C(wait)=false, it will return task details
+        - Else it will return NIC info
+    type: dict
+    returned: always
+    sample: {
+            "backing_info": {
+                "is_connected": true,
+                "mac_address": "50:6b:8d:f9:de:e7",
+                "model": null,
+                "num_queues": 1
+            },
+            "ext_id": "7147b563-7b80-4be5-96b5-d8ff63187a5c",
+            "links": null,
+            "network_info": {
+                "ipv4_config": {
+                    "ip_address": {
+                        "prefix_length": 32,
+                        "value": "10.51.144.137"
+                    },
+                    "secondary_ip_address_list": null,
+                    "should_assign_ip": null
+                },
+                "ipv4_info": null,
+                "network_function_chain": null,
+                "network_function_nic_type": null,
+                "nic_type": "NORMAL_NIC",
+                "should_allow_unknown_macs": null,
+                "subnet": {
+                    "ext_id": "18f0ed6e-30c8-48be-9c8f-e7cb4153416a"
+                },
+                "trunked_vlans": null,
+                "vlan_mode": "ACCESS"
+            },
+            "tenant_id": null
+        }
+task_ext_id:
+    description: The external ID of the task associated with the operation.
+    type: str
+changed:
+    description: Indicates whether the module changed the state of the VM NIC.
+    type: bool
+error:
+    description: The error message, if any, encountered.
+    type: str
+ext_id:
+    description:
+        - NIC external ID
+    type: str
+vm_ext_id:
+    description: VM external ID
+    type: str
+skipped:
+    description: Indicates whether the operation was skipped due to no state changes (Idempotency).
+ type: bool +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_nic # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + ip_address = dict( + value=dict(type="str", required=True), + ) + subnet = dict( + ext_id=dict(type="str", required=True), + ) + module_args = dict( + ext_id=dict(type="str", required=True), + vm_ext_id=dict(type="str", required=True), + migrate_type=dict( + type="str", required=True, choices=["ASSIGN_IP", "RELEASE_IP"] + ), + ip_address=dict( + type="dict", options=ip_address, obj=vmm_sdk.IPv4Address, required=False + ), + subnet=dict( + type="dict", options=subnet, obj=vmm_sdk.SubnetReference, required=True + ), + ) + return module_args + + +def migrate_nic(module, result): + vms = get_vm_api_instance(module) + nic_ext_id = module.params.get("ext_id") + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = nic_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.MigrateNicConfig() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + 
result["error"] = err + module.fail_json(msg="Failed generating Migrate NIC spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of nic current state + nic = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + etag = get_etag(nic) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.migrate_nic_by_id( + vmExtId=vm_ext_id, extId=nic_ext_id, body=spec, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while migrating nic of VM", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + nic = get_nic(module, api_instance=vms, ext_id=nic_ext_id, vm_ext_id=vm_ext_id) + result["response"] = strip_internal_attributes(nic.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + migrate_nic(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_nics_v2.py b/plugins/modules/ntnx_vms_nics_v2.py new file mode 100644 index 000000000..568743c09 --- /dev/null +++ b/plugins/modules/ntnx_vms_nics_v2.py @@ -0,0 +1,537 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_nics_v2 +short_description: Manage NICs of Nutanix VMs +description: + - This module allows you to create, update, and delete NICs of Nutanix VMs. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to present then module will create NIC. + - If C(state) is set to present and nic ext_id is given then module will update given NIC. + - if C(state) is set to absent then module will delete NIC of that VM. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the NIC. + type: str + required: false + vm_ext_id: + description: + - The external ID of the VM. + type: str + required: true + backing_info: + description: + - The backing information for the NIC. + type: dict + suboptions: + model: + description: + - The model of the NIC. + type: str + choices: + - VIRTIO + - E1000 + required: false + mac_address: + description: + - The MAC address of the NIC. + type: str + required: false + is_connected: + description: + - Whether the NIC needs to be connected or not. + type: bool + required: false + num_queues: + description: + - The number of queues for the NIC. + type: int + required: false + network_info: + description: + - The network configuration for the NIC. + type: dict + suboptions: + nic_type: + description: + - The type of the NIC. + type: str + choices: + - NORMAL_NIC + - DIRECT_NIC + - NETWORK_FUNCTION_NIC + - SPAN_DESTINATION_NIC + required: false + network_function_chain: + description: + - The network function chain for the NIC. + type: dict + suboptions: + ext_id: + description: + - The external ID of the network function chain. 
+ type: str + required: true + required: false + network_function_nic_type: + description: + - The type of the network function NIC. + type: str + choices: + - INGRESS + - EGRESS + - TAP + required: false + subnet: + description: + - The subnet for the NIC. + type: dict + suboptions: + ext_id: + description: + - The external ID of the subnet. + type: str + required: true + required: false + vlan_mode: + description: + - The VLAN mode for the NIC. + type: str + choices: + - ACCESS + - TRUNK + required: false + trunked_vlans: + description: + - The trunked VLANs for the NIC. + type: list + elements: int + required: false + should_allow_unknown_macs: + description: + - Whether to allow unknown MAC addresses or not. + type: bool + required: false + ipv4_config: + description: + - The IPv4 configuration for the NIC. + type: dict + suboptions: + should_assign_ip: + description: + - Whether to assign an IP address or not. + type: bool + required: false + ip_address: + description: + - The IP address for the NIC. + type: dict + suboptions: + value: + description: + - The IP address value. + type: str + required: True + prefix_length: + description: + - The prefix length for the IP address. + - Can be skipped, default it will be 32. + type: int + required: false + secondary_ip_address_list: + description: + - The list of secondary IP addresses for the NIC. + type: list + elements: dict + suboptions: + value: + description: + - The IP address value. + type: str + required: true + prefix_length: + description: + - The prefix length for the IP address. + - Can be skipped, default it will be 32. 
+                        type: int
+                        required: false
+        required: false
+extends_documentation_fragment:
+    - nutanix.ncp.ntnx_credentials
+    - nutanix.ncp.ntnx_operations_v2
+"""
+
+EXAMPLES = r"""
+- name: Create nic with assigning IP as true
+  nutanix.ncp.ntnx_vms_nics_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    vm_ext_id: "97634446-ac09-41c8-8298-71608c6d5ac9"
+    backing_info:
+      is_connected: true
+    network_info:
+      nic_type: "NORMAL_NIC"
+      subnet:
+        ext_id: "18f0ed6e-30c8-48be-9c8f-e7cb4153416a"
+      vlan_mode: "ACCESS"
+      ipv4_config:
+        should_assign_ip: true
+  register: result
+
+- name: Create nic with assigning private IP
+  nutanix.ncp.ntnx_vms_nics_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    vm_ext_id: "97634446-ac09-41c8-8298-71608c6d5ac9"
+    backing_info:
+      is_connected: true
+    network_info:
+      nic_type: "NORMAL_NIC"
+      subnet:
+        ext_id: "18f0ed6e-30c8-48be-9c8f-e7cb4153416a"
+      vlan_mode: "ACCESS"
+      ipv4_config:
+        should_assign_ip: true
+        ip_address:
+          value: "10.44.44.44"
+  register: result
+
+- name: Update VLAN nic type
+  nutanix.ncp.ntnx_vms_nics_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    vm_ext_id: "97634446-ac09-41c8-8298-71608c6d5ac9"
+    ext_id: "40a5ac91-83f6-5d35-8ae0-d8f013779377"
+    network_info:
+      nic_type: "DIRECT_NIC"
+  register: result
+
+- name: Delete VLAN nic
+  nutanix.ncp.ntnx_vms_nics_v2:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    state: "absent"
+    vm_ext_id: "97634446-ac09-41c8-8298-71608c6d5ac9"
+    ext_id: "40a5ac91-83f6-5d35-8ae0-d8f013779377"
+  register: result
+"""
+
+RETURN = r"""
+response:
+    description:
+        - Currently for create operation it will return the task details.
+        - For update it will return the final state of VM NIC if wait is set to true. Else it will return task details.
+        - For delete it will return the task details.
+ type: dict + returned: always + sample: { + "cluster_ext_ids": [ + "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-04-23T09:10:10.037257+00:00", + "completion_details": null, + "created_time": "2024-04-23T09:10:06.890679+00:00", + "entities_affected": [ + { + "ext_id": "97634446-ac09-41c8-8298-71608c6d5ac9", + "rel": "vmm:ahv:vm" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:40a5ac91-83f6-5d35-8ae0-d8f013779377", + "is_cancelable": false, + "last_updated_time": "2024-04-23T09:10:10.037257+00:00", + "legacy_error_message": null, + "operation": "CreateNic", + "operation_description": null, + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-04-23T09:10:06.901247+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:9b097142-fc32-5675-8ebb-e35206c1080f", + "href": "https://10.44.76.42:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:9b097142-fc32-5675-8ebb-e35206c1080f", + "rel": "subtask" + } + ], + "warnings": null + } +task_ext_id: + description: The external ID of the task associated with the operation. + type: str +changed: + description: Indicates whether the module changed the state of the VM. + type: bool +error: + description: The error message, if any, encountered. + type: str +ext_id: + description: + - NIC external ID + - Only returned when NIC is updated. + - Due to known issue, external ID wont be present during create operation. + type: str +vm_ext_id: + description: VM external ID + type: str +skipped: + description: Indicates whether the operation was skipped due to no state changes (Idempotency). 
+ type: bool +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + wait_for_completion, + wait_for_entity_ext_id_in_task, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_nic, get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=False), + vm_ext_id=dict(type="str", required=True), + ) + module_args.update(vm_specs.get_nic_spec()) + return module_args + + +def create_nic(module, result): + vms = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AhvConfigNic() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vm nic Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get 
etag of vm current state + vm = get_vm(module, vms, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.create_nic(vmExtId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vm nic", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id, err = wait_for_entity_ext_id_in_task( + module, task_ext_id, rel=TASK_CONSTANTS.RelEntityType.VM_NIC + ) + if err: + result["error"] = err + module.fail_json(msg="Failed to get NIC external ID from task", **result) + if ext_id: + resp = get_nic(module, api_instance=vms, ext_id=ext_id, vm_ext_id=vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + if current_spec != update_spec: + return False + return True + + +def update_nic(module, result): + vms = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + + current_spec = get_nic(module, api_instance=vms, ext_id=ext_id, vm_ext_id=vm_ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm nic update spec", **result) + + # check for idempotency + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + 
resp = None + try: + resp = vms.update_nic_by_id(vmExtId=vm_ext_id, extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating vm nic", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + # poll for the last unfinished task + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_nic(module, api_instance=vms, ext_id=ext_id, vm_ext_id=vm_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_nic(module, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = ext_id + + vms = get_vm_api_instance(module) + nic = get_nic(module, api_instance=vms, ext_id=ext_id, vm_ext_id=vm_ext_id) + etag = get_etag(nic) + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.delete_nic_by_id(vmExtId=vm_ext_id, extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vm nic", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "absent", ("ext_id",)), + ("state", "present", ("ext_id", "backing_info", "network_info"), True), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + 
remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_nic(module, result) + else: + create_nic(module, result) + else: + delete_nic(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_ova.py b/plugins/modules/ntnx_vms_ova.py index 8ef3082c6..c3a79e36f 100644 --- a/plugins/modules/ntnx_vms_ova.py +++ b/plugins/modules/ntnx_vms_ova.py @@ -48,7 +48,7 @@ file_format: VMDK register: result ignore_errors: true - check_mode: yes + check_mode: true - name: create QCOW2 ova_image ntnx_vms_ova: @@ -214,8 +214,8 @@ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.vms import VM # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.vms import VM # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_vms_power_actions_v2.py b/plugins/modules/ntnx_vms_power_actions_v2.py new file mode 100644 index 000000000..65de9127a --- /dev/null +++ b/plugins/modules/ntnx_vms_power_actions_v2.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vms_power_actions_v2 +short_description: Perform power actions on Nutanix VMs +description: + - This module allows you to perform power actions on Nutanix VMs, such as powering on, powering off, resetting, and more. 
+ - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + state: + description: + - The desired power state of the VM. + - power_on -> Turn on the VM. + - power_off -> Turn off the VM. + - force_power_cycle -> Forcefully power cycle the VM. + - reset -> Reset the VM. + - shutdown -> Shutdown the VM using ACPI. + - guest_shutdown -> Shutdown the VM using NGT. + - reboot -> Reboot the VM using ACPI. + - guest_reboot -> Reboot the VM using NGT. + type: str + choices: + - power_on + - power_off + - force_power_cycle + - reset + - shutdown + - guest_shutdown + - reboot + - guest_reboot + default: power_on + wait: + description: + - Wait for the task to complete. + type: bool + default: true + guest_power_state_transition_config: + description: + - Configuration options for guest power state transition. + type: dict + suboptions: + should_enable_script_exec: + description: + - Indicates whether to run the set script before the VM shutdowns/restarts. + type: bool + should_fail_on_script_failure: + description: + - Indicates whether to abort VM shutdown/restart if the script fails. 
+ type: bool +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - George Ghawali (@george-ghawali) +""" +EXAMPLES = r""" + +- name: Power on a VM + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: power_on + wait: true + register: result + +- name: Power off a VM + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: power_off + wait: true + register: result + +- name: Reset a VM + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: reset + wait: true + register: result + +- name: Shutdown a VM using ACPI + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: shutdown + wait: true + register: result + +- name: Shutdown a VM using NGT + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: guest_shutdown + wait: true + register: result + +- name: Reboot a VM using ACPI + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: reboot + wait: true + register: result + +- name: Reboot a VM using NGT + 
nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: guest_reboot + wait: true + guest_power_state_transition_config: + should_enable_script_exec: true + should_fail_on_script_failure: true + register: result + +- name: Power on a VM with guest power state transition configuration + nutanix.ncp.ntnx_vms_power_actions_v2: + nutanix_host: "{{ ip }}" + validate_certs: false + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + state: power_on + wait: true + guest_power_state_transition_config: + should_enable_script_exec: true + should_fail_on_script_failure: true + register: result +""" + +RETURN = r""" +ext_id: + description: The external ID of the VM. + type: str + returned: always + sample: "0005a7b8-0b0b-4b3b-0000-000000000000" +task_ext_id: + description: The external ID of the power action task. + type: str + returned: when a power action task is triggered +response: + description: The response from the power action task. + type: dict + returned: always + sample: {"status": "success"} +changed: + description: Indicates whether the power state of the VM has changed. + type: bool + returned: always + sample: true +skipped: + description: Indicates whether the power action was skipped because the VM is already in the desired state. + type: bool + returned: when the power action is skipped +msg: + description: A human-readable message about the result of the power action. + type: str + returned: always + sample: "Power on action completed successfully." 
+""" + +import traceback # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + + +def get_module_spec(): + guest_power_state_transition_config = dict( + should_enable_script_exec=dict(type="bool"), + should_fail_on_script_failure=dict(type="bool"), + ) + module_args = dict( + ext_id=dict(type="str", required=True), + state=dict( + type="str", + choices=[ + "power_on", + "power_off", + "force_power_cycle", + "reset", + "shutdown", + "guest_shutdown", + "reboot", + "guest_reboot", + ], + default="power_on", + ), + guest_power_state_transition_config=dict( + type="dict", + options=guest_power_state_transition_config, + obj=vmm_sdk.GuestPowerStateTransitionConfig, + ), + ) + return module_args + + +def power_actions(module, state, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["ext_id"] + result["ext_id"] = vm_ext_id + if module.check_mode or state == "guest_shutdown" or state == "guest_reboot": + sg = SpecGenerator(module) + default_spec = vmm_sdk.GuestPowerOptions() + spec, err = sg.generate_spec(obj=default_spec) + if err: + result["error"] = err + module.fail_json(msg="Failed generating spec for guest reboot vm", **result) + if module.check_mode: 
+ result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vm = get_vm(module, vmm, vm_ext_id) + if (vm.power_state == "ON" and state == "power_on") or ( + vm.power_state == "OFF" and state != "power_on" + ): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + return + etag = get_etag(vm) + kwargs = {"if_match": etag} + resp = None + try: + if state == "power_on": + resp = vmm.power_on_vm(extId=vm_ext_id, **kwargs) + + elif state == "power_off": + resp = vmm.power_off_vm(extId=vm_ext_id, **kwargs) + + elif state == "force_power_cycle": + resp = vmm.power_cycle_vm(extId=vm_ext_id, **kwargs) + + elif state == "reset": + resp = vmm.reset_vm(extId=vm_ext_id, **kwargs) + + elif state == "shutdown": + resp = vmm.shutdown_vm(extId=vm_ext_id, **kwargs) + + elif state == "guest_shutdown": + resp = vmm.shutdown_guest_vm(extId=vm_ext_id, body=spec, **kwargs) + + elif state == "reboot": + resp = vmm.reboot_vm(extId=vm_ext_id, **kwargs) + + elif state == "guest_reboot": + resp = vmm.reboot_guest_vm(extId=vm_ext_id, body=spec, **kwargs) + + else: + result["error"] = "Action is not supported" + module.fail_json(msg="Action is not supported", **result) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Failed to ${state} VM", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + # poll for the last unfinished task + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + remove_param_with_none_value(module.params) + result = { + "changed": 
False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params.get("state") + power_actions(module, state, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_serial_port_info_v2.py b/plugins/modules/ntnx_vms_serial_port_info_v2.py new file mode 100644 index 000000000..ab0a97055 --- /dev/null +++ b/plugins/modules/ntnx_vms_serial_port_info_v2.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_serial_port_info_v2 +short_description: Fetch information about Nutanix VM's serial ports +description: + - This module fetches information about Nutanix VM's serial ports. + - The module can fetch information about all serial ports or a specific serial ports. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the serial port. + type: str + required: false + vm_ext_id: + description: + - The external ID of the vm. 
+ type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all serial ports of a vm + nutanix.ncp.ntnx_vms_serial_port_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 00000-00000-000000-000000 + +- name: Fetch information about a specific serial port + nutanix.ncp.ntnx_vms_serial_port_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: 00000-00000-000000-000000 + ext_id: 00000-00000-000000-000000 +""" + + +RETURN = r""" +response: + description: + - The response from the Nutanix PC serial ports v4 API. + - it can be single serial port or list of serial ports as per spec. + type: dict + returned: always + sample: + { + "ext_id": "00000000-0000-0000-0000-000000000000", + "index": 0, + "is_connected": true, + "links": null, + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +vm_ext_id: + description: The external ID of the vm. + type: str + returned: always + sample: "00000-00000-000000-000000" +ext_id: + description: + - The external ID of the serial port when specific serial port is fetched. 
+ type: str + returned: always + sample: "00000-00000-000000-000000" +""" + +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_vm_api_instance # noqa: E402 + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + vm_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_serial_port(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + + try: + resp = vmm.get_serial_port_by_id(vmExtId=vm_ext_id, extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm serial port info", + ) + + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_serial_ports(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm serial ports info Spec", **result) + + try: + resp = vmm.list_serial_ports_by_vm_id(vmExtId=vm_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vm serial ports info", + ) + + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def 
run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_serial_port(module, result) + else: + get_serial_ports(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_serial_port_v2.py b/plugins/modules/ntnx_vms_serial_port_v2.py new file mode 100644 index 000000000..2a610c85a --- /dev/null +++ b/plugins/modules/ntnx_vms_serial_port_v2.py @@ -0,0 +1,345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_vms_serial_port_v2 +version_added: "2.0.0" +description: + - This module allows you to manage serial port for Nutanix AHV VMs. + - This module uses PC v4 APIs based SDKs +short_description: VM Serial Port module which supports VM serial port CRUD states +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then the operation will be create the item. + - if C(state) is set to C(present) and C(ext_id) is given then it will update that serial port. + - if C(state) is set to C(present) then C(ext_id) or C(name) needs to be set. + - >- + If C(state) is set to C(absent) and if the item exists, then + item is removed. + choices: + - present + - absent + type: str + default: present + ext_id: + description: + - The external ID of the serial port. + - Required for updating or deleting a serial port. + type: str + required: false + + vm_ext_id: + description: + - The external ID of the VM. 
+ type: str + required: true + index: + description: + - Index of the serial port. + type: int + is_connected: + description: + - Indicates whether the serial port is connected or not. + type: bool +author: + - Alaa Bishtawi (@alaa-bish) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create Serial Port + nutanix.ncp.ntnx_vms_serial_port_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: '{{ vm_ext_id }}' + state: present + index: 0 + is_connected: true + +- name: Update Serial Port connection status + nutanix.ncp.ntnx_vms_serial_port_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vm_ext_id: '{{ vm_ext_id }}' + ext_id: '{{ result.response.0.ext_id }}' + state: present + is_connected: false + +- name: Delete Serial Port + nutanix.ncp.ntnx_vms_serial_port_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "dded1b87-e566-419a-aac0-fb282792fb83" + vm_ext_id: "dded1b87-e566-419a-aac0-fb282792fb83" +""" + +RETURN = r""" +response: + description: + - when wait is false, the response will be task status. + - The response from the Nutanix PC VMM Serial Port v4 API. + - it can be single serial port or list of serial ports as per spec. + type: dict + returned: always + sample: + { + "ext_id": "dded1b87-e566-419a-aac0-fb282792fb83", + "index": 0, + "is_connected": true, + "links": null, + "tenant_id": null + } +error: + description: The error message if an error occurs. + type: str + returned: when an error occurs +vm_ext_id: + description: The external ID of the vm. 
+ type: str + returned: always + sample: "dded1b87-e566-419a-aac0-fb282792fb83" +ext_id: + description: + - The external ID of the Serial Port when specific serial port is fetched. + type: str + returned: always + sample: "dded1b87-e566-419a-aac0-fb282792fb83" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + wait_for_completion, + wait_for_entity_ext_id_in_task, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_serial_port, get_vm # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + is_connected=dict(type="bool"), + index=dict(type="int"), + vm_ext_id=dict(type="str", required=True), + ) + + return module_args + + +def create_serial_port(module, result): + vmm = get_vm_api_instance(module) + vm_ext_id = module.params["vm_ext_id"] + result["vm_ext_id"] = vm_ext_id + + sg = SpecGenerator(module) + default_spec = vmm_sdk.SerialPort() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err 
+ module.fail_json(msg="Failed generating create vm serial port Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + # get etag of vm current state + vm = get_vm(module, vmm, vm_ext_id) + etag = get_etag(vm) + + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.create_serial_port(vmExtId=vm_ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vm serial port", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id, err = wait_for_entity_ext_id_in_task( + module, task_ext_id, rel=TASK_CONSTANTS.RelEntityType.SERIAL_PORT + ) + if err: + result["error"] = err + module.fail_json(msg="Failed to get serial port ID from task", **result) + if ext_id: + resp = get_serial_port(module, vmm, ext_id, vm_ext_id=vm_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + if current_spec != update_spec: + return False + return True + + +def update_serial_port(module, result): + vmm = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["ext_id"] = ext_id + result["vm_ext_id"] = vm_ext_id + + current_spec = get_serial_port( + module, api_instance=vmm, ext_id=ext_id, vm_ext_id=vm_ext_id + ) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm serial port update spec", **result) + + # check for idempotency + if 
check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = vmm.update_serial_port_by_id( + vmExtId=vm_ext_id, extId=ext_id, body=update_spec + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating vm serial port", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + # poll for the last unfinished task + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_serial_port(module, vmm, ext_id, vm_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_serial_port(module, result): + ext_id = module.params.get("ext_id") + vm_ext_id = module.params.get("vm_ext_id") + result["vm_ext_id"] = vm_ext_id + result["ext_id"] = ext_id + + vmm = get_vm_api_instance(module) + serial_port = get_serial_port(module, vmm, ext_id, vm_ext_id=vm_ext_id) + etag = get_etag(serial_port) + kwargs = {"if_match": etag} + resp = None + try: + resp = vmm.delete_serial_port_by_id(vmExtId=vm_ext_id, extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vm serial port", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[("state", 
"absent", ("ext_id",))], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "vm_ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_serial_port(module, result) + else: + create_serial_port(module, result) + else: + delete_serial_port(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_stage_guest_customization_v2.py b/plugins/modules/ntnx_vms_stage_guest_customization_v2.py new file mode 100644 index 000000000..a431bb4ce --- /dev/null +++ b/plugins/modules/ntnx_vms_stage_guest_customization_v2.py @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: ntnx_vms_stage_guest_customization_v2 +short_description: Stage guest customization configuration for a Nutanix VM +description: + - This module stages guest customization configuration for a Nutanix VM. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +options: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + config: + description: + - The guest customization configuration. + type: dict + suboptions: + sysprep: + description: + - The Sysprep configuration. + type: dict + suboptions: + install_type: + description: + - The Sysprep installation type. + type: str + choices: + - FRESH + - PREPARED + sysprep_script: + description: + - The Sysprep script configuration. 
+ type: dict + suboptions: + unattendxml: + description: + - The path to the unattend.xml file. + type: dict + suboptions: + value: + description: + - The value of the unattend.xml file. + type: str + custom_key_values: + description: + - The custom key-value pairs for Sysprep. + type: dict + suboptions: + key_value_pairs: + description: + - The list of key-value pairs. + type: list + elements: dict + suboptions: + name: + description: + - The name of the key-value pair. + type: str + value: + description: + - The value of the key-value pair. + type: raw + cloudinit: + description: + - The CloudInit configuration. + type: dict + suboptions: + datasource_type: + description: + - The type of the CloudInit datasource. + type: str + choices: + - CONFIG_DRIVE_V2 + metadata: + description: + - The metadata for CloudInit. + type: str + cloud_init_script: + description: + - The CloudInit script configuration. + type: dict + suboptions: + user_data: + description: + - The user data for CloudInit. + type: dict + suboptions: + value: + description: + - The value of the user data. + type: str + required: true + custom_key_values: + description: + - The custom key-value pairs for CloudInit. + type: dict + suboptions: + key_value_pairs: + description: + - The list of key-value pairs. + type: list + elements: dict + suboptions: + name: + description: + - The name of the key-value pair. + type: str + value: + description: + - The value of the key-value pair. 
+ type: raw +author: + - Alaa Bishtawi (@alaa-bish) +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" +EXAMPLES = r""" +- name: Update guest script + nutanix.ncp.ntnx_vms_stage_guest_customization_v2: + ext_id: "7334f142-9653-4c84-7287-3c758d1a0aeb" + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result +""" + +RETURN = r""" +--- +response: + description: + - Response for update guest customization configuration. + - Task details status + type: dict + returned: always + sample: + { + "cluster_ext_ids": [ + "0006197f-3d06-ce49-1fc3-ac1f6b6029c1" + ], + "completed_time": "2024-07-15T07:49:15.629543+00:00", + "completion_details": null, + "created_time": "2024-07-15T07:49:14.348791+00:00", + "entities_affected": [ + { + "ext_id": "7334f142-9653-4c84-7287-3c758d1a0aeb", + "rel": "vmm:ahv:vm" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:faeab3ca-3dbc-5384-867b-afa179822b79", + "is_cancelable": false, + "last_updated_time": "2024-07-15T07:49:15.629542+00:00", + "legacy_error_message": null, + "operation": "CustomizeGuest", + "operation_description": null, + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-07-15T07:49:14.371537+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": [ + { + "ext_id": "ZXJnb24=:5e3ed0ac-c573-5f52-b72f-bc47185c7910", + "href": "https://10.51.144.57:9440/api/prism/v4.0.b1/config/tasks/ZXJnb24=:5e3ed0ac-c573-5f52-b72f-bc47185c7910", + "rel": "subtask" + } + ], + "warnings": null + } +task_ext_id: + description: + - The external ID of the task. + type: str + returned: always + sample: "ZXJnb24=:faeab3ca-3dbc-5384-867b-afa179822b79" +ext_id: + description: + - The external ID of the VM. 
+ type: str + returned: always + sample: "7334f142-9653-4c84-7287-3c758d1a0aeb" +changed: + description: + - Indicates whether the guest customization configuration was changed. + type: bool + returned: always + sample: true +error: + description: + - The error message. + type: str + returned: always +failed: + description: + - Indicates whether the task failed. + type: bool + returned: always + sample: false +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=True), + config=dict( + type="dict", + options=vm_specs.get_gc_param_spec(), + obj=vm_specs.get_gc_allowed_types_spec(), + ), + ) + + return module_args + + +def stage_customize_guest(module, result): + vms = get_vm_api_instance(module) + ext_id = module.params["ext_id"] + result["ext_id"] = ext_id + + vm = get_vm(module, vms, ext_id=ext_id) + + etag = get_etag(data=vm) + if not 
etag: + return module.fail_json( + "Unable to fetch etag for stage guest customization configuration", **result + ) + + kwargs = {"if_match": etag} + + sg = SpecGenerator(module) + default_spec = vmm_sdk.GuestCustomizationParams() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating stage guest customization configuration spec", + **result, + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.customize_guest_vm(extId=ext_id, body=spec, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while staging guest customization configuration", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id, True) + result["response"] = strip_internal_attributes(resp.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + stage_customize_guest(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vms_v2.py b/plugins/modules/ntnx_vms_v2.py new file mode 100644 index 000000000..70dbb4a77 --- /dev/null +++ b/plugins/modules/ntnx_vms_v2.py @@ -0,0 +1,1297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_vms_v2 +short_description: "Create, Update and delete VMs in Nutanix AHV based PC" +version_added: 2.0.0 +description: + - Create, Update and delete VMs in Nutanix AHV based PC + - This module uses PC v4 APIs based SDKs +notes: + - During vm update, Update or create of subresources like disks, nics, cd_roms, gpus, serial_ports, etc. is not supported. + - Use subresources specific modules to update or create subresources. + - Power state management is not supported in this module. use ntnx_vms_power_actions_v2 module to manage power state. + - Avoid providing subresources spec during update operation. +options: + ext_id: + description: + - external ID of the VM. + - Required for updating or deleting the VM. + required: false + type: str + name: + description: + - The name of the VM. + type: str + description: + description: + - The description of the VM. + required: false + type: str + num_sockets: + description: + - The number of sockets for the VM. + required: false + type: int + num_cores_per_socket: + description: + - The number of cores per socket for the VM. + required: false + type: int + num_threads_per_core: + description: + - The number of threads per core for the VM. + required: false + type: int + num_numa_nodes: + description: + - The number of NUMA nodes for the VM. + required: false + type: int + memory_size_bytes: + description: + - The memory size in bytes for the VM. + required: false + type: int + is_vcpu_hard_pinning_enabled: + description: + - Whether vCPU hard pinning is enabled for the VM. + required: false + type: bool + is_cpu_passthrough_enabled: + description: + - Whether CPU passthrough is enabled for the VM. + required: false + type: bool + enabled_cpu_features: + description: + - The list of enabled CPU features for the VM. 
+ required: false + type: list + elements: str + choices: ["HARDWARE_VIRTUALIZATION"] + is_memory_overcommit_enabled: + description: + - Whether memory overcommit is enabled for the VM. + required: false + type: bool + is_gpu_console_enabled: + description: + - Whether GPU console is enabled for the VM. + required: false + type: bool + categories: + description: + - The list of categories for the VM. + required: false + type: list + elements: dict + suboptions: + ext_id: + description: + - The external ID of the category. + required: true + type: str + cluster: + description: + - The cluster reference for the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the cluster. + required: true + type: str + availability_zone: + description: + - The availability zone reference for the VM. + required: false + type: dict + suboptions: + ext_id: + description: + - The external ID of the availability zone. + required: true + type: str + guest_customization: + description: + - Stage a Sysprep or cloud-init configuration file to be used by the guest for the next boot. + Note that the Sysprep command must be used to generalize the Windows VMs before triggering this API call. + required: false + type: dict + suboptions: + config: + description: + - The Nutanix Guest Tools customization settings. + required: false + type: dict + suboptions: + sysprep: + description: + - sysprep config + required: false + type: dict + suboptions: + install_type: + description: + - Indicates whether the guest will be freshly installed using this unattend configuration, + or this unattend configuration will be applied to a pre-prepared image. + type: str + choices: ["FRESH", "PREPARED"] + sysprep_script: + description: Sysprep script configuration. + type: dict + suboptions: + unattendxml: + description: Configuration for Unattend XML. 
+ type: dict + suboptions: + value: + description: The Vales of the field + type: str + custom_key_values: + description: Custom key-value pairs for system preparation. + type: dict + suboptions: + key_value_pairs: + description: The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: Key Name + type: str + value: + description: Key Value + type: raw + cloudinit: + description: + - cloudinit config + required: false + type: dict + suboptions: + datasource_type: + description: + - The type of the data source. + type: str + choices: ["CONFIG_DRIVE_V2"] + metadata: + description: + - The metadata. + type: str + cloud_init_script: + description: + - The cloud-init script. + type: dict + suboptions: + user_data: + description: + - The user data. + type: dict + suboptions: + value: + description: + - The value of the user data. + type: str + required: true + custom_key_values: + description: + - The custom key values. + type: dict + suboptions: + key_value_pairs: + description: + - The list of the individual KeyValuePair elements. + type: list + elements: dict + suboptions: + name: + description: + - The name of the key. + type: str + value: + description: + - The value of the key. + type: raw + guest_tools: + description: + - The guest tools for the VM. + required: false + type: dict + suboptions: + is_enabled: + description: + - Whether the guest tools are enabled for the VM. + type: bool + capabilities: + description: + - The list of capabilities for the guest tools. + type: list + elements: str + choices: ["SELF_SERVICE_RESTORE", "VSS_SNAPSHOT"] + hardware_clock_timezone: + description: + - The hardware clock timezone for the VM. + required: false + type: str + is_branding_enabled: + description: + - Whether branding is enabled for the VM. + required: false + type: bool + boot_config: + description: + - The boot configuration for the VM. 
+ required: false + type: dict + suboptions: + legacy_boot: + description: + - The legacy boot configuration. + required: false + type: dict + suboptions: + boot_device: + description: + - The boot device for legacy boot. + type: dict + suboptions: + boot_device_disk: + description: Specification for booting from disk. + type: dict + suboptions: + disk_address: + description: Address specification for the disk. + type: dict + suboptions: + bus_type: + description: Bus type for the device. The acceptable values are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: true + index: + description: Device index on the bus. This field is ignored unless the bus details are specified. + type: int + boot_device_nic: + description: Specification for booting from network interface controller (NIC). + type: dict + suboptions: + mac_address: + description: Mac address + type: str + boot_order: + description: + - Indicates the order of device types in which the VM should try to boot from. + If the boot device order is not provided the system will decide an appropriate boot device order. + type: list + elements: str + choices: ["CDROM", "NETWORK", "DISK"] + uefi_boot: + description: + - The UEFI boot configuration. + required: false + type: dict + suboptions: + is_secure_boot_enabled: + description: Indicate whether to enable secure boot or not. + type: bool + nvram_device: + description: Configuration for NVRAM to be presented to the VM. + type: dict + suboptions: + backing_storage_info: + description: Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: Size of the disk in Bytes + type: int + storage_container: + description: + - This reference is for disk level storage container preference. + This preference specifies the storage container to which this disk belongs. 
+ type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM disk container. It should be of type UUID. + required: true + type: str + storage_config: + description: Storage configuration for VM disks. + type: dict + suboptions: + is_flash_mode_enabled: + description: Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: A reference to a disk or image that contains the contents of a disk. + type: dict + suboptions: + reference: + description: Reference to the data source, mutually exclusive with either image_reference or vm_disk_reference. + type: dict + suboptions: + image_reference: + description: Reference to an image. + type: dict + suboptions: + image_ext_id: + description: The globally unique identifier of an image. It should be of type UUID. + type: str + vm_disk_reference: + description: Reference to a virtual machine disk. + type: dict + suboptions: + disk_ext_id: + description: The globally unique identifier of a VM disk. It should be of type UUID. + type: str + disk_address: + description: The address of the disk. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device. The acceptable values + are SCSI, IDE, PCI, SATA, SPAPR (only PPC). + type: str + choices: ["SCSI", "IDE", "PCI", "SATA", "SPAPR"] + required: true + index: + description: + - Device index on the bus. + This field is ignored unless the bus details are specified. + type: int + vm_reference: + description: This is a reference to a VM. + type: dict + suboptions: + ext_id: + description: + - The globally unique identifier of a VM. It should be of type UUID. + required: true + type: str + + is_vga_console_enabled: + description: + - Whether VGA console is enabled for the VM. + required: false + type: bool + machine_type: + description: + - The machine type for the VM. 
+ required: false + type: str + choices: ["PC", "PSERIES", "Q35"] + vtpm_config: + description: + - The vTPM configuration for the VM. + required: false + type: dict + suboptions: + is_vtpm_enabled: + description: + - Indicates whether the virtual trusted platform module is enabled for the Guest OS or not. + type: bool + version: + description: + - The version of the vTPM. + type: str + is_agent_vm: + description: + - Whether the VM is an agent VM. + required: false + type: bool + apc_config: + description: + - The APC configuration for the VM. + required: false + type: dict + suboptions: + is_apc_enabled: + description: + - Indicates whether the APC is enabled or not. + type: bool + cpu_model: + description: + - The CPU model reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the CPU model. + type: str + name: + description: + - The name of the CPU model. + type: str + storage_config: + description: + - The storage configuration for the VM. + required: false + type: dict + suboptions: + is_flash_mode_enabled: + description: + - Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + qos_config: + description: + - The QoS configuration for the VM. + type: dict + suboptions: + throttled_iops: + description: + - The throttled IOPS. + type: int + + disks: + description: + - The list of disks for the VM. + required: false + type: list + elements: dict + suboptions: + backing_info: + description: + - Supporting storage to create virtual disk on. + type: dict + suboptions: + vm_disk: + description: + - The VM disk information. + type: dict + suboptions: + disk_size_bytes: + description: + - The size of the disk in bytes. + - Mutually exclusive with C(data_source) during update. + type: int + storage_container: + description: + - The storage container reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the storage container. 
+ type: str + required: true + storage_config: + description: + - The storage configuration for the disk. + type: dict + suboptions: + is_flash_mode_enabled: + description: + - Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: + - The data source for the disk. + - Mutually exclusive with C(disk_size_bytes) during update. + type: dict + suboptions: + reference: + description: + - The reference to the data source. + type: dict + suboptions: + image_reference: + description: + - The reference to an image. + - Mutually exclusive with C(vm_disk_reference). + type: dict + suboptions: + image_ext_id: + description: + - The external ID of the image. + type: str + vm_disk_reference: + description: + - The reference to a VM disk. + - Mutually exclusive with C(image_reference). + type: dict + suboptions: + disk_ext_id: + description: + - The external ID of the VM disk. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + required: true + index: + description: + - The index of the disk. + type: int + vm_reference: + description: + - The reference to the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + adsf_volume_group: + description: + - The ADSF volume group reference. + type: dict + suboptions: + volume_group_ext_id: + description: + - The external ID of the volume group. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + required: true + index: + description: + - The index of the disk. + type: int + cd_roms: + description: + - The list of CD-ROMs for the VM. 
+ required: false + type: list + elements: dict + suboptions: + backing_info: + description: + - Storage provided by Nutanix ADSF + type: dict + suboptions: + disk_size_bytes: + description: + - The size of the CDROM in bytes. + type: int + storage_container: + description: + - The storage container reference. + type: dict + suboptions: + ext_id: + description: + - The external ID of the storage container. + type: str + required: true + storage_config: + description: + - The storage configuration. + type: dict + suboptions: + is_flash_mode_enabled: + description: + - Indicates whether the virtual disk is pinned to the hot tier or not. + type: bool + data_source: + description: + - The data source for the disk. + type: dict + suboptions: + reference: + description: + - The reference to the data source. + type: dict + suboptions: + image_reference: + description: + - The reference to an image. + - Mutually exclusive with C(vm_disk_reference). + type: dict + suboptions: + image_ext_id: + description: + - The external ID of the image. + type: str + vm_disk_reference: + description: + - The reference to a VM disk. + - Mutually exclusive with C(image_reference). + type: dict + suboptions: + disk_ext_id: + description: + - The external ID of the VM disk. + type: str + disk_address: + description: + - The address of the disk. + type: dict + suboptions: + bus_type: + description: + - The bus type of the disk. + type: str + required: true + choices: + - 'SCSI' + - 'IDE' + - 'PCI' + - 'SATA' + - 'SPAPR' + index: + description: + - The index of the disk. + type: int + vm_reference: + description: + - The reference to the VM. + type: dict + suboptions: + ext_id: + description: + - The external ID of the VM. + type: str + required: true + disk_address: + description: + - The address of the CDROM. + type: dict + suboptions: + bus_type: + description: + - Bus type for the device + type: str + choices: + - 'IDE' + - 'SATA' + index: + description: + - Device index on the bus. 
+ - This field is ignored unless the bus details are specified. + type: int + nics: + description: + - The list of NICs for the VM. + required: false + type: list + elements: dict + suboptions: + backing_info: + description: + - The backing information for the NIC. + type: dict + suboptions: + model: + description: + - The model of the NIC. + type: str + choices: + - VIRTIO + - E1000 + required: false + mac_address: + description: + - The MAC address of the NIC. + type: str + required: false + is_connected: + description: + - Whether the NIC needs to be connected or not. + type: bool + required: false + num_queues: + description: + - The number of queues for the NIC. + type: int + required: false + network_info: + description: + - The network configuration for the NIC. + type: dict + suboptions: + nic_type: + description: + - The type of the NIC. + type: str + choices: + - NORMAL_NIC + - DIRECT_NIC + - NETWORK_FUNCTION_NIC + - SPAN_DESTINATION_NIC + required: false + network_function_chain: + description: + - The network function chain for the NIC. + type: dict + suboptions: + ext_id: + description: + - The external ID of the network function chain. + type: str + required: true + required: false + network_function_nic_type: + description: + - The type of the network function NIC. + type: str + choices: + - INGRESS + - EGRESS + - TAP + required: false + subnet: + description: + - The subnet for the NIC. + type: dict + suboptions: + ext_id: + description: + - The external ID of the subnet. + type: str + required: true + required: false + vlan_mode: + description: + - The VLAN mode for the NIC. + type: str + choices: + - ACCESS + - TRUNK + required: false + trunked_vlans: + description: + - The trunked VLANs for the NIC. + type: list + elements: int + required: false + should_allow_unknown_macs: + description: + - Whether to allow unknown MAC addresses or not. + type: bool + required: false + ipv4_config: + description: + - The IPv4 configuration for the NIC. 
+ type: dict + suboptions: + should_assign_ip: + description: + - Whether to assign an IP address or not. + type: bool + required: false + ip_address: + description: + - The IP address for the NIC. + type: dict + suboptions: + value: + description: + - The IP address value. + type: str + required: True + prefix_length: + description: + - The prefix length for the IP address. + - Can be skipped, default it will be 32. + type: int + required: false + secondary_ip_address_list: + description: + - The list of secondary IP addresses for the NIC. + type: list + elements: dict + suboptions: + value: + description: + - The IP address value. + type: str + required: true + prefix_length: + description: + - The prefix length for the IP address. + - Can be skipped, default it will be 32. + type: int + required: false + required: false + gpus: + description: + - The list of GPUs for the VM. + required: false + type: list + elements: dict + suboptions: + name: + description: + - The name of the GPU. + required: false + type: str + mode: + description: + - The mode of the GPU. + choices: ['PASSTHROUGH_GRAPHICS', 'PASSTHROUGH_COMPUTE', 'VIRTUAL'] + type: str + device_id: + description: + - The ID of the GPU device. + type: int + vendor: + description: + - The vendor of the GPU. + choices: ['NVIDIA', 'AMD', 'INTEL'] + type: str + pci_address: + description: + - The PCI address of the GPU. + type: dict + suboptions: + segment: + description: + - The segment of the PCI address. + type: int + func: + description: + - The function of the PCI address. + type: int + device: + description: + - The device of the PCI address. + type: int + bus: + description: + - The bus of the PCI address. + type: int + serial_ports: + description: + - The list of serial ports for the VM. + required: false + type: list + elements: dict + suboptions: + index: + description: + - Index of the serial port. + type: int + is_connected: + description: + - Indicates whether the serial port is connected or not. 
+ type: bool + ext_id: + description: + - its not supported for serial ports using VM + type: str + state: + description: + - The desired state of the VM. + - if C(state) is present, it will create or update the vm. + - If C(state) is set to C(present) and ext_id is not provided then the operation will be create the vm + - If C(state) is set to C(present) and ext_id is provided then the operation will be update the vm + - If C(state) is set to C(absent) and ext_id is provided , then operation will be delete the vm + choices: ['present', 'absent'] + type: str + wait: + description: + - Whether to wait for the task to complete. + required: false + type: bool +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) + - Pradeepsingh Bhati (@bhati-pradeep) +""" +EXAMPLES = r""" +- name: Create VM with minimum requirements + nutanix.ncp.ntnx_vms_v2: + name: "test_name" + description: "ansible test" + cluster: + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + register: result + ignore_errors: true + +- name: Create VM with full requirements + nutanix.ncp.ntnx_vms_v2: + name: "test_name" + description: "ansible test" + cluster: + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + num_sockets: 1 + num_cores_per_socket: 1 + num_threads_per_core: 1 + num_numa_nodes: 1 + memory_size_bytes: 4294967296 + is_vcpu_hard_pinning_enabled: false + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_vga_console_enabled: false + machine_type: "PC" + hardware_clock_timezone: "UTC" + enabled_cpu_features: + - HARDWARE_VIRTUALIZATION + is_branding_enabled: false + is_agent_vm: false + apc_config: + is_apc_enabled: false + vtpm_config: + is_vtpm_enabled: false + register: result + ignore_errors: true +- name: Update VM + nutanix.ncp.ntnx_vms_v2: + state: present + ext_id: 
"33dba56c-f123-4ec6-8b38-901e1cf716c2" + name: "new_name_updated" + description: "Test VM updated" + num_sockets: 2 + num_threads_per_core: 2 + num_cores_per_socket: 2 + num_numa_nodes: 2 + memory_size_bytes: 4294967296 + machine_type: "Q35" + is_vcpu_hard_pinning_enabled: true + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_branding_enabled: true + is_vga_console_enabled: false + is_agent_vm: true + enabled_cpu_features: HARDWARE_VIRTUALIZATION + register: result + ignore_errors: true +- name: Delete VM + nutanix.ncp.ntnx_vms_v2: + state: absent + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + register: result +""" +RETURN = r""" +response: + description: + - Response for the vm operations. + - vm details if C(wait) is true. + - Task details if C(wait) is false. + returned: always + type: dict + sample: + { + "apc_config": { + "cpu_model": null, + "is_apc_enabled": false + }, + "availability_zone": null, + "bios_uuid": "9d199d16-1c8e-4ddf-40f5-20a2d78aa918", + "boot_config": { + "boot_device": null, + "boot_order": [ + "CDROM", + "DISK", + "NETWORK" + ] + }, + "categories": null, + "cd_roms": null, + "cluster": { + "ext_id": "0006197f-3d06-ce49-1fc3-ac1f6b6029c1" + }, + "create_time": "2024-06-24T08:01:46.269181+00:00", + "description": "ansible test", + "disks": null, + "enabled_cpu_features": null, + "ext_id": "9d199d16-1c8e-4ddf-40f5-20a2d78aa918", + "generation_uuid": "8bd335e2-f616-4806-87b3-53120c1f2acb", + "gpus": null, + "guest_customization": null, + "guest_tools": null, + "hardware_clock_timezone": "UTC", + "host": null, + "is_agent_vm": false, + "is_branding_enabled": true, + "is_cpu_passthrough_enabled": false, + "is_cross_cluster_migration_in_progress": false, + "is_gpu_console_enabled": false, + "is_live_migrate_capable": null, + "is_memory_overcommit_enabled": false, + "is_vcpu_hard_pinning_enabled": false, + "is_vga_console_enabled": true, + "links": null, + "machine_type": "PC", + 
"memory_size_bytes": 1073741824, + "name": "GFGLBElSNEGBansible-agvm", + "nics": null, + "num_cores_per_socket": 1, + "num_numa_nodes": 0, + "num_sockets": 1, + "num_threads_per_core": 1, + "ownership_info": { + "owner": { + "ext_id": "00000000-0000-0000-0000-000000000000" + } + }, + "power_state": "OFF", + "protection_policy_state": null, + "protection_type": "UNPROTECTED", + "serial_ports": null, + "source": null, + "storage_config": null, + "tenant_id": null, + "update_time": "2024-06-24T08:01:46.806598+00:00", + "vtpm_config": { + "is_vtpm_enabled": false, + "version": null + } + } + + +changed: + description: + - Whether the vm is changed or not. + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + + +ext_id: + description: + - External ID of the vm. + returned: always + type: str + sample: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + +skipped: + description: + - Whether the operation is skipped or not. + - Will be returned if operation is skipped. 
+ type: bool + returned: always +""" + + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.vmm.api_client import get_etag, get_vm_api_instance # noqa: E402 +from ..module_utils.v4.vmm.helpers import get_vm # noqa: E402 +from ..module_utils.v4.vmm.spec.vms import VmSpecs as vm_specs # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_vmm_py_client as vmm_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as vmm_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = vm_specs.get_vm_spec() + + return module_args + + +def create_vm(module, result): + vms = get_vm_api_instance(module) + + sg = SpecGenerator(module) + default_spec = vmm_sdk.AhvConfigVm() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vms spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vms.create_vm(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vm", + ) + + 
task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.VM + ) + if ext_id: + resp = get_vm(module, vms, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_idempotency(current_spec, update_spec): + if current_spec != update_spec: + return False + return True + + +def update_vm(module, result): + vms = get_vm_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_vm(module, vms, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + if err: + result["error"] = err + module.fail_json(msg="Failed generating vm update spec", **result) + + # check for idempotency + if check_idempotency(current_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + try: + resp = vms.update_vm_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating vm", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_vm(module, vms, ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_vm(module, result): + ext_id = 
module.params.get("ext_id") + result["ext_id"] = ext_id + vms = get_vm_api_instance(module) + vm = get_vm(module, vms, ext_id) + etag = get_etag(vm) + kwargs = {"if_match": etag} + resp = None + try: + resp = vms.delete_vm_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vm", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_vmm_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + update_vm(module, result) + else: + create_vm(module, result) + else: + delete_vm(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_disks_info_v2.py b/plugins/modules/ntnx_volume_groups_disks_info_v2.py new file mode 100644 index 000000000..d69cf2c07 --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_disks_info_v2.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_volume_groups_disks_info_v2 
+short_description: Fetch information about Nutanix PC Volume group disks. +description: + - This module fetches information about Nutanix PC Volume groups disks. + - The module can fetch information about all Volume groups or a specific Volume group disk. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the Volume Group disk. + type: str + required: false + volume_group_ext_id: + description: + - The external ID of the Volume Group. + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all Disks of VG + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + volume_group_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 + validate_certs: false + +- name: Fetch information about all Disks of VG using page and limits + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + volume_group_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 + page: 1 + limit: 50 + validate_certs: false + +- name: Fetch information about a specific VG Disk + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1654 + volume_group_ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" + +RETURN = r""" +response: + description: + - List of disks if C(ext_id) is not provided. + - Specific disk details if C(ext_id) is provided. Below example is of same case. 
+ type: dict + returned: always + sample: { + "created_time": null, + "description": null, + "disk_data_source_reference": null, + "disk_size_bytes": 21474836480, + "disk_storage_features": { + "flash_mode": { + "is_enabled": true + } + }, + "ext_id": "4e00e28d-4d93-4587-a8f0-4502d72224c8", + "index": 0, + "links": null, + "storage_container_id": "10eb150f-e8b8-4d69-a828-6f23771d3723", + "tenant_id": null + } +volume_group_ext_id: + description: Volume Group external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +ext_id: + description: Disk external ID. When C(ext_id) is provided. + type: str + returned: When c(wait) if true + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Api Exception raised while fetching volume group disk info" +""" + + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import get_vg_api_instance # noqa: E402 + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + volume_group_ext_id=dict(type="str", required=True), + ) + return module_args + + +def get_vg_disk(module, result): + vgs = get_vg_api_instance(module) + ext_id = module.params.get("ext_id") + volume_group_ext_id = module.params.get("volume_group_ext_id") + + try: + resp = vgs.get_volume_disk_by_id( + extId=ext_id, volumeGroupExtId=volume_group_ext_id + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching volume group disk info", + ) + + result["ext_id"] = ext_id + result["volume_group_ext_id"] = volume_group_ext_id + result["response"] 
= strip_internal_attributes(resp.to_dict()).get("data") + + +def get_vg_disks(module, result): + vgs = get_vg_api_instance(module) + volume_group_ext_id = module.params.get("volume_group_ext_id") + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating volume group disks info Spec", **result) + + try: + resp = vgs.list_volume_disks_by_volume_group_id( + volumeGroupExtId=volume_group_ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching volume group disks info", + ) + + result["volume_group_ext_id"] = volume_group_ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + if not result["response"]: + result["response"] = [] + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_vg_disk(module, result) + else: + get_vg_disks(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_disks_v2.py b/plugins/modules/ntnx_volume_groups_disks_v2.py new file mode 100644 index 000000000..532ee7870 --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_disks_v2.py @@ -0,0 +1,420 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_volume_groups_disks_v2 +short_description: Manage Nutanix volume group disks +description: + - This module 
allows you to create and delete volume group disks in Nutanix. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will create vDisk. + - if C(state) is set to C(absent) then module will delete vDisk. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: true + ext_id: + description: + - The external ID of the disk. + - Required for C(state)=absent for delete. + type: str + required: false + volume_group_ext_id: + description: + - Volume Group external ID. + type: str + required: true + index: + description: + - Index of the disk in a Volume Group. + - This field is optional and immutable. + type: int + disk_size_bytes: + description: + - Size of the disk in bytes. + - This field is mandatory during Volume Group creation if a new disk is being created on the storage container. + type: int + description: + description: + - Volume Disk description. This is an optional field. + type: str + disk_data_source_reference: + description: + - Reference for creation of disk. + type: dict + suboptions: + ext_id: + description: + - External ID of the entity. + type: str + entity_type: + description: + - Type of the entity. + type: str + choices: + - STORAGE_CONTAINER + - VM_DISK + - VOLUME_DISK + - DISK_RECOVERY_POINT + disk_storage_features: + description: + - Storage optimization features which must be enabled on the Volume Disks. + - This is an optional field. + - If omitted, the disks will honor the Volume Group specific storage features setting. + type: dict + suboptions: + flash_mode: + description: + - Once configured, this field will avoid down migration of data from the hot tier \ + unless the overrides field is specified for the virtual disks. 
+ type: dict + required: true + suboptions: + is_enabled: + description: Indicates whether the flash mode is enabled or not. This is an optional field. + type: bool + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create disk with all attributes + nutanix.ncp.ntnx_volume_groups_disks_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b34 + index: 1 + disk_size_bytes: 21474836480 + description: "ansible-created-disk" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "STORAGE_CONTAINER" + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b33 + register: result + ignore_errors: true + +- name: Create disk with vdisk ref + nutanix.ncp.ntnx_volume_groups_disks_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b34 + index: 1 + description: "ansible-created-disk" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "VM_DISK" + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b37 + register: result + ignore_errors: true + + +- name: Create disk from volume group disk + nutanix.ncp.ntnx_volume_groups_disks_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 2 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "VOLUME_DISK" + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b32 + register: result + ignore_errors: true + +- name: Delete a volume group disk + nutanix.ncp.ntnx_volume_groups_disks_v2: + nutanix_host: "{{ ip }}" + 
nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + volume_group_ext_id: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" + ext_id: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" + state: absent +""" + +RETURN = r""" +response: + description: + - Disk details after creation if C(wait) is true. + - Task details if C(wait) is false. + type: dict + returned: always + sample: { + "created_time": null, + "description": null, + "disk_data_source_reference": null, + "disk_size_bytes": 21474836480, + "disk_storage_features": { + "flash_mode": { + "is_enabled": true + } + }, + "ext_id": "4e00e28d-4d93-4587-a8f0-4502d72224c8", + "index": 0, + "links": null, + "storage_container_id": "10eb150f-e8b8-4d69-a828-6f23771d3723", + "tenant_id": null + } +volume_group_ext_id: + description: Volume Group external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +ext_id: + description: Disk external ID. + type: str + returned: When c(wait) if true + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +task_ext_id: + description: The task external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Failed to create volume group disk" +changed: + description: Indicates whether the resource has changed. 
+ type: bool + returned: always + sample: true +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import ( # noqa: E402 + get_etag, + get_vg_api_instance, +) +from ..module_utils.v4.volumes.spec.volume_group import ( # noqa: E402 + VGSpecs as vg_specs, +) + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client as volumes_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as volumes_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + disk_data_source_reference = dict( + ext_id=dict(type="str"), + entity_type=dict( + type="str", + choices=[ + "STORAGE_CONTAINER", + "VOLUME_DISK", + "VM_DISK", + "DISK_RECOVERY_POINT", + ], + ), + ) + module_args = dict( + volume_group_ext_id=dict(type="str", required=True), + ext_id=dict(type="str"), + index=dict(type="int"), + disk_size_bytes=dict(type="int"), + description=dict(type="str"), + disk_storage_features=dict( + type="dict", + options=vg_specs.get_storage_features_spec(), + obj=volumes_sdk.DiskStorageFeatures, + ), + disk_data_source_reference=dict( + type="dict", + options=disk_data_source_reference, + obj=volumes_sdk.EntityReference, + ), + ) + + return module_args + + +def 
get_volume_group_disk(module, api_instance, ext_id, volume_group_ext_id): + try: + return api_instance.get_volume_disk_by_id( + extId=ext_id, volumeGroupExtId=volume_group_ext_id + ).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching Volume group disk info using ext_id", + ) + + +def create_disk(module, result): + vgs = get_vg_api_instance(module) + volume_group_ext_id = module.params.get("volume_group_ext_id") + result["volume_group_ext_id"] = volume_group_ext_id + + sg = SpecGenerator(module) + default_spec = volumes_sdk.VolumeDisk() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating create volume group disk spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vgs.create_volume_disk(body=spec, volumeGroupExtId=volume_group_ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating volume group disk", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.VOLUME_GROUP_DISK + ) + if ext_id: + resp = get_volume_group_disk(module, vgs, ext_id, volume_group_ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_disk(module, result): + ext_id = module.params.get("ext_id") + volume_group_ext_id = module.params.get("volume_group_ext_id") + result["ext_id"] = ext_id + result["volume_group_ext_id"] = 
volume_group_ext_id + + vgs = get_vg_api_instance(module) + vg = get_volume_group_disk(module, vgs, ext_id, volume_group_ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.delete_volume_disk_by_id( + extId=ext_id, volumeGroupExtId=volume_group_ext_id, **kwargs + ) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting volume group disk", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_volumes_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + # Update disk if not supported for pc.2024.1 + pass + else: + create_disk(module, result) + else: + delete_disk(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_info_v2.py b/plugins/modules/ntnx_volume_groups_info_v2.py new file mode 100644 index 000000000..eea3aaa09 --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_info_v2.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from 
__future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: ntnx_volume_groups_info_v2 +short_description: Fetch information about Nutanix PC Volume groups. +description: + - This module fetches information about Nutanix PC Volume groups. + - The module can fetch information about all Volume groups or a specific Volume group. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the Volume Group. + type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch information about all VGs + nutanix.ncp.ntnx_volume_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + +- name: Fetch information about a specific VG + nutanix.ncp.ntnx_volume_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 + +- name: Fetch information about a specific VG + nutanix.ncp.ntnx_volume_groups_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: 530567f3-abda-4913-b5d0-0ab6758ec1653 +""" + +RETURN = r""" +response: + description: + - Volume group details if C(ext_id) is provided. + - List of Volume groups if C(ext_id) is not provided. 
+ type: dict + returned: always + sample: { + "cluster_reference": "00061663-9fa0-28ca-185b-ac1f6b6f97e2", + "created_by": null, + "created_time": null, + "description": "Volume group 2", + "enabled_authentications": null, + "ext_id": "792cd764-37b5-4da3-7ef1-ea3f618c1648", + "is_hidden": null, + "iscsi_features": { + "enabled_authentications": "CHAP", + "iscsi_target_name": null, + "target_secret": null + }, + "iscsi_target_name": null, + "iscsi_target_prefix": null, + "links": null, + "load_balance_vm_attachments": null, + "name": "ansible-vgs-KjRMtTRxhrww2", + "sharing_status": "SHARED", + "should_load_balance_vm_attachments": true, + "storage_features": { + "flash_mode": { + "is_enabled": true + } + }, + "target_name": "vg1-792cd764-37b5-4da3-7ef1-ea3f618c1648", + "target_prefix": null, + "target_secret": null, + "tenant_id": null, + } +ext_id: + description: Volume group external ID. + type: str + returned: When C(ext_id) is provided. + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Failed generating volume groups info Spec" +changed: + description: Indicates whether the resource has changed. 
+ type: bool + returned: always + sample: true +""" + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import get_vg_api_instance # noqa: E402 + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str"), + ) + return module_args + + +def get_vg(module, result): + vgs = get_vg_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = vgs.get_volume_group_by_id(extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching volume group info", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_vgs(module, result): + vgs = get_vg_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating volume groups info Spec", **result) + + try: + resp = vgs.list_volume_groups(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching volume groups info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_vg(module, result) + else: + get_vgs(module, result) + + module.exit_json(**result) + + +def main(): 
+ run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_iscsi_clients_info_v2.py b/plugins/modules/ntnx_volume_groups_iscsi_clients_info_v2.py new file mode 100644 index 000000000..66125ed9e --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_iscsi_clients_info_v2.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_volume_groups_iscsi_clients_info_v2 +short_description: Fetch ISCSI clients info. +description: + - By default, Fetch all iscsi clients currently attached to any VG + - Fetch iscsi client if C(ext_id) is given +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + ext_id: + description: + - The external ID of the iscsi client. + - This will fetch the iscsi client with the given external ID. 
+ type: str + required: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +""" + +EXAMPLES = r""" +- name: Fetch specific iscsi client info + nutanix.ncp.ntnx_volume_groups_iscsi_clients_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + register: result + +- name: Fetch all iscsi clients attached across VGs + nutanix.ncp.ntnx_volume_groups_iscsi_clients_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + limit: 10 + register: result +""" + +RETURN = r""" +response: + description: + - list of iscsi clients currently attached to any VG + - specific iscsi client if ext_id given + type: dict + returned: always + sample: [ + { + "cluster_reference": "00061663-9fa0-28ca-185b-ac1f6b6f97e2", + "created_time": null, + "ext_id": "aea43b5c-ae4d-4b60-934b-f8f581275dec", + "links": [ + { + "href": "https://*****:9440/api/volumes/v4.0.b1/config/iscsi-clients/aea43b5c-ae4d-4b60-934b-f8f581275dec", + "rel": "external_attachment" + } + ], + "tenant_id": null + } + ] +ext_id: + description: Iscsi client external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Api Exception raised while fetching ISCSI clients attached to VGs" +changed: + description: Indicates whether the resource has changed. 
+ type: bool + returned: always + sample: true +""" + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import ( # noqa: E402 + get_iscsi_client_api_instance, +) + + +def get_module_spec(): + module_args = dict( + ext_id=dict(type="str", required=False), + ) + return module_args + + +def get_iscsi_client(module, result): + clients = get_iscsi_client_api_instance(module) + ext_id = module.params.get("ext_id") + + try: + resp = clients.get_iscsi_client_by_id(extId=ext_id) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching given ISCSI client", + ) + + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()).get("data") + + +def get_iscsi_clients(module, result): + clients = get_iscsi_client_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating info Spec", **result) + + try: + resp = clients.list_iscsi_clients(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching all available ISCSI clients", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + 
get_iscsi_client(module, result) + else: + get_iscsi_clients(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_iscsi_clients_v2.py b/plugins/modules/ntnx_volume_groups_iscsi_clients_v2.py new file mode 100644 index 000000000..b697e7420 --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_iscsi_clients_v2.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_volume_groups_iscsi_clients_v2 +short_description: Manage Nutanix volume groups iscsi clients in Nutanix PC. +description: + - This module allows you to attach & detach ISCSI clients to/from a volume group in a Nutanix cluster. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will attach iscsi client to volume group. + - If C(state) is set to C(present) then module will detach iscsi client to volume group. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the iscsi client. + - Its required for delete. + type: str + required: false + volume_group_ext_id: + description: + - The external ID of the volume group. + type: str + required: true + iscsi_initiator_name: + description: + - iSCSI initiator name. + - During the attach operation, exactly one of iscsiInitiatorName and iscsiInitiatorNetworkId must be specified. + - This field is immutable. 
+ type: str + required: false + client_secret: + description: + - iSCSI initiator client secret in case of CHAP authentication. + - This field should not be provided in case the authentication type is not set to CHAP. + type: str + required: false + enabled_authentications: + description: + - The authentication type enabled for the Volume Group. + - If omitted, authentication is not configured for attachment. + - If this is set to CHAP, the target/client secret must be provided. + - This is an optional field. + required: false + choices: + - CHAP + - NONE + type: str + default: NONE + num_virtual_targets: + description: + - Number of virtual targets generated for the iSCSI target. This field is immutable. + type: int + required: false + attachment_site: + description: + - The site where the Volume Group attach operation should be processed. + - This is an optional field. + - This field may only be set if Metro DR has been configured for this Volume Group. + required: false + type: str + choices: + - SECONDARY + - PRIMARY + iscsi_initiator_network_id: + description: + - An unique address that identifies a device on the internet or a local network in IPv4/IPv6 format or a Fully Qualified Domain Name + - Mutually exclusive with C(iscsi_initiator_name). + required: false + type: dict + suboptions: + ipv4: + description: + - IPv4 address of the initiator. + type: dict + required: false + suboptions: + value: + description: + - The value of the IPv4 address. + type: str + required: true + ipv6: + description: + - IPv6 address of the initiator. + type: dict + required: false + suboptions: + value: + description: + - The value of the IPv6 address. + type: str + required: true + fqdn: + description: + - fqdn address of the initiator. + type: dict + required: false + suboptions: + value: + description: + - The value of the fqdn address. 
+ type: str + required: true + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Attach iscsi client to VG using initiator name + nutanix.ncp.ntnx_volume_groups_iscsi_clients_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + iscsi_initiator_name: iqn-1-05.com.microsoft:win-1234 + num_virtual_targets: 32 + register: result + +- name: Attach using ipv4 address + nutanix.ncp.ntnx_volume_groups_iscsi_clients_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + num_virtual_targets: 32 + enabled_authentications: CHAP + client_secret: "Nutanix.1234455" + attachment_site: "PRIMARY" + iscsi_initiator_network_id: + ipv4: + value: "0.0.0.0" + register: result + +- name: Detach client from VG + nutanix.ncp.ntnx_volume_groups_iscsi_clients_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + ext_id: 9905b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + state: absent + register: result +""" + +RETURN = r""" +response: + description: + - Task details + type: dict + returned: always + sample: { + "cluster_ext_ids": [ + "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-05-20T05:19:00.229645+00:00", + "completion_details": null, + "created_time": "2024-05-20T05:19:00.095273+00:00", + "entities_affected": [ + { + "ext_id": "aea43b5c-ae4d-4b60-934b-f8f581275dec", + "rel": "volumes:config:iscsi-client" + }, + { + "ext_id": "11ac5593-c9cf-403d-641c-3bf76eff2193", + "rel": "volumes:config:volume-group" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:e7b6ff28-e5f1-4316-82e8-96368cc851d7", + "is_cancelable": false, + 
"last_updated_time": "2024-05-20T05:19:00.229642+00:00", + "legacy_error_message": null, + "operation": "VolumeGroupAttachExternal", + "operation_description": "Volume group attach to iSCSI Client", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-05-20T05:19:00.122260+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": null, + "warnings": null + } +ext_id: + description: Iscsi client external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +volume_group_ext_id: + description: Volume group external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +task_ext_id: + description: The task external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Api Exception raised while attaching ISCSI client to volume group" +changed: + description: Indicates whether the resource has changed. 
+ type: bool + returned: always + sample: true +""" +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import ( # noqa: E402 + get_etag, + get_vg_api_instance, +) +from ..module_utils.v4.volumes.helpers import get_volume_group # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client as volumes_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as volumes_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + address = dict( + value=dict(type="str", required=True), + ) + iscsi_initiator_network_id = dict( + ipv4=dict(type="dict", options=address, obj=volumes_sdk.IPv4Address), + ipv6=dict(type="dict", options=address, obj=volumes_sdk.IPv6Address), + fqdn=dict(type="dict", options=address, obj=volumes_sdk.FQDN), + ) + module_args = dict( + volume_group_ext_id=dict(type="str", required=True), + ext_id=dict(type="str", required=False), + iscsi_initiator_name=dict(type="str"), + client_secret=dict(type="str", no_log=True), + enabled_authentications=dict( + type="str", choices=["CHAP", "NONE"], default="NONE" + ), + num_virtual_targets=dict(type="int"), + attachment_site=dict(type="str", choices=["SECONDARY", "PRIMARY"]), + 
iscsi_initiator_network_id=dict( + type="dict", + options=iscsi_initiator_network_id, + obj=volumes_sdk.IPAddressOrFQDN, + ), + ) + + return module_args + + +def get_iscsi_client(module, api_instance, ext_id): + try: + return api_instance.get_iscsi_client_by_id(extId=ext_id).data + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching Volume group disk info using ext_id", + ) + + +def attach_iscsi_client(module, result): + vgs = get_vg_api_instance(module) + volume_group_ext_id = module.params.get("volume_group_ext_id") + result["volume_group_ext_id"] = volume_group_ext_id + + sg = SpecGenerator(module) + default_spec = volumes_sdk.IscsiClient() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating attach ISCSI client to volume group spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vg = get_volume_group(module, vgs, volume_group_ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.attach_iscsi_client(body=spec, extId=volume_group_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while attaching ISCSI client to volume group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.ISCSI_CLIENT + ) + if ext_id: + result["ext_id"] = ext_id + result["changed"] = True + + +def detach_iscsi_client(module, result): + vgs = get_vg_api_instance(module) + volume_group_ext_id = 
module.params.get("volume_group_ext_id") + result["volume_group_ext_id"] = volume_group_ext_id + + sg = SpecGenerator(module) + default_spec = volumes_sdk.IscsiClientAttachment() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating detach ISCSI client to volume group spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vg = get_volume_group(module, vgs, volume_group_ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.detach_iscsi_client(body=spec, extId=volume_group_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while detaching ISCSI client from volume group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_volumes_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = module.params.get("state") + if state == "present": + attach_iscsi_client(module, result) + else: + detach_iscsi_client(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_v2.py b/plugins/modules/ntnx_volume_groups_v2.py new file mode 100644 index 000000000..dca8ae4ad --- /dev/null +++ 
b/plugins/modules/ntnx_volume_groups_v2.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_volume_groups_v2 +short_description: Manage Nutanix volume group in PC +description: + - This module allows you to create and delete volume group in Nutanix PC. + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will create volume group. + - if C(state) is set to C(absent) then module will delete volume group. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the volume group. + - Required for C(state)=absent for delete. + type: str + required: false + name: + description: + - Name of VG + type: str + required: false + description: + description: + - Description of VG + type: str + required: false + sharing_status: + description: + - Indicates whether the Volume Group can be shared across multiple iSCSI initiators. + - The mode cannot be changed from SHARED to NOT_SHARED on a Volume Group with multiple attachments. + - Similarly, a Volume Group cannot be associated with more than one attachment as long as it is in exclusive mode. + - This is an optional field. + type: str + required: false + choices: + - SHARED + - NOT_SHARED + should_load_balance_vm_attachments: + description: + - Indicates whether to enable Volume Group load balancing for VM attachments. 
+ type: bool + required: false + target_prefix: + description: + - The specifications contain the target prefix for external clients as the value. + - This is an optional field. + - Mutually exclusive with C(target_name). + required: false + type: str + target_name: + description: + - Name of the external client target that will be visible and accessible to the client + - This is an optional field. + - Mutually exclusive with C(target_prefix). + required: false + type: str + enabled_authentications: + description: + - The authentication type enabled for the Volume Group. This is an optional field. + - If omitted, authentication is not configured for the Volume Group. + - If this is set to CHAP, the target/client secret must be provided. + - This is an optional field. + required: false + choices: + - CHAP + - NONE + type: str + cluster_reference: + description: + - Cluster reference for VG, required for create. + required: false + type: str + usage_type: + description: + - Expected usage type for the Volume Group. + - This is an indicative hint on how the caller will consume the Volume Group. + - This is an optional field. + required: false + type: str + choices: + - BACKUP_TARGET + - INTERNAL + - TEMPORARY + - USER + is_hidden: + description: + - Indicates whether the Volume Group is hidden. + required: False + type: bool + storage_features: + description: + - Storage optimization features which must be enabled on the Volume Group + required: false + type: dict + suboptions: + flash_mode: + description: + - Enable flash mode on the Volume Group. + type: dict + required: True + suboptions: + is_enabled: + description: + - Indicates whether the flash mode is enabled or not. + type: bool + required: True + iscsi_features: + description: + - iSCSI specific settings for the Volume Group. + required: false + type: dict + suboptions: + target_secret: + description: + - Target secret in case of a CHAP authentication. 
+ type: str + required: True + enabled_authentications: + description: + - The authentication type enabled for the Volume Group. This is an optional field. + - If omitted, authentication is not configured for the Volume Group. + - If this is set to CHAP, the target/client secret must be provided. + - This is an optional field. + required: false + choices: + - CHAP + - NONE + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Create Volume group with all config and enabled chap auth + nutanix.ncp.ntnx_volume_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + name: "{{vg1_name}}" + description: "Volume group 2" + should_load_balance_vm_attachments: true + sharing_status: "SHARED" + target_prefix: "vg1" + cluster_reference: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + usage_type: "USER" + storage_features: + flash_mode: + is_enabled: true + iscsi_features: + target_secret: "Secret1234567" + enabled_authentications: "CHAP" + register: result + ignore_errors: true + +- name: Create Volume group with min spec and no Auth + nutanix.ncp.ntnx_volume_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + name: "{{vg1_name}}" + description: "Volume group 1" + cluster_reference: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + register: result + ignore_errors: true + +- name: Delete Volume groups + nutanix.ncp.ntnx_volume_groups_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: absent + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b67 + register: result + ignore_errors: true +""" + +RETURN = r""" +response: + description: + - Volume group details after creation if C(wait) is true. + - Task details if C(wait) is false. 
+ type: dict + returned: always + sample: { + "cluster_reference": "00061663-9fa0-28ca-185b-ac1f6b6f97e2", + "created_by": null, + "created_time": null, + "description": "Volume group 2", + "enabled_authentications": null, + "ext_id": "792cd764-37b5-4da3-7ef1-ea3f618c1648", + "is_hidden": null, + "iscsi_features": { + "enabled_authentications": "CHAP", + "iscsi_target_name": null, + "target_secret": null + }, + "iscsi_target_name": null, + "iscsi_target_prefix": null, + "links": null, + "load_balance_vm_attachments": null, + "name": "ansible-vgs-KjRMtTRxhrww2", + "sharing_status": "SHARED", + "should_load_balance_vm_attachments": true, + "storage_features": { + "flash_mode": { + "is_enabled": true + } + }, + "target_name": "vg1-792cd764-37b5-4da3-7ef1-ea3f618c1648", + "target_prefix": null, + "target_secret": null, + "tenant_id": null, + "usage_type": "USER" + } +ext_id: + description: Volume Group external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +task_ext_id: + description: The task external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Failed generating create volume group spec" +changed: + description: Indicates whether the resource has changed. 
+ type: bool + returned: always + sample: true +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import ( # noqa: E402 + get_etag, + get_vg_api_instance, +) +from ..module_utils.v4.volumes.helpers import get_volume_group # noqa: E402 +from ..module_utils.v4.volumes.spec.volume_group import ( # noqa: E402 + VGSpecs as vg_specs, +) + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client as volumes_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as volumes_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = vg_specs.get_volume_group_spec() + + return module_args + + +def create_vg(module, result): + vgs = get_vg_api_instance(module) + + sg = SpecGenerator(module) + default_spec = volumes_sdk.VolumeGroup() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create volume group spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vgs.create_volume_group(body=spec) + except Exception as e: + raise_api_exception( + module=module, + 
exception=e, + msg="Api Exception raised while creating volume groups", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, rel=TASK_CONSTANTS.RelEntityType.VOLUME_GROUP + ) + if ext_id: + resp = get_volume_group(module, vgs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_vg(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + vgs = get_vg_api_instance(module) + vg = get_volume_group(module, vgs, ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.delete_volume_group_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting volume group", + ) + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "absent", ("ext_id",)), + ], + mutually_exclusive=[["target_name", "target_prefix"]], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_volumes_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = 
module.params.get("state") + if state == "present": + if module.params.get("ext_id"): + # Update not supported for pc.2024.1 release. + pass + else: + create_vg(module, result) + else: + delete_vg(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_volume_groups_vms_v2.py b/plugins/modules/ntnx_volume_groups_vms_v2.py new file mode 100644 index 000000000..c744b0440 --- /dev/null +++ b/plugins/modules/ntnx_volume_groups_vms_v2.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_volume_groups_vms_v2 +short_description: Attach/Detach volume group to AHV VMs in Nutanix PC +description: + - Attach/Detach volume group to AHV VMs in Nutanix PC + - This module uses PC v4 APIs based SDKs +version_added: "2.0.0" +author: + - Pradeepsingh Bhati (@bhati-pradeep) +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will attach VG to VM. + - If C(state) is set to C(absent) then module will detach VG from VM. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the VM + - It is required for delete. + type: str + required: true + volume_group_ext_id: + description: + - The external ID of the volume group. + type: str + required: true + index: + description: + - The index on the SCSI bus to attach the VM to the Volume Group. + - This is an optional field.
+ type: int + required: false + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +""" + +EXAMPLES = r""" +- name: Attach VM1 to VG + nutanix.ncp.ntnx_volume_groups_vms_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: present + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4asda + index: 1 + register: result + +- name: Attach VM2 to VG + nutanix.ncp.ntnx_volume_groups_vms_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + state: "present" + volume_group_ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b35 + ext_id: 0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b213213 + register: result +""" + +RETURN = r""" +response: + description: + - Task details + type: dict + returned: always + sample: { + "cluster_ext_ids": [ + "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + ], + "completed_time": "2024-05-20T05:19:00.229645+00:00", + "completion_details": null, + "created_time": "2024-05-20T05:19:00.095273+00:00", + "entities_affected": [ + { + "ext_id": "aea43b5c-ae4d-4b60-934b-f8f581275dec", + "rel": "volumes:config:vms" + }, + { + "ext_id": "11ac5593-c9cf-403d-641c-3bf76eff2193", + "rel": "volumes:config:volume-group" + } + ], + "error_messages": null, + "ext_id": "ZXJnb24=:e7b6ff28-e5f1-4316-82e8-96368cc851d7", + "is_cancelable": false, + "last_updated_time": "2024-05-20T05:19:00.229642+00:00", + "legacy_error_message": null, + "operation": "VolumeGroupAttachExternal", + "operation_description": "Volume group attach to VM", + "owned_by": { + "ext_id": "00000000-0000-0000-0000-000000000000", + "name": "admin" + }, + "parent_task": null, + "progress_percentage": 100, + "started_time": "2024-05-20T05:19:00.122260+00:00", + "status": "SUCCEEDED", + "sub_steps": null, + "sub_tasks": null, + "warnings": null + } +ext_id: + description: VM external ID. 
+ type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +volume_group_ext_id: + description: Volume group external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +task_ext_id: + description: The task external ID. + type: str + returned: always + sample: "0005b6b1-0b3b-4b3b-8b3b-0b3b4b3b4b3b" +error: + description: The error message if any. + type: str + returned: when error occurs + sample: "Api Exception raised while attaching VM to volume group" +changed: + description: Indicates whether the resource has changed. + type: bool + returned: always + sample: true +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.prism.tasks import wait_for_completion # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) +from ..module_utils.v4.volumes.api_client import ( # noqa: E402 + get_etag, + get_vg_api_instance, +) +from ..module_utils.v4.volumes.helpers import get_volume_group # noqa: E402 + +SDK_IMP_ERROR = None +try: + import ntnx_volumes_py_client as volumes_sdk # noqa: E402 +except ImportError: + + from ..module_utils.v4.sdk_mock import mock_sdk as volumes_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + module_args = dict( + volume_group_ext_id=dict(type="str", required=True), + ext_id=dict(type="str", required=True), + index=dict(type="int"), + ) + + return module_args + + +def attach_vm(module, result): + vgs = get_vg_api_instance(module) + 
volume_group_ext_id = module.params.get("volume_group_ext_id") + result["volume_group_ext_id"] = volume_group_ext_id + result["ext_id"] = module.params.get("ext_id") + + sg = SpecGenerator(module) + default_spec = volumes_sdk.VmAttachment() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating attach VM to volume group spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vg = get_volume_group(module, vgs, volume_group_ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.attach_vm(body=spec, extId=volume_group_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while attaching VM to volume group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def detach_vm(module, result): + vgs = get_vg_api_instance(module) + volume_group_ext_id = module.params.get("volume_group_ext_id") + result["volume_group_ext_id"] = volume_group_ext_id + result["ext_id"] = module.params.get("ext_id") + + sg = SpecGenerator(module) + default_spec = volumes_sdk.VmAttachment() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json( + msg="Failed generating detach VM to volume group spec", **result + ) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + vg = get_volume_group(module, vgs, volume_group_ext_id) + etag = get_etag(vg) + kwargs = {"if_match": etag} + resp = None + try: + resp = vgs.detach_vm(body=spec, 
extId=volume_group_ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while detaching VM to volume group", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_volumes_py_client"), exception=SDK_IMP_ERROR + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + "task_ext_id": None, + } + state = module.params.get("state") + if state == "present": + attach_vm(module, result) + else: + detach_vm(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vpcs.py b/plugins/modules/ntnx_vpcs.py index 8f7d0c658..957f22b85 100644 --- a/plugins/modules/ntnx_vpcs.py +++ b/plugins/modules/ntnx_vpcs.py @@ -62,51 +62,50 @@ """ EXAMPLES = r""" +- name: Create min VPC + ntnx_vpcs: + validate_certs: false + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: MinVPC + external_subnets: + - subnet_name: "{{ external_subnet.name }}" - - name: Create min VPC - ntnx_vpcs: - validate_certs: False - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: MinVPC - external_subnets: - - subnet_name: "{{ external_subnet.name }}" +- name: Create VPC with dns_servers + ntnx_vpcs: + validate_certs: false + state: present + 
nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: vpc_with_dns_servers + dns_servers: "{{ dns_servers }}" - - name: Create VPC with dns_servers - ntnx_vpcs: - validate_certs: False - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: vpc_with_dns_servers - dns_servers: "{{ dns_servers }}" +- name: Create VPC with all specfactions + ntnx_vpcs: + validate_certs: false + state: present + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + name: vpc_with_add_specfactions + external_subnets: + - subnet_name: "{{ external_subnet.name }}" + dns_servers: "{{ dns_servers }}" + routable_ips: + - network_ip: "{{ routable_ips.network_ip }}" + network_prefix: "{{ routable_ips.network_prefix }}" - - name: Create VPC with all specfactions - ntnx_vpcs: - validate_certs: False - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: vpc_with_add_specfactions - external_subnets: - - subnet_name: "{{ external_subnet.name }}" - dns_servers: "{{ dns_servers }}" - routable_ips: - - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" - - - name: Delete VPC - ntnx_vpcs: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - vpc_uuid: "{{ vpc_uuid }}" +- name: Delete VPC + ntnx_vpcs: + state: absent + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + vpc_uuid: "{{ vpc_uuid }}" """ RETURN = r""" @@ -201,9 +200,9 @@ """ from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.vpcs import Vpc # noqa: E402 from ..module_utils.utils import 
remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.vpcs import Vpc # noqa: E402 def get_module_spec(): diff --git a/plugins/modules/ntnx_vpcs_info.py b/plugins/modules/ntnx_vpcs_info.py index 88bd49aa5..b8ae05c5e 100644 --- a/plugins/modules/ntnx_vpcs_info.py +++ b/plugins/modules/ntnx_vpcs_info.py @@ -33,28 +33,28 @@ - Dina AbuHijleh (@dina-abuhijleh) """ EXAMPLES = r""" - - name: List VPC using name filter criteria - ntnx_vpcs_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - filter: - name: "{{ vpc.name }}" - kind: vpc - register: result - - - name: List VPC using length, offset, sort order and name sort attribute - ntnx_vpcs_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - length: 1 - offset: 1 - sort_order: "ASCENDING" - sort_attribute: "name" - register: result +- name: List VPC using name filter criteria + ntnx_vpcs_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: + name: "{{ vpc.name }}" + kind: vpc + register: result + +- name: List VPC using length, offset, sort order and name sort attribute + ntnx_vpcs_info: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + length: 1 + offset: 1 + sort_order: "ASCENDING" + sort_attribute: "name" + register: result """ RETURN = r""" api_version: @@ -148,9 +148,9 @@ } """ -from ..module_utils.base_info_module import BaseInfoModule # noqa: E402 -from ..module_utils.prism.vpcs import Vpc # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v3.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v3.prism.vpcs import Vpc # noqa: E402 def 
get_module_spec(): diff --git a/plugins/modules/ntnx_vpcs_info_v2.py b/plugins/modules/ntnx_vpcs_info_v2.py new file mode 100644 index 000000000..c16f1da11 --- /dev/null +++ b/plugins/modules/ntnx_vpcs_info_v2.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vpcs_info_v2 +short_description: vpc info module +version_added: 2.0.0 +description: + - This module fetches information about Nutanix vpcs. + - The module can fetch information about all vpcs or a specific vpc. + - This module uses PC v4 APIs based SDKs +options: + ext_id: + description: + - vpc external ID + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_info_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: List VPCs + nutanix.ncp.ntnx_vpcs_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + register: vpcs + +- name: List VPC using name filter criteria + nutanix.ncp.ntnx_vpcs_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + filter: "name eq 'test_vpc'" + register: result + +- name: List VPC using ext_id + nutanix.ncp.ntnx_vpcs_info_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + ext_id: "33dba56c-f123-4ec6-8b38-901e1cf716c2" + register: result +""" +RETURN = r""" +response: + description: + - The response from the vpc v4 API. + - it can be vpc or list of vpcs as per spec. 
+ returned: always + type: dict + sample: + { + "common_dhcp_options": { + "domain_name_servers": null + }, + "description": null, + "ext_id": "ce14a4cc-5a9a-4dd0-8f82-daadc1045e57", + "external_routing_domain_reference": null, + "external_subnets": [ + { + "active_gateway_count": 1, + "active_gateway_node": { + "node_id": "a9b4cb02-2487-4878-a6b6-395bd4f5fb61", + "node_ip_address": { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + }, + "active_gateway_nodes": [ + { + "node_id": "a9b4cb02-2487-4878-a6b6-395bd4f5fb61", + "node_ip_address": { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + } + ], + "external_ips": [ + { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + ], + "gateway_nodes": null, + "subnet_reference": "b000b263-8662-4a7f-a841-32eaf5b97d5d" + } + ], + "externally_routable_prefixes": null, + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": null, + "project_name": null, + "project_reference_id": null + }, + "name": "rohcTvGipSJQansible-ag2", + "snat_ips": null, + "tenant_id": null, + "vpc_type": "REGULAR" + } + + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + +ext_id: + description: + - The external ID of the vpc when specific vpc is fetched. 
+ type: str + returned: always + sample: "33dba56c-f123-4ec6-8b38-901e1cf716c2" +""" +import warnings # noqa: E402 + +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.base_info_module import BaseInfoModule # noqa: E402 +from ..module_utils.v4.network.api_client import get_vpc_api_instance # noqa: E402 +from ..module_utils.v4.network.helpers import get_vpc # noqa: E402 +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + + module_args = dict( + ext_id=dict(type="str"), + ) + + return module_args + + +def get_vpcs(module, result): + vpcs = get_vpc_api_instance(module) + + sg = SpecGenerator(module) + kwargs, err = sg.get_info_spec(attr=module.params) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vpcs info Spec", **result) + + try: + resp = vpcs.list_vpcs(**kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while fetching vpcs info", + ) + + resp = strip_internal_attributes(resp.to_dict()).get("data") + if not resp: + resp = [] + result["response"] = resp + + +def get_vpc_by_ext_id(module, result): + vpcs = get_vpc_api_instance(module) + ext_id = module.params.get("ext_id") + + resp = get_vpc(module, vpcs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + +def run_module(): + module = BaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[ + ("ext_id", "filter"), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + if module.params.get("ext_id"): + get_vpc_by_ext_id(module, 
result) + else: + get_vpcs(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_vpcs_v2.py b/plugins/modules/ntnx_vpcs_v2.py new file mode 100644 index 000000000..2612efaac --- /dev/null +++ b/plugins/modules/ntnx_vpcs_v2.py @@ -0,0 +1,668 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2024, Nutanix +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_vpcs_v2 +short_description: vpcs module which supports vpc CRUD operations +version_added: 2.0.0 +description: + - Create, Update, Delete vpcs + - This module uses PC v4 APIs based SDKs +options: + state: + description: + - Specify state + - If C(state) is set to C(present) then module will create vpc. + - if C(state) is set to C(present) and C(ext_id) is given, then module will update vpc. + - If C(state) is set to C(absent) with C(ext_id), then module will delete vpc. + choices: + - present + - absent + type: str + default: present + wait: + description: Wait for the operation to complete. + type: bool + required: false + default: True + ext_id: + description: + - The external ID of the vpc. + - Required for C(state)=absent for delete. + - Required for C(state)=present to trigger update of vpc. + type: str + + name: + description: Name of the VPC. + type: str + + description: + description: Description of the VPC. + type: str + + vpc_type: + description: Type of the VPC. + type: str + choices: + - REGULAR + - TRANSIT + + common_dhcp_options: + description: List of DHCP options to be configured. + type: dict + suboptions: + domain_name_servers: + description: List of Domain Name Server addresses . + type: list + elements: dict + suboptions: + ipv4: + description: IPv4 address. 
+ type: dict + suboptions: + value: + description: The IPv4 address value. + type: str + prefix_length: + description: The prefix length of the IPv4 address. + type: int + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: The IPv6 address value. + type: str + prefix_length: + description: The prefix length of the IPv6 address. + type: int + + external_subnets: + description: List of external subnets that the VPC is attached to. + type: list + elements: dict + suboptions: + subnet_reference: + description: External subnet reference. + type: str + external_ips: + description: + - List of IP Addresses used for SNAT, if NAT is enabled on the external subnet. + If NAT is not enabled, this specifies the IP address of the VPC port connected to the external gateway. + type: list + elements: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: The IPv4 address value. + type: str + prefix_length: + description: The prefix length of the IPv4 address. + type: int + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: The IPv6 address value. + type: str + prefix_length: + description: The prefix length of the IPv6 address. + type: int + gateway_nodes: + description: List of gateway nodes that can be used for external connectivity. + type: str + active_gateway_node: + description: Reference of gateway nodes + type: dict + suboptions: + node_id: + description: Node ID. + type: str + node_ip_address: + description: An unique address that identifies a device on the internet or a local network in IPv4 or IPv6 format. + type: dict + suboptions: + ipv4: + description: IPv4 address. + type: dict + suboptions: + value: + description: The IPv4 address value. + type: str + prefix_length: + description: The prefix length of the IPv4 address. + type: int + ipv6: + description: IPv6 address. + type: dict + suboptions: + value: + description: The IPv6 address value. 
+ type: str + prefix_length: + description: The prefix length of the IPv6 address. + type: int + active_gateway_count: + description: Number of active gateways. + type: int + + external_routing_domain_reference: + description: External routing domain associated with this route table + type: str + + externally_routable_prefixes: + description: + - CIDR blocks from the VPC which can talk externally without performing NAT. + This is applicable when connecting to external subnets which have disabled NAT. + type: list + elements: dict + suboptions: + ipv4: + description: IPv4 subnet. + type: dict + suboptions: + ip: + description: IPv4 address. + type: dict + suboptions: + value: + description: The IPv4 address value. + type: str + prefix_length: + description: The prefix length of the IPv4 address. + type: int + prefix_length: + description: The prefix length of the subnet. + type: int + ipv6: + description: IPv6 subnet. + type: dict + suboptions: + ip: + description: IPv6 address. + type: dict + suboptions: + value: + description: The IPv6 address value. + type: str + prefix_length: + description: The prefix length of the IPv6 address. + type: int + prefix_length: + description: The prefix length of the subnet. + type: int + + metadata: + description: Metadata associated with this resource. + type: dict + suboptions: + owner_reference_id: + description: A globally unique identifier that represents the owner of this resource. + type: str + owner_user_name: + description: The userName of the owner of this resource. + type: str + project_reference_id: + description: A globally unique identifier that represents the project this resource belongs to. + type: str + project_name: + description: The name of the project this resource belongs to. + type: str + category_ids: + description: A list of globally unique identifiers that represent all the categories the resource is associated with. 
+ type: list + elements: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations_v2 +author: + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create min VPC with external_nat_subnet uuid + nutanix.ncp.ntnx_vpcs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + wait: true + name: MinVPC + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + +- name: Create VPC with routable_ips + nutanix.ncp.ntnx_vpcs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: vpc_with_routable_ips + externally_routable_prefixes: + - + ipv4: + ip: + value: "{{ routable_ips.network_ip }}" + prefix_length: "{{ routable_ips.network_prefix }}" + +- name: Create VPC with dns_servers + nutanix.ncp.ntnx_vpcs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: present + name: vpc_with_dns_servers + common_dhcp_options: + domain_name_servers: + - + ipv4: + value: "{{ dns_servers.0 }}" + prefix_length: 32 + - + ipv4: + value: "{{ dns_servers.1 }}" + prefix_length: 32 + register: result + +- name: Delete all created vpcs + nutanix.ncp.ntnx_vpcs_v2: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" +""" + +RETURN = r""" +response: + description: The created vpc object + returned: always + type: dict + sample: + { + "common_dhcp_options": { + "domain_name_servers": null + }, + "description": null, + "ext_id": "ce14a4cc-5a9a-4dd0-8f82-daadc1045e57", + "external_routing_domain_reference": null, + "external_subnets": [ + 
{ + "active_gateway_count": 1, + "active_gateway_node": { + "node_id": "a9b4cb02-2487-4878-a6b6-395bd4f5fb61", + "node_ip_address": { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + }, + "active_gateway_nodes": [ + { + "node_id": "a9b4cb02-2487-4878-a6b6-395bd4f5fb61", + "node_ip_address": { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + } + ], + "external_ips": [ + { + "ipv4": { + "prefix_length": 32, + "value": "000.000.000.000" + }, + "ipv6": null + } + ], + "gateway_nodes": null, + "subnet_reference": "b000b263-8662-4a7f-a841-32eaf5b97d5d" + } + ], + "externally_routable_prefixes": null, + "links": null, + "metadata": { + "category_ids": null, + "owner_reference_id": "00000000-0000-0000-0000-000000000000", + "owner_user_name": null, + "project_name": null, + "project_reference_id": null + }, + "name": "rohcTvGipSJQansible-ag2", + "snat_ips": null, + "tenant_id": null, + "vpc_type": "REGULAR" + } + +changed: + description: This indicates whether the task resulted in any changes + returned: always + type: bool + sample: true + +error: + description: This field typically holds information about if the task have errors that occurred during the task execution + returned: always + type: bool + sample: false + + +ext_id: + description: The external ID of VPC + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + +import traceback # noqa: E402 +import warnings # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ansible.module_utils.basic import missing_required_lib # noqa: E402 + +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 +from ..module_utils.v4.constants import Tasks as TASK_CONSTANTS # noqa: E402 +from ..module_utils.v4.network.api_client import ( # noqa: E402 + get_etag, + get_vpc_api_instance, +) +from ..module_utils.v4.network.helpers import 
get_vpc # noqa: E402 +from ..module_utils.v4.prism.tasks import ( # noqa: E402 + get_entity_ext_id_from_task, + wait_for_completion, +) +from ..module_utils.v4.spec_generator import SpecGenerator # noqa: E402 +from ..module_utils.v4.utils import ( # noqa: E402 + raise_api_exception, + strip_internal_attributes, +) + +SDK_IMP_ERROR = None +try: + import ntnx_networking_py_client as net_sdk # noqa: E402 +except ImportError: + from ..module_utils.v4.sdk_mock import mock_sdk as net_sdk # noqa: E402 + + SDK_IMP_ERROR = traceback.format_exc() + +# Suppress the InsecureRequestWarning +warnings.filterwarnings("ignore", message="Unverified HTTPS request is being made") + + +def get_module_spec(): + ip_address_sub_spec = dict( + value=dict(type="str"), + prefix_length=dict(type="int"), + ) + + ip_address_spec = dict( + ipv4=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + ipv6=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + ) + + vpc_dhcp_options_spec = dict( + domain_name_servers=dict( + type="list", elements="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + ) + + gnr_spec = dict( + node_id=dict(type="str"), + node_ip_address=dict( + type="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + ) + + external_subnet_spec = dict( + subnet_reference=dict(type="str"), + external_ips=dict( + type="list", elements="dict", options=ip_address_spec, obj=net_sdk.IPAddress + ), + gateway_nodes=dict(type="str"), + active_gateway_node=dict( + type="dict", options=gnr_spec, obj=net_sdk.GatewayNodeReference + ), + active_gateway_count=dict(type="int"), + ) + + ipv4_subnet_spec = dict( + ip=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv4Address), + prefix_length=dict(type="int"), + ) + + ipv6_subnet_spec = dict( + ip=dict(type="dict", options=ip_address_sub_spec, obj=net_sdk.IPv6Address), + prefix_length=dict(type="int"), + ) + + ip_subnet_spec = dict( + ipv4=dict(type="dict", options=ipv4_subnet_spec, 
obj=net_sdk.IPv4Subnet), + ipv6=dict(type="dict", options=ipv6_subnet_spec, obj=net_sdk.IPv6Subnet), + ) + + metadata_spec = dict( + owner_reference_id=dict(type="str"), + owner_user_name=dict(type="str"), + project_reference_id=dict(type="str"), + project_name=dict(type="str"), + category_ids=dict(type="list", elements="str"), + ) + + module_args = dict( + ext_id=dict(type="str"), + name=dict(type="str"), + description=dict(type="str"), + vpc_type=dict(type="str", choices=["REGULAR", "TRANSIT"]), + common_dhcp_options=dict( + type="dict", options=vpc_dhcp_options_spec, obj=net_sdk.VpcDhcpOptions + ), + external_subnets=dict( + type="list", + elements="dict", + options=external_subnet_spec, + obj=net_sdk.ExternalSubnet, + ), + external_routing_domain_reference=dict(type="str"), + externally_routable_prefixes=dict( + type="list", elements="dict", options=ip_subnet_spec, obj=net_sdk.IPSubnet + ), + metadata=dict(type="dict", options=metadata_spec, obj=net_sdk.Metadata), + ) + + return module_args + + +def create_vpc(module, result): + vpcs = get_vpc_api_instance(module) + + sg = SpecGenerator(module) + default_spec = net_sdk.Vpc() + spec, err = sg.generate_spec(obj=default_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vpcs Spec", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(spec.to_dict()) + return + + resp = None + try: + resp = vpcs.create_vpc(body=spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while creating vpc", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + if task_ext_id and module.params.get("wait"): + task_status = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(task_status.to_dict()) + ext_id = get_entity_ext_id_from_task( + task_status, 
rel=TASK_CONSTANTS.RelEntityType.VPC + ) + if ext_id: + resp = get_vpc(module, vpcs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def check_vpcs_idempotency(old_spec, update_spec): + if old_spec != update_spec: + return False + return True + + +def update_vpc(module, result): + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + vpcs = get_vpc_api_instance(module) + + current_spec = get_vpc(module, vpcs, ext_id=ext_id) + + sg = SpecGenerator(module) + update_spec, err = sg.generate_spec(obj=deepcopy(current_spec)) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating vpcs update spec", **result) + + # check for idempotency + if check_vpcs_idempotency(current_spec.to_dict(), update_spec.to_dict()): + result["skipped"] = True + module.exit_json(msg="Nothing to change.", **result) + + if module.check_mode: + result["response"] = strip_internal_attributes(update_spec.to_dict()) + return + + resp = None + vpcs = get_vpc_api_instance(module) + try: + resp = vpcs.update_vpc_by_id(extId=ext_id, body=update_spec) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while updating vpc", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + wait_for_completion(module, task_ext_id) + resp = get_vpc(module, vpcs, ext_id) + result["ext_id"] = ext_id + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def delete_vpc(module, result): + vpcs = get_vpc_api_instance(module) + ext_id = module.params.get("ext_id") + result["ext_id"] = ext_id + + current_spec = get_vpc(module, vpcs, ext_id=ext_id) + + etag = get_etag(data=current_spec) + if not etag: + return module.fail_json("unable to fetch etag for deleting vpc", 
**result) + + kwargs = {"if_match": etag} + + try: + resp = vpcs.delete_vpc_by_id(extId=ext_id, **kwargs) + except Exception as e: + raise_api_exception( + module=module, + exception=e, + msg="Api Exception raised while deleting vpc", + ) + + task_ext_id = resp.data.ext_id + result["task_ext_id"] = task_ext_id + result["response"] = strip_internal_attributes(resp.data.to_dict()) + + if task_ext_id and module.params.get("wait"): + resp = wait_for_completion(module, task_ext_id) + result["response"] = strip_internal_attributes(resp.to_dict()) + + result["changed"] = True + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "ext_id"), True), + ("state", "absent", ("ext_id",)), + ], + ) + if SDK_IMP_ERROR: + module.fail_json( + msg=missing_required_lib("ntnx_networking_py_client"), + exception=SDK_IMP_ERROR, + ) + + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "ext_id": None, + } + state = module.params["state"] + if state == "present": + if module.params.get("ext_id"): + update_vpc(module, result) + else: + create_vpc(module, result) + else: + delete_vpc(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/requirements.txt b/requirements.txt index 89699d2bd..4d34ecff9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,11 @@ isort==5.9.3 coverage==6.5.0 shyaml==0.6.2 markupsafe==2 +ntnx-clustermgmt-py-client==4.0.1 +ntnx-iam-py-client==4.0.1 +ntnx-microseg-py-client==4.0.1 +ntnx-networking-py-client==4.0.1 +ntnx-prism-py-client==4.0.1 +ntnx-vmm-py-client==4.0.1 +ntnx-volumes-py-client==4.0.1 +ntnx-dataprotection-py-client==4.0.1 \ No newline at end of file diff --git a/scripts/codegen.py b/scripts/codegen.py index c182b97f1..82d49243c 100644 --- a/scripts/codegen.py +++ b/scripts/codegen.py @@ -83,8 +83,8 @@ 
""" from ..module_utils.base_module import BaseModule # noqa: E402 -from ..module_utils.prism.tasks import Task # noqa: E402 -from ..module_utils.prism.MNAME import CNAME # noqa: E402 +from ..module_utils.v3.prism.tasks import Task # noqa: E402 +from ..module_utils.v3.prism.MNAME import CNAME # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 diff --git a/tests/integration/requirements.txt b/tests/integration/requirements.txt index 89699d2bd..4d34ecff9 100644 --- a/tests/integration/requirements.txt +++ b/tests/integration/requirements.txt @@ -9,3 +9,11 @@ isort==5.9.3 coverage==6.5.0 shyaml==0.6.2 markupsafe==2 +ntnx-clustermgmt-py-client==4.0.1 +ntnx-iam-py-client==4.0.1 +ntnx-microseg-py-client==4.0.1 +ntnx-networking-py-client==4.0.1 +ntnx-prism-py-client==4.0.1 +ntnx-vmm-py-client==4.0.1 +ntnx-volumes-py-client==4.0.1 +ntnx-dataprotection-py-client==4.0.1 \ No newline at end of file diff --git a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml index 06125d5b6..96dfa8922 100644 --- a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml +++ b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml @@ -1,34 +1,33 @@ --- - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" - -- set_fact: - acp1_name: "{{random_name[0]}}1" - acp2_name: "{{random_name[0]}}2" - acp3_name: "{{random_name[0]}}3" - acp4_name: "{{random_name[0]}}4" - acp5_name: "{{random_name[0]}}5" - + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" +- name: Set acp name + ansible.builtin.set_fact: + acp1_name: "{{ random_name[0] }}1" + acp2_name: "{{ random_name[0] }}2" + acp3_name: "{{ random_name[0] }}3" + acp4_name: "{{ random_name[0] }}4" + acp5_name: "{{ random_name[0] }}5" - name: Create min ACP ntnx_acps: state: 
present - wait: True - name: "{{acp1_name}}" + wait: true + name: "{{ acp1_name }}" role: uuid: "{{ acp.role.uuid }}" register: result check_mode: true - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - - result.response.spec.name == "{{acp1_name}}" + - result.response.spec.name == "{{ acp1_name }}" fail_msg: " Unable to create Min ACP with check mode " success_msg: " Min ACP with check mode passed" # ########################################################## @@ -36,54 +35,56 @@ ntnx_acps: state: present wait: true - name: "{{acp2_name}}" + name: "{{ acp2_name }}" role: - uuid: '{{ acp.role.uuid }}' + uuid: "{{ acp.role.uuid }}" check_mode: false register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - - result.response.spec.name == "{{acp2_name}}" + - result.response.spec.name == "{{ acp2_name }}" fail_msg: " Unable to create Min ACP " success_msg: " Min ACP created successfully " -- set_fact: - todelete: "{{ todelete + [ result.acp_uuid ] }}" +- name: Set acp uuid to delete later + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.acp_uuid] }}" ########################################################## - name: Create ACP with user reference ntnx_acps: state: present - name: "{{acp5_name}}" + name: "{{ acp5_name }}" role: uuid: "{{ acp.role.uuid }}" user_uuids: - "{{ acp.user_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - - result.response.spec.name == "{{acp5_name}}" + - result.response.spec.name == "{{ acp5_name }}" - result.response.status.resources.role_reference.uuid == "{{ acp.role.uuid }}" - 
result.response.status.resources.user_reference_list.0.uuid == "{{ acp.user_uuid }}" fail_msg: " Unable to Create ACP with user reference " success_msg: " ACP with user reference created successfully " -- set_fact: - todelete: "{{ todelete + [ result.acp_uuid ] }}" +- name: Set acp uuid to delete later + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.acp_uuid] }}" ########################################################## - name: Create ACP with user ad user group reference ntnx_acps: state: present - name: "{{acp3_name}}" + name: "{{ acp3_name }}" role: uuid: "{{ acp.role.uuid }}" user_uuids: @@ -91,27 +92,28 @@ user_group_uuids: - "{{ acp.user_group_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - - result.response.spec.name == "{{acp3_name}}" + - result.response.spec.name == "{{ acp3_name }}" - result.response.status.resources.role_reference.uuid == "{{ acp.role.uuid }}" - result.response.status.resources.user_reference_list.0.uuid == "{{ acp.user_uuid }}" - result.response.status.resources.user_group_reference_list.0.uuid == "{{ acp.user_group_uuid }}" fail_msg: " Unable to Create ACP with user and user group reference " success_msg: " ACP with user and user group reference created successfully " -- set_fact: - todelete: "{{ todelete + [ result.acp_uuid ] }}" +- name: Set acp uuid to delete later + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.acp_uuid] }}" ########################################################## - name: Create ACP with all specfactions ntnx_acps: state: present - name: "{{acp4_name}}" + name: "{{ acp4_name }}" role: uuid: "{{ acp.role.uuid }}" user_uuids: @@ -134,7 +136,7 @@ operator: IN rhs: uuid_list: - - "{{ network.dhcp.uuid }}" + - "{{ network.dhcp.uuid }}" - scope_filter: - lhs: CATEGORY operator: IN @@ -148,10 +150,10 @@ rhs: collection: ALL 
register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -175,8 +177,9 @@ fail_msg: " Unable to Create ACP all specfactions " success_msg: " ACP with all specfactions created successfully " -- set_fact: - todelete: "{{ todelete + [ result.acp_uuid ] }}" +- name: Set acp uuid to delete later + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.acp_uuid] }}" ########################################################## - name: Delete all created acps ntnx_acps: @@ -184,10 +187,10 @@ acp_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true @@ -195,5 +198,6 @@ fail_msg: "unable to delete all created acp's" success_msg: "All acp's deleted successfully" -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml b/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml index f988ef708..8d6cfdab7 100644 --- a/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml +++ b/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml @@ -1,15 +1,16 @@ --- - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - acp1_name: "{{random_name[0]}}1" +- name: Set acp name + ansible.builtin.set_fact: + acp1_name: "{{ random_name[0] }}1" - name: Create ACP with all specfactions ntnx_acps: state: present - name: "{{acp1_name}}" + name: "{{ acp1_name }}" role: uuid: "{{ acp.role.uuid }}" user_uuids: @@ -18,23 +19,21 
@@ - "{{ acp.user_group_uuid }}" filters: - scope_filter: - - - lhs: PROJECT + - lhs: PROJECT operator: IN rhs: uuid_list: - "{{ project.uuid }}" entity_filter: - - - lhs: ALL + - lhs: ALL operator: IN rhs: collection: ALL register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -50,7 +49,6 @@ fail_msg: " Unable to Create ACP all specfactions " success_msg: " ACP with all specfactions created successfully " - - name: Delete acp ntnx_acps: state: absent @@ -59,7 +57,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' diff --git a/tests/integration/targets/ntnx_acps/tasks/main.yml b/tests/integration/targets/ntnx_acps/tasks/main.yml index b75051612..a987b305b 100644 --- a/tests/integration/targets/ntnx_acps/tasks/main.yml +++ b/tests/integration/targets/ntnx_acps/tasks/main.yml @@ -1,12 +1,17 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_acps.yml" - - import_tasks: "delete_acp.yml" - - import_tasks: "update_acps.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import create_acps.yml + ansible.builtin.import_tasks: "create_acps.yml" + - name: Import delete_acp.yml + ansible.builtin.import_tasks: "delete_acp.yml" + - name: Import update_acps.yml + ansible.builtin.import_tasks: "update_acps.yml" + - name: Import negative_scenarios.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git 
a/tests/integration/targets/ntnx_acps/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_acps/tasks/negative_scenarios.yml index 9f05fbe40..8a82a667c 100644 --- a/tests/integration/targets/ntnx_acps/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_acps/tasks/negative_scenarios.yml @@ -1,61 +1,62 @@ - - debug: - msg: "Started Negative Creation Cases" +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - - name: Unknown role name - ntnx_acps: - state: present - name: MinACP2 - role: - name: "unknown987" - register: result - ignore_errors: true +- name: Unknown role name + ntnx_acps: + state: present + name: MinACP2 + role: + name: "unknown987" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - - result.msg=="Failed generating acp spec" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail ACP created successfully with unknown role name ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + - result.msg=="Failed generating acp spec" + success_msg: " Success: returned error as expected " + fail_msg: " Fail ACP created successfully with unknown role name " ####################################################### - - name: Unknown role uuid - ntnx_acps: - state: present - name: MinACP2 - role: - uuid: 00000000-0000-0000-0000-000000000000 - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed==True - - result.status_code==405 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail acp created successfully with unknown role uuid ' +- name: Unknown role uuid + ntnx_acps: + state: present + name: MinACP2 + role: + uuid: 00000000-0000-0000-0000-000000000000 + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + - result.status_code==405 + success_msg: 
" Success: returned error as expected " + fail_msg: " Fail acp created successfully with unknown role uuid " ####################################################### - - name: Delete acp with unknown uuid - ntnx_acps: - state: absent - acp_uuid: 5 - ignore_errors: True - register: result - - name: Creation Status - assert: - that: - - result.status_code==400 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting acp with unknown uuid ' +- name: Delete acp with unknown uuid + ntnx_acps: + state: absent + acp_uuid: 5 + ignore_errors: true + register: result +- name: Creation Status + ansible.builtin.assert: + that: + - result.status_code==400 + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting acp with unknown uuid " ####################################################### - - name: Delete acp with missing uuid - ntnx_acps: - state: absent - ignore_errors: True - register: result +- name: Delete acp with missing uuid + ntnx_acps: + state: absent + ignore_errors: true + register: result - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting acp with missing uuid ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting acp with missing uuid " diff --git a/tests/integration/targets/ntnx_acps/tasks/update_acps.yml b/tests/integration/targets/ntnx_acps/tasks/update_acps.yml index a2181099e..b56eacf5f 100644 --- a/tests/integration/targets/ntnx_acps/tasks/update_acps.yml +++ b/tests/integration/targets/ntnx_acps/tasks/update_acps.yml @@ -1,6 +1,6 @@ --- - -- set_fact: +- name: Setting acp name variables + ansible.builtin.set_fact: acp_name: "{{ query('community.general.random_string', upper=false, numbers=false, special=false)[0] }}" acp_name_updated: "{{ query('community.general.random_string', upper=false, 
numbers=false, special=false)[0] }}" @@ -8,37 +8,38 @@ ntnx_acps: state: present wait: true - name: "{{acp_name}}" + name: "{{ acp_name }}" role: - uuid: '{{ acp.role.uuid }}' + uuid: "{{ acp.role.uuid }}" register: setup_acp - name: Creation Status - assert: + ansible.builtin.assert: that: - setup_acp.response is defined - setup_acp.response.status.state == 'COMPLETE' - - setup_acp.response.spec.name == "{{acp_name}}" + - setup_acp.response.spec.name == "{{ acp_name }}" - setup_acp.response.status.resources.role_reference.uuid == "{{ acp.role.uuid }}" fail_msg: "Unable to create ACP for update tests" success_msg: "ACP created successfully for update tests" -- set_fact: - todelete: '{{ todelete + [ setup_acp["response"]["metadata"]["uuid"] ] }}' +- name: Set acp uuid to delete later + ansible.builtin.set_fact: + todelete: '{{ todelete + [setup_acp["response"]["metadata"]["uuid"]] }}' ############################################# UPDATE TESTS ######################################## -- name: check idempotency +- name: Check idempotency ntnx_acps: state: present acp_uuid: "{{ setup_acp.acp_uuid }}" - name: "{{acp_name}}" + name: "{{ acp_name }}" role: - uuid: '{{ acp.role.uuid }}' + uuid: "{{ acp.role.uuid }}" register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -52,30 +53,28 @@ ntnx_acps: state: present acp_uuid: "{{ setup_acp.acp_uuid }}" - name: "{{acp_name_updated}}" + name: "{{ acp_name_updated }}" desc: "description after update" filters: - scope_filter: - - - lhs: PROJECT + - lhs: PROJECT operator: IN rhs: uuid_list: - "{{ project.uuid }}" entity_filter: - - - lhs: ALL + - lhs: ALL operator: IN rhs: collection: ALL register: result - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - - result.response.status.name == "{{acp_name_updated}}" + - 
result.response.status.name == "{{ acp_name_updated }}" - result.response.status.description == 'description after update' - result.response.status.resources.filter_list.context_list.0.entity_filter_expression_list.0.operator == "IN" - result.response.status.resources.filter_list.context_list.0.entity_filter_expression_list.0.left_hand_side.entity_type == "ALL" @@ -86,33 +85,30 @@ fail_msg: "Fail: Unable to update ACP" success_msg: "Pass: ACP with given update spec updated successfully" - ######################################################################################## - name: Update name, desc and filters ntnx_acps: state: present acp_uuid: "{{ setup_acp.acp_uuid }}" - name: "{{acp_name_updated}}" + name: "{{ acp_name_updated }}" desc: "description after update" filters: - scope_filter: - - - lhs: PROJECT + - lhs: PROJECT operator: IN rhs: uuid_list: - "{{ project.uuid }}" entity_filter: - - - lhs: ALL + - lhs: ALL operator: IN rhs: collection: ALL register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -120,20 +116,18 @@ fail_msg: "Fail: ACP got updated" success_msg: "Pass: ACP update skipped successfully due to no changes in spec" - ########################################### Cleanup ################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_acps: state: absent acp_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True - + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true @@ -141,5 +135,6 @@ fail_msg: "unable to delete all created acp's" success_msg: "All acp's deleted successfully" -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git 
a/tests/integration/targets/ntnx_acps_info/tasks/info.yml b/tests/integration/targets/ntnx_acps_info/tasks/info.yml index 3ac25358c..36ed5d386 100644 --- a/tests/integration/targets/ntnx_acps_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_acps_info/tasks/info.yml @@ -1,8 +1,10 @@ --- -- debug: - msg: "start ntnx_acps_info tests" +- name: Start ntnx_acps_info tests + ansible.builtin.debug: + msg: "Start ntnx_acps_info tests" -- set_fact: +- name: Generate random acp name + ansible.builtin.set_fact: acp_name: "{{ query('community.general.random_string', upper=false, numbers=false, special=false)[0] }}" - name: Create min ACP for update @@ -11,14 +13,14 @@ wait: true name: "{{acp_name}}" role: - uuid: '{{ acp.role.uuid }}' + uuid: "{{ acp.role.uuid }}" register: acps -- set_fact: +- name: Adding acp to delete later + ansible.builtin.set_fact: todelete: '{{ todelete + [ acps["response"]["metadata"]["uuid"] ] }}' - -- name: test getting all acp's +- name: Test getting all acp's ntnx_acps_info: kind: access_control_policy length: 1 @@ -27,8 +29,8 @@ sort_attribute: "name" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -37,14 +39,14 @@ success_msg: "acps listed successfully" ################################################################ -- name: test getting particular acp using filter +- name: Test getting particular acp using filter ntnx_acps_info: filter: name: "{{ acp_name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -55,13 +57,13 @@ ################################################################# -- name: test getting particular acp using uuid +- name: Test getting particular acp using uuid ntnx_acps_info: - acp_uuid: '{{ acps.response.metadata.uuid }}' + acp_uuid: "{{ 
acps.response.metadata.uuid }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -70,14 +72,14 @@ fail_msg: "Unable to get particular acp using uuid" success_msg: "acp info obtained successfully using uuid" -- name: cleanup created entities +- name: Cleanup created entities ntnx_acps: state: absent acp_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True - + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_acps_info/tasks/main.yml b/tests/integration/targets/ntnx_acps_info/tasks/main.yml index 3364b30c6..2fa9406da 100644 --- a/tests/integration/targets/ntnx_acps_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_acps_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: - group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" +- name: Initializing variables + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_address_groups/aliases b/tests/integration/targets/ntnx_address_groups/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_address_groups/aliases +++ b/tests/integration/targets/ntnx_address_groups/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_address_groups/tasks/create.yml b/tests/integration/targets/ntnx_address_groups/tasks/create.yml index 59a2e0cef..b6b760c29 100644 --- 
a/tests/integration/targets/ntnx_address_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/create.yml @@ -1,19 +1,21 @@ --- -- debug: - msg: start ntnx_address_groups create tests +- name: Start ntnx_address_groups create tests + ansible.builtin.debug: + msg: Start ntnx_address_groups create tests - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Set suffix name + ansible.builtin.set_fact: suffix_name: "ansible-ag" -- set_fact: +- name: Set address group names + ansible.builtin.set_fact: ag1: "{{random_name}}{{suffix_name}}1" ag2: "{{random_name}}{{suffix_name}}2" - - name: Create address group ntnx_address_groups: state: present @@ -27,11 +29,11 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.address_group_uuid is defined - - result.changed == True + - result.changed == true - result.response.name == "{{ag1}}" - result.response.description == "test-ansible-group-1-desc" - result.response.ip_address_block_list[0].ip == "10.1.1.0" @@ -42,7 +44,8 @@ fail_msg: "Unable to create address group" success_msg: "Address group created susccessfully" -- set_fact: +- name: Setting address group uuid to delete variable + ansible.builtin.set_fact: todelete: '{{ result["address_group_uuid"] }}' ################################################################################################### @@ -55,10 +58,10 @@ - network_ip: "10.1.1.0" network_prefix: 24 register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == "Address group with given name already exists" - result.changed == False @@ -68,7 +71,7 @@ ################################################################################################### - name: Check mode test - check_mode: yes + 
check_mode: true ntnx_address_groups: state: present name: "{{ag2}}" @@ -81,7 +84,7 @@ register: result - name: Check mode Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -97,13 +100,13 @@ ################################################################################################### - -- name: cleanup created entities +- name: Cleanup created entities ntnx_address_groups: state: absent address_group_uuid: "{{ todelete }}" register: result - ignore_errors: True + ignore_errors: true -- set_fact: - todelete: [] +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: "" diff --git a/tests/integration/targets/ntnx_address_groups/tasks/delete.yml b/tests/integration/targets/ntnx_address_groups/tasks/delete.yml index 1c707f087..234cc1d18 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/delete.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/delete.yml @@ -1,15 +1,18 @@ --- -- debug: - msg: start ntnx_address_groups delete tests +- name: Start ntnx_address_groups create tests + ansible.builtin.debug: + msg: Start ntnx_address_groups delete tests - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Set suffix name + ansible.builtin.set_fact: suffix_name: "ansible-ag" -- set_fact: +- name: Set address group names + ansible.builtin.set_fact: ag1: "{{random_name}}{{suffix_name}}1" @@ -24,26 +27,26 @@ register: test_ag - name: Creation Status - assert: + ansible.builtin.assert: that: - test_ag.response is defined - - test_ag.changed == True + - test_ag.changed == true fail_msg: "Unable to create address group" success_msg: "address group created susccessfully" ################################################################################################### -- name: delete address group +- name: Delete address group 
ntnx_address_groups: state: absent address_group_uuid: "{{ test_ag.address_group_uuid }}" register: result -- name: delete Status - assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - - result.changed == True + - result.changed == true fail_msg: "address group delete failed" success_msg: "address group deleted successfully" diff --git a/tests/integration/targets/ntnx_address_groups/tasks/main.yml b/tests/integration/targets/ntnx_address_groups/tasks/main.yml index a2c7a07b0..e4554ddc7 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/main.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "update.yml" - - import_tasks: "delete.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" + - name: Import update.yml + ansible.builtin.import_tasks: "update.yml" + - name: Import delete.yml + ansible.builtin.import_tasks: "delete.yml" diff --git a/tests/integration/targets/ntnx_address_groups/tasks/update.yml b/tests/integration/targets/ntnx_address_groups/tasks/update.yml index f4ebedc50..366f9248c 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/update.yml @@ -1,16 +1,19 @@ --- -- debug: - msg: start ntnx_address_groups update tests +- name: Start ntnx_address_groups update tests + ansible.builtin.debug: + msg: Start ntnx_address_groups update tests - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: 
"{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Set suffix name + ansible.builtin.set_fact: suffix_name: "ansible-ag" -- set_fact: +- name: Set address group names + ansible.builtin.set_fact: ag1: "{{random_name}}{{suffix_name}}1" ag2: "{{random_name}}{{suffix_name}}2" @@ -30,10 +33,10 @@ register: test_ag - name: Creation Status - assert: + ansible.builtin.assert: that: - test_ag.response is defined - - test_ag.changed == True + - test_ag.changed == true fail_msg: "Unable to create adress group" success_msg: "Address group created susccessfully" @@ -52,11 +55,11 @@ register: result - name: Update status - assert: + ansible.builtin.assert: that: - result.response is defined - result.address_group_uuid is defined - - result.changed == True + - result.changed == true - result.response.name == "{{ag2}}" - result.response.description == "test-ansible-address-group-3-desc-updated" - result.response.ip_address_block_list[0].ip == "10.1.3.1" @@ -79,8 +82,8 @@ network_prefix: 32 register: result -- name: idempotency check status - assert: +- name: Idempotency check status + ansible.builtin.assert: that: - result.changed == False - result.failed == False @@ -92,7 +95,7 @@ ################################################################################################### - name: Check mode test - check_mode: yes + check_mode: true ntnx_address_groups: state: present address_group_uuid: "{{test_ag.address_group_uuid}}" @@ -105,8 +108,8 @@ network_prefix: 32 register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.address_group_uuid is defined @@ -123,9 +126,9 @@ ################################################################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_address_groups: state: absent address_group_uuid: "{{test_ag.address_group_uuid}}" 
register: result - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/ntnx_address_groups_info/aliases b/tests/integration/targets/ntnx_address_groups_info/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_address_groups_info/aliases +++ b/tests/integration/targets/ntnx_address_groups_info/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_address_groups_info/tasks/address_groups_info.yml b/tests/integration/targets/ntnx_address_groups_info/tasks/address_groups_info.yml index bebdf2535..9df2e93cc 100644 --- a/tests/integration/targets/ntnx_address_groups_info/tasks/address_groups_info.yml +++ b/tests/integration/targets/ntnx_address_groups_info/tasks/address_groups_info.yml @@ -1,5 +1,6 @@ -- debug: - msg: start testing ntnx_address_groups_info +- name: Start testing ntnx_address_groups_info + ansible.builtin.debug: + msg: Start testing ntnx_address_groups_info - name: Create address groups for tests ntnx_address_groups: @@ -22,10 +23,10 @@ - name: List all address groups ntnx_address_groups_info: register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -33,9 +34,9 @@ fail_msg: "Unable to list all address groups" success_msg: " address groups info obtained successfully" -- set_fact: +- name: Set address group name and uuid for further tests + ansible.builtin.set_fact: test_address_groups_name: "{{result.response.entities.1.address_group.name}}" -- set_fact: test_address_groups_uuid: "{{result.response.entities.1.uuid}}" ################################################## @@ -44,10 +45,10 @@ ntnx_address_groups_info: address_group_uuid: "{{ test_address_groups_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is 
defined - result.changed == false @@ -63,10 +64,10 @@ filter: name: "{{ test_address_groups_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -84,10 +85,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -98,14 +99,16 @@ ################################################## -- name: delete created address groups for tests +- name: Delete created address groups for tests ntnx_address_groups: state: absent address_group_uuid: "{{ ag_1.address_group_uuid}}" + register: result ignore_errors: true -- name: delete created address groups for tests +- name: Delete created address groups for tests ntnx_address_groups: state: absent address_group_uuid: "{{ ag_2.address_group_uuid}}" + register: result ignore_errors: true diff --git a/tests/integration/targets/ntnx_address_groups_info/tasks/main.yml b/tests/integration/targets/ntnx_address_groups_info/tasks/main.yml index 011a2fd92..1198ff65e 100644 --- a/tests/integration/targets/ntnx_address_groups_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_address_groups_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "address_groups_info.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "address_groups_info.yml" diff --git a/tests/integration/targets/ntnx_address_groups_v2/aliases 
b/tests/integration/targets/ntnx_address_groups_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_address_groups_v2/meta/main.yml b/tests/integration/targets/ntnx_address_groups_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_address_groups_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_address_groups_v2/tasks/address_groups.yml b/tests/integration/targets/ntnx_address_groups_v2/tasks/address_groups.yml new file mode 100644 index 000000000..e52d0df31 --- /dev/null +++ b/tests/integration/targets/ntnx_address_groups_v2/tasks/address_groups.yml @@ -0,0 +1,291 @@ +--- +- name: Start ntnx_address_groups_v2 tests + ansible.builtin.debug: + msg: start ntnx_address_groups_v2 tests + +- name: Generate random project_name + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set suffix_name + ansible.builtin.set_fact: + suffix_name: ansible-ag + +- name: Set variables + ansible.builtin.set_fact: + ag1: "{{ random_name }}{{ suffix_name }}1" + ag2: "{{ random_name }}{{ suffix_name }}2" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create address group with check mode enabled + ntnx_address_groups_v2: + state: present + name: "{{ ag1 }}" + description: test-ansible-address-group-2-desc + ipv4_addresses: + - value: 10.1.1.0 + prefix_length: 24 + - value: 10.1.2.2 + prefix_length: 32 + ip_ranges: + - start_ip: 10.2.1.0 + end_ip: 10.2.2.0 + register: result + ignore_errors: true + check_mode: true + +- name: Create address group with check mode enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ ag1 }}" + - result.response.description == 
"test-ansible-address-group-2-desc" + - result.response.ipv4_addresses[0].value == "10.1.1.0" + - result.response.ipv4_addresses[1].value == "10.1.2.2" + - result.response.ipv4_addresses[0].prefix_length == 24 + - result.response.ipv4_addresses[1].prefix_length == 32 + - result.response.ip_ranges[0].start_ip == "10.2.1.0" + - result.response.ip_ranges[0].end_ip == "10.2.2.0" + + fail_msg: Create address group with check mode enabled failed + success_msg: Create address group with check mode enabled passed + +################################################################################################### + +- name: Create address group + ntnx_address_groups_v2: + state: present + name: "{{ ag2 }}" + description: test-ansible-group-1-desc + ipv4_addresses: + - value: 10.1.1.0 + prefix_length: 24 + - value: 10.1.2.2 + prefix_length: 32 + register: result + ignore_errors: true + +- name: Create address group Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.changed == True + - result.failed == False + - result.response.name == "{{ ag2 }}" + - result.response.description == "test-ansible-group-1-desc" + - result.response.ipv4_addresses[0].value == "10.1.1.0" + - result.response.ipv4_addresses[1].value == "10.1.2.2" + - result.response.ipv4_addresses[0].prefix_length == 24 + - result.response.ipv4_addresses[1].prefix_length == 32 + - result.task_ext_id is defined + + fail_msg: Unable to create address group + success_msg: Address group created successfully + +- name: Set external id to delete + ansible.builtin.set_fact: + todelete: '{{ result["ext_id"] }}' + +################################################################################################### + +- name: Update address group with check mode enabled + ntnx_address_groups_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ ag1 }}" + description: test-ansible-group-2-desc + ipv4_addresses: + - 
value: 10.2.1.0 + prefix_length: 24 + - value: 10.2.2.1 + prefix_length: 32 + register: result + ignore_errors: true + check_mode: true + +- name: Update address group with check mode enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == False + - result.response.name == "{{ ag1 }}" + - result.response.description == "test-ansible-group-2-desc" + - result.response.ipv4_addresses[0].value == "10.2.1.0" + - result.response.ipv4_addresses[1].value == "10.2.2.1" + - result.response.ipv4_addresses[0].prefix_length == 24 + - result.response.ipv4_addresses[1].prefix_length == 32 + + fail_msg: Update address group with check mode enabled failed + success_msg: Update address group with check mode enabled passed + +################################################################################################### + +- name: Update address group + ntnx_address_groups_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ ag1 }}" + description: test-ansible-group-3-desc + ipv4_addresses: + - value: 10.1.4.1 + prefix_length: 32 + register: result + ignore_errors: true + +- name: Update address group status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == True + - result.response.name == "{{ ag1 }}" + - result.response.description == "test-ansible-group-3-desc" + - result.response.ipv4_addresses[0].value == "10.1.4.1" + - result.response.ipv4_addresses[0].prefix_length == 32 + - result.response.ipv4_addresses | length == 1 + + fail_msg: Unable to update address group + success_msg: Address group updated successfully + +################################################################################################### + +- name: Test idempotency by updating address group with same values + ntnx_address_groups_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ ag1 }}" + description: test-ansible-group-3-desc + ipv4_addresses: 
+ - value: 10.1.4.1 + prefix_length: 32 + register: result + ignore_errors: true + +- name: Idempotency check status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.msg == "Nothing to change." + + fail_msg: Idempotency check failed + success_msg: Idempotency check passed + +################################################################################################### + +- name: Check if address group with existing name fails or not + ntnx_address_groups_v2: + state: present + name: "{{ ag1 }}" + ipv4_addresses: + - value: 10.1.1.0 + prefix_length: 24 + register: result + ignore_errors: true + +- name: Check if address group with existing name fails or not Status + ansible.builtin.assert: + that: + - result.msg == "Task Failed" + - result.changed == false + - result.failed == true + + fail_msg: Was able to create address group with existing address group name + success_msg: Address group with existing address group name failed successfully + +################################################################################################### + +- name: List all address groups + ntnx_address_groups_info_v2: + register: result + ignore_errors: true + +- name: List all address groups Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + fail_msg: Unable to list all address groups + success_msg: address groups info obtained successfully + +################################################################################################### + +- name: List address groups using external id + ntnx_address_groups_info_v2: + ext_id: "{{ todelete }}" + register: result + ignore_errors: true + +- name: List address groups using external id Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ todelete }}" + - 
result.response.name == "{{ ag1 }}" + - result.response.description == "test-ansible-group-3-desc" + fail_msg: Unable to list address groups using external id + success_msg: address groups info obtained successfully + +################################################################################################### + +- name: List address groups using limit + ntnx_address_groups_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List address groups using limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: "Unable to list address groups using limit " + success_msg: "address groups listed successfully using limit " + +################################################################################################### + +- name: List address groups using filter + ntnx_address_groups_info_v2: + filter: name eq '{{ ag1 }}' + register: result + ignore_errors: true + +- name: List address groups using filter Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + - result.response[0].name == "{{ ag1 }}" + - result.response[0].description == "test-ansible-group-3-desc" + - result.response[0].ext_id == "{{ todelete }}" + fail_msg: "Unable to list address groups using filter " + success_msg: "address groups listed successfully using filter " + +################################################################################################### + +- name: Delete address group + ntnx_address_groups_v2: + state: absent + ext_id: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Reset todelete variable + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_address_groups_v2/tasks/main.yml b/tests/integration/targets/ntnx_address_groups_v2/tasks/main.yml new file mode 100644 index
000000000..72b9167e7 --- /dev/null +++ b/tests/integration/targets/ntnx_address_groups_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import address_groups.yml + ansible.builtin.import_tasks: address_groups.yml diff --git a/tests/integration/targets/ntnx_authorization_policies_v2/aliases b/tests/integration/targets/ntnx_authorization_policies_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_authorization_policies_v2/meta/main.yml b/tests/integration/targets/ntnx_authorization_policies_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_authorization_policies_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_authorization_policies_v2/tasks/all_operations.yml b/tests/integration/targets/ntnx_authorization_policies_v2/tasks/all_operations.yml new file mode 100644 index 000000000..5d20044c9 --- /dev/null +++ b/tests/integration/targets/ntnx_authorization_policies_v2/tasks/all_operations.yml @@ -0,0 +1,507 @@ +--- +- name: Start authorization CRUD tests + ansible.builtin.debug: + msg: Start authorization CRUD tests + +- name: Generate random string + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Create random acp_name and todelete list + ansible.builtin.set_fact: + acp_name: "{{ random_name }}-ansible-ag" + todelete: [] + +######################################################################## Test setup ######################################################################## + +- name: Get create operations + ntnx_operations_info_v2: + filter: displayName startswith 
'Create_' + register: create_operations + ignore_errors: true + +- name: Define variables + ansible.builtin.set_fact: + test_operation_1_uuid: "{{ create_operations.response[0].ext_id }}" + test_operation_2_uuid: "{{ create_operations.response[1].ext_id }}" + +- name: Create roles with operations + ntnx_roles_v2: + state: present + display_name: "{{ acp_name }}-ansible-ag-role" + description: Role for ansible acp tests + operations: + - "{{ test_operation_1_uuid }}" + - "{{ test_operation_2_uuid }}" + wait: true + register: result + ignore_errors: true + +- name: Save role uuid + ansible.builtin.set_fact: + role_ext_id: "{{ result.ext_id }}" + +- name: Verify role creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == True + - result.failed == False + - result.response.display_name == "{{ acp_name }}-ansible-ag-role" + success_msg: Created roles with operations + fail_msg: Create roles with operations failed +######################################################################## Create Tests ######################################################################## +- name: Create auth policy spec using check mode + ntnx_authorization_policies_v2: + state: present + display_name: "{{ acp_name }}1" + description: ansible created acps + role: "{{ role_ext_id }}" + authorization_policy_type: USER_DEFINED + entities: + - images: + "*": + eq: "*" + - directory_service: + uuid: + anyof: + - ebbfbd38-794b-5529-adcc-dcb6b4177382 + - ebbfbd38-794b-5529-adcc-dcb6b4177383 + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + identities: + - user: + uuid: + anyof: + - ebbfbd38-794b-5529-adcc-dcb6b4177384 + - ebbfbd38-794b-5529-adcc-dcb6b4177385 + - user: + group: + anyof: + - ebbfbd38-794b-5529-adcc-dcb6b4177386 + - ebbfbd38-794b-5529-adcc-dcb6b4177387 + register: result + ignore_errors: true + check_mode: true + +- name: Verify auth policy spec creation in check mode + ansible.builtin.assert: + 
that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.display_name == "{{ acp_name }}1" + - result.response.description == "ansible created acps" + - result.response.role == "{{ role_ext_id }}" + - result.response.authorization_policy_type == "USER_DEFINED" + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.directory_service.uuid.anyof | length == 2 + - result.response.entities[1].entity_filter.directory_service.uuid.anyof[0] == "ebbfbd38-794b-5529-adcc-dcb6b4177382" + - result.response.entities[1].entity_filter.directory_service.uuid.anyof[1] == "ebbfbd38-794b-5529-adcc-dcb6b4177383" + - result.response.entities[2].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.identities[0].identity_filter.user.uuid.anyof | length == 2 + - result.response.identities[0].identity_filter.user.uuid.anyof[0] == "ebbfbd38-794b-5529-adcc-dcb6b4177384" + - result.response.identities[0].identity_filter.user.uuid.anyof[1] == "ebbfbd38-794b-5529-adcc-dcb6b4177385" + - result.response.identities[1].identity_filter.user.group.anyof | length == 2 + - result.response.identities[1].identity_filter.user.group.anyof[0] == "ebbfbd38-794b-5529-adcc-dcb6b4177386" + - result.response.identities[1].identity_filter.user.group.anyof[1] == "ebbfbd38-794b-5529-adcc-dcb6b4177387" + fail_msg: Unable to create auth policy spec in check mode + success_msg: Auth policy spec created successfully in check mode + +- name: Create auth policy + ntnx_authorization_policies_v2: + state: present + display_name: "{{ acp_name }}1" + description: ansible created acps + role: "{{ role_ext_id }}" + authorization_policy_type: USER_DEFINED + entities: + - images: + "*": + eq: "*" + - directory_service: + uuid: + anyof: + - "{{ directory_service_uuid }}" + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + identities: + - user: + uuid: + anyof: + - "{{ users[0] }}" + - 
"{{ users[1] }}" + - user: + group: + anyof: + - "{{ user_groups[0] }}" + register: result + ignore_errors: true + +- name: Verify auth policy creation + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.display_name == "{{ acp_name }}1" + - result.response.description == "ansible created acps" + - result.response.role == "{{ role_ext_id }}" + - result.response.authorization_policy_type == "USER_DEFINED" + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.directory_service.uuid.anyof | length == 1 + - result.response.entities[1].entity_filter.directory_service.uuid.anyof[0] == "{{ directory_service_uuid }}" + - result.response.entities[2].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.identities[0].identity_filter.user.uuid.anyof | length == 2 + - result.response.identities[0].identity_filter.user.uuid.anyof[0] == "{{ users[0] }}" + - result.response.identities[0].identity_filter.user.uuid.anyof[1] == "{{ users[1] }}" + - result.response.identities[1].identity_filter.user.group.anyof | length == 1 + - result.response.identities[1].identity_filter.user.group.anyof[0] == "{{ user_groups[0] }}" + fail_msg: Unable to create auth policy + success_msg: Auth policy created successfully + +- name: Add acp to todelete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +- name: Create another auth policy + ntnx_authorization_policies_v2: + state: present + display_name: "{{ acp_name }}2" + role: "{{ role_ext_id }}" + entities: + - images: + "*": + eq: "*" + identities: + - user: + group: + anyof: + - "{{ user_groups[0] }}" + register: result + ignore_errors: true + +- name: Verify auth policy creation + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == 
True + - result.failed == False + - result.response.display_name == "{{ acp_name }}2" + - result.response.role == "{{ role_ext_id }}" + - result.response.authorization_policy_type == "USER_DEFINED" + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.identities[0].identity_filter.user.group.anyof | length == 1 + - result.response.identities[0].identity_filter.user.group.anyof[0] == "{{ user_groups[0] }}" + fail_msg: Unable to create auth policy + success_msg: Auth policy created successfully + +- name: Add acp to todelete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +- name: Save policy external Id for update tests + ansible.builtin.set_fact: + policy_ext_id: "{{ result.ext_id }}" + +###################################################### Update tests ######################################################## + +- name: Generate spec with checkmode for updating acp + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + entities: + - images: + "*": + eq: "*" + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + - "*": + project_uuid: + anyof: + - "{{ project.uuid }}" + register: result + ignore_errors: true + check_mode: true + +- name: Verify spec + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.ext_id == "{{ policy_ext_id }}" + - result.response.entities | length == 3 + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.entities[2].entity_filter["*"].project_uuid.anyof[0] == "{{ project.uuid }}" + - result.response.identities | length == 1 + - result.response.identities[0].identity_filter.user.group.anyof | length == 1 + - result.response.identities[0].identity_filter.user.group.anyof[0] == "{{ user_groups[0] }}" + fail_msg: Unable to create spec for 
updating acps + success_msg: Spec for updating acps created successfully + +- name: Add access to self owned marketplace_item and certain projects + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + entities: + - images: + "*": + eq: "*" + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + - "*": + project_uuid: + anyof: + - "{{ project.uuid }}" + register: result + ignore_errors: true + +- name: Verify auth policy update + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id == "{{ policy_ext_id }}" + - result.response.entities | length == 3 + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.entities[2].entity_filter["*"].project_uuid.anyof[0] == "{{ project.uuid }}" + - result.response.identities | length == 1 + - result.response.identities[0].identity_filter.user.group.anyof | length == 1 + - result.response.identities[0].identity_filter.user.group.anyof[0] == "{{ user_groups[0] }}" + fail_msg: Unable to update policy + success_msg: Policy updated successfully + +- name: Give access to users and remove access of user groups + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + identities: + - user: + uuid: + anyof: + - "{{ users[0] }}" + - "{{ users[1] }}" + register: result + ignore_errors: true + +- name: Verify auth policy update + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id == "{{ policy_ext_id }}" + - result.response.entities | length == 3 + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.entities[2].entity_filter["*"].project_uuid.anyof[0] == "{{ project.uuid 
}}" + - result.response.identities | length == 1 + - result.response.identities[0].identity_filter.user.uuid.anyof | length == 2 + - result.response.identities[0].identity_filter.user.uuid.anyof[0] == "{{ users[0] }}" + - result.response.identities[0].identity_filter.user.uuid.anyof[1] == "{{ users[1] }}" + fail_msg: Unable to update policy + success_msg: Policy updated successfully + +- name: Remove access of certain users and certain entities + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + entities: + - images: + "*": + eq: "*" + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + identities: + - user: + uuid: + anyof: + - "{{ users[0] }}" + register: result + ignore_errors: true + +- name: Verify update status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id == "{{ policy_ext_id }}" + - result.response.entities | length == 2 + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.identities | length == 1 + - result.response.identities[0].identity_filter.user.uuid.anyof | length == 1 + - result.response.identities[0].identity_filter.user.uuid.anyof[0] == "{{ users[0] }}" + fail_msg: Unable to update policy + success_msg: Policy updated successfully + +- name: Try update with same data to check idempotency + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + display_name: "{{ acp_name }}2" + role: "{{ role_ext_id }}" + entities: + - images: + "*": + eq: "*" + - marketplace_item: + owner_uuid: + eq: SELF_OWNED + identities: + - user: + uuid: + anyof: + - "{{ users[0] }}" + register: result + ignore_errors: true + +- name: Verify update status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.ext_id == "{{ 
policy_ext_id }}" + - result.msg == "Nothing to change." + - result.skipped == True + fail_msg: Module failed to skip when nothing to change + success_msg: Policy auth update skipped successfully + +- name: Update name of policy + ntnx_authorization_policies_v2: + state: present + ext_id: "{{ policy_ext_id }}" + display_name: "{{ acp_name }}2-updated" + register: result + ignore_errors: true + +- name: Verify update status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id == "{{ policy_ext_id }}" + - result.response.display_name == "{{ acp_name }}2-updated" + - result.response.entities | length == 2 + - result.response.entities[0].entity_filter.images["*"]["eq"] == "*" + - result.response.entities[1].entity_filter.marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.identities | length == 1 + - result.response.identities[0].identity_filter.user.uuid.anyof | length == 1 + - result.response.identities[0].identity_filter.user.uuid.anyof[0] == "{{ users[0] }}" + - result.response.role == "{{ role_ext_id }}" + fail_msg: Unable to update policy + success_msg: Policy updated successfully + +###################################################### Read Tests ######################################################## + +- name: Get all auth policies + ntnx_authorization_policies_info_v2: + limit: 5 + register: result + ignore_errors: true + +- name: Verify all auth policies + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 5 + fail_msg: Unable to get all auth policies + success_msg: All auth policies fetched successfully + +- name: Get specific auth policy + ntnx_authorization_policies_info_v2: + ext_id: "{{ policy_ext_id }}" + register: result + ignore_errors: true + +- name: Verify specific auth policy + ansible.builtin.assert: + that: + - result.response is defined + - result.response.ext_id == "{{ policy_ext_id }}" + - 
result.response.display_name == "{{ acp_name }}2-updated" + - result.response.entities | length == 2 + - result.response.entities[0].images["*"]["eq"] == "*" + - result.response.entities[1].marketplace_item.owner_uuid.eq == "SELF_OWNED" + - result.response.identities | length == 1 + - result.response.identities[0].user.uuid.anyof | length == 1 + - result.response.identities[0].user.uuid.anyof[0] == "{{ users[0] }}" + - result.response.role == "{{ role_ext_id }}" + fail_msg: Unable to get specific auth policy + success_msg: Specific auth policy fetched successfully + +- name: Fetch using filters + ntnx_authorization_policies_info_v2: + filter: displayName eq '{{ acp_name }}2-updated' + register: result + ignore_errors: true + +- name: Verify fetch using filters + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].ext_id == "{{ policy_ext_id }}" + fail_msg: Unable to fetch using filters + success_msg: Fetched using filters successfully +###################################################### Delete Tests ######################################################## +- name: Delete created auth policies + ntnx_authorization_policies_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Verify status + vars: + msg: "Authorization policy with ext_id: {{ item.ext_id }} deleted successfully" + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + - result.results | length == todelete | length + - item.failed == false + - item.ext_id in todelete + - item.msg == "{{ msg }}" + fail_msg: Delete created auth policies failed + success_msg: Delete created auth policies passed + loop: "{{ result.results }}" + when: todelete | length > 0 and result | length > 0 + +- name: Reset todelete + ansible.builtin.set_fact: + todelete: [] + +###################################################### Deleted role 
######################################################## + +- name: Delete role + ntnx_roles_v2: + state: absent + ext_id: "{{ role_ext_id }}" + register: result + ignore_errors: true + +- name: Delete roles Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + fail_msg: Unable to delete roles + success_msg: Roles deleted successfully diff --git a/tests/integration/targets/ntnx_authorization_policies_v2/tasks/main.yml b/tests/integration/targets/ntnx_authorization_policies_v2/tasks/main.yml new file mode 100644 index 000000000..b08529f55 --- /dev/null +++ b/tests/integration/targets/ntnx_authorization_policies_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all authorization policies test + ansible.builtin.import_tasks: all_operations.yml diff --git a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml index 679532a42..f68720200 100644 --- a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml +++ b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml @@ -1,8 +1,10 @@ --- -- debug: +- name: Start testing ntnx_categories + ansible.builtin.debug: msg: Start testing ntnx_categories -- set_fact: +- name: Set category name and values + ansible.builtin.set_fact: first_category: name: test-catA1 desc: first test description @@ -10,9 +12,9 @@ second_category: name: test-catB1 values: - - 'value-a' - - 'value-b' - - 'value-c' + - "value-a" + - "value-b" + - "value-c" - name: Create only category key with description ntnx_categories: @@ -23,7 +25,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is 
defined - result.changed == true @@ -45,9 +47,8 @@ register: result ignore_errors: true - - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -59,7 +60,7 @@ fail_msg: "Unable to Add values to existing category key having no values & Update description" success_msg: "Add values to existing category key having no values & Update description finished successfully" ################# -- name: update existing category with same values +- name: Update existing category with same values ntnx_categories: state: "present" name: "{{first_category.name}}" @@ -71,7 +72,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -79,7 +80,7 @@ fail_msg: "Fail: existing category update with wrong values" success_msg: "Passed: Nothing to update as expected " ################# -- name: update existing category by deleting some values +- name: Update existing category by deleting some values ntnx_categories: state: "absent" name: "{{first_category.name}}" @@ -89,14 +90,14 @@ register: result ignore_errors: true -- name: get modified category +- name: Get modified category ntnx_categories_info: name: "{{first_category.name}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -106,7 +107,7 @@ fail_msg: "Fail: unable to update existing category by deleting some values " success_msg: "Passed: update existing category by deleting some values finished successfully" ################# -- name: update existing category by deleting all values +- name: Update existing category by deleting all values ntnx_categories: state: "absent" name: "{{first_category.name}}" @@ -114,15 +115,14 @@ register: result ignore_errors: true -- name: get modified category +- name: Get modified category ntnx_categories_info: name: "{{first_category.name}}" 
register: result ignore_errors: true - - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -131,21 +131,21 @@ fail_msg: "Fail: unable to update existing category by deleting all values " success_msg: "Passed: update existing category by deleting all values finished successfully" ################# -- name: Delte the category +- name: Delete the category ntnx_categories: state: "absent" name: "{{first_category.name}}" register: result ignore_errors: true -- name: search deleted category +- name: Search deleted category ntnx_categories_info: name: "{{first_category.name}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -167,7 +167,7 @@ check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -191,7 +191,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -202,7 +202,7 @@ fail_msg: "Unable to Create category key and value together" success_msg: "Create category key and value together finished successfully" ################# -- name: delete the category +- name: Delete the category ntnx_categories: state: "absent" name: "{{second_category.name}}" @@ -210,7 +210,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true diff --git a/tests/integration/targets/ntnx_categories/tasks/main.yml b/tests/integration/targets/ntnx_categories/tasks/main.yml index d4d19ea74..76916a644 100644 --- a/tests/integration/targets/ntnx_categories/tasks/main.yml +++ b/tests/integration/targets/ntnx_categories/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: 
"{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "all_operations.yml" + - name: Import all_operations.yml + ansible.builtin.import_tasks: "all_operations.yml" diff --git a/tests/integration/targets/ntnx_categories_info/tasks/info.yml b/tests/integration/targets/ntnx_categories_info/tasks/info.yml index 69a1eff99..8d52834f7 100644 --- a/tests/integration/targets/ntnx_categories_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_categories_info/tasks/info.yml @@ -1,14 +1,15 @@ --- -- debug: +- name: Start testing ntnx_categories_info + ansible.builtin.debug: msg: Start testing ntnx_categories_info -- name: test getting all categories +- name: Test getting all categories ntnx_categories_info: register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response.entities is defined - result.changed == false @@ -18,12 +19,13 @@ fail_msg: "Unable to list all categories" success_msg: "categories listed successfully" ##################################################################################################### -- set_fact: +- name: Set category name and values + ansible.builtin.set_fact: category_name: test-catAA values: - - 'value-a' - - 'value-b' - - 'value-c' + - "value-a" + - "value-b" + - "value-c" - name: Create category key ntnx_categories: @@ -33,15 +35,15 @@ register: result ignore_errors: true -- name: test getting the category with filter by it's name +- name: Test getting the category with filter by its name ntnx_categories_info: filter: - name: "{{category_name}}" + name: "{{category_name}}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: 
that: - result.response.entities|length == 1 - result.changed == false @@ -50,14 +52,14 @@ fail_msg: "Unable to get particular category with name filter" success_msg: "category info obtained successfully by name filter" ##################################################################################################### -- name: test getting the category by it's name +- name: Test getting the category by its name ntnx_categories_info: - name: "{{category_name}}" + name: "{{category_name}}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -66,7 +68,7 @@ fail_msg: "Unable to get particular category with it's name" success_msg: "category info obtained successfully by it's name" ##################################################################################################### -- name: delete the category +- name: Delete the category ntnx_categories: state: "absent" name: "{{category_name}}" @@ -74,7 +76,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true diff --git a/tests/integration/targets/ntnx_categories_info/tasks/main.yml b/tests/integration/targets/ntnx_categories_info/tasks/main.yml index 3364b30c6..a37bd6bd3 100644 --- a/tests/integration/targets/ntnx_categories_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_categories_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + 
ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_categories_v2/aliases b/tests/integration/targets/ntnx_categories_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_categories_v2/meta/main.yml b/tests/integration/targets/ntnx_categories_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_categories_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_categories_v2/tasks/all_operations.yml b/tests/integration/targets/ntnx_categories_v2/tasks/all_operations.yml new file mode 100644 index 000000000..74cac9c52 --- /dev/null +++ b/tests/integration/targets/ntnx_categories_v2/tasks/all_operations.yml @@ -0,0 +1,197 @@ +--- +- name: Start ntnx_categories_v2 tests + ansible.builtin.debug: + msg: Start ntnx_categories_v2 tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: "ansible-ag" + +- name: Set category key & value + ansible.builtin.set_fact: + todelete: [] + key1: "{{random_name}}{{suffix_name}}key1" + value1: "{{random_name}}{{suffix_name}}value1" + value2: "{{random_name}}{{suffix_name}}value2" +######################################################################## +- name: Create category key & value with check_mode + ntnx_categories_v2: + key: "{{key1}}" + value: "{{value1}}" + description: "ansible test" + register: result + ignore_errors: true + check_mode: true + +- name: Check mode status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.response.key == "{{key1}}" + - result.response.value == "{{value1}}" + - result.response.description == "ansible test" + fail_msg: "Check mode failed" + success_msg: 
"Check mode passed" +######################################################################## +- name: Create category key & value + ntnx_categories_v2: + key: "{{key1}}" + value: "{{value1}}" + description: "ansible test" + register: output + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - output.response is defined + - output.response.key == "{{key1}}" + - output.response.value == "{{value1}}" + - output.response.type == "USER" + - output.response.ext_id is defined + - output.changed == True + - output.failed == False + - output.response.description == "ansible test" + fail_msg: "Unable to create category key & value" + success_msg: "Category key & value created successfully" + +- name: Add category to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ output["response"]["ext_id"] ] }}' +######################################################################## +- name: Update category value and description + ntnx_categories_v2: + ext_id: "{{output.response.ext_id}}" + key: "{{key1}}" + value: "{{value2}}" + description: "ansible test New value" + register: result + ignore_errors: true + +- name: Update status + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.type == "USER" + - result.response.key == "{{key1}}" + - result.response.value == "{{value2}}" + - result.response.description == "ansible test New value" + fail_msg: "Unable to update category key & value" + success_msg: "Category description & value updated successfully" +######################################################################## +- name: Check idempotency + ntnx_categories_v2: + ext_id: "{{output.response.ext_id}}" + key: "{{key1}}" + value: "{{value2}}" + description: "ansible test New value" + register: result + ignore_errors: true + +- name: Idempotency status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.skipped == True + - 
result.msg == "Nothing to change." + fail_msg: "Idempotency failed" + success_msg: "Idempotency passed" +######################################################################## +- name: Update category value and description with check_mode + ntnx_categories_v2: + ext_id: "{{output.response.ext_id}}" + key: "{{key1}}" + value: "new value" + description: "ansible test New value" + register: result + ignore_errors: true + check_mode: true + +- name: Check mode status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.response.key == "{{key1}}" + - result.response.value == "new value" + - result.response.description == "ansible test New value" + fail_msg: "Check mode failed" + success_msg: "Check mode passed" +######################################################################## +- name: List all categories + ntnx_categories_info_v2: + register: result + ignore_errors: true + +- name: Status of all categories + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: "Unable to list categories " + success_msg: "categories listed successfully" + +- name: Set category name + ansible.builtin.set_fact: + category_name: "{{result.response.0.key}}" + +######################################################################## +- name: List all categories with keyname & expand associations + ntnx_categories_info_v2: + expand: associations + filter: "key eq '{{category_name}}'" + register: result + ignore_errors: true + +- name: Status of all categories + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + - result.response.0.associations is defined + fail_msg: "Unable to List all categories with keyname & expand associations " + success_msg: "categories with keyname & expand listed successfully" 
+######################################################################## +- name: Fetch category using ext id & expand detailedAssociations + ntnx_categories_info_v2: + ext_id: "{{result.response.0.ext_id}}" + expand: detailedAssociations + register: result + ignore_errors: true + +- name: Status of category + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.key == "{{category_name}}" + fail_msg: "Unable to fetch category using ext id & expand detailedAssociations" + success_msg: "category fetched using ext id & expand detailedAssociations successfully" +######################################################################## +- name: Delete Created categories + ntnx_categories_v2: + ext_id: "{{item}}" + state: absent + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == True + - result.msg == "All items completed" + fail_msg: "Unable to delete category key & value" + success_msg: "Category key & value deleted successfully" diff --git a/tests/integration/targets/ntnx_categories_v2/tasks/main.yml b/tests/integration/targets/ntnx_categories_v2/tasks/main.yml new file mode 100644 index 000000000..722b1337b --- /dev/null +++ b/tests/integration/targets/ntnx_categories_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_operations.yml + ansible.builtin.import_tasks: "all_operations.yml" diff --git a/tests/integration/targets/ntnx_clusters_and_hosts_v2/aliases b/tests/integration/targets/ntnx_clusters_and_hosts_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_clusters_and_hosts_v2/meta/main.yml 
b/tests/integration/targets/ntnx_clusters_and_hosts_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_and_hosts_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/cluster_crud.yml b/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/cluster_crud.yml new file mode 100644 index 000000000..ae6953dbd --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/cluster_crud.yml @@ -0,0 +1,1166 @@ +--- +# Test scenarios sequence wise: +# This playbook will test below cases: +# 1. Generate spec for cluster create using check mode +# 2. Run cluster create prechecks +# 3. Create cluster with minimum spec and register PE PC for verifying cluster creation +# 4. Create cluster with auth keys, configs, network details, etc. and register PE PC for verifying cluster creation +# 5. Generate spec for cluster update using check mode +# 6. Update config public auth keys +# 7. Update ntp servers and dns servers and smtp server +# 8. Verify idempotency of module +# 9. Test info module for fetching cluster details +# 10. Test info module for fetching host details +# 11. 
Test cluster destroy + +- name: Start clusters crud tests + ansible.builtin.debug: + msg: start clusters crud tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set prefix name for clusters + ansible.builtin.set_fact: + prefix_name: ansible_test + +- name: Set command split for resetting cluster username and password + ansible.builtin.set_fact: + pe_ssh_cmd: sshpass -p '{{ clusters[0].pe_password }}' ssh -o StrictHostKeyChecking=no {{ clusters[0].pe_username }}@{{ clusters[0].nodes[0].cvm_ip }} + reset_username_password: /home/nutanix/prism/cli/ncli user reset-password user-name={{ username }} password={{ password }} + cluster_status: /usr/local/nutanix/cluster/bin/cluster status + +- name: Set command for resetting cluster username and password + ansible.builtin.set_fact: + reset_command: '{{ pe_ssh_cmd }} "{{ reset_username_password }}"' + cluster_status_command: '{{ pe_ssh_cmd }} "{{ cluster_status }}"' + +############################################## Create cluster test 1 ############################################## + +- name: Create spec for cluster create with dummy values using check mode + ntnx_clusters_v2: + name: cluster1 + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: 10.0.0.1 + host_ip: + ipv4: + value: 10.0.0.2 + config: + cluster_function: + - AOS + authorized_public_key_list: + - name: key1 + key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6 + redundancy_factor: 1 + cluster_arch: X86_64 + fault_tolerance_state: + domain_awareness_level: DISK + desired_cluster_fault_tolerance: CFT_2N_OR_2D + operation_mode: NORMAL + encryption_in_transit_status: ENABLED + network: + external_address: + ipv4: + value: 10.0.0.3 + external_data_service_ip: + ipv4: + value: 10.0.0.4 + nfs_subnet_whitelist: + - vlan.123 + ntp_server_ip_list: + - ipv4: + value: 10.0.0.5 + - ipv6: + value: 2001:db8::1 + - fqdn: + 
value: ntp.example.com + name_server_ip_list: + - ipv4: + value: 10.0.0.6 + - ipv6: + value: 2001:db8::2 + - fqdn: + value: dns.example.com + smtp_server: + email_address: testemail@test.com + server: + ip_address: + ipv4: + value: 10.0.0.7 + port: 25 + username: test + password: test1 + type: PLAIN + masquerading_ip: + ipv4: + value: 10.0.0.8 + management_server: + ip: + ipv4: + value: 10.0.0.9 + is_registered: true + is_drs_enabled: true + fqdn: test.example.com + key_management_server_type: PRISM_CENTRAL + backplane: + vlan_tag: 123 + subnet: + value: 10.0.0.0 + prefix_length: 24 + netmask: + value: 255.255.255.225 + http_proxy_list: + - ip_address: + ipv4: + value: 10.0.0.1 + port: 8080 + username: username_test + password: test1 + name: name_test + proxy_types: + - HTTP + - HTTPS + http_proxy_white_list: + - target_type: IPV4_NETWORK_MASK + target: test + container_name: test-container + dryrun: true + register: result + check_mode: true + ignore_errors: true + +- name: Verify generated spec + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response.name == "cluster1" + - result.response.nodes.node_list[0].controller_vm_ip.ipv4.value == "10.0.0.1" + - result.response.nodes.node_list[0].host_ip.ipv4.value == "10.0.0.2" + - result.response.config.cluster_function[0] == "AOS" + - result.response.config.authorized_public_key_list[0].name == "key1" + - result.response.config.authorized_public_key_list[0].key == "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6" + - result.response.config.redundancy_factor == 1 + - result.response.config.cluster_arch == "X86_64" + - result.response.config.fault_tolerance_state.domain_awareness_level == "DISK" + - result.response.config.operation_mode == "NORMAL" + - result.response.config.encryption_in_transit_status == "ENABLED" + - result.response.network.external_address.ipv4.value == "10.0.0.3" + - result.response.network.external_data_service_ip.ipv4.value == "10.0.0.4" + - 
result.response.network.nfs_subnet_whitelist[0] == "vlan.123" + - result.response.network.ntp_server_ip_list[0].ipv4.value == "10.0.0.5" + - result.response.network.ntp_server_ip_list[1].ipv6.value == "2001:db8::1" + - result.response.network.ntp_server_ip_list[2].fqdn.value == "ntp.example.com" + - result.response.network.name_server_ip_list[0].ipv4.value == "10.0.0.6" + - result.response.network.name_server_ip_list[1].ipv6.value == "2001:db8::2" + - result.response.network.name_server_ip_list[2].fqdn.value == "dns.example.com" + - result.response.network.smtp_server.email_address == "testemail@test.com" + - result.response.network.smtp_server.server.ip_address.ipv4.value == "10.0.0.7" + - result.response.network.smtp_server.server.port == 25 + - result.response.network.smtp_server.server.username == "test" + - result.response.network.smtp_server.type == "PLAIN" + - result.response.network.masquerading_ip.ipv4.value == "10.0.0.8" + - result.response.network.management_server.ip.ipv4.value == "10.0.0.9" + - result.response.network.management_server.is_registered == true + - result.response.network.management_server.is_drs_enabled == true + - result.response.network.fqdn == "test.example.com" + - result.response.network.key_management_server_type == "PRISM_CENTRAL" + - result.response.network.backplane.vlan_tag == 123 + - result.response.network.backplane.subnet.value == "10.0.0.0" + - result.response.network.backplane.subnet.prefix_length == 24 + - result.response.network.backplane.netmask.value == "255.255.255.225" + - result.response.network.http_proxy_list[0].ip_address.ipv4.value == "10.0.0.1" + - result.response.network.http_proxy_list[0].port == 8080 + - result.response.network.http_proxy_list[0].username == "username_test" + - result.response.network.http_proxy_list[0].name == "name_test" + - result.response.network.http_proxy_list[0].proxy_types[0] == "HTTP" + - result.response.network.http_proxy_list[0].proxy_types[1] == "HTTPS" + - 
result.response.network.http_proxy_white_list[0].target_type == "IPV4_NETWORK_MASK" + - result.response.network.http_proxy_white_list[0].target == "test" + - result.response.container_name == "test-container" + fail_msg: Cluster create spec is not generated as expected + success_msg: Cluster create spec is generated as expected + +- name: Generate spec for discovering unconfigured nodes using check mode + ntnx_discover_unconfigured_nodes_v2: + is_manual_discovery: true + address_type: "IPV4" + ip_filter_list: + - ipv4: + value: 192.168.1.30 + prefix_length: 24 + uuid_filter_list: + - "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + interface_filter_list: + - 10061de6-4a87-6b06-185b-ac1f6b6f97e2 + timeout: 50 + register: result + ignore_errors: true + check_mode: true + +- name: Generate spec for discovering unconfigured nodes using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.is_manual_discovery == true + - result.response.address_type == "IPV4" + - result.response.ip_filter_list[0].ipv4.value == "192.168.1.30" + - result.response.ip_filter_list[0].ipv4.prefix_length == 24 + - result.response.uuid_filter_list[0] == "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + - result.response.interface_filter_list[0] == "10061de6-4a87-6b06-185b-ac1f6b6f97e2" + - result.response.timeout == 50 + fail_msg: Generate spec for discovering unconfigured nodes using check mode failed + success_msg: Generate spec for discovering unconfigured nodes using check mode passed + +- name: List all clusters to get prism central external ID + ntnx_clusters_info_v2: + filter: "config/clusterFunction/any(t:t eq Clustermgmt.Config.ClusterFunctionRef'PRISM_CENTRAL')" + register: result + ignore_errors: true + +- name: Get prism central external ID + ansible.builtin.set_fact: + prism_central_external_id: "{{ result.response[0].ext_id }}" + +- name: Discover unconfigured node + ntnx_discover_unconfigured_nodes_v2: + address_type: "IPV4" 
+ ip_filter_list: + - ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + register: result + ignore_errors: true + +- name: Discover unconfigured node status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.cluster_ext_id is defined + - result.task_ext_id is defined + - result.response.ext_id is defined + - result.response.response.node_list[0].cvm_ip.ipv4.value == "{{ clusters[0].nodes[0].cvm_ip }}" + fail_msg: Discover unconfigured node failed + success_msg: Discover unconfigured node passed + +- name: Run cluster create prechecks + ntnx_clusters_v2: + name: "{{ clusters[0].name }}" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + config: + cluster_function: "{{ clusters[0].config.cluster_functions }}" + authorized_public_key_list: + - name: "{{ clusters[0].config.auth_public_keys[0].name }}" + key: "{{ clusters[0].config.auth_public_keys[0].key }}" + redundancy_factor: "{{ clusters[0].config.redundancy_factor_cluster_crud }}" + cluster_arch: "{{ clusters[0].config.cluster_arch }}" + fault_tolerance_state: + domain_awareness_level: "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_crud }}" + network: + external_address: + ipv4: + value: "{{ clusters[0].network.virtual_ip }}" + external_data_service_ip: + ipv4: + value: "{{ clusters[0].network.iscsi_ip }}" + ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[0] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[1] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[2] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[3] }}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[0] }}" + - ipv4: + value: "{{ clusters[0].network.dns_servers[1] }}" + smtp_server: + email_address: "{{ clusters[0].network.smtp_server.email_address }}" + server: + ip_address: + ipv4: + value: "{{ clusters[0].network.smtp_server.ip }}" + 
port: "{{ clusters[0].network.smtp_server.port }}" + username: "{{ clusters[0].network.smtp_server.username }}" + password: "{{ clusters[0].network.smtp_server.password }}" + type: "{{ clusters[0].network.smtp_server.type }}" + dryrun: true + timeout: 1800 + register: result + ignore_errors: true + +- name: Verify cluster create prechecks run + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: Cluster create prechecks failed + success_msg: Cluster create prechecks passed + +- name: Create cluster name + ansible.builtin.set_fact: + cluster_name: "{{ prefix_name }}_{{ clusters[0].name }}" + +- name: Check if cluster is unconfigured or not + ansible.builtin.command: "{{ cluster_status_command }}" + register: result + ignore_errors: true + changed_when: result.rc != 0 + +- name: Assert that cluster is unconfigured + ansible.builtin.assert: + that: + - result.rc == 1 + - result.stderr.find('Cluster is currently unconfigured') != -1 + fail_msg: Cannot create cluster, cluster is already created + success_msg: Cluster is unconfigured + +- name: Create cluster with minimum spec + ntnx_clusters_v2: + name: "{{cluster_name}}" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + config: + cluster_function: "{{ clusters[0].config.cluster_functions }}" + redundancy_factor: "{{ clusters[0].config.redundancy_factor_cluster_crud }}" + cluster_arch: "{{ clusters[0].config.cluster_arch }}" + fault_tolerance_state: + domain_awareness_level: "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_crud }}" + timeout: 1800 + register: result + ignore_errors: true + +- name: Verify cluster create task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: Cluster 
create failed + success_msg: Cluster create passed + +- name: Reset username and password + ansible.builtin.command: "{{ reset_command }}" + register: result + ignore_errors: true + changed_when: result.rc != 0 + +- name: Generate spec for aos remote cluster PE PC registration using check mode + ntnx_pc_registration_v2: + ext_id: d2f9994f-44fb-4d4c-ad3c-92055316444f + remote_cluster: + aos_remote_cluster: + remote_cluster: + address: + ipv4: + value: 10.87.153.200 + credentials: + authentication: + username: admin + password: password + check_mode: true + register: result + ignore_errors: true + +- name: Generate spec for aos remote cluster PE PC registration using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "d2f9994f-44fb-4d4c-ad3c-92055316444f" + - result.response.remote_cluster.remote_cluster.address.ipv4.value == "10.87.153.200" + - result.response.remote_cluster.remote_cluster.credentials.authentication.username == "admin" + fail_msg: "Generate spec for aos remote cluster PE PC registration using check mode failed" + success_msg: "Generate spec for aos remote cluster PE PC registration using check mode passed" + +- name: Generate spec for PC and domain manager(PC) remote cluster registration using check mode + ntnx_pc_registration_v2: + ext_id: d2f9994f-44fb-4d4c-ad3c-92055316444f + remote_cluster: + domain_manager_remote_cluster: + remote_cluster: + address: + ipv4: + value: 10.87.142.101 + credentials: + authentication: + username: admin + password: password + cloud_type: ONPREM_CLOUD + check_mode: true + register: result + ignore_errors: true + +- name: Generate spec for PC and domain manager(PC) remote cluster registration using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "d2f9994f-44fb-4d4c-ad3c-92055316444f" + - result.response.remote_cluster.remote_cluster.address.ipv4.value == "10.87.142.101" + - 
result.response.remote_cluster.remote_cluster.credentials.authentication.username == "admin" + - result.response.remote_cluster.cloud_type == "ONPREM_CLOUD" + fail_msg: "Generate spec for PC and domain manager(PC) remote cluster registration using check mode failed" + success_msg: "Generate spec for PC and domain manager(PC) remote cluster registration using check mode passed" + +- name: Generate spec for PC registration using cluster reference + ntnx_pc_registration_v2: + ext_id: 3fa85f64-5717-4562-b3fc-2c963f66afa6 + remote_cluster: + cluster_reference: + ext_id: d2f9994f-44fb-4d4c-ad3c-92055316444f + check_mode: true + register: result + ignore_errors: true + +- name: Generate spec for PC registration using cluster reference status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "3fa85f64-5717-4562-b3fc-2c963f66afa6" + - result.response.remote_cluster.ext_id == "d2f9994f-44fb-4d4c-ad3c-92055316444f" + fail_msg: "Generate spec for PC registration using cluster reference failed" + success_msg: "Generate spec for PC registration using cluster reference passed" + +- name: Run PE PC registration + ntnx_pc_registration_v2: + ext_id: "{{ prism_central_external_id }}" + remote_cluster: + aos_remote_cluster: + remote_cluster: + address: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + credentials: + authentication: + username: "{{ username }}" + password: "{{ password }}" + register: result + ignore_errors: true + +- name: Verify PE PC registration + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.ext_id == prism_central_external_id + - result.response.status == 'SUCCEEDED' + - result.task_ext_id is defined + fail_msg: "PE PC registration failed" + success_msg: "PE PC registration passed" + +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Fetch cluster using name + ntnx_clusters_info_v2: + filter: name eq '{{ cluster_name }}' + register: result + 
ignore_errors: true + +- name: Verify listing + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length > 0 + fail_msg: Failed verifying PE PC registration + success_msg: PE PC registration passed successfully + +- name: Set cluster external ID + ansible.builtin.set_fact: + cluster_ext_id: "{{ result.response[0].ext_id }}" + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify created cluster's details using cluster info + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == cluster_ext_id + - result.response.name == "{{ cluster_name }}" + - result.response.nodes.node_list[0].controller_vm_ip.ipv4.value == "{{ clusters[0].nodes[0].cvm_ip }}" + - '"{{ clusters[0].config.cluster_functions[0] }}" in "{{result.response.config.cluster_function}}"' + - result.response.config.redundancy_factor == clusters[0].config.redundancy_factor_cluster_crud + - result.response.config.cluster_arch == "{{ clusters[0].config.cluster_arch }}" + - > + result.response.config.fault_tolerance_state.domain_awareness_level == + "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_crud }}" + fail_msg: Failed verifying cluster creation + success_msg: Cluster creation passed successfully + +- name: Destroy the cluster for cleanup + ntnx_clusters_v2: + state: absent + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify cluster deletion + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.status == "SUCCEEDED" + - result.ext_id == cluster_ext_id + fail_msg: Failed verifying cluster deletion + success_msg: Cluster deletion passed successfully + +############################################# Create cluster test 2 ############################################## + +- name: Sleep for 2 minute for pending tasks in 
node to finish + ansible.builtin.pause: + seconds: 120 + +- name: Check if cluster is unconfigured or not + ansible.builtin.command: "{{ cluster_status_command }}" + register: result + ignore_errors: true + changed_when: result.rc != 0 + +- name: Assert that cluster is unconfigured + ansible.builtin.assert: + that: + - result.rc == 1 + - result.stderr.find('Cluster is currently unconfigured') != -1 + fail_msg: Cannot create cluster, cluster is already created + success_msg: Cluster is unconfigured + +- name: Create cluster with all configuration + ntnx_clusters_v2: + name: "{{ cluster_name }}" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + config: + cluster_function: "{{ clusters[0].config.cluster_functions }}" + authorized_public_key_list: + - name: "{{ clusters[0].config.auth_public_keys[0].name }}" + key: "{{ clusters[0].config.auth_public_keys[0].key }}" + redundancy_factor: "{{ clusters[0].config.redundancy_factor_cluster_crud }}" + cluster_arch: "{{ clusters[0].config.cluster_arch }}" + fault_tolerance_state: + domain_awareness_level: "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_crud }}" + network: + external_address: + ipv4: + value: "{{ clusters[0].network.virtual_ip }}" + ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[0] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[1] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[2] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[3] }}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[0] }}" + - ipv4: + value: "{{ clusters[0].network.dns_servers[1] }}" + timeout: 1800 + register: result + ignore_errors: true + +- name: Verify cluster create task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: Cluster create 
failed + success_msg: Cluster create passed + +- name: Reset username and password + ansible.builtin.command: "{{ reset_command }}" + register: result + ignore_errors: true + changed_when: result.rc != 0 + +- name: Run PE PC registration + ntnx_pc_registration_v2: + ext_id: "{{ prism_central_external_id }}" + remote_cluster: + aos_remote_cluster: + remote_cluster: + address: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + credentials: + authentication: + username: "{{ username }}" + password: "{{ password }}" + register: result + ignore_errors: true + +- name: Run PE PC registration status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.ext_id == prism_central_external_id + - result.response.status == 'SUCCEEDED' + - result.task_ext_id is defined + fail_msg: "Run PE PC registration failed" + success_msg: "Run PE PC registration passed" + +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Fetch cluster using name + ntnx_clusters_info_v2: + filter: name eq '{{ cluster_name }}' + register: result + ignore_errors: true + +- name: Verify listing + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length > 0 + fail_msg: Failed verifying PE PC registration + success_msg: PE PC registration passed successfully + +- name: Set cluster external ID + ansible.builtin.set_fact: + cluster_ext_id: "{{ result.response[0].ext_id }}" + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Create list of ntp servers and dns servers fetched from cluster + ansible.builtin.set_fact: + ntp_servers: "{{ result.response.network.ntp_server_ip_list | map(attribute='fqdn.value') | list }}" + dns_servers: "{{ result.response.network.name_server_ip_list | map(attribute='ipv4.value') | list }}" + +- name: Keep only unique ntp servers + ansible.builtin.set_fact: + 
unique_ntp_servers: "{{ ntp_servers | unique }}" + +- name: Keep only unique dns servers + ansible.builtin.set_fact: + unique_dns_servers: "{{ dns_servers | unique }}" + +- name: Verify created cluster's details using cluster info + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == cluster_ext_id + - result.response.name == "{{ cluster_name }}" + - result.response.nodes.node_list[0].controller_vm_ip.ipv4.value == "{{ clusters[0].nodes[0].cvm_ip }}" + - '"{{ clusters[0].config.cluster_functions[0] }}" in "{{result.response.config.cluster_function}}"' + - result.response.config.redundancy_factor == clusters[0].config.redundancy_factor_cluster_crud + - result.response.config.cluster_arch == "{{ clusters[0].config.cluster_arch }}" + - > + result.response.config.fault_tolerance_state.domain_awareness_level == + "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_crud }}" + - result.response.network.external_address.ipv4.value == "{{ clusters[0].network.virtual_ip }}" + - unique_ntp_servers | length == 4 + - unique_dns_servers | length == 2 + - unique_ntp_servers[0] in "{{ clusters[0].network.ntp_servers }}" + - unique_ntp_servers[1] in "{{ clusters[0].network.ntp_servers }}" + - unique_ntp_servers[2] in "{{ clusters[0].network.ntp_servers }}" + - unique_ntp_servers[3] in "{{ clusters[0].network.ntp_servers }}" + - unique_dns_servers[0] in "{{ clusters[0].network.dns_servers }}" + - unique_dns_servers[1] in "{{ clusters[0].network.dns_servers }}" + fail_msg: Failed verifying cluster creation + success_msg: Cluster creation passed successfully + +############################################## Update cluster test ############################################## + +- name: Create update spec for cluster using dummy values + check_mode: true + ntnx_clusters_v2: + name: "{{ cluster_name }}_updated" + ext_id: "{{ cluster_ext_id }}" + network: + ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[1] 
}}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[1] }}" + smtp_server: + email_address: test@test.com + server: + ip_address: + ipv4: + value: 10.0.0.1 + port: "25" + username: test + password: test1 + type: STARTTLS + external_address: + ipv4: + value: 10.0.0.2 + external_data_service_ip: + ipv4: + value: 10.0.0.3 + config: + cluster_function: + - AOS + - ONE_NODE + authorized_public_key_list: + - name: key1 + key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6 + redundancy_factor: 2 + cluster_arch: X86_64 + fault_tolerance_state: + domain_awareness_level: RACK + register: result + ignore_errors: true + +- name: Verify generated update spec + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.ext_id == cluster_ext_id + - result.response.name == "{{ cluster_name }}_updated" + - result.response.network.ntp_server_ip_list | length == 1 + - result.response.network.ntp_server_ip_list[0].fqdn.value == "{{ clusters[0].network.ntp_servers[1] }}" + - result.response.network.name_server_ip_list[0].ipv4.value == "{{ clusters[0].network.dns_servers[1] }}" + - result.response.network.smtp_server.email_address == "test@test.com" + - result.response.network.smtp_server.server.ip_address.ipv4.value == "10.0.0.1" + - result.response.network.smtp_server.server.port == 25 + - result.response.network.smtp_server.server.username == "test" + - result.response.network.smtp_server.type == "STARTTLS" + - result.response.network.external_address.ipv4.value == "10.0.0.2" + - result.response.network.external_data_service_ip.ipv4.value == "10.0.0.3" + - result.response.config.cluster_function[0] == "AOS" + - result.response.config.cluster_function[1] == "ONE_NODE" + - result.response.config.authorized_public_key_list | length == 1 + - result.response.config.authorized_public_key_list[0].name == "key1" + - result.response.config.authorized_public_key_list[0].key == "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ6" + - 
result.response.config.redundancy_factor == 2 + fail_msg: Cluster update spec is not generated as expected + success_msg: Cluster update spec is generated as expected + +############################################## Update basic cluster config and auth public key ############################################## + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: cluster_info + ignore_errors: true + +- name: Verify fetched cluster info + ansible.builtin.assert: + that: + - cluster_info.response is defined + - cluster_info.ext_id == cluster_ext_id + fail_msg: Failed fetching cluster info + success_msg: Fetched cluster info successfully + +- name: Get auth public keys + ansible.builtin.set_fact: + auth_public_keys: "{{ cluster_info.response.config.authorized_public_key_list }}" + +- name: Set new auth public key + ansible.builtin.set_fact: + new_auth_public_key: + key: "{{ clusters[0].config.auth_public_keys[0].key }}" + name: "{{ clusters[0].config.auth_public_keys[0].name }}" + +- name: Add 1 auth public key to list + ansible.builtin.set_fact: + updated_auth_public_keys: "{{ auth_public_keys + [new_auth_public_key] }}" + +- name: Update cluster basic configuration, add auth public keys + ntnx_clusters_v2: + name: "{{ cluster_name }}_updated" + ext_id: "{{ cluster_ext_id }}" + config: + authorized_public_key_list: "{{ updated_auth_public_keys }}" + timeout: 1200 + register: result + ignore_errors: true + +- name: Verify cluster update task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + fail_msg: Cluster update failed + success_msg: Cluster update passed + +# there is latency in api data update +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: cluster_info + ignore_errors: true + +- 
name: Verify fetched cluster info + ansible.builtin.assert: + that: + - cluster_info.response is defined + - cluster_info.ext_id == cluster_ext_id + fail_msg: Failed fetching cluster info + success_msg: Fetched cluster info successfully + +- name: Verify cluster update results from cluster info + ansible.builtin.assert: + that: + - cluster_info.response is defined + - cluster_info.changed == false + - cluster_info.response.name == "{{ cluster_name }}_updated" + - cluster_info.response.config.authorized_public_key_list | length > auth_public_keys | length + - cluster_info.response.config.authorized_public_key_list == updated_auth_public_keys + - cluster_info.response.network.ntp_server_ip_list | length == 4 + - cluster_info.response.network.name_server_ip_list | length == 2 + fail_msg: Cluster update failed + success_msg: Cluster update passed + +############################################# Update cluster ntp servers, dns servers and smtp servers ############################################## + +- name: Update ntp servers, dns servers and smtp servers in cluster + ntnx_clusters_v2: + ext_id: "{{ cluster_ext_id }}" + network: + external_data_service_ip: + ipv4: + value: "{{ clusters[0].network.iscsi_ip1 }}" + ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[0] }}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[0] }}" + smtp_server: + email_address: "{{ clusters[0].network.smtp_server.email_address }}" + server: + ip_address: + ipv4: + value: "{{ clusters[0].network.smtp_server.ip }}" + port: "{{ clusters[0].network.smtp_server.port }}" + username: "{{ clusters[0].network.smtp_server.username }}" + password: "{{ clusters[0].network.smtp_server.password }}" + type: STARTTLS + timeout: 1200 + register: result + ignore_errors: true + +- name: Verify cluster update task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + fail_msg: 
Cluster update failed + success_msg: Cluster update passed + +# there is latency in api data update +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify fetched cluster info + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == cluster_ext_id + - result.response.name == "{{ cluster_name }}_updated" + - result.response.network.external_data_service_ip.ipv4.value == "{{ clusters[0].network.iscsi_ip1 }}" + - result.response.network.ntp_server_ip_list | length == 1 + - result.response.network.ntp_server_ip_list[0].fqdn.value == "{{ clusters[0].network.ntp_servers[0] }}" + - result.response.network.name_server_ip_list | length == 1 + - result.response.network.name_server_ip_list[0].ipv4.value == "{{ clusters[0].network.dns_servers[0] }}" + - result.response.network.smtp_server.email_address == "{{ clusters[0].network.smtp_server.email_address }}" + - result.response.network.smtp_server.server.ip_address.ipv4.value == "{{ clusters[0].network.smtp_server.ip }}" + - result.response.network.smtp_server.server.port == clusters[0].network.smtp_server.port + - result.response.network.smtp_server.server.username == "{{ clusters[0].network.smtp_server.username }}" + - result.response.network.smtp_server.type == "STARTTLS" + - result.response.config.authorized_public_key_list | length == updated_auth_public_keys | length + fail_msg: Cluster updated data verification failed + success_msg: Fetched cluster info and verified updated attribute successfully + +############################################## Check idempotency ############################################## +- name: Verify idempotency + ntnx_clusters_v2: + name: "{{ cluster_name }}_updated" + ext_id: "{{ cluster_ext_id }}" + network: + external_data_service_ip: + ipv4: + value: "{{ clusters[0].network.iscsi_ip1 }}" 
+ ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[0] }}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[0] }}" + smtp_server: + email_address: "{{ clusters[0].network.smtp_server.email_address }}" + server: + ip_address: + ipv4: + value: "{{ clusters[0].network.smtp_server.ip }}" + port: "{{ clusters[0].network.smtp_server.port }}" + username: "{{ clusters[0].network.smtp_server.username }}" + type: STARTTLS + + timeout: 600 + register: result + ignore_errors: true + +- name: Verify cluster update task skip status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.ext_id == cluster_ext_id + - result.failed == false + - result.skipped == true + - result.msg == "Nothing to change." + fail_msg: Cluster update did not skip due to no changes + success_msg: Cluster update skipped as expected + +- name: Idempotency checks should get skipped if password is given + ntnx_clusters_v2: + ext_id: "{{ cluster_ext_id }}" + network: + smtp_server: + email_address: "{{ clusters[0].network.smtp_server.email_address }}" + server: + ip_address: + ipv4: + value: "{{ clusters[0].network.smtp_server.ip }}" + password: "{{ clusters[0].network.smtp_server.password }}" + port: "{{ clusters[0].network.smtp_server.port }}" + username: "{{ clusters[0].network.smtp_server.username }}" + type: STARTTLS + + timeout: 600 + register: result + ignore_errors: true + +- name: Verify cluster update task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + fail_msg: Cluster update skipped instead of running + success_msg: Cluster update ran as expected + +############################################## Hosts & cluster info module tests ############################################## + +- name: Verify listing of clusters + ntnx_clusters_info_v2: + register: result + ignore_errors: true + +- name: Verify listing of clusters + ansible.builtin.assert: + 
that: + - result.response is defined + - result.changed == false + - result.response | length >= 2 + fail_msg: Failed listing clusters + success_msg: Listed clusters successfully + +- name: Verify listing clusters using filters + ntnx_clusters_info_v2: + filter: name eq '{{ cluster_name }}_updated' + register: result + ignore_errors: true + +- name: Verify listing clusters using filters + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 1 + - result.response[0].name == "{{ cluster_name }}_updated" + - result.response[0].ext_id == cluster_ext_id + fail_msg: Failed listing clusters using filters + success_msg: Listed clusters using filters successfully + +- name: Verify listing clusters using wrong filters + ntnx_clusters_info_v2: + filter: name eq 'wrong_name' + register: result + ignore_errors: true + +- name: Verify listing clusters using wrong filters + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 0 + fail_msg: Listing cluster with invalid name returned data + success_msg: Module returned no data for invalid name + +- name: Verify listing clusters using limit + ntnx_clusters_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Verify listing clusters using limit + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 1 + fail_msg: Failed listing clusters using limit + success_msg: Listed clusters using limit successfully + +- name: List all hosts + ntnx_hosts_info_v2: + register: result + ignore_errors: true + +- name: Verify listing of hosts + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length >= 2 + fail_msg: Failed listing hosts + success_msg: Listed hosts successfully + +- name: Set host related variables + ansible.builtin.set_fact: + host_name: "{{ 
result.response[0].host_name }}" + host_ext_id: "{{ result.response[0].ext_id }}" + cluster_uuid: "{{ result.response[0].cluster.uuid }}" + +- name: Verify listing of hosts using filters + ntnx_hosts_info_v2: + filter: hostName eq '{{ host_name }}' + register: result + ignore_errors: true + +- name: Verify listing of hosts using filters + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 1 + - result.response[0].host_name == host_name + - result.response[0].ext_id == host_ext_id + fail_msg: Failed listing hosts using filters + success_msg: Listed hosts using filters successfully + +- name: Verify listing of hosts using wrong filters + ntnx_hosts_info_v2: + filter: hostName eq 'wrong_name' + register: result + ignore_errors: true + +- name: Verify listing of hosts using wrong filters + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 0 + fail_msg: Listing host with invalid name returned data + success_msg: Module returned no data for invalid name + +- name: Verify listing of hosts using limit + ntnx_hosts_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Verify listing of hosts using limit + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 1 + fail_msg: Failed listing hosts using limit + success_msg: Listed hosts using limit successfully + +- name: Verify fetching host using external ID + ntnx_hosts_info_v2: + ext_id: "{{ host_ext_id }}" + cluster_ext_id: "{{ cluster_uuid }}" + register: result + ignore_errors: true + +- name: Verify fetching host using external ID + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.ext_id == host_ext_id + - result.response.host_name == host_name + - result.response.ext_id == host_ext_id + fail_msg: Failed fetching host using external ID + 
success_msg: Fetched host using external ID successfully + +- name: Fetch hosts of particular cluster + ntnx_hosts_info_v2: + cluster_ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify fetching hosts of particular cluster + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response | length == 1 + - result.response[0].cluster.uuid == cluster_ext_id + fail_msg: Failed fetching hosts of particular cluster + success_msg: Fetched hosts of particular cluster successfully + +############################################## Delete cluster test ############################################## + +- name: Destroy the cluster for cleanup + ntnx_clusters_v2: + state: absent + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify cluster deletion + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.status == "SUCCEEDED" + - result.ext_id == cluster_ext_id + fail_msg: Failed verifying cluster deletion + success_msg: Cluster deletion passed successfully diff --git a/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/main.yml b/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/main.yml new file mode 100644 index 000000000..51f0bcab7 --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_and_hosts_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import clusters crud tests and hosts info tests + ansible.builtin.import_tasks: cluster_crud.yml diff --git a/tests/integration/targets/ntnx_clusters_info/tasks/get_clusters_info.yml b/tests/integration/targets/ntnx_clusters_info/tasks/get_clusters_info.yml index 4f78278c2..d6438aa13 100644 --- 
a/tests/integration/targets/ntnx_clusters_info/tasks/get_clusters_info.yml +++ b/tests/integration/targets/ntnx_clusters_info/tasks/get_clusters_info.yml @@ -1,14 +1,15 @@ --- -- debug: +- name: Start testing ntnx_clusters_info + ansible.builtin.debug: msg: Start testing ntnx_clusters_info -- name: test getting all clusters +- name: Test getting all clusters ntnx_clusters_info: register: clusters - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - clusters.response is defined - clusters.changed == false @@ -17,15 +18,14 @@ fail_msg: "Unable to list all cluster" success_msg: "clusters listed successfully" - -- name: test getting particular cluster using uuid +- name: Test getting particular cluster using uuid ntnx_clusters_info: - cluster_uuid: '{{ clusters.response.entities[0].metadata.uuid }}' + cluster_uuid: "{{ clusters.response.entities[0].metadata.uuid }}" register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -34,8 +34,6 @@ - result.response.status.state == "COMPLETE" fail_msg: "Unable to get particular cluster" success_msg: "custer info obtained successfully" - - # commenting due to technical issue # - name: List clusters using length, offset, sort order and priority sort attribute # ntnx_clusters_info: @@ -44,10 +42,10 @@ # sort_order: "ASCENDING" # sort_attribute: "name" # register: result -# ignore_errors: True +# ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == false diff --git a/tests/integration/targets/ntnx_clusters_info/tasks/main.yml b/tests/integration/targets/ntnx_clusters_info/tasks/main.yml index 9cf0aa472..94ef5e386 100644 --- a/tests/integration/targets/ntnx_clusters_info/tasks/main.yml 
+++ b/tests/integration/targets/ntnx_clusters_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "get_clusters_info.yml" + - name: Import get_clusters_info.yml + ansible.builtin.import_tasks: "get_clusters_info.yml" diff --git a/tests/integration/targets/ntnx_clusters_nodes_v2/aliases b/tests/integration/targets/ntnx_clusters_nodes_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_clusters_nodes_v2/meta/main.yml b/tests/integration/targets/ntnx_clusters_nodes_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_nodes_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/add_remove_nodes.yml b/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/add_remove_nodes.yml new file mode 100644 index 000000000..f2a4b8f4b --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/add_remove_nodes.yml @@ -0,0 +1,647 @@ +--- +- name: Start ntnx_clusters_nodes_v2 tests + ansible.builtin.debug: + msg: Start ntnx_clusters_nodes_v2 tests + +- name: Set cluster variables + ansible.builtin.set_fact: + node_indices: [0, 1, 2, 3] + prefix_name: ansible_cluster_test + pe_ssh_cmd: sshpass -p '{{ clusters[0].pe_password }}' ssh -o StrictHostKeyChecking=no {{ clusters[0].pe_username }}@{{ clusters[0].nodes[0].cvm_ip }} + reset_username_password: /home/nutanix/prism/cli/ncli user reset-password user-name={{ username }} password={{ password }} + +- name: Set cluster name and reset 
command + ansible.builtin.set_fact: + cluster_name: "{{ prefix_name }}_{{ clusters[0].name }}" + reset_command: '{{ pe_ssh_cmd }} "{{ reset_username_password }}"' + +- name: Generate spec for fetching network info for an unconfigured node using check mode + ntnx_nodes_network_info_v2: + cluster_ext_id: "000625a6-246f-7d5d-0000-00000001381e" + node_list: + - node_uuid: "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + block_id: "17SM6B010052" + node_position: "A" + cvm_ip: + ipv4: + value: "10.0.0.0" + prefix_length: 24 + ipv6: + value: "0001:db8:3333:4444:5555:6666:7777:8888" + prefix_length: 64 + hypervisor_ip: + ipv4: + value: "10.0.1.0" + prefix_length: 24 + ipv6: + value: "0001:db7:3333:4444:5555:6666:7777:8888" + prefix_length: 64 + ipmi_ip: + ipv4: + value: "10.1.1.1" + prefix_length: 24 + ipv6: + value: "0001:db6:3333:4444:5555:6666:7777:8888" + prefix_length: 64 + digital_certificate_map_list: + - key: "key1" + value: "value1" + - key: "key2" + value: "value2" + model: "model1" + is_compute_only: true + is_light_compute: true + hypervisor_type: "AHV" + hypervisor_version: "version1" + nos_version: "nos_version1" + current_network_interface: "interface1" + is_robo_mixed_hypervisor: true + request_type: "expand_cluster" + register: result + ignore_errors: true + check_mode: true + +- name: Generate spec for fetching network info for an unconfigured node using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.cluster_ext_id == "000625a6-246f-7d5d-0000-00000001381e" + - result.response.node_list[0].node_uuid == "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + - result.response.node_list[0].block_id == "17SM6B010052" + - result.response.node_list[0].node_position == "A" + - result.response.node_list[0].cvm_ip.ipv4.value == "10.0.0.0" + - result.response.node_list[0].cvm_ip.ipv4.prefix_length == 24 + - result.response.node_list[0].cvm_ip.ipv6.value == "0001:db8:3333:4444:5555:6666:7777:8888" + - 
result.response.node_list[0].cvm_ip.ipv6.prefix_length == 64 + - result.response.node_list[0].hypervisor_ip.ipv4.value == "10.0.1.0" + - result.response.node_list[0].hypervisor_ip.ipv4.prefix_length == 24 + - result.response.node_list[0].hypervisor_ip.ipv6.value == "0001:db7:3333:4444:5555:6666:7777:8888" + - result.response.node_list[0].hypervisor_ip.ipv6.prefix_length == 64 + - result.response.node_list[0].ipmi_ip.ipv4.value == "10.1.1.1" + - result.response.node_list[0].ipmi_ip.ipv4.prefix_length == 24 + - result.response.node_list[0].ipmi_ip.ipv6.value == "0001:db6:3333:4444:5555:6666:7777:8888" + - result.response.node_list[0].ipmi_ip.ipv6.prefix_length == 64 + - result.response.node_list[0].digital_certificate_map_list[0].key == "key1" + - result.response.node_list[0].digital_certificate_map_list[0].value == "value1" + - result.response.node_list[0].digital_certificate_map_list[1].key == "key2" + - result.response.node_list[0].digital_certificate_map_list[1].value == "value2" + - result.response.node_list[0].model == "model1" + - result.response.node_list[0].is_compute_only == true + - result.response.node_list[0].is_light_compute == true + - result.response.node_list[0].hypervisor_type == "AHV" + - result.response.node_list[0].hypervisor_version == "version1" + - result.response.node_list[0].nos_version == "nos_version1" + - result.response.node_list[0].current_network_interface == "interface1" + - result.response.node_list[0].is_robo_mixed_hypervisor == true + - result.response.request_type == "expand_cluster" + fail_msg: "Generate spec for fetching network info for an unconfigured node using check mode failed" + success_msg: "Generate spec for fetching network info for an unconfigured node using check mode passed" + +- name: Generate spec for discovering unconfigured nodes using check mode + ntnx_discover_unconfigured_nodes_v2: + address_type: "IPV4" + ip_filter_list: + - ipv4: + value: "10.0.0.1" + - ipv4: + value: "10.0.0.5" + register: result + 
ignore_errors: true + check_mode: true + +- name: Generate spec for discovering unconfigured nodes using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.ip_filter_list[0].ipv4.value == "10.0.0.1" + - result.response.ip_filter_list[1].ipv4.value == "10.0.0.5" + fail_msg: "Generate spec for discovering unconfigured nodes using check mode failed" + success_msg: "Generate spec for discovering unconfigured nodes using check mode passed" + +- name: Generate spec for expanding cluster using check mode + ntnx_clusters_nodes_v2: + node_params: + node_list: + - node_uuid: "6750557f-673b-462a-9b58-3acd29e15eaa" + block_id: block-id + node_position: node-position + hypervisor_type: AHV + is_robo_mixed_hypervisor: false + hypervisor_hostname: hypervisor-hostname + hypervisor_version: hypervisor-version + nos_version: nos-version + is_light_compute: false + ipmi_ip: + ipv4: + value: 192.168.1.10 + prefix_length: 24 + digital_certificate_map_list: + - key: certificate-key + value: certificate-value + cvm_ip: + ipv4: + value: 192.168.1.10 + prefix_length: 24 + hypervisor_ip: + ipv4: + value: 192.168.1.10 + prefix_length: 24 + model: node-model + current_network_interface: network-interface + networks: + - name: network-name + networks: network1 + uplinks: + active: + - mac: "0c:c4:7a:9a:40:e8" + name: "name" + value: "value" + config_params: + should_skip_discovery: false + should_skip_imaging: false + should_validate_rack_awareness: false + is_nos_compatible: false + is_compute_only: false + is_never_scheduleable: false + target_hypervisor: AHV + hyperv: + domain_details: + username: domain-username + password: domain-password + cluster_name: domain-cluster + failover_cluster_details: + username: failover-username + password: failover-password + cluster_name: failover-cluster + cluster_ext_id: "000625a6-246f-7d5d-0000-00000001381e" + register: result + ignore_errors: true + check_mode: true + +- name: 
Generate spec for expanding cluster using check mode status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.node_params.node_list[0].node_uuid == "6750557f-673b-462a-9b58-3acd29e15eaa" + - result.response.node_params.node_list[0].block_id == "block-id" + - result.response.node_params.node_list[0].node_position == "node-position" + - result.response.node_params.node_list[0].hypervisor_type == "AHV" + - result.response.node_params.node_list[0].is_robo_mixed_hypervisor == false + - result.response.node_params.node_list[0].hypervisor_hostname == "hypervisor-hostname" + - result.response.node_params.node_list[0].hypervisor_version == "hypervisor-version" + - result.response.node_params.node_list[0].nos_version == "nos-version" + - result.response.node_params.node_list[0].is_light_compute == false + - result.response.node_params.node_list[0].ipmi_ip.ipv4.value == "192.168.1.10" + - result.response.node_params.node_list[0].ipmi_ip.ipv4.prefix_length == 24 + - result.response.node_params.node_list[0].digital_certificate_map_list[0].key == "certificate-key" + - result.response.node_params.node_list[0].digital_certificate_map_list[0].value == "certificate-value" + - result.response.node_params.node_list[0].cvm_ip.ipv4.value == "192.168.1.10" + - result.response.node_params.node_list[0].cvm_ip.ipv4.prefix_length == 24 + - result.response.node_params.node_list[0].hypervisor_ip.ipv4.value == "192.168.1.10" + - result.response.node_params.node_list[0].hypervisor_ip.ipv4.prefix_length == 24 + - result.response.node_params.node_list[0].model == "node-model" + - result.response.node_params.node_list[0].current_network_interface == "network-interface" + - result.response.node_params.node_list[0].networks[0].name == "network-name" + - result.response.node_params.node_list[0].networks[0].networks[0] == "network1" + - result.response.node_params.node_list[0].networks[0].uplinks.active[0].mac == "0c:c4:7a:9a:40:e8" + - 
result.response.node_params.node_list[0].networks[0].uplinks.active[0].name == "name" + - result.response.node_params.node_list[0].networks[0].uplinks.active[0].value == "value" + - result.response.config_params.should_skip_discovery == false + - result.response.config_params.should_skip_imaging == false + - result.response.config_params.should_validate_rack_awareness == false + - result.response.config_params.is_nos_compatible == false + - result.response.config_params.is_compute_only == false + - result.response.config_params.is_never_scheduleable == false + - result.response.config_params.target_hypervisor == "AHV" + - result.response.config_params.hyperv.domain_details.username == "domain-username" + - result.response.config_params.hyperv.domain_details.password == "domain-password" + - result.response.config_params.hyperv.domain_details.cluster_name == "domain-cluster" + - result.response.config_params.hyperv.failover_cluster_details.username == "failover-username" + - result.response.config_params.hyperv.failover_cluster_details.password == "failover-password" + - result.response.config_params.hyperv.failover_cluster_details.cluster_name == "failover-cluster" + - result.cluster_ext_id == "000625a6-246f-7d5d-0000-00000001381e" + fail_msg: Generate spec for expanding cluster using check mode failed + success_msg: Generate spec for expanding cluster using check mode passed + +- name: Generate spec for removing node from cluster using check mode + ntnx_clusters_nodes_v2: + state: absent + cluster_ext_id: "000625a6-246f-7d5d-0000-00000001381e" + should_skip_prechecks: true + should_skip_remove: true + node_uuids: + - "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + extra_params: + should_skip_upgrade_check: true + should_skip_space_check: true + should_skip_add_check: true + register: result + ignore_errors: true + check_mode: true + +- name: Generate spec for removing node from cluster using check mode status + ansible.builtin.assert: + that: + - result.changed == false 
+ - result.failed == false + - result.cluster_ext_id == "000625a6-246f-7d5d-0000-00000001381e" + - result.response.node_uuids[0] == "00061de6-4a87-6b06-185b-ac1f6b6f97e2" + - result.response.should_skip_prechecks == true + - result.response.should_skip_remove == true + - result.response.extra_params.should_skip_upgrade_check == true + - result.response.extra_params.should_skip_space_check == true + - result.response.extra_params.should_skip_add_check == true + fail_msg: Generate spec for removing node from cluster using check mode failed + success_msg: Generate spec for removing node from cluster using check mode passed + +- name: Check if each cluster node is unconfigured + ansible.builtin.command: + 'sshpass -p "{{ clusters[0].pe_password }}" ssh -o StrictHostKeyChecking=no \ + {{ clusters[0].pe_username }}@{{ clusters[0].nodes[item].cvm_ip }} "/usr/local/nutanix/cluster/bin/cluster status"' + loop: "{{ node_indices }}" + register: results + ignore_errors: true + changed_when: results.rc != 0 + no_log: true + +# verify that all nodes are unconfigured by checking the return code and stderr in all of them +- name: Assert that all cluster nodes are unconfigured + ansible.builtin.assert: + that: + - results.results | map(attribute='rc') | select('equalto', 1) | list | length == 4 + - results.results | map(attribute='stderr') | select('search', 'Cluster is currently unconfigured') | list | length == 4 + fail_msg: Cannot create cluster, one or more nodes are already configured + success_msg: All cluster nodes are unconfigured + +- name: Discover unconfigured nodes + ntnx_discover_unconfigured_nodes_v2: + address_type: "IPV4" + ip_filter_list: + - ipv4: + value: "{{ clusters[0].nodes[item].cvm_ip }}" + loop: "{{ node_indices }}" + register: results + ignore_errors: true + +- name: Assert discover unconfigured nodes status for each node + ansible.builtin.assert: + that: + - item.changed == false + - item.failed == false + - item.response.ext_id is defined + - 
item.response.response.node_list.0.cvm_ip.ipv4.value | string in "{{ clusters[0].nodes }}" + - results | map(attribute='response.node_list.0.cvm_ip.ipv4.value') | list | length == clusters[0].nodes | length + fail_msg: "Discover unconfigured nodes failed" + success_msg: "Discover unconfigured nodes passed" + loop: "{{ results.results }}" + no_log: true + +- name: Run cluster create prechecks + ntnx_clusters_v2: + name: "{{ clusters[0].name }}" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[1].cvm_ip }}" + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[2].cvm_ip }}" + config: + cluster_function: "{{ clusters[0].config.cluster_functions }}" + authorized_public_key_list: + - name: "{{ clusters[0].config.auth_public_keys[0].name }}" + key: "{{ clusters[0].config.auth_public_keys[0].key }}" + redundancy_factor: "{{ clusters[0].config.redundancy_factor_cluster_expanding }}" + cluster_arch: "{{ clusters[0].config.cluster_arch }}" + fault_tolerance_state: + domain_awareness_level: "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_expanding }}" + network: + external_address: + ipv4: + value: "{{ clusters[0].network.virtual_ip }}" + external_data_service_ip: + ipv4: + value: "{{ clusters[0].network.iscsi_ip }}" + ntp_server_ip_list: + - fqdn: + value: "{{ clusters[0].network.ntp_servers[0] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[1] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[2] }}" + - fqdn: + value: "{{ clusters[0].network.ntp_servers[3] }}" + name_server_ip_list: + - ipv4: + value: "{{ clusters[0].network.dns_servers[0] }}" + - ipv4: + value: "{{ clusters[0].network.dns_servers[1] }}" + smtp_server: + email_address: "{{ clusters[0].network.smtp_server.email_address }}" + server: + ip_address: + ipv4: + value: "{{ clusters[0].network.smtp_server.ip }}" + port: "{{ 
clusters[0].network.smtp_server.port }}" + username: "{{ clusters[0].network.smtp_server.username }}" + password: "{{ clusters[0].network.smtp_server.password }}" + type: "{{ clusters[0].network.smtp_server.type }}" + dryrun: true + timeout: 1800 + register: result + ignore_errors: true + +- name: Verify cluster create prechecks run + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: Cluster create prechecks failed + success_msg: Cluster create prechecks passed + +- name: Create 3 nodes cluster with minimum spec + ntnx_clusters_v2: + name: "{{cluster_name}}" + nodes: + node_list: + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[1].cvm_ip }}" + - controller_vm_ip: + ipv4: + value: "{{ clusters[0].nodes[2].cvm_ip }}" + config: + cluster_function: "{{ clusters[0].config.cluster_functions }}" + redundancy_factor: "{{ clusters[0].config.redundancy_factor_cluster_expanding }}" + cluster_arch: "{{ clusters[0].config.cluster_arch }}" + fault_tolerance_state: + domain_awareness_level: "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_expanding }}" + timeout: 1800 + register: result + ignore_errors: true + +- name: Verify cluster create task status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: Cluster create failed + success_msg: Cluster create passed + +- name: Reset username and password + ansible.builtin.command: "{{ reset_command }}" + register: result + ignore_errors: true + changed_when: result.rc != 0 + +- name: List all clusters to get prism central external ID + ntnx_clusters_info_v2: + filter: "config/clusterFunction/any(t:t eq Clustermgmt.Config.ClusterFunctionRef'PRISM_CENTRAL')" + register: 
result + ignore_errors: true + +- name: Get prism central external ID + ansible.builtin.set_fact: + prism_central_external_id: "{{ result.response[0].ext_id }}" + +- name: Run PE PC registration + ntnx_pc_registration_v2: + ext_id: "{{ prism_central_external_id }}" + remote_cluster: + aos_remote_cluster: + remote_cluster: + address: + ipv4: + value: "{{ clusters[0].nodes[0].cvm_ip }}" + credentials: + authentication: + username: "{{ username }}" + password: "{{ password }}" + register: result + ignore_errors: true + +- name: Run PE PC registration status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.ext_id == prism_central_external_id + - result.response.status == 'SUCCEEDED' + - result.task_ext_id is defined + fail_msg: "Run PE PC registration failed" + success_msg: "Run PE PC registration passed" + +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Fetch cluster using name + ntnx_clusters_info_v2: + filter: name eq '{{ cluster_name }}' + register: result + ignore_errors: true + +- name: Verify listing + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length > 0 + fail_msg: Failed vefifying PE PC registration + success_msg: PE PC registration passed successfully + +- name: Set cluster external ID + ansible.builtin.set_fact: + cluster_ext_id: "{{ result.response[0].ext_id }}" + +- name: Fetch cluster info using external ID + ntnx_clusters_info_v2: + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify created cluster's details using cluster info + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == cluster_ext_id + - result.response.name == "{{ cluster_name }}" + - result.response.nodes.node_list[0].controller_vm_ip.ipv4.value == "{{ clusters[0].nodes[0].cvm_ip }}" + - result.response.nodes.node_list[1].controller_vm_ip.ipv4.value == "{{ clusters[0].nodes[1].cvm_ip }}" + - 
result.response.nodes.node_list[2].controller_vm_ip.ipv4.value == "{{ clusters[0].nodes[2].cvm_ip }}" + - result.response.config.cluster_function[0] == "{{ clusters[0].config.cluster_functions[0] }}" + - result.response.config.redundancy_factor == clusters[0].config.redundancy_factor_cluster_expanding + - result.response.config.cluster_arch == "{{ clusters[0].config.cluster_arch }}" + - > + result.response.config.fault_tolerance_state.domain_awareness_level == + "{{ clusters[0].config.fault_tolerance_state.domain_awareness_level_cluster_expanding }}" + fail_msg: Failed verifying cluster creation + success_msg: Cluster creation passed successfully + +- name: Discover unconfigured node + ntnx_discover_unconfigured_nodes_v2: + cluster_ext_id: "{{ cluster_ext_id }}" + address_type: "IPV4" + ip_filter_list: + - ipv4: + value: "{{ clusters[0].nodes[3].cvm_ip }}" + register: result_discover + ignore_errors: true + +- name: Assert discover unconfigured node status + ansible.builtin.assert: + that: + - result_discover.changed == false + - result_discover.failed == false + - result_discover.cluster_ext_id == cluster_ext_id + - result_discover.response.ext_id is defined + - result_discover.response.response.node_list.0.cvm_ip.ipv4.value == "{{ clusters[0].nodes[3].cvm_ip }}" + fail_msg: "Discover unconfigured node failed" + success_msg: "Discover unconfigured node passed" + +- name: Get network info of unconfigured node + ntnx_nodes_network_info_v2: + cluster_ext_id: "{{ cluster_ext_id }}" + node_list: + - cvm_ip: + ipv4: + value: "{{ result_discover.response.response.node_list[0].cvm_ip.ipv4.value }}" + hypervisor_ip: + ipv4: + value: "{{ result_discover.response.response.node_list[0].hypervisor_ip.ipv4.value }}" + request_type: "expand_cluster" + register: result_network_info + ignore_errors: true + +- name: Get network info of unconfigured node status + ansible.builtin.assert: + that: + - result_network_info.changed == false + - result_network_info.failed == false + - 
result_network_info.response.ext_id is defined + - result_network_info.response.response.network_info is defined + - result_network_info.response.response.uplinks is defined + - result_network_info.response.response.uplinks[0].cvm_ip.ipv4.value == "{{ result_discover.response.response.node_list[0].cvm_ip.ipv4.value }}" + - result_network_info.response.task_response_type == "NETWORKING_DETAILS" + fail_msg: "Get network info of unconfigured node failed" + success_msg: "Get network info of unconfigured node passed" + +- name: Expand cluster by adding a node + ntnx_clusters_nodes_v2: + cluster_ext_id: "{{ cluster_ext_id }}" + node_params: + node_list: + - node_uuid: "{{ result_discover.response.response.node_list[0].node_uuid }}" + block_id: "{{ result_discover.response.response.node_list[0].rackable_unit_serial }}" + node_position: "{{ result_discover.response.response.node_list[0].node_position }}" + nos_version: "{{ result_discover.response.response.node_list[0].nos_version }}" + hypervisor_type: "{{ result_discover.response.response.node_list[0].hypervisor_type }}" + hypervisor_version: "{{ result_discover.response.response.node_list[0].hypervisor_version }}" + is_light_compute: false + is_robo_mixed_hypervisor: true + hypervisor_hostname: test + model: "{{ result_discover.response.response.node_list[0].rackable_unit_model }}" + cvm_ip: + ipv4: + value: "{{ result_discover.response.response.node_list[0].cvm_ip.ipv4.value }}" + prefix_length: "{{ result_discover.response.response.node_list[0].cvm_ip.ipv4.prefix_length | int }}" + hypervisor_ip: + ipv4: + value: "{{ result_discover.response.response.node_list[0].hypervisor_ip.ipv4.value }}" + prefix_length: "{{ result_discover.response.response.node_list[0].hypervisor_ip.ipv4.prefix_length | int }}" + ipmi_ip: + ipv4: + value: "{{ result_discover.response.response.node_list[0].ipmi_ip.ipv4.value }}" + prefix_length: "{{ result_discover.response.response.node_list[0].ipmi_ip.ipv4.prefix_length | int }}" + 
current_network_interface: "{{ result_network_info.response.response.uplinks[0].uplink_list[0].name }}" + networks: + - name: "{{ result_network_info.response.response.network_info.hci[0].name }}" + networks: "{{ result_network_info.response.response.network_info.hci[0].networks }}" + uplinks: + active: + - mac: "{{ result_network_info.response.response.uplinks[0].uplink_list[0].mac }}" + name: "{{ result_network_info.response.response.uplinks[0].uplink_list[0].name }}" + value: "{{ result_network_info.response.response.uplinks[0].uplink_list[0].name }}" + standby: + - mac: "{{ result_network_info.response.response.uplinks[0].uplink_list[1].mac }}" + name: "{{ result_network_info.response.response.uplinks[0].uplink_list[1].name }}" + value: "{{ result_network_info.response.response.uplinks[0].uplink_list[1].name }}" + config_params: + should_skip_discovery: false + should_skip_imaging: true + should_validate_rack_awareness: false + is_nos_compatible: false + is_compute_only: false + is_never_scheduleable: false + target_hypervisor: "{{ result_discover.response.response.node_list[0].hypervisor_type }}" + should_skip_add_node: false + should_skip_pre_expand_checks: false + register: result_expand + ignore_errors: true + +- name: Expand cluster by adding a node status + ansible.builtin.assert: + that: + - result_expand.changed == true + - result_expand.failed == false + - result_expand.response.ext_id is defined + - result_expand.response.status == "SUCCEEDED" + - result_expand.task_ext_id is defined + fail_msg: "Expand cluster failed" + success_msg: "Expand cluster passed" + +- name: Sleep for 5 minutes + ansible.builtin.pause: + seconds: 300 + +- name: Remove node from cluster + ntnx_clusters_nodes_v2: + state: absent + cluster_ext_id: "{{ cluster_ext_id }}" + node_uuids: + - "{{ result_discover.response.response.node_list[0].node_uuid }}" + register: result + ignore_errors: true + +- name: Remove node from cluster status + ansible.builtin.assert: + that: + - 
result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: "Remove node from cluster failed" + success_msg: "Remove node from cluster passed" + +- name: Sleep for 1 minute + ansible.builtin.pause: + seconds: 60 + +- name: Destroy the cluster for cleanup + ntnx_clusters_v2: + state: absent + ext_id: "{{ cluster_ext_id }}" + register: result + ignore_errors: true + +- name: Verify cluster deletion + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.status == "SUCCEEDED" + - result.ext_id == cluster_ext_id + fail_msg: Failed verifying cluster deletion + success_msg: Cluster deletion passed successfully diff --git a/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/main.yml b/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/main.yml new file mode 100644 index 000000000..505f03788 --- /dev/null +++ b/tests/integration/targets/ntnx_clusters_nodes_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import add_remove_nodes.yml + ansible.builtin.import_tasks: add_remove_nodes.yml diff --git a/tests/integration/targets/ntnx_directory_services_v2/aliases b/tests/integration/targets/ntnx_directory_services_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_directory_services_v2/meta/main.yml b/tests/integration/targets/ntnx_directory_services_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_directory_services_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_directory_services_v2/tasks/all_operations.yml 
b/tests/integration/targets/ntnx_directory_services_v2/tasks/all_operations.yml new file mode 100644 index 000000000..e6c93d8e7 --- /dev/null +++ b/tests/integration/targets/ntnx_directory_services_v2/tasks/all_operations.yml @@ -0,0 +1,391 @@ +--- +- name: Start ntnx_directory_services_v2 tests + ansible.builtin.debug: + msg: start ntnx_directory_services_v2 tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set variables + ansible.builtin.set_fact: + directory_service: "{{ random_name }}ansible-ag" + todelete: "" + +- name: Create ACTIVE_DIRECTORY service with check_mode + ntnx_directory_services_v2: + state: present + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + register: result + ignore_errors: true + check_mode: true + +- name: Status of ACTIVE_DIRECTORY service creation in check mode + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ directory_service }}" + - result.response.domain_name == "{{ active_directory.domain_name }}" + - result.response.directory_type == "ACTIVE_DIRECTORY" + - result.response.url == "{{ active_directory.url }}" + - result.response.service_account.username == "{{ active_directory.username }}" + - result.response.service_account.password is defined + fail_msg: Unable to create ACTIVE_DIRECTORY service in check mode + success_msg: ACTIVE_DIRECTORY service created successfully in check mode + +######################################################################## + +- name: Create OpenLDAP service with check_mode + ntnx_directory_services_v2: + state: present + name: "{{ 
directory_service }}_name" + url: "{{ directory_service }}_url" + directory_type: OPEN_LDAP + domain_name: "{{ directory_service }}_domain_name" + service_account: + username: "{{ directory_service }}_username" + password: "{{ directory_service }}_password" + open_ldap_configuration: + user_configuration: + user_object_class: inetOrgPerson + user_search_base: ou=users,dc=example,dc=com + username_attribute: uid + user_group_configuration: + group_object_class: groupOfNames + group_search_base: ou=groups,dc=example,dc=com + group_member_attribute: member + group_member_attribute_value: uid + register: result + ignore_errors: true + check_mode: true + +- name: Create OpenLDAP service with check_mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ directory_service }}_name" + - result.response.domain_name == "{{ directory_service }}_domain_name" + - result.response.directory_type == "OPEN_LDAP" + - result.response.url == "{{ directory_service }}_url" + - result.response.service_account.username == "{{ directory_service }}_username" + - result.response.service_account.password is defined + - result.response.open_ldap_configuration.user_configuration.user_object_class == "inetOrgPerson" + - result.response.open_ldap_configuration.user_configuration.user_search_base == "ou=users,dc=example,dc=com" + - result.response.open_ldap_configuration.user_configuration.username_attribute == "uid" + - result.response.open_ldap_configuration.user_group_configuration.group_object_class == "groupOfNames" + - result.response.open_ldap_configuration.user_group_configuration.group_search_base == "ou=groups,dc=example,dc=com" + - result.response.open_ldap_configuration.user_group_configuration.group_member_attribute == "member" + - result.response.open_ldap_configuration.user_group_configuration.group_member_attribute_value == "uid" + fail_msg: Unable to create OPEN_LDAP service 
in check mode + success_msg: OPEN_LDAP service created successfully in check mode + +######################################################################## + +- name: Create ACTIVE_DIRECTORY service + ntnx_directory_services_v2: + state: present + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + white_listed_groups: + - "{{ white_listed_groups[0] }}" + register: result + ignore_errors: true + +- name: Status of ACTIVE_DIRECTORY service creation + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == False + - result.response.name == "{{ directory_service }}" + - result.response.directory_type == "ACTIVE_DIRECTORY" + - result.response.url == "{{ active_directory.url }}" + - result.response.domain_name == "{{ active_directory.domain_name }}" + - result.response.service_account.username == "{{ active_directory.username }}" + - result.response.service_account.password is defined + - result.response.ext_id is defined + - result.ext_id is defined + - result.response.ext_id == result.ext_id + fail_msg: "Unable to create ACTIVE_DIRECTORY service " + success_msg: "ACTIVE_DIRECTORY service created successfully " + +- name: Adding directory_service external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ result.response.ext_id }}" + +######################################################################## + +- name: Create directory service that already exists + ntnx_directory_services_v2: + state: present + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + 
white_listed_groups: + - "{{ white_listed_groups[1] }}" + register: result + ignore_errors: true + +- name: Status of ACTIVE_DIRECTORY service creation + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == True + - result.response.data.error | length > 0 + - result.status == 409 + fail_msg: "Unable to create ACTIVE_DIRECTORY service " + success_msg: "ACTIVE_DIRECTORY service created successfully " + +######################################################################## + +- name: Update directory service with all attributes + ntnx_directory_services_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + group_search_type: RECURSIVE + white_listed_groups: + - "{{ white_listed_groups[1] }}" + register: result + ignore_errors: true + +- name: Status of updating ACTIVE_DIRECTORY service with all attributes + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == False + - result.ext_id == "{{ todelete }}" + - result.response.name == "{{ directory_service }}" + - result.response.directory_type == "ACTIVE_DIRECTORY" + - result.response.url == "{{ active_directory.url }}" + - result.response.service_account.username == "{{ active_directory.username }}" + - result.response.service_account.password is defined + - result.response.group_search_type == "RECURSIVE" + - result.response.white_listed_groups[0] == "{{ white_listed_groups[1] }}" + fail_msg: Unable to update ACTIVE_DIRECTORY service with all attributes + success_msg: "ACTIVE_DIRECTORY service with all attributes is updated successfully " + +######################################################################## + +- name: Update group search 
type and white listed groups in directory service + ntnx_directory_services_v2: + state: present + ext_id: "{{ todelete }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + group_search_type: NON_RECURSIVE + white_listed_groups: + - "{{ white_listed_groups[0] }}" + register: result + ignore_errors: true + +- name: Update group search type and white listed groups in directory service status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == False + - result.ext_id == "{{ todelete }}" + - result.response.group_search_type == "NON_RECURSIVE" + - result.response.white_listed_groups[0] == "{{ white_listed_groups[0] }}" + fail_msg: "update group search type and white listed groups in directory service failed " + success_msg: "update group search type and white listed groups in directory service passed " + +######################################################################## + +- name: Verify if idempotency checks are skipped if password is provided + ntnx_directory_services_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + password: "{{ active_directory.password }}" + group_search_type: NON_RECURSIVE + white_listed_groups: + - "{{ white_listed_groups[1] }}" + register: result + ignore_errors: true + +- name: Test idempotency status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.name == "{{ directory_service }}" + - result.response.directory_type == "ACTIVE_DIRECTORY" + - result.response.url == "{{ active_directory.url }}" + - result.response.service_account.username == "{{ active_directory.username }}" + - 
result.response.service_account.password is defined + - result.response.group_search_type == "NON_RECURSIVE" + - result.response.white_listed_groups[0] == "{{ white_listed_groups[1] }}" + fail_msg: "Verify if idempotency checks are skipped if password is provided failed " + success_msg: "Verify if idempotency checks are skipped if password is provided passed " + +######################################################################## + +- name: Verify if module is idempotent if same config is provided + ntnx_directory_services_v2: + state: present + ext_id: "{{ todelete }}" + name: "{{ directory_service }}" + url: "{{ active_directory.url }}" + directory_type: ACTIVE_DIRECTORY + domain_name: "{{ active_directory.domain_name }}" + service_account: + username: "{{ active_directory.username }}" + group_search_type: NON_RECURSIVE + white_listed_groups: + - "{{ white_listed_groups[1] }}" + register: result + ignore_errors: true + +- name: Test idempotency status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.ext_id == "{{ todelete }}" + - result.msg == "Nothing to change." 
+ fail_msg: "Verify if module is idempotent if same config is provided failed " + success_msg: "Verify if module is idempotent if same config is provided passed " + +######################################################################## + +- name: Fetch directory service info using ext id + ntnx_directory_services_info_v2: + ext_id: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Fetch directory service info using ext id status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.ext_id == "{{ todelete }}" + - result.response.name == "{{ directory_service }}" + - result.response.directory_type == "ACTIVE_DIRECTORY" + - result.response.url == "{{ active_directory.url }}" + - result.response.domain_name == "{{ active_directory.domain_name }}" + - result.response.service_account.username == "{{ active_directory.username }}" + - result.response.service_account.password is defined + - result.response.group_search_type == "NON_RECURSIVE" + - result.response.white_listed_groups[0] == "{{ white_listed_groups[1] }}" + fail_msg: Unable to fetch directory service using ext id + success_msg: directory service fetched using ext id successfully + +######################################################################## + +- name: List all directory services + ntnx_directory_services_info_v2: + register: result + ignore_errors: true + +- name: List all directory services status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: Unable to list all directory services + success_msg: All directory services listed successfully + +######################################################################## + +- name: List all directory services with filter + ntnx_directory_services_info_v2: + filter: name eq '{{ directory_service }}' + register: result + ignore_errors: 
true + +- name: List all directory services with filter status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length == 1 + - result.response[0].name == "{{ directory_service }}" + fail_msg: Unable to list all directory services with filter + success_msg: All directory services listed with filter successfully + +######################################################################## + +- name: List all directory services with limit + ntnx_directory_services_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List all directory services with limit status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length == 1 + fail_msg: Unable to list all directory services with limit + success_msg: All directory services listed with limit successfully + +######################################################################## + +- name: Delete directory service + ntnx_directory_services_v2: + state: absent + ext_id: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Output of delete directory service + ansible.builtin.set_fact: + msg: "Directory Service with ext_id: {{ todelete }} deleted successfully" + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.ext_id == "{{ todelete }}" + - result.msg == "{{ msg }}" + fail_msg: Unable to delete directory service + success_msg: directory service is deleted successfully + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_directory_services_v2/tasks/main.yml b/tests/integration/targets/ntnx_directory_services_v2/tasks/main.yml new file mode 100644 index 000000000..8f015ea4c --- /dev/null +++ b/tests/integration/targets/ntnx_directory_services_v2/tasks/main.yml @@ -0,0 +1,11 
@@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_operations.yml + ansible.builtin.import_tasks: all_operations.yml diff --git a/tests/integration/targets/ntnx_floating_ips_v2/aliases b/tests/integration/targets/ntnx_floating_ips_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_floating_ips_v2/meta/main.yml b/tests/integration/targets/ntnx_floating_ips_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_floating_ips_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_floating_ips_v2/tasks/all_operation.yml b/tests/integration/targets/ntnx_floating_ips_v2/tasks/all_operation.yml new file mode 100644 index 000000000..1bfa5de82 --- /dev/null +++ b/tests/integration/targets/ntnx_floating_ips_v2/tasks/all_operation.yml @@ -0,0 +1,383 @@ +--- +- name: Start testing ntnx_floating_ips_v2 ntnx_floating_ips_info_v2 + ansible.builtin.debug: + msg: Start testing ntnx_floating_ips_v2 ntnx_floating_ips_info_v2 + +- name: Generate random floating ip name + ansible.builtin.set_fact: + fip1_name: "{{query('community.general.random_string',numbers=false, special=false,length=8).0}}" + fip2_name: "{{query('community.general.random_string',numbers=false, special=false,length=8).0}}" + fip3_name: "{{query('community.general.random_string',numbers=false, special=false,length=8).0}}" + +- name: Fetch vm nic reference for tests + ntnx_vms_info: + vm_uuid: "{{ vm.uuid }}" + register: result + +- name: Define variable + ansible.builtin.set_fact: + vm_nic_reference: "{{ result.response.status.resources.nic_list[0].uuid }}" + +- name: Fetch categories for assigning + ntnx_categories_info_v2: + register: result + +- 
name: Define variable + ansible.builtin.set_fact: + category_ext_id: "{{ result.response[0].ext_id }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +############################################################## +- name: Create floating ip with external subnet uuid with check mode + ntnx_floating_ips_v2: + state: present + name: "{{ fip1_name }}" + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + floating_ip: + ipv4: + value: "{{ external_nat_subnets[0].floating_ip }}" + register: result + ignore_errors: true + check_mode: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ fip1_name }}" + - result.response.floating_ip.ipv4.value == "{{ external_nat_subnets[0].floating_ip }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + fail_msg: " Unable to create floating ip with external subnet uuid with check mode" + success_msg: " Floating ip with external subnet uuid created successfully with check mode" +############################################################## +- name: Create floating ip with external subnet uuid + ntnx_floating_ips_v2: + state: present + name: "{{ fip1_name }}" + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + metadata: + project_reference_id: "{{ project.uuid }}" + owner_reference_id: "{{ users[0] }}" + category_ids: + - "{{ category_ext_id }}" + floating_ip: + ipv4: + value: "{{ external_nat_subnets[0].floating_ip }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ fip1_name }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.metadata.project_reference_id == "{{ project.uuid }}" + - 
result.response.metadata.owner_reference_id == "{{ users[0] }}" + - result.response.metadata.category_ids[0] == "{{ category_ext_id }}" + - result.response.floating_ip.ipv4.value == "{{ external_nat_subnets[0].floating_ip }}" + fail_msg: " Unable to create floating ip with external subnet uuid " + success_msg: " Floating ip with external subnet uuid created successfully " + +- name: Fetch the created floating IP + ntnx_floating_ips_info_v2: + filter: name eq '{{ fip1_name }}' + register: result + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.changed == false + - result.failed == false + - result.response[0].name == "{{ fip1_name }}" + - result.response[0].external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response[0].metadata.project_reference_id == "{{ project.uuid }}" + - result.response[0].metadata.owner_reference_id == "{{ users[0] }}" + - result.response[0].metadata.category_ids[0] == "{{ category_ext_id }}" + - result.response[0].floating_ip.ipv4.value == "{{ external_nat_subnets[0].floating_ip }}" + + fail_msg: " Unable to create floating ip with external subnet uuid " + success_msg: " Floating ip with external subnet uuid created successfully " + +- name: Define variable + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.response[0].ext_id] }}" + +- name: Get floating_ips using ext_id + ntnx_floating_ips_info_v2: + ext_id: "{{ result.response[0].ext_id }}" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.name == "{{ fip1_name }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + + fail_msg: " Unable to list fips " + success_msg: " fips listed successfully " + +############################################################## +- name: Check idempotency + ntnx_floating_ips_v2: + state: present + name: "{{ 
fip1_name }}" + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + ext_id: "{{ result.ext_id }}" + register: result + ignore_errors: true + +- name: Idempotency Status + ansible.builtin.assert: + that: + - result.failed == false + - result.changed == false + - result.msg == "Nothing to change." + fail_msg: " Unable to check idempotency " + success_msg: " Idempotency check finished successfully " +############################################################## +- name: Create floating ip with vpc uuid and private ip association + ntnx_floating_ips_v2: + state: present + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + name: "{{ fip2_name }}" + association: + private_ip_association: + private_ip: + ipv4: + value: "{{ overlay_subnet.private_ip }}" + vpc_reference: "{{ vpc.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ fip2_name }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.association.private_ip.ipv4.value == "{{ overlay_subnet.private_ip }}" + - result.response.association.vpc_reference == "{{ vpc.uuid }}" + + fail_msg: " Unable to create floating ip with vpc " + success_msg: " Floating ip with vpc uuid and external subnet uuid created successfully " + +- name: Fetch the created floating IP + ntnx_floating_ips_info_v2: + filter: name eq '{{ fip2_name }}' + register: result + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].name == "{{ fip2_name }}" + - result.response[0].external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response[0].association.private_ip.ipv4.value == "{{ overlay_subnet.private_ip }}" + - result.response[0].association.vpc_reference == "{{ vpc.uuid }}" + fail_msg: " Unable to create floating ip with 
vpc " + success_msg: " Floating ip with vpc uuid and external subnet uuid created successfully " + +- name: Define variable + ansible.builtin.set_fact: + fip_ext_id: "{{ result.response[0].ext_id }}" +- name: Define variable + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.response[0].ext_id] }}" +########################################################## +- name: Update floating ip name with check mode + ntnx_floating_ips_v2: + state: present + name: "{{ fip2_name }}_updated" + ext_id: "{{ fip_ext_id }}" + association: + private_ip_association: + private_ip: + ipv4: + value: "{{ overlay_subnet.private_ip1 }}" + vpc_reference: "{{ vpc.uuid }}" + register: result + ignore_errors: true + check_mode: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ result.ext_id }}" + - result.response.name == "{{ fip2_name }}_updated" + - result.response.association.private_ip.ipv4.value == "{{ overlay_subnet.private_ip1 }}" + - result.response.association.vpc_reference == "{{ vpc.uuid }}" + fail_msg: " Unable to update floating ip name with check mode" + success_msg: " Floating ip name updated successfully with check mode " + +########################################################## +- name: Update floating ip name and private IP + ntnx_floating_ips_v2: + state: present + name: "{{ fip2_name }}_updated" + ext_id: "{{ fip_ext_id }}" + association: + private_ip_association: + private_ip: + ipv4: + value: "{{ overlay_subnet.private_ip1 }}" + vpc_reference: "{{ vpc.uuid }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ result.ext_id }}" + - result.response.name == "{{ fip2_name }}_updated" + - result.response.association.private_ip.ipv4.value == "{{ overlay_subnet.private_ip1 }}" + - 
result.response.association.vpc_reference == "{{ vpc.uuid }}" + fail_msg: " Unable to update floating ip" + success_msg: " Floating ip name updated successfully" + +######################################################## +- name: Create floating ip with vm nic reference association + ntnx_floating_ips_v2: + state: present + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + association: + vm_nic_association: + vm_nic_reference: "{{ vm_nic_reference }}" + name: "{{ fip3_name }}" + ignore_errors: true + register: result + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ fip3_name }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.association.vm_nic_reference == "{{ vm_nic_reference }}" + fail_msg: " Unable to create floating ip with vm nic reference " + success_msg: " Floating ip with external uuid and vm nic reference created successfully " + +- name: Fetch the created floating IP + ntnx_floating_ips_info_v2: + filter: name eq '{{ fip3_name }}' + register: result + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].name == "{{ fip3_name }}" + - result.changed == false + - result.response[0].external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response[0].association.vm_nic_reference == "{{ vm_nic_reference }}" + fail_msg: " Unable to create floating ip with vm" + success_msg: " Floating ip with external uuid and vm created successfully " + +- name: Define variable + ansible.builtin.set_fact: + fip_ext_id: "{{ result.response[0].ext_id }}" + +- name: Define variable + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.response[0].ext_id] }}" + +# using dummy reference here for generating spec only +- name: Generate update spec for floating ip with vm nic reference + 
ntnx_floating_ips_v2: + state: present + ext_id: "{{ fip_ext_id }}" + external_subnet_reference: "{{ external_nat_subnet.uuid }}" + association: + vm_nic_association: + vm_nic_reference: e96b0997-8b1a-4554-881b-f59dd689b3de + name: "{{ fip3_name }}" + check_mode: true + ignore_errors: true + register: result + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == result.response.ext_id + - result.response.name == "{{ fip3_name }}" + - result.response.external_subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.association.vm_nic_reference == "e96b0997-8b1a-4554-881b-f59dd689b3de" + fail_msg: " Unable to create floating ip with vm" + success_msg: " Floating ip with external uuid and vm created successfully " + +############################################################## +- name: List Floating_ips + ntnx_floating_ips_info_v2: + register: fips + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - fips.response is defined + - fips.response | length >= 1 + fail_msg: " Unable to list vpcs " + success_msg: " Floating_ips listed successfully " +############################################################## +- name: List floating_ips using limit + ntnx_floating_ips_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + fail_msg: " Unable to list floating_ips " + success_msg: " Floating_ips listed successfully " +############################################################# +- name: Get floating_ips using ext_id + ntnx_floating_ips_info_v2: + ext_id: "{{ fips.response.1.ext_id }}" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.name == fips.response.1.name + fail_msg: " Unable to list fips " + success_msg: " fips listed 
successfully " +######################################################### +- name: Delete all created floating ips + ntnx_floating_ips_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Define variable + ansible.builtin.set_fact: + todelete: [] +######################################################### diff --git a/tests/integration/targets/ntnx_floating_ips_v2/tasks/main.yml b/tests/integration/targets/ntnx_floating_ips_v2/tasks/main.yml new file mode 100644 index 000000000..f67dc0e3e --- /dev/null +++ b/tests/integration/targets/ntnx_floating_ips_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Ntnx_floating_ips_v2 integration test + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import tasks + ansible.builtin.import_tasks: all_operation.yml diff --git a/tests/integration/targets/ntnx_floating_ips_v2/vars/main.yml b/tests/integration/targets/ntnx_floating_ips_v2/vars/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_foundation/aliases b/tests/integration/targets/ntnx_foundation/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation/aliases +++ b/tests/integration/targets/ntnx_foundation/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml index b27e210df..032ed3f65 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml @@ -1,68 +1,68 @@ --- - - debug: - msg: start testing ntnx_foundation +- name: Start testing ntnx_foundation + ansible.builtin.debug: + msg: Start testing ntnx_foundation - - name: Image nodes 
using manual and discovery modes. Create cluster - ntnx_foundation: - timeout: 4500 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{IBIS_node.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" - ipmi_netmask: "{{IBIS_node.node1.ipmi_netmask}}" - ipmi_gateway: "{{IBIS_node.node1.ipmi_gateway}}" - ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" - ipmi_password: "{{IBIS_node.node1.ipmi_password}}" - hypervisor: "{{IBIS_node.node1.hypervisor}}" - hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" - node_position: "{{IBIS_node.node1.node_position}}" - - discovery_mode: #dos mode using cvm - cvm_gb_ram: 50 - node_serial: "{{IBIS_node.node3.node_serial}}" - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor: "{{IBIS_node.node3.hypervisor}}" - - discovery_mode: # aos node using ipmi - cvm_gb_ram: 50 - ipmi_password: "{{IBIS_node.node2.ipmi_password}}" - node_serial: "{{IBIS_node.node2.node_serial}}" - discovery_override: - hypervisor_hostname: "IBIS2" - clusters: - - redundancy_factor: 2 - cluster_members: - - "{{IBIS_node.node1.cvm_ip}}" - - "{{IBIS_node.node3.cvm_ip}}" - - "{{IBIS_node.node2.cvm_ip}}" - name: "test-cluster" - register: first_cluster - ignore_errors: True - # when: false # make it true or remove to unskip task - - - name: Creation Status - assert: - that: - - first_cluster.response is defined - - first_cluster.failed==false - - first_cluster.changed==true - - first_cluster.response.cluster_urls is defined - - 
first_cluster.response.cluster_urls.0.name=="test-cluster" - fail_msg: " Fail : unable to create cluster with three node" - success_msg: "Succes: cluster with three node created successfully " - # when: false # make it true or remove to unskip task +- name: Image nodes using manual and discovery modes. Create cluster + ntnx_foundation: + timeout: 4500 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{IBIS_node.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" + ipmi_netmask: "{{IBIS_node.node1.ipmi_netmask}}" + ipmi_gateway: "{{IBIS_node.node1.ipmi_gateway}}" + ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" + ipmi_password: "{{IBIS_node.node1.ipmi_password}}" + hypervisor: "{{IBIS_node.node1.hypervisor}}" + hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" + node_position: "{{IBIS_node.node1.node_position}}" + - discovery_mode: # dos mode using cvm + cvm_gb_ram: 50 + node_serial: "{{IBIS_node.node3.node_serial}}" + device_hint: "vm_installer" + discovery_override: + hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor: "{{IBIS_node.node3.hypervisor}}" + - discovery_mode: # aos node using ipmi + cvm_gb_ram: 50 + ipmi_password: "{{IBIS_node.node2.ipmi_password}}" + node_serial: "{{IBIS_node.node2.node_serial}}" + discovery_override: + hypervisor_hostname: "IBIS2" + clusters: + - redundancy_factor: 2 + cluster_members: + - "{{IBIS_node.node1.cvm_ip}}" + - "{{IBIS_node.node3.cvm_ip}}" + - "{{IBIS_node.node2.cvm_ip}}" + name: "test-cluster" + register: first_cluster + 
ignore_errors: true + # when: false # make it true or remove to unskip task +- name: Creation Status + ansible.builtin.assert: + that: + - first_cluster.response is defined + - first_cluster.failed==false + - first_cluster.changed==true + - first_cluster.response.cluster_urls is defined + - first_cluster.response.cluster_urls.0.name=="test-cluster" + fail_msg: " Fail : unable to create cluster with three node" + success_msg: "Succes: cluster with three node created successfully " + # when: false # make it true or remove to unskip task ###################################################### diff --git a/tests/integration/targets/ntnx_foundation/tasks/main.yml b/tests/integration/targets/ntnx_foundation/tasks/main.yml index 61dfecb44..cda74b3db 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/main.yml @@ -1,7 +1,10 @@ --- -- module_defaults: - nutanix.ncp.ntnx_foundation: - nutanix_host: "{{ foundation_host }}" +- name: Set module defaults + module_defaults: + nutanix.ncp.ntnx_foundation: + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "image_nodes.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import image_nodes.yml + ansible.builtin.import_tasks: "image_nodes.yml" + - name: Import negative_scenarios.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml index 86472bf65..a80e20dc0 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml @@ -1,119 +1,121 @@ - - name: Image nodes with check mode - check_mode: yes - ntnx_foundation: - timeout: 3660 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - 
hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - manual_mode: - cvm_gb_ram: 50 - cvm_ip: "{{IBIS_node.node1.cvm_ip}}" - hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" - ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" - ipmi_password: "{{IBIS_node.node1.ipmi_password}}" - hypervisor: "{{IBIS_node.node1.hypervisor}}" - hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" - node_position: "{{IBIS_node.node1.node_position}}" - clusters: - - redundancy_factor: 2 - cluster_members: - - "{{IBIS_node.node1.cvm_ip}}" - - "{{IBIS_node.node3.cvm_ip}}" - - "{{IBIS_node.node2.cvm_ip}}" - name: "test-cluster" - register: result +- name: Image nodes with check mode + check_mode: true + ntnx_foundation: + timeout: 3660 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - manual_mode: + cvm_gb_ram: 50 + cvm_ip: "{{IBIS_node.node1.cvm_ip}}" + hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" + ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" + ipmi_password: "{{IBIS_node.node1.ipmi_password}}" + hypervisor: "{{IBIS_node.node1.hypervisor}}" + hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" + node_position: "{{IBIS_node.node1.node_position}}" + clusters: + - redundancy_factor: 2 + cluster_members: + - "{{IBIS_node.node1.cvm_ip}}" + - "{{IBIS_node.node3.cvm_ip}}" + - "{{IBIS_node.node2.cvm_ip}}" + name: "test-cluster" + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed==false - - result.changed==false - - 
result.response.blocks.0.nodes.0.cvm_ip=="{{IBIS_node.node1.cvm_ip}}" - - result.response.blocks.0.nodes.0.hypervisor_hostname=="{{IBIS_node.node1.hypervisor_hostname}}" - - result.response.blocks.0.nodes.0.ipmi_ip=="{{IBIS_node.node1.ipmi_ip}}" - - result.response.blocks.0.nodes.0.hypervisor=="{{IBIS_node.node1.hypervisor}}" - - result.response.blocks.0.nodes.0.node_position=="{{IBIS_node.node1.node_position}}" - - result.response.clusters.0.cluster_name=="test-cluster" - fail_msg: " Fail : check_mode fail" - success_msg: "Succes: returned response as expected" +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed==false + - result.changed==false + - result.response.blocks.0.nodes.0.cvm_ip=="{{IBIS_node.node1.cvm_ip}}" + - result.response.blocks.0.nodes.0.hypervisor_hostname=="{{IBIS_node.node1.hypervisor_hostname}}" + - result.response.blocks.0.nodes.0.ipmi_ip=="{{IBIS_node.node1.ipmi_ip}}" + - result.response.blocks.0.nodes.0.hypervisor=="{{IBIS_node.node1.hypervisor}}" + - result.response.blocks.0.nodes.0.node_position=="{{IBIS_node.node1.node_position}}" + - result.response.clusters.0.cluster_name=="test-cluster" + fail_msg: " Fail : check_mode fail" + success_msg: "Succes: returned response as expected" ################################### - - debug: - msg: start negative_scenarios for ntnx_foundation +- name: Start negative_scenarios for ntnx_foundation + ansible.builtin.debug: + msg: Start negative_scenarios for ntnx_foundation ################################### - - name: Image nodes with wrong serial - ntnx_foundation: - timeout: 3660 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - 
discovery_mode: - cvm_gb_ram: 50 - node_serial: wrong_serial - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor: "{{IBIS_node.node3.hypervisor}}" - register: result - ignore_errors: True +- name: Image nodes with wrong serial + ntnx_foundation: + timeout: 3660 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - discovery_mode: + cvm_gb_ram: 50 + node_serial: wrong_serial + device_hint: "vm_installer" + discovery_override: + hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor: "{{IBIS_node.node3.hypervisor}}" + register: result + ignore_errors: false - - name: Creation Status - assert: - that: - - result.msg == "Failed generating Image Nodes Spec" - - result.changed==false - - result.failed==true - fail_msg: " Fail : image node with wrong serial done successfully " - success_msg: "Succes: unable to image node with wrong serial " +- name: Creation Status + ansible.builtin.assert: + that: + - result.msg == "Failed generating Image Nodes Spec" + - result.changed==false + - result.failed==true + fail_msg: " Fail : image node with wrong serial done successfully " + success_msg: "Succes: unable to image node with wrong serial " ################################### - - name: Image nodes with wrong hypervisor - ntnx_foundation: - timeout: 3660 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: 
"{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - discovery_mode: - cvm_gb_ram: 50 - node_serial: wrong_serial - device_hint: "vm_installer" - discovery_override: - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor: "phoenix" - register: result - ignore_errors: True +- name: Image nodes with wrong hypervisor + ntnx_foundation: + timeout: 3660 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - discovery_mode: + cvm_gb_ram: 50 + node_serial: wrong_serial + device_hint: "vm_installer" + discovery_override: + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor: "phoenix" + register: result + ignore_errors: false - - name: Creation Status - assert: - that: - - result.changed==false - - result.failed==true - - "result.msg=='value of hypervisor must be one of: kvm, hyperv, xen, esx, ahv, got: phoenix found in blocks -> nodes -> discovery_mode -> discovery_override'" - fail_msg: " Fail : Image nodes with wrong hypervisor done successfully " - success_msg: "Succes: unable to image node with wrong hypervisor" +- name: Creation Status + ansible.builtin.assert: + that: + - result.changed==false + - result.failed==true + - "result.msg=='value of hypervisor must be one of: kvm, hyperv, xen, esx, ahv, got: \ + phoenix found in blocks -> nodes -> discovery_mode -> discovery_override'" + fail_msg: " Fail : Image nodes with wrong hypervisor 
done successfully " + success_msg: "Succes: unable to image node with wrong hypervisor" diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/aliases b/tests/integration/targets/ntnx_foundation_aos_packages_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_aos_packages_info/aliases +++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_aos_packages_info/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_aos_packages_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml index d2cca917b..5cf859154 100644 --- a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml +++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml @@ -1,13 +1,14 @@ --- -- debug: +- name: Start testing ntnx_foundation_aos_packages_info + ansible.builtin.debug: msg: start testing ntnx_foundation_aos_packages_info -- name: get aos_packages_info from foundation +- name: Get aos_packages_info from foundation ntnx_foundation_aos_packages_info: register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.aos_packages | length > 0 - result.aos_packages is defined diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/main.yml index 5b585991e..66467227e 100644 --- a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/main.yml 
+++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_aos_packages_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_aos_packages_info: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "get_aos.yml" + - name: Import Tasks + ansible.builtin.import_tasks: get_aos.yml diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/aliases b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/aliases +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/meta/main.yml b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml index 9988683a3..be6b0f990 100644 --- a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml @@ -1,27 +1,28 @@ --- -- debug: +- name: Start testing ntnx_foundation_bmc_ipmi_config + ansible.builtin.debug: msg: start testing ntnx_foundation_bmc_ipmi_config -- name: configure ipmi +- name: Configure ipmi ntnx_foundation_bmc_ipmi_config: - ipmi_user: "{{bmc.ipmi_user}}" - ipmi_password: "{{bmc.ipmi_password}}" - ipmi_netmask: "{{bmc.ipmi_netmask}}" - ipmi_gateway: 
"{{bmc.ipmi_gateway}}" + ipmi_user: "{{ bmc.ipmi_user }}" + ipmi_password: "{{ bmc.ipmi_password }}" + ipmi_netmask: "{{ bmc.ipmi_netmask }}" + ipmi_gateway: "{{ bmc.ipmi_gateway }}" timeout: 100 blocks: - - nodes: - - ipmi_mac: "{{bmc.ipmi_mac}}" - ipmi_ip: "{{bmc.ipmi_ip}}" + - nodes: + - ipmi_mac: "{{ bmc.ipmi_mac }}" + ipmi_ip: "{{ bmc.ipmi_ip }}" register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed==false - result.changed==true - result.response.blocks.0.nodes.0.ipmi_configure_successful==true - result.response.blocks.0.nodes.0.ipmi_message is defined - fail_msg: "bmc ipmi configure was failed with error result.error" - success_msg: "bmc ipmi configure was successfull" + fail_msg: bmc ipmi configure was failed with error result.error + success_msg: bmc ipmi configure was successfull diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/main.yml b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/main.yml index 7ac06c2c5..658a97435 100644 --- a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_bmc_ipmi_config integration tests + module_defaults: nutanix.ncp.ntnx_foundation_bmc_ipmi_config: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "configure_ipmi.yml" + - name: Import Tasks + ansible.builtin.import_tasks: configure_ipmi.yml diff --git a/tests/integration/targets/ntnx_foundation_central/aliases b/tests/integration/targets/ntnx_foundation_central/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_central/aliases +++ b/tests/integration/targets/ntnx_foundation_central/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git 
a/tests/integration/targets/ntnx_foundation_central/meta/main.yml b/tests/integration/targets/ntnx_foundation_central/meta/main.yml index 2c3cac55f..3d12027e5 100644 --- a/tests/integration/targets/ntnx_foundation_central/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_central/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_fc_env diff --git a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml index 113c4d3a3..8f92f281c 100644 --- a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml @@ -1,51 +1,53 @@ -- debug: +--- +- name: Start testing ntnx_foundation_central module + ansible.builtin.debug: msg: start testing ntnx_foundation_central module -- name: api +- name: Api ntnx_foundation_central: - cluster_name: "{{cluster_name}}" + cluster_name: "{{ cluster_name }}" skip_cluster_creation: false redundancy_factor: 2 - aos_package_url: "{{aos_package_url}}" + aos_package_url: "{{ aos_package_url }}" common_network_settings: cvm_dns_servers: - - "{{common_network_settings.cvm_dns_servers}}" + - "{{ common_network_settings.cvm_dns_servers }}" hypervisor_dns_servers: - - "{{common_network_settings.hypervisor_dns_servers}}" + - "{{ common_network_settings.hypervisor_dns_servers }}" cvm_ntp_servers: - - "{{common_network_settings.cvm_ntp_servers}}" + - "{{ common_network_settings.cvm_ntp_servers }}" hypervisor_ntp_servers: - - "{{common_network_settings.hypervisor_ntp_servers}}" + - "{{ common_network_settings.hypervisor_ntp_servers }}" hypervisor_iso_details: - url: "{{hypervisor_iso_details.url}}" + url: "{{ hypervisor_iso_details.url }}" nodes_list: - manual_mode: - cvm_gateway: "{{node1.cvm_gateway}}" - cvm_netmask: "{{node1.cvm_netmask}}" - cvm_ip: "{{node1.cvm_ip}}" - hypervisor_gateway: "{{node1.hypervisor_gateway}}" - hypervisor_netmask: 
"{{node1.hypervisor_netmask}}" - hypervisor_ip: "{{node1.hypervisor_ip}}" - hypervisor_hostname: "{{node1.hypervisor_hostname}}" - imaged_node_uuid: "{{node1.imaged_node_uuid}}" + cvm_gateway: "{{ node1.cvm_gateway }}" + cvm_netmask: "{{ node1.cvm_netmask }}" + cvm_ip: "{{ node1.cvm_ip }}" + hypervisor_gateway: "{{ node1.hypervisor_gateway }}" + hypervisor_netmask: "{{ node1.hypervisor_netmask }}" + hypervisor_ip: "{{ node1.hypervisor_ip }}" + hypervisor_hostname: "{{ node1.hypervisor_hostname }}" + imaged_node_uuid: "{{ node1.imaged_node_uuid }}" use_existing_network_settings: false - ipmi_gateway: "{{node1.ipmi_gateway}}" - ipmi_netmask: "{{node1.ipmi_netmask}}" - ipmi_ip: "{{node1.ipmi_ip}}" + ipmi_gateway: "{{ node1.ipmi_gateway }}" + ipmi_netmask: "{{ node1.ipmi_netmask }}" + ipmi_ip: "{{ node1.ipmi_ip }}" image_now: true - hypervisor_type: "{{node1.hypervisor_type}}" + hypervisor_type: "{{ node1.hypervisor_type }}" - discovery_mode: - node_serial: "{{node2.node_serial}}" + node_serial: "{{ node2.node_serial }}" - discovery_mode: - node_serial: "{{node3.node_serial}}" + node_serial: "{{ node3.node_serial }}" discovery_override: - hypervisor_hostname: "{{node3.discovery_override.hypervisor_hostname}}" + hypervisor_hostname: "{{ node3.discovery_override.hypervisor_hostname }}" register: result ignore_errors: true # when: false # make it true or remove to unskip task - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed==false diff --git a/tests/integration/targets/ntnx_foundation_central/tasks/main.yml b/tests/integration/targets/ntnx_foundation_central/tasks/main.yml index ec2c1972f..34c8ea78b 100644 --- a/tests/integration/targets/ntnx_foundation_central/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_central/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_foundation_central integration tests + module_defaults: nutanix.ncp.ntnx_foundation_central: - nutanix_host: "{{ 
ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "image_nodes.yml" + - name: Import Tasks + ansible.builtin.import_tasks: image_nodes.yml diff --git a/tests/integration/targets/ntnx_foundation_central/vars/main.yml b/tests/integration/targets/ntnx_foundation_central/vars/main.yml index 34d455f00..a6dd5a564 100644 --- a/tests/integration/targets/ntnx_foundation_central/vars/main.yml +++ b/tests/integration/targets/ntnx_foundation_central/vars/main.yml @@ -1 +1,2 @@ +--- cluster_name: test_cluster diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/aliases b/tests/integration/targets/ntnx_foundation_central_api_keys/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/aliases +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/meta/main.yml b/tests/integration/targets/ntnx_foundation_central_api_keys/meta/main.yml index 2c3cac55f..3d12027e5 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_fc_env diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml index 92943493e..dde8fd3a1 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml @@ -1,15 +1,17 @@ -- debug: +--- +- name: Start testing 
ntnx_foundation_central_api_keys module + ansible.builtin.debug: msg: start testing ntnx_foundation_central_api_keys module -- name: create api key with check_mode +- name: Create api key with check_mode ntnx_foundation_central_api_keys: - alias: test + alias: test check_mode: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed==false @@ -19,18 +21,17 @@ success_msg: "succes: api key with check_mode: " - name: Generate random alias for api key - set_fact: - random_alias: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_alias: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" - -- name: create api key with random alias +- name: Create api key with random alias ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" + alias: "{{random_alias.0}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed==false @@ -39,13 +40,14 @@ fail_msg: "fail: Unable to create api key " success_msg: "succes: api key created successfully " -- ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" +- name: Create duplicate api key with same alias + ntnx_foundation_central_api_keys: + alias: "{{random_alias.0}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed==false - result.failed==true diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/main.yml b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/main.yml index 9d26b931f..d6d707839 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: 
Ntnx_foundation_central_api_keys integration tests + module_defaults: nutanix.ncp.ntnx_foundation_central_api_keys: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_key.yml" + - name: Import Tasks + ansible.builtin.import_tasks: create_key.yml diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/aliases b/tests/integration/targets/ntnx_foundation_central_api_keys_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/aliases +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_central_api_keys_info/meta/main.yml index 2c3cac55f..3d12027e5 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_fc_env diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml index c26afa9f1..b1ddb8ab6 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml @@ -1,19 +1,20 @@ --- -- debug: +- name: Start testing ntnx_foundation_central_api_keys_info module + ansible.builtin.debug: msg: start testing ntnx_foundation_central_api_keys_info module - name: Generate random alias for api key - set_fact: - random_alias: 
"{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_alias: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- name: create api key with random alias +- name: Create api key with random alias ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" + alias: "{{random_alias.0}}" register: key ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - key.response is defined - key.failed==false @@ -23,44 +24,44 @@ fail_msg: "fail: Unable to create api key " success_msg: "succes: api key created successfully " -- name: get api key with key_uuid +- name: Get api key with key_uuid ntnx_foundation_central_api_keys_info: - key_uuid: "{{key.response.key_uuid}}" + key_uuid: "{{ key.response.key_uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.alias=="{{random_alias.0}}" fail_msg: "fail: Unable to get api key with key_uuid" success_msg: "succes: get api key with key_uuid " -- name: get api key with alias +- name: Get api key with alias ntnx_foundation_central_api_keys_info: alias: "{{random_alias.0}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.0.key_uuid== key.response.key_uuid fail_msg: "fail: Unable to get api key with alias name" success_msg: "succes: get api key with alias name " -- name: get api key with custom filter +- name: Get api key with custom filter ntnx_foundation_central_api_keys_info: - custom_filter: - created_timestamp: "{{key.response.created_timestamp}}" - alias: "{{key.response.alias}}" + custom_filter: + created_timestamp: "{{ key.response.created_timestamp }}" + alias: "{{ key.response.alias }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: 
that: - result.response is defined - result.failed==false diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/main.yml index 7444f4ef7..6e93699f3 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/main.yml @@ -1,15 +1,17 @@ --- -- module_defaults: +- name: Ntnx_foundation_central_api_keys_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_central_api_keys: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false nutanix.ncp.ntnx_foundation_central_api_keys_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false block: - - import_tasks: "key_info.yml" + - name: Import Tasks + ansible.builtin.import_tasks: key_info.yml diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/aliases b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/aliases +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/meta/main.yml index 2c3cac55f..3d12027e5 100644 --- 
a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_fc_env diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml index cb248215e..a390bd338 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml @@ -1,8 +1,9 @@ -- debug: +--- +- name: Start testing ntnx_foundation_central_imaged_clusters_info module + ansible.builtin.debug: msg: start testing ntnx_foundation_central_imaged_clusters_info module - -- name: get imaged cluster using image_cluster_uuid +- name: Get imaged cluster using image_cluster_uuid ntnx_foundation_central_imaged_clusters_info: filters: archived: false @@ -10,7 +11,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - clusters.changed==true - clusters.failed==false @@ -18,15 +19,14 @@ fail_msg: "fail: unable to get all imaged,archived cluster " success_msg: "succes: get all imaged,archived cluster sucessfuly " - -- name: get imaged cluster using image_cluster_uuid +- name: Get imaged cluster using image_cluster_uuid ntnx_foundation_central_imaged_clusters_info: imaged_cluster_uuid: "{{clusters.response.imaged_clusters.0.imaged_cluster_uuid}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed==true - result.failed==false @@ -35,7 +35,7 @@ fail_msg: "fail: unable to get imaged cluster using image_cluster_uuid " success_msg: "succes: get imaged cluster using image_cluster_uuid sucessfuly " -- name: get imaged cluster using custom filter +- name: Get imaged cluster 
using custom filter ntnx_foundation_central_imaged_clusters_info: custom_filter: destroyed: true @@ -43,7 +43,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed==true - result.failed==false @@ -51,9 +51,8 @@ fail_msg: "fail: unable to get imaged cluster using custom filter " success_msg: "succes: get imaged cluster using custom filter sucessfully" - # still offset and length -# - debug: +# - ansible.builtin.debug: # var: clusters.response # - name: get imaged cluster using image_cluster_uuid @@ -63,5 +62,5 @@ # length: 20 # register: result # ignore_errors: true -# - debug: +# - ansible.builtin.debug: # var: result.response.imaged_clusters|length diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/main.yml index 0a642e6ba..5290deadd 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_foundation_central_imaged_clusters_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_central_imaged_clusters_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "get_cluster_info.yml" + - name: Import Tasks + ansible.builtin.import_tasks: get_cluster_info.yml diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/aliases b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/aliases 
+++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/meta/main.yml index 2c3cac55f..3d12027e5 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_fc_env diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml index 7fc30a286..8eca5c618 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml @@ -1,13 +1,15 @@ -- debug: +--- +- name: Start testing ntnx_foundation_central_imaged_nodes_info module + ansible.builtin.debug: msg: start testing ntnx_foundation_central_imaged_nodes_info module -- name: get all imaged nodes +- name: Get all imaged nodes ntnx_foundation_central_imaged_nodes_info: register: nodes ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - nodes.changed==true - nodes.failed==false @@ -17,14 +19,14 @@ fail_msg: "fail: unable to get all imaged nodes " success_msg: "succes: get all imaged nodes sucessfully " -- name: get node by uuid +- name: Get node by uuid ntnx_foundation_central_imaged_nodes_info: imaged_node_uuid: "{{nodes.response.imaged_nodes.0.imaged_node_uuid}}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed==true - result.failed==false @@ -33,7 +35,7 @@ fail_msg: "fail: unable to get node by uuid" success_msg: "succes: get node by 
uuid successfully " -- name: get imaged node using custom filter +- name: Get imaged node using custom filter ntnx_foundation_central_imaged_nodes_info: custom_filter: node_serial: "{{nodes.response.imaged_nodes.0.node_serial}}" @@ -41,7 +43,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed==true - result.failed==false diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/main.yml index 552adf413..6778d7370 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_foundation_central_imaged_nodes_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_central_imaged_nodes_info: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "get_node_info.yml" + - name: Import Tasks + ansible.builtin.import_tasks: get_node_info.yml diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/aliases b/tests/integration/targets/ntnx_foundation_discover_nodes_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_discover_nodes_info/aliases +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_discover_nodes_info/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- 
a/tests/integration/targets/ntnx_foundation_discover_nodes_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml index 973b93d41..a7785bf82 100644 --- a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml @@ -1,5 +1,6 @@ --- -- debug: +- name: Start testing ntnx_foundation_discover_nodes_info + ansible.builtin.debug: msg: start testing ntnx_foundation_discover_nodes_info - name: Discover nodes @@ -7,7 +8,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.blocks is defined - result.failed==false @@ -23,7 +24,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.blocks is defined - result.failed==false @@ -38,7 +39,7 @@ # register: result # - name: Creation Status -# assert: +# ansible.builtin.assert: # that: # - result.blocks is defined # - result.failed==false diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/main.yml index ef590b9a7..6397d7a65 100644 --- a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_discover_nodes_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_discover_nodes_info: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "discover_nodes.yml" + - name: Import Tasks + 
ansible.builtin.import_tasks: discover_nodes.yml diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/aliases b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/aliases +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml index 500b09e7c..149095a09 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml @@ -1,13 +1,14 @@ --- -- debug: +- name: Start testing ntnx_foundation_hypervisor_images_info + ansible.builtin.debug: msg: start testing ntnx_foundation_hypervisor_images_info -- name: get hypervisor_images_info from foundation +- name: Get hypervisor_images_info from foundation ntnx_foundation_hypervisor_images_info: register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.hypervisor_images | length > 0 - result.hypervisor_images is defined diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/main.yml index 
87fb5fbb1..25578d66d 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_hypervisor_images_info integration tests + module_defaults: nutanix.ncp.ntnx_foundation_hypervisor_images_info: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "get_hypervisors.yml" + - name: Import Tasks + ansible.builtin.import_tasks: get_hypervisors.yml diff --git a/tests/integration/targets/ntnx_foundation_image_upload/aliases b/tests/integration/targets/ntnx_foundation_image_upload/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/aliases +++ b/tests/integration/targets/ntnx_foundation_image_upload/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_image_upload/meta/main.yml b/tests/integration/targets/ntnx_foundation_image_upload/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/main.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/main.yml index ce36d3623..025338542 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/main.yml @@ -1,7 +1,10 @@ --- -- module_defaults: +- name: Ntnx_foundation_image_upload integration tests + module_defaults: nutanix.ncp.ntnx_foundation_image_upload: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "upload.yml" - - import_tasks: 
"negative_scenarios.yml" + - name: Import Tasks + ansible.builtin.import_tasks: upload.yml + - name: Import Tasks + ansible.builtin.import_tasks: negative_scenarios.yml diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml index 6794b80fd..1ec5a6e9f 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml @@ -1,15 +1,16 @@ +--- - name: Image upload with wrong installer_type ntnx_foundation_image_upload: state: present source: "{{ source }}" - filename: "integration-test-ntnx-package.tar.gz" + filename: integration-test-ntnx-package.tar.gz installer_type: wrong installler type timeout: 3600 register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed==true - result.changed==false diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml index 3cc90a7f5..bfeea00e7 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml @@ -1,9 +1,11 @@ --- -- debug: +- name: Start testing ntnx_foundation_image_upload + ansible.builtin.debug: msg: start testing ntnx_foundation_image_upload - name: Download image for test - get_url: + ansible.builtin.get_url: + mode: "0644" url: "{{ image_url }}" dest: "{{ source }}" @@ -11,14 +13,14 @@ ntnx_foundation_image_upload: state: present source: "{{ source }}" - filename: "integration-test-ntnx-package.tar.gz" + filename: integration-test-ntnx-package.tar.gz installer_type: nos timeout: 1800 register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - 
result.failed==false @@ -29,13 +31,13 @@ - name: Delete Image with nos installer_type ntnx_foundation_image_upload: state: absent - filename: "integration-test-ntnx-package.tar.gz" + filename: integration-test-ntnx-package.tar.gz installer_type: nos register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response.status_code == 200 - result.failed==false diff --git a/tests/integration/targets/ntnx_foundation_node_network_info/aliases b/tests/integration/targets/ntnx_foundation_node_network_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/aliases +++ b/tests/integration/targets/ntnx_foundation_node_network_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_node_network_info/meta/main.yml b/tests/integration/targets/ntnx_foundation_node_network_info/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_node_network_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml index b9faec447..cac2b1943 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml +++ b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml @@ -1,5 +1,6 @@ --- -- debug: +- name: Start testing ntnx_foundation_node_network_info + ansible.builtin.debug: msg: start testing ntnx_foundation_node_network_info - name: Discover nodes @@ -14,7 +15,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.nodes is defined - result.failed==false diff --git 
a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/main.yml b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/main.yml index 12815744d..9cc532580 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_node_network_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "get_info.yml" + - name: Import Tasks + ansible.builtin.import_tasks: get_info.yml diff --git a/tests/integration/targets/ntnx_foundation_sanity/aliases b/tests/integration/targets/ntnx_foundation_sanity/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/aliases +++ b/tests/integration/targets/ntnx_foundation_sanity/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml b/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml index 8d7d13401..998ccf2b3 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml +++ b/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml index 46056d0da..a766160bf 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml @@ -1,214 +1,203 @@ --- - - debug: - msg: start testing ntnx_foundation test for bare metal imaging and cluster creation +- name: Start testing ntnx_foundation test for bare metal imaging and cluster creation + ansible.builtin.debug: + msg: start 
testing ntnx_foundation test for bare metal imaging and cluster creation +- name: Get aos_packages_info from foundation + ntnx_foundation_aos_packages_info: + register: images - - name: get aos_packages_info from foundation - ntnx_foundation_aos_packages_info: - register: images +- name: Create spec for imaging and creating cluster out of bare metal nodes + check_mode: true + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{ cvm_gateway }}" + cvm_netmask: "{{ cvm_netmask }}" + hypervisor_gateway: "{{ hypervisor_gateway }}" + hypervisor_netmask: "{{ hypervisor_netmask }}" + default_ipmi_user: "{{ default_ipmi_user }}" + current_cvm_vlan_tag: "{{ nodes.current_cvm_vlan_tag }}" + nos_package: "{{ images.aos_packages[0] }}" + blocks: + - block_id: "{{ nodes.block_id }}" + nodes: + - manual_mode: + cvm_ip: "{{ nodes.node1.cvm_ip }}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{ nodes.node1.hypervisor_hostname }}" + ipmi_netmask: "{{ nodes.node1.ipmi_netmask }}" + ipmi_gateway: "{{ nodes.node1.ipmi_gateway }}" + ipmi_ip: "{{ nodes.node1.ipmi_ip }}" + ipmi_password: "{{ nodes.node1.ipmi_password }}" + hypervisor: "{{ nodes.node1.hypervisor }}" + hypervisor_ip: "{{ nodes.node1.hypervisor_ip }}" + node_position: "{{ nodes.node1.node_position }}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{ nodes.node1.cvm_ip }}" + name: test-cluster + timezone: Asia/Calcutta + cvm_ntp_servers: + - "{{ nodes.ntp_servers[0] }}" + - "{{ nodes.ntp_servers[1] }}" + cvm_dns_servers: + - "{{ nodes.dns_servers[0] }}" + - "{{ nodes.dns_servers[1] }}" + hypervisor_ntp_servers: + - "{{ nodes.ntp_servers[0] }}" + - "{{ nodes.ntp_servers[1] }}" + enable_ns: true + backplane_vlan: "{{ nodes.backplane_vlan }}" + backplane_subnet: "{{ nodes.backplane_subnet }}" + backplane_netmask: "{{ nodes.backplane_netmask }}" + register: spec + ignore_errors: true - - name: Create spec for imaging and creating cluster out of bare metal nodes - check_mode: yes - ntnx_foundation: - timeout: 4500 - 
cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" +- name: Set expected spec + ansible.builtin.set_fact: + expected_spec: blocks: - - block_id: "{{nodes.block_id}}" + - block_id: "{{ nodes.block_id }}" nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" + - cvm_gb_ram: 50 + cvm_ip: "{{ nodes.node1.cvm_ip }}" + hypervisor: "{{ nodes.node1.hypervisor }}" + hypervisor_hostname: "{{ nodes.node1.hypervisor_hostname }}" + hypervisor_ip: "{{ nodes.node1.hypervisor_ip }}" + image_now: true + ipmi_gateway: "{{ nodes.node1.ipmi_gateway }}" + ipmi_ip: "{{ nodes.node1.ipmi_ip }}" + ipmi_netmask: "{{ nodes.node1.ipmi_netmask }}" + ipmi_password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + node_position: "{{ nodes.node1.node_position }}" clusters: - - redundancy_factor: 1 - cluster_members: - - "{{nodes.node1.cvm_ip}}" - name: "test-cluster" - timezone: "Asia/Calcutta" - cvm_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - cvm_dns_servers: - - "{{nodes.dns_servers[0]}}" - - "{{nodes.dns_servers[1]}}" - hypervisor_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" + - backplane_netmask: "{{ nodes.backplane_netmask }}" + backplane_subnet: "{{ nodes.backplane_subnet }}" + backplane_vlan: "{{ nodes.backplane_vlan }}" + cluster_external_ip: + cluster_init_now: true + cluster_members: ["{{ 
nodes.node1.cvm_ip }}"] + cluster_name: test-cluster + cvm_dns_servers: "{{ nodes.dns_servers[0] }},{{ nodes.dns_servers[1] }}" + cvm_ntp_servers: "{{ nodes.ntp_servers[0] }},{{ nodes.ntp_servers[1] }}" enable_ns: true - backplane_vlan: "{{nodes.backplane_vlan}}" - backplane_subnet: "{{nodes.backplane_subnet}}" - backplane_netmask: "{{nodes.backplane_netmask}}" - register: spec - ignore_errors: True + hypervisor_ntp_servers: "{{ nodes.ntp_servers[0] }},{{ nodes.ntp_servers[1] }}" + redundancy_factor: 1 + single_node_cluster: true + timezone: Asia/Calcutta + current_cvm_vlan_tag: "{{ nodes.current_cvm_vlan_tag }}" + cvm_gateway: "{{ cvm_gateway }}" + cvm_netmask: "{{ cvm_netmask }}" + hypervisor_gateway: "{{ hypervisor_gateway }}" + hypervisor_iso: {} + hypervisor_netmask: "{{ hypervisor_netmask }}" + ipmi_user: "{{ default_ipmi_user }}" + nos_package: "{{ images.aos_packages[0] }}" - - set_fact: - expected_spec: { - "blocks": [ - { - "block_id": "{{nodes.block_id}}", - "nodes": [ - { - "cvm_gb_ram": 50, - "cvm_ip": "{{nodes.node1.cvm_ip}}", - "hypervisor": "{{nodes.node1.hypervisor}}", - "hypervisor_hostname": "{{nodes.node1.hypervisor_hostname}}", - "hypervisor_ip": "{{nodes.node1.hypervisor_ip}}", - "image_now": true, - "ipmi_gateway": "{{nodes.node1.ipmi_gateway}}", - "ipmi_ip": "{{nodes.node1.ipmi_ip}}", - "ipmi_netmask": "{{nodes.node1.ipmi_netmask}}", - "ipmi_password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", - "node_position": "{{nodes.node1.node_position}}" - } - ] - } - ], - "clusters": [ - { - "backplane_netmask": "{{nodes.backplane_netmask}}", - "backplane_subnet": "{{nodes.backplane_subnet}}", - "backplane_vlan": "{{nodes.backplane_vlan}}", - "cluster_external_ip": null, - "cluster_init_now": true, - "cluster_members": [ - "{{nodes.node1.cvm_ip}}" - ], - "cluster_name": "test-cluster", - "cvm_dns_servers": "{{nodes.dns_servers[0]}},{{nodes.dns_servers[1]}}", - "cvm_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", - "enable_ns": true, - 
"hypervisor_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", - "redundancy_factor": 1, - "single_node_cluster": true, - "timezone": "Asia/Calcutta" - } - ], - "current_cvm_vlan_tag": "{{nodes.current_cvm_vlan_tag}}", - "cvm_gateway": "{{cvm_gateway}}", - "cvm_netmask": "{{cvm_netmask}}", - "hypervisor_gateway": "{{hypervisor_gateway}}", - "hypervisor_iso": {}, - "hypervisor_netmask": "{{hypervisor_netmask}}", - "ipmi_user": "{{default_ipmi_user}}", - "nos_package": "{{images.aos_packages[0]}}" - } +- name: Verify spec + ansible.builtin.assert: + that: + - spec.response is defined + - spec.failed==false + - spec.changed==false + - spec.response == expected_spec + fail_msg: " Fail : unable to create spec for imaging nodes" + success_msg: "Succes: spec generated successfully" - - name: Verify spec - assert: - that: - - spec.response is defined - - spec.failed==false - - spec.changed==false - - spec.response == expected_spec - fail_msg: " Fail : unable to create spec for imaging nodes" - success_msg: "Succes: spec generated successfully" +- name: Image nodes without cluster creation + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{ cvm_gateway }}" + cvm_netmask: "{{ cvm_netmask }}" + hypervisor_gateway: "{{ hypervisor_gateway }}" + hypervisor_netmask: "{{ hypervisor_netmask }}" + default_ipmi_user: "{{ default_ipmi_user }}" + current_cvm_vlan_tag: "{{ nodes.current_cvm_vlan_tag }}" + nos_package: "{{ images.aos_packages[0] }}" + blocks: + - block_id: "{{ nodes.block_id }}" + nodes: + - manual_mode: + cvm_ip: "{{ nodes.node1.cvm_ip }}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{ nodes.node1.hypervisor_hostname }}" + ipmi_netmask: "{{ nodes.node1.ipmi_netmask }}" + ipmi_gateway: "{{ nodes.node1.ipmi_gateway }}" + ipmi_ip: "{{ nodes.node1.ipmi_ip }}" + ipmi_password: "{{ nodes.node1.ipmi_password }}" + hypervisor: "{{ nodes.node1.hypervisor }}" + hypervisor_ip: "{{ nodes.node1.hypervisor_ip }}" + node_position: "{{ nodes.node1.node_position }}" + 
bond_lacp_rate: "{{ nodes.node1.bond_lacp_rate }}" + bond_mode: "{{ nodes.node1.bond_mode }}" - - name: Image nodes without cluster creation - ntnx_foundation: - timeout: 4500 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" - blocks: - - block_id: "{{nodes.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" - bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" - bond_mode: "{{nodes.node1.bond_mode}}" - - register: result - no_log: true - ignore_errors: True + register: result + no_log: true + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed==false - - result.changed==true - fail_msg: " Fail : unable to image nodes" - success_msg: "Succes: node imaging done successfully" +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed==false + - result.changed==true + fail_msg: " Fail : unable to image nodes" + success_msg: "Succes: node imaging done successfully" - - name: Image nodes and create cluster out of it - ntnx_foundation: - timeout: 4500 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: 
"{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" - blocks: - - block_id: "{{nodes.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" - bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" - bond_mode: "{{nodes.node1.bond_mode}}" - clusters: - - redundancy_factor: 1 - cluster_members: - - "{{nodes.node1.cvm_ip}}" - name: "test-cluster" - timezone: "Asia/Calcutta" - cvm_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - cvm_dns_servers: - - "{{nodes.dns_servers[0]}}" - - "{{nodes.dns_servers[1]}}" - hypervisor_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - register: result - no_log: true - ignore_errors: True +- name: Image nodes and create cluster out of it + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{ cvm_gateway }}" + cvm_netmask: "{{ cvm_netmask }}" + hypervisor_gateway: "{{ hypervisor_gateway }}" + hypervisor_netmask: "{{ hypervisor_netmask }}" + default_ipmi_user: "{{ default_ipmi_user }}" + current_cvm_vlan_tag: "{{ nodes.current_cvm_vlan_tag }}" + nos_package: "{{ images.aos_packages[0] }}" + blocks: + - block_id: "{{ nodes.block_id }}" + nodes: + - manual_mode: + cvm_ip: "{{ nodes.node1.cvm_ip }}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{ nodes.node1.hypervisor_hostname }}" + ipmi_netmask: "{{ nodes.node1.ipmi_netmask }}" + ipmi_gateway: "{{ nodes.node1.ipmi_gateway }}" + ipmi_ip: "{{ nodes.node1.ipmi_ip }}" + ipmi_password: "{{ nodes.node1.ipmi_password }}" + hypervisor: "{{ nodes.node1.hypervisor }}" + hypervisor_ip: "{{ nodes.node1.hypervisor_ip }}" + 
node_position: "{{ nodes.node1.node_position }}" + bond_lacp_rate: "{{ nodes.node1.bond_lacp_rate }}" + bond_mode: "{{ nodes.node1.bond_mode }}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{ nodes.node1.cvm_ip }}" + name: test-cluster + timezone: Asia/Calcutta + cvm_ntp_servers: + - "{{ nodes.ntp_servers[0] }}" + - "{{ nodes.ntp_servers[1] }}" + cvm_dns_servers: + - "{{ nodes.dns_servers[0] }}" + - "{{ nodes.dns_servers[1] }}" + hypervisor_ntp_servers: + - "{{ nodes.ntp_servers[0] }}" + - "{{ nodes.ntp_servers[1] }}" + register: result + no_log: true + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed==false - - result.changed==true - - result.response.cluster_urls is defined - fail_msg: " Fail : unable to image nodes and create cluster" - success_msg: "Succes: cluster and node imaging done successfully" +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed==false + - result.changed==true + - result.response.cluster_urls is defined + fail_msg: " Fail : unable to image nodes and create cluster" + success_msg: "Succes: cluster and node imaging done successfully" ###################################################### diff --git a/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml index 24fc4e925..1566190e7 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml @@ -1,6 +1,8 @@ --- -- module_defaults: +- name: Ntnx_foundation_sanity integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ foundation_host }}" + nutanix_host: "{{ foundation_host }}" block: - - import_tasks: "image_nodes.yml" + - name: Import Tasks + ansible.builtin.import_tasks: image_nodes.yml diff --git a/tests/integration/targets/ntnx_gpus_v2/aliases 
b/tests/integration/targets/ntnx_gpus_v2/aliases new file mode 100644 index 000000000..87e7bdaae --- /dev/null +++ b/tests/integration/targets/ntnx_gpus_v2/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_gpus_v2/meta/main.yml b/tests/integration/targets/ntnx_gpus_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_gpus_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_gpus_v2/tasks/gpus_operations.yml b/tests/integration/targets/ntnx_gpus_v2/tasks/gpus_operations.yml new file mode 100644 index 000000000..585be3592 --- /dev/null +++ b/tests/integration/targets/ntnx_gpus_v2/tasks/gpus_operations.yml @@ -0,0 +1,388 @@ +--- +- name: Start ntnx_gpus_v2 tests + ansible.builtin.debug: + msg: start ntnx_gpus_v2 tests + +- name: Generate random name for VM + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Generate spec for VM creation with GPU using check mode + ntnx_vms_v2: + name: "vm_test" + cluster: + ext_id: "00062742-82e3-b07b-0000-0000000136bb" + gpus: + - mode: VIRTUAL + device_id: "123" + vendor: "NVIDIA" + check_mode: true + register: result + ignore_errors: true + +- name: Generate spec for VM creation with GPU using check mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cluster.ext_id == "00062742-82e3-b07b-0000-0000000136bb" + - result.response.gpus[0].device_id == 123 + - result.response.gpus[0].vendor == "NVIDIA" + - result.response.gpus[0].mode == "VIRTUAL" + - result.response.gpus[0].ext_id is defined + fail_msg: "Generate spec for VM creation with GPU using check mode failed " + success_msg: "Generate 
spec for VM creation with GPU using check mode passed " + +################################################################################## + +- name: Create VM with GPU attached + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + gpus: + - mode: "{{ gpus[0].gpu_device_mode }}" + device_id: "{{ gpus[0].gpu_device_id }}" + vendor: "{{ gpus[0].gpu_device_vendor }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.gpus[0].device_id == {{ gpus[0].gpu_device_id }} + - result.response.gpus[0].vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response.gpus[0].mode == "{{ gpus[0].gpu_device_mode }}" + - result.response.gpus[0].ext_id is defined + fail_msg: "Unable to create VM with GPU attached " + success_msg: "VM with GPU attached is created successfully " + +- name: Set VM and GPU external id + ansible.builtin.set_fact: + vm_ext_id: '{{ result["ext_id"] }}' + gpu_ext_id: '{{ result["response"]["gpus"][0]["ext_id"] }}' + +# here we are powering on the VM to check the full GPU details +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM info + ntnx_vms_info_v2: + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Get VM info Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false 
+ - result.response.power_state == 'ON' + - result.response.ext_id == "{{ vm_ext_id }}" + - result.response.gpus[0].device_id == {{ gpus[0].gpu_device_id }} + - result.response.gpus[0].vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response.gpus[0].mode == "{{ gpus[0].gpu_device_mode }}" + - result.response.gpus[0].ext_id == "{{ gpu_ext_id }}" + - result.response.gpus[0].name is defined + - result.response.gpus[0].pci_address is defined + fail_msg: "Unable to get VM info " + success_msg: "VM info is fetched successfully " + +# here we are powering off the VM to detach the GPU +- name: Power off VM + ntnx_vms_power_actions_v2: + state: power_off + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Power off Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power off VM " + success_msg: "VM is powered off successfully " + +- name: Sleep for 15 seconds until VM is powered off + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +################################################################################## + +- name: Detach GPU from VM + ntnx_gpus_v2: + state: absent + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{ gpu_ext_id }}" + register: result + ignore_errors: true + +- name: Detach GPU from VM status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response == None + - result.vm_ext_id == "{{ vm_ext_id }}" + - result.task_ext_id is defined + fail_msg: 
"Unable to detach GPU from VM " + success_msg: "GPU is detached from VM successfully " + +################################################################################## + +- name: Attach GPU to VM + ntnx_gpus_v2: + state: present + vm_ext_id: "{{ vm_ext_id }}" + mode: "{{ gpus[0].gpu_device_mode }}" + device_id: "{{ gpus[0].gpu_device_id }}" + vendor: "{{ gpus[0].gpu_device_vendor }}" + register: result + ignore_errors: true + +- name: Attach GPU to VM status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response[0].device_id == {{ gpus[0].gpu_device_id }} + - result.response[0].vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response[0].mode == "{{ gpus[0].gpu_device_mode }}" + - result.response[0].ext_id is defined + - result.vm_ext_id == "{{ vm_ext_id }}" + - result.task_ext_id is defined + fail_msg: "Unable to attach GPU to VM " + success_msg: "GPU is attached to VM successfully " + +- name: Set GPU external id + ansible.builtin.set_fact: + gpu_ext_id: '{{ result["response"]["gpus"][0]["ext_id"] }}' + +# here we are powering on the VM to check the full GPU details +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM info + ntnx_vms_info_v2: + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Get VM info Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + - result.response.ext_id == "{{ vm_ext_id 
}}" + - result.response.gpus[0].device_id == {{ gpus[0].gpu_device_id }} + - result.response.gpus[0].vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response.gpus[0].mode == "{{ gpus[0].gpu_device_mode }}" + - result.response.gpus[0].ext_id == "{{ gpu_ext_id }}" + - result.response.gpus[0].name is defined + - result.response.gpus[0].pci_address is defined + fail_msg: "Unable to get VM info " + success_msg: "VM info is fetched successfully " + +################################################################################## + +- name: List all GPUs attached to the VM + ntnx_gpus_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: List GPUs attached to the VM status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response | length > 0 + - result.response[0].device_id == {{ gpus[0].gpu_device_id }} + - result.response[0].vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response[0].mode == "{{ gpus[0].gpu_device_mode }}" + - result.response[0].ext_id is defined + fail_msg: "Unable to list GPUs attached to the VM " + success_msg: "List GPUs attached to the VM is successful " + +################################################################################## + +- name: List GPUs attached to the VM with limit + ntnx_gpus_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + limit: 1 + register: result + ignore_errors: true + +- name: List GPUs attached to the VM with limit status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: "Unable to list GPUs attached to the VM with limit " + success_msg: "List GPUs attached to the VM with limit is successful " + +################################################################################## + +- name: Fetch GPU details using ext_id + ntnx_gpus_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{ gpu_ext_id }}" + register: result + ignore_errors: 
true + +- name: Fetch GPU details using ext_id status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.device_id == {{ gpus[0].gpu_device_id }} + - result.response.vendor == "{{ gpus[0].gpu_device_vendor }}" + - result.response.mode == "{{ gpus[0].gpu_device_mode }}" + - result.response.ext_id == "{{ gpu_ext_id }}" + fail_msg: "Unable to fetch GPU details using ext_id " + success_msg: "Fetch GPU details using ext_id is successful " + +################################################################################## + +# here we are powering off the VM to detach the GPU +- name: Power off VM + ntnx_vms_power_actions_v2: + state: power_off + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Power off Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power off VM " + success_msg: "VM is powered off successfully " + +- name: Sleep for 15 seconds until VM is powered off + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +- name: Detach GPU from VM + ntnx_gpus_v2: + state: absent + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{ gpu_ext_id }}" + register: result + ignore_errors: true + +- name: Detach GPU from VM status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response == None + - result.vm_ext_id == "{{ vm_ext_id }}" + - result.task_ext_id is defined + fail_msg: "Unable to detach GPU from VM " + 
success_msg: "GPU is detached from VM successfully " + +################################################################################## + +- name: Delete VM + ntnx_vms_v2: + state: absent + ext_id: "{{ vm_ext_id }}" + register: result + +- name: Delete VM status + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_gpus_v2/tasks/main.yml b/tests/integration/targets/ntnx_gpus_v2/tasks/main.yml new file mode 100644 index 000000000..81a315292 --- /dev/null +++ b/tests/integration/targets/ntnx_gpus_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import gpus_operations.yml + ansible.builtin.import_tasks: "gpus_operations.yml" diff --git a/tests/integration/targets/ntnx_hosts_info/tasks/get_hosts_info.yml b/tests/integration/targets/ntnx_hosts_info/tasks/get_hosts_info.yml index cb84471e2..688a8c161 100644 --- a/tests/integration/targets/ntnx_hosts_info/tasks/get_hosts_info.yml +++ b/tests/integration/targets/ntnx_hosts_info/tasks/get_hosts_info.yml @@ -1,36 +1,37 @@ --- -- debug: +- name: Start testing ntnx_hosts_info + ansible.builtin.debug: msg: Start testing ntnx_hosts_info -- name: test getting all hosts +- name: Test getting all hosts ntnx_hosts_info: - register: hosts - ignore_errors: True + register: hosts_info + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - - hosts.response is defined - - hosts.failed == false - - hosts.changed == false - - hosts.response.entities[0].metadata.uuid is defined + - hosts_info.response is defined + - 
hosts_info.failed == false + - hosts_info.changed == false + - hosts_info.response.entities[0].metadata.uuid is defined fail_msg: "Unable to list all hosts" success_msg: "hosts listed successfully" -- name: test getting particular host using uuid +- name: Test getting particular host using uuid ntnx_hosts_info: - host_uuid: '{{ hosts.response.entities[0].metadata.uuid }}' + host_uuid: '{{ hosts_info.response.entities[0].metadata.uuid }}' register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response.status.state == "COMPLETE" - - result.response.metadata.uuid == "{{ hosts.response.entities[0].metadata.uuid }}" + - result.response.metadata.uuid == "{{ hosts_info.response.entities[0].metadata.uuid }}" fail_msg: "Unable to get particular host" success_msg: "host info obtained successfully" @@ -42,10 +43,10 @@ sort_order: "ASCENDING" sort_attribute: "name" register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false diff --git a/tests/integration/targets/ntnx_hosts_info/tasks/main.yml b/tests/integration/targets/ntnx_hosts_info/tasks/main.yml index fccc9eeda..d8e77cc99 100644 --- a/tests/integration/targets/ntnx_hosts_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_hosts_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "get_hosts_info.yml" + - name: Import 
get_hosts_info.yml + ansible.builtin.import_tasks: "get_hosts_info.yml" diff --git a/tests/integration/targets/ntnx_image_placement_policies_info/tasks/info.yml b/tests/integration/targets/ntnx_image_placement_policies_info/tasks/info.yml index 8515e38ae..94a3f2a4a 100644 --- a/tests/integration/targets/ntnx_image_placement_policies_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_image_placement_policies_info/tasks/info.yml @@ -1,8 +1,9 @@ --- -- debug: - msg: "start ntnx_image_placement_policies_info tests" +- name: Start ntnx_image_placement_policies_info tests + ansible.builtin.debug: + msg: "Start ntnx_image_placement_policies_info tests" -- name: create image placement policy +- name: Create image placement policy ntnx_image_placement_policy: placement_type: soft name: "test_policy_1" @@ -14,7 +15,7 @@ - Default register: policy_1 -- name: create image placement policy +- name: Create image placement policy ntnx_image_placement_policy: placement_type: soft name: "test_policy_2" @@ -26,19 +27,21 @@ - Default register: policy_2 -- set_fact: +- name: Adding policies to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ policy_1["response"]["metadata"]["uuid"] ] }}' -- set_fact: +- name: Adding policies to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ policy_2["response"]["metadata"]["uuid"] ] }}' ################################################################### -- name: test getting all image placement policies +- name: Test getting all image placement policies ntnx_image_placement_policies_info: register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -47,7 +50,7 @@ success_msg: "Image placement policies listed successfully" ################################################################ -- name: test getting particular image placement policy using filter +- name: Test getting particular 
image placement policy using filter ntnx_image_placement_policies_info: filter: name: "{{ policy_1.response.status.name }}" @@ -55,8 +58,8 @@ offset: 0 register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.response.entities[0].status.name == '{{ policy_1.response.status.name }}' @@ -65,13 +68,13 @@ ################################################################ -- name: test getting particular image placement policy using uuid +- name: Test getting particular image placement policy using uuid ntnx_image_placement_policies_info: policy_uuid: "{{ policy_2.response.metadata.uuid }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.response.status.name == '{{ policy_2.response.status.name }}' @@ -87,21 +90,22 @@ register: result - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: "Unable to list policies as per names" success_msg: "Image placement policy listed successfully" -##################################################CLEANUP####################################################### +################################################## CLEANUP ####################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_image_placement_policy: state: absent policy_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_image_placement_policies_info/tasks/main.yml b/tests/integration/targets/ntnx_image_placement_policies_info/tasks/main.yml index 3364b30c6..a37bd6bd3 100644 --- a/tests/integration/targets/ntnx_image_placement_policies_info/tasks/main.yml +++ 
b/tests/integration/targets/ntnx_image_placement_policies_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/create.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/create.yml index 5489af62e..b59d6edc1 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/tasks/create.yml +++ b/tests/integration/targets/ntnx_image_placement_policy/tasks/create.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start ntnx_image_placement_policy create tests +- name: Start ntnx_image_placement_policy create tests + ansible.builtin.debug: + msg: Start ntnx_image_placement_policy create tests - name: Create image placement policy with minimal spec ntnx_image_placement_policy: @@ -15,7 +16,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -27,7 +28,8 @@ fail_msg: "Unable to create image placement policy with minimal spec" success_msg: "Image placement policy with minimal spec created successfully" -- set_fact: +- name: Add policy to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ################################################################################################################ @@ -52,7 +54,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -61,13 
+63,14 @@ fail_msg: "Unable to create image placement policy with all specifications" success_msg: "Image placement policy with all specifications created successfully" -- set_fact: +- name: Add policy to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ################################################################################################################ - name: Create image placement policy in check mode - check_mode: yes + check_mode: true ntnx_image_placement_policy: name: "test_policy_3" desc: "test_policy_3_desc" @@ -82,7 +85,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -105,7 +108,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -113,9 +116,9 @@ fail_msg: "Image placement policy didn't failed with incorrect category" success_msg: "Image placement policy with incorrect category errored out successfully" -##################################################CLEANUP####################################################### +################################################## CLEANUP ####################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_image_placement_policy: state: absent policy_uuid: "{{ item }}" @@ -123,5 +126,6 @@ loop: "{{ todelete }}" ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/delete.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/delete.yml index 2d39baf74..c3dffe06f 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/tasks/delete.yml +++ b/tests/integration/targets/ntnx_image_placement_policy/tasks/delete.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start 
ntnx_image_placement_policy delete tests +- name: Start ntnx_image_placement_policy delete tests + ansible.builtin.debug: + msg: Start ntnx_image_placement_policy delete tests - name: Create image placement policy with minimal spec for delete tests ntnx_image_placement_policy: @@ -15,7 +16,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -30,7 +31,7 @@ register: result - name: Delete Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -48,7 +49,7 @@ ignore_errors: true - name: Delete Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/main.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/main.yml index a2c7a07b0..e4554ddc7 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/tasks/main.yml +++ b/tests/integration/targets/ntnx_image_placement_policy/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "update.yml" - - import_tasks: "delete.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" + - name: Import update.yml + ansible.builtin.import_tasks: "update.yml" + - name: Import delete.yml + ansible.builtin.import_tasks: "delete.yml" diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml index 3f0087324..1eefb5182 100644 --- 
a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml +++ b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start ntnx_image_placement_policy update tests +- name: Start ntnx_image_placement_policy update tests + ansible.builtin.debug: + msg: Start ntnx_image_placement_policy update tests - name: Create image placement policy with minimal spec for update tests ntnx_image_placement_policy: @@ -20,7 +21,7 @@ register: setup_policy - name: Creation Status - assert: + ansible.builtin.assert: that: - setup_policy.response is defined - setup_policy.changed == true @@ -28,12 +29,13 @@ fail_msg: "Unable to create image placement policy with minimal spec" success_msg: "Image placement policy with minimal spec created successfully" -- set_fact: +- name: Adding policy to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ setup_policy["response"]["metadata"]["uuid"] ] }}' ############################################################################################# -- name: test idempotency by definig same spec as before +- name: Test idempotency by definig same spec as before ntnx_image_placement_policy: state: present policy_uuid: "{{ setup_policy.response.metadata.uuid }}" @@ -52,7 +54,7 @@ register: result - name: Update Status - assert: + ansible.builtin.assert: that: - result.changed == false - "'Nothing to change' in result.msg" @@ -61,7 +63,7 @@ ##################################################################################################### -- name: update all specs +- name: Update all specs ntnx_image_placement_policy: state: present policy_uuid: "{{ setup_policy.response.metadata.uuid }}" @@ -82,7 +84,7 @@ register: result - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -99,7 +101,7 @@ ##################################################################################################### -- name: 
remove attached categories to the policy +- name: Remove attached categories to the policy ntnx_image_placement_policy: state: present policy_uuid: "{{ setup_policy.response.metadata.uuid }}" @@ -107,7 +109,7 @@ register: result - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -116,15 +118,16 @@ fail_msg: "Unable to remove categories from image placement policy" success_msg: "Image placement policy updated successfully" -##################################################CLEANUP####################################################### +################################################## CLEANUP ####################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_image_placement_policy: state: absent policy_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: false -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_image_placement_policy_v2/aliases b/tests/integration/targets/ntnx_image_placement_policy_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_image_placement_policy_v2/meta/main.yml b/tests/integration/targets/ntnx_image_placement_policy_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_image_placement_policy_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/image_policy_operations.yml b/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/image_policy_operations.yml new file mode 100644 index 000000000..9460b29d9 --- /dev/null +++ b/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/image_policy_operations.yml @@ -0,0 +1,554 @@ +--- +- name: Start testing 
ntnx_image_placement_policies_v2 + ansible.builtin.debug: + msg: Start testing ntnx_image_placement_policies_v2 + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set VM and images names + ansible.builtin.set_fact: + image_name: "{{ random_name }}_image_test" + +- name: List all categories and choose the first one + ntnx_categories_info_v2: + register: result + ignore_errors: true + +- name: Status of all categories + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: "Unable to list categories " + success_msg: categories listed successfully + +- name: Set variables + ansible.builtin.set_fact: + category_ext_id: '{{ result["response"][0]["ext_id"] }}' + category_ext_id1: '{{ result["response"][1]["ext_id"] }}' + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +################################################################################## + +- name: Create an image placement policy - check mode is enabled + ntnx_image_placement_policies_v2: + state: present + name: "{{ image_name }}1" + description: "{{ image_name }}1_description" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id }}" + enforcement_state: ACTIVE + wait: true + register: result + ignore_errors: true + check_mode: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - 
result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "ACTIVE" + - result.response.name == '{{ image_name }}1' + - result.response.description == "{{ image_name }}1_description" + fail_msg: Create image replacement policy check mode failed + success_msg: Create image replacement policy check mode passed + +########################################################################################### + +- name: Create an image placement policy with minimal spec + ntnx_image_placement_policies_v2: + state: present + name: "{{ image_name }}2" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id }}" + wait: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "ACTIVE" + - result.response.name == '{{ image_name }}2' + fail_msg: Create image replacement policy with minimal spec failed + success_msg: Create image replacement policy with minimal spec passed + +- name: Add to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["response"]["ext_id"]] }}' + +########################################################################################### + +- name: Create an image placement policy with all possible attributes + ntnx_image_placement_policies_v2: + 
state: present + name: "{{ image_name }}3" + description: "{{ image_name }}3_description" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id }}" + enforcement_state: ACTIVE + wait: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "ACTIVE" + - result.response.name == "{{ image_name }}3" + - result.response.description == "{{ image_name }}3_description" + fail_msg: Create image replacement policy with all possible attributes failed + success_msg: Create image replacement policy with all possible attributes passed + +- name: Add to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["response"]["ext_id"]] }}' + +########################################################################################### + +- name: Create an image placement policy with suspended enforcement state + ntnx_image_placement_policies_v2: + state: present + name: "{{ image_name }}4" + description: "{{ image_name }}4_description" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id }}" + enforcement_state: SUSPENDED + wait: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "SUSPENDED" + - result.response.name == "{{ image_name }}4" + - result.response.description == "{{ image_name }}4_description" + fail_msg: Create image replacement policy with suspended enforcement state failed + success_msg: Create image replacement policy with suspended enforcement state passed + +- name: Add to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["response"]["ext_id"]] }}' + +########################################################################################## + +- name: Update an existing image placement policy - check mode is enabled + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[1] }}" + name: "{{ image_name }}5" + description: "{{ image_name }}5_description" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id }}" + enforcement_state: ACTIVE + wait: true + register: result + ignore_errors: true + check_mode: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - 
result.response.enforcement_state == "ACTIVE" + - result.response.name == "{{ image_name }}5" + - result.response.description == "{{ image_name }}5_description" + fail_msg: Update image replacement policy check mode failed + success_msg: Update image replacement policy check mode passed + +########################################################################################## + +- name: Update an existing image placement policy + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[1] }}" + name: "{{ image_name }}5" + description: "{{ image_name }}5_description" + placement_type: SOFT + image_entity_filter: + type: CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id1 }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id1 }}" + wait: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id1 }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id1 }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "ACTIVE" + - result.response.name == "{{ image_name }}5" + - result.response.description == "{{ image_name }}5_description" + fail_msg: Update image replacement policy failed + success_msg: Update image replacement policy passed + +# ########################################################################################### + +- name: Check for idempotency by updating the same policy + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[1] }}" + name: "{{ image_name }}5" + description: "{{ image_name }}5_description" + placement_type: SOFT + image_entity_filter: + type: 
CATEGORIES_MATCH_ALL + category_ext_ids: + - "{{ category_ext_id1 }}" + cluster_entity_filter: + type: CATEGORIES_MATCH_ANY + category_ext_ids: + - "{{ category_ext_id1 }}" + wait: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." + - result.ext_id == "{{ todelete[1] }}" + fail_msg: Check for idempotency by updating the same policy failed + success_msg: Check for idempotency by updating the same policy passed + +# ########################################################################################### + +- name: Update enforcement state in an existing policy + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[1] }}" + enforcement_state: SUSPENDED + wait: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.enforcement_state == "SUSPENDED" + fail_msg: Update enforcement state in an existing policy failed + success_msg: Update enforcement state in an existing policy passed + +########################################################################################### + +- name: Check for idempotency by updating enforcement state to the same value + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[1] }}" + enforcement_state: SUSPENDED + wait: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ - result.ext_id == "{{ todelete[1] }}" + fail_msg: Check for idempotency by updating enforcement state to the same value failed + success_msg: Check for idempotency by updating enforcement state to the same value passed + +########################################################################################### + +- name: Suspend an existing policy - check mode is enabled + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[0] }}" + enforcement_state: SUSPENDED + wait: true + register: result + ignore_errors: true + check_mode: true + +- name: Suspend Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "SUSPENDED" + fail_msg: Suspend an existing policy check mode failed + success_msg: Suspend an existing policy check mode passed + +# ########################################################################################### + +- name: Suspend an existing policy + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[0] }}" + enforcement_state: SUSPENDED + wait: true + register: result + ignore_errors: true + +- name: Suspend Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - 
result.response.enforcement_state == "SUSPENDED" + + fail_msg: Suspend an existing policy failed + success_msg: Suspend an existing policy passed + +########################################################################################### + +- name: Resume an existing policy + + ntnx_image_placement_policies_v2: + state: present + ext_id: "{{ todelete[0] }}" + enforcement_state: ACTIVE + wait: true + register: result + ignore_errors: true + +- name: Resume Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.cluster_entity_filter.type == "CATEGORIES_MATCH_ANY" + - result.response.image_entity_filter.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.image_entity_filter.type == "CATEGORIES_MATCH_ALL" + - result.response.enforcement_state == "ACTIVE" + fail_msg: Resume an existing policy failed + success_msg: Resume an existing policy passed + +########################################################################################### + +- name: Resume policy that does not exist + ntnx_image_placement_policies_v2: + state: present + ext_id: 12345678-1234-1234-1234-123456789123 + enforcement_state: ACTIVE + wait: true + register: result + ignore_errors: true + +- name: Resume Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + - result.status == 404 + - result.changed == false + fail_msg: Resume policy that does not exist failed + success_msg: Resume policy that does not exist passed + +############################################################################### + +- name: Get an existing image placement policy + ntnx_image_placement_policies_info_v2: + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + +- name: Get Status + ansible.builtin.assert: + that: + - result.response is defined + - 
result.changed == false + - result.failed == false + - result.response.ext_id == "{{ todelete[0] }}" + fail_msg: Get an existing image placement policy failed + success_msg: Get an existing image placement policy passed + +################################################################################ + +- name: Retrieve all policies + ntnx_image_placement_policies_info_v2: + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + fail_msg: Retrieve all policies failed + success_msg: Retrieve all policies passed + +################################################################################ + +- name: Retrieve all policies and filter for name + ntnx_image_placement_policies_info_v2: + filter: name eq '{{ image_name }}5' + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].name == "{{ image_name }}5" + fail_msg: Retrieve all policies and filter for name failed + success_msg: Retrieve all policies and filter for name passed + +################################################################################ + +- name: Retrieve all policies with limit 1 + ntnx_image_placement_policies_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: Retrieve all policies with limit 1 failed + success_msg: Retrieve all policies with limit 1 passed + +################################################################################ + +- name: Retrieve all policies with limit 2 + ntnx_image_placement_policies_info_v2: + limit: 2 + register: result + ignore_errors: true 
+ +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 2 + fail_msg: Retrieve all policies with limit 2 failed + success_msg: Retrieve all policies with limit 2 passed + +################################################################################ + +- name: Delete policies + ntnx_image_placement_policies_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: Delete policies failed + success_msg: Delete policies passed + loop: "{{ result.results }}" diff --git a/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/main.yml b/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/main.yml new file mode 100644 index 000000000..9872db806 --- /dev/null +++ b/tests/integration/targets/ntnx_image_placement_policy_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module_defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import image_policy_operations.yml + ansible.builtin.import_tasks: image_policy_operations.yml diff --git a/tests/integration/targets/ntnx_images/tasks/create.yml b/tests/integration/targets/ntnx_images/tasks/create.yml index 207f1bd8b..2e843bfe0 100644 --- a/tests/integration/targets/ntnx_images/tasks/create.yml +++ b/tests/integration/targets/ntnx_images/tasks/create.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start ntnx_image create tests +- name: Start ntnx_image create tests + ansible.builtin.debug: + msg: Start ntnx_image create tests - name: Create image with minimal spec and source uri ntnx_images: @@ 
-12,7 +13,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -22,7 +23,8 @@ fail_msg: "Unable to create image using source_uri" success_msg: "Image with given source_uri created successfully" -- set_fact: +- name: Adding image to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ################################################################################## @@ -51,7 +53,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -65,13 +67,14 @@ fail_msg: "Unable to create image and upload local image with all specification" success_msg: "Image with given local path created successfully" -- set_fact: +- name: Adding image to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ############################################################################################### - name: Verify check mode for ntnx_image - check_mode: yes + check_mode: true ntnx_images: state: present name: integration-test-image @@ -95,7 +98,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -122,10 +125,10 @@ name: integration-test-image-with-uri wait: true register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == True @@ -135,13 +138,14 @@ fail_msg: "Image create didn't failed for invalid source uri" ################################################################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_images: state: absent image_uuid: "{{ item }}" register: result loop: 
"{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_images/tasks/delete.yml b/tests/integration/targets/ntnx_images/tasks/delete.yml index 3c91138e4..d234f30d6 100644 --- a/tests/integration/targets/ntnx_images/tasks/delete.yml +++ b/tests/integration/targets/ntnx_images/tasks/delete.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start ntnx_image delete tests +- name: Start ntnx_images delete tests + ansible.builtin.debug: + msg: Start ntnx_image delete tests - name: Create image with minimal spec and source uri for delete tests ntnx_images: @@ -12,7 +13,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -27,7 +28,7 @@ register: result - name: Delete Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -42,10 +43,10 @@ state: absent image_uuid: abcd register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == True diff --git a/tests/integration/targets/ntnx_images/tasks/main.yml b/tests/integration/targets/ntnx_images/tasks/main.yml index a2c7a07b0..65a6e2952 100644 --- a/tests/integration/targets/ntnx_images/tasks/main.yml +++ b/tests/integration/targets/ntnx_images/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "update.yml" - - import_tasks: "delete.yml" + - name: 
Import create.yml + ansible.builtin.import_tasks: "create.yml" + - name: Import update.yml + ansible.builtin.import_tasks: "update.yml" + - name: Import delete.yml + ansible.builtin.import_tasks: "delete.yml" diff --git a/tests/integration/targets/ntnx_images/tasks/update.yml b/tests/integration/targets/ntnx_images/tasks/update.yml index 80f4353d3..04f8aeb69 100644 --- a/tests/integration/targets/ntnx_images/tasks/update.yml +++ b/tests/integration/targets/ntnx_images/tasks/update.yml @@ -1,6 +1,7 @@ --- -- debug: - msg: start ntnx_image create tests +- name: Start ntnx_images update tests + ansible.builtin.debug: + msg: Start ntnx_image update tests - name: Create image for update tests ntnx_images: @@ -27,19 +28,20 @@ register: setup_image - name: Creation Status - assert: + ansible.builtin.assert: that: - setup_image.response is defined - setup_image.response.status.state == 'COMPLETE' fail_msg: "Unable to create image using source_uri for update tests" success_msg: "Image with given source_uri created successfully for update tests" -- set_fact: +- name: Adding image to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ setup_image["response"]["metadata"]["uuid"] ] }}' ############################################# UPDATE TESTS ######################################## -- name: check idempotency +- name: Check idempotency ntnx_images: state: present image_uuid: "{{ setup_image.image_uuid }}" @@ -53,8 +55,8 @@ image_type: "DISK_IMAGE" register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -79,7 +81,7 @@ register: result - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -101,9 +103,8 @@ # remove_categories: true # register: result - # - name: Update Status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - 
result.response.status.state == 'COMPLETE' @@ -113,13 +114,14 @@ ########################################### Cleanup ################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_images: state: absent image_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_images_info/tasks/info.yml b/tests/integration/targets/ntnx_images_info/tasks/info.yml index 382319a75..62c22f55a 100644 --- a/tests/integration/targets/ntnx_images_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_images_info/tasks/info.yml @@ -1,27 +1,28 @@ --- -- debug: - msg: "start ntnx_images_info tests" +- name: Start ntnx_images_info tests + ansible.builtin.debug: + msg: "Start ntnx_images_info tests" -- name: test getting all images +- name: Test getting all images ntnx_images_info: register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined fail_msg: "Unable to list all images" success_msg: "Images listed successfully" ################################################################ -- name: test getting particular image using filter +- name: Test getting particular image using filter ntnx_images_info: filter: name: "{{ disk_image.centos }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.response.entities[0].status.name == '{{ disk_image.centos }}' @@ -30,19 +31,19 @@ ################################################################ -- name: getting image info for test +- name: Getting image info for test ntnx_images_info: filter: name: "{{ disk_image.centos }}" register: result -- name: test getting particular image using uuid +- name: Test getting particular 
image using uuid ntnx_images_info: image_uuid: '{{ result.response.entities[0].metadata.uuid }}' register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.response.status.name == '{{ disk_image.centos }}' diff --git a/tests/integration/targets/ntnx_images_info/tasks/main.yml b/tests/integration/targets/ntnx_images_info/tasks/main.yml index 3364b30c6..74e773fce 100644 --- a/tests/integration/targets/ntnx_images_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_images_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_images_v2/aliases b/tests/integration/targets/ntnx_images_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_images_v2/meta/main.yml b/tests/integration/targets/ntnx_images_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_images_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_images_v2/tasks/images_operations.yml b/tests/integration/targets/ntnx_images_v2/tasks/images_operations.yml new file mode 100644 index 000000000..b6effde3e --- /dev/null +++ b/tests/integration/targets/ntnx_images_v2/tasks/images_operations.yml @@ -0,0 +1,601 @@ +--- +- name: Start testing ntnx_images_v2 + ansible.builtin.debug: + msg: Start testing ntnx_images_v2 + +- name: Generate random name 
+ ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set VM and images names + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + iso_image_name: "{{ random_name }}_iso_image_test" + disk_image_name: "{{ random_name }}_disk_image_test" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create VM to test the images + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 2 + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set vm_uuid and disk_uuid + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + disk_uuid: '{{ result["response"]["disks"][0]["ext_id"] }}' + +################################################################################## + +- name: List all categories and define variables + ntnx_categories_info_v2: + register: result + ignore_errors: true + +- name: Status of all categories + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: "Unable to list categories " + success_msg: categories listed successfully + +- name: Set variables + ansible.builtin.set_fact: + category_ext_id: '{{ result["response"][0]["ext_id"] }}' + category_ext_id1: '{{ result["response"][1]["ext_id"] }}' + +################################################################################## + 
+- name: Create ISO_IMAGE using valid source url - check mode is enabled + ntnx_images_v2: + state: present + type: ISO_IMAGE + name: "{{ iso_image_name }}" + description: image created from integration test + checksum: + sha1: + hex_digest: "{{ iso_image.checksum }}" + source: + url_source: + url: "{{ iso_image.url }}" + should_allow_insecure_url: true + cluster_location_ext_ids: "{{ cluster.uuid }}" + category_ext_ids: "{{ category_ext_id }}" + check_mode: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.checksum.hex_digest == "{{ iso_image.checksum }}" + - result.response.name == "{{ iso_image_name }}" + - result.response.source.url == "{{ iso_image.url }}" + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + fail_msg: "Unable to Create ISO_IMAGE with valid source url with check mode enabled " + success_msg: "ISO_IMAGE with valid source url is created successfully with check mode enabled " + +################################################################################## + +- name: Create DISK_IMAGE using valid source url - check mode is enabled + ntnx_images_v2: + state: present + type: DISK_IMAGE + name: "{{ disk_image_name }}" + description: image created from integration test + checksum: + sha1: + hex_digest: "{{ disk_image.checksum }}" + source: + url_source: + url: "{{ disk_image.url }}" + should_allow_insecure_url: true + cluster_location_ext_ids: "{{ cluster.uuid }}" + category_ext_ids: "{{ category_ext_id }}" + check_mode: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.checksum.hex_digest == "{{ disk_image.checksum }}" + - result.response.name == "{{ disk_image_name }}" + - result.response.source.url == "{{ 
disk_image.url }}" + - result.response.cluster_location_ext_ids[0] == "{{ cluster.uuid }}" + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + fail_msg: "Unable to Create DISK_IMAGE with valid source url with check mode enabled " + success_msg: "DISK_IMAGE with valid source url is created successfully with check mode enabled " + +################################################################################## + +- name: Create ISO_IMAGE using valid source url + ntnx_images_v2: + state: present + type: ISO_IMAGE + name: "{{ iso_image_name }}" + description: image created from integration test + checksum: + sha1: + hex_digest: "{{ iso_image.checksum }}" + source: + url_source: + url: "{{ iso_image.url }}" + should_allow_insecure_url: true + cluster_location_ext_ids: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.checksum.hex_digest == "{{ iso_image.checksum }}" + - result.response.name == "{{ iso_image_name }}" + - result.response.source.url == "{{ iso_image.url }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + - result.response.cluster_location_ext_ids[0] == "{{ cluster.uuid }}" + fail_msg: "Unable to Create ISO_IMAGE with valid source url " + success_msg: "ISO_IMAGE with valid source url is created successfully " + +- name: Add to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +################################################################################## + +- name: Create DISK_IMAGE using valid source url + ntnx_images_v2: + state: present + type: DISK_IMAGE + name: "{{ disk_image_name }}" + description: image created from integration test + checksum: + sha1: + hex_digest: "{{ disk_image.checksum }}" + source: + url_source: + url: "{{ disk_image.url }}" + 
should_allow_insecure_url: true + category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.checksum.hex_digest == "{{ disk_image.checksum }}" + - result.response.name == "{{ disk_image_name }}" + - result.response.source.url == "{{ disk_image.url }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + fail_msg: "Unable to Create DISK_IMAGE with valid source url " + success_msg: "DISK_IMAGE with valid source url is created successfully " + +- name: Add the image to the list of images to delete + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +################################################################################## + +- name: Create ISO_IMAGE using invalid source url + ntnx_images_v2: + state: present + type: ISO_IMAGE + name: "{{ iso_image_name }}" + description: image created from integration test + checksum: + sha1: + hex_digest: "{{ iso_image.checksum }}" + source: + url_source: + url: http://google.com + should_allow_insecure_url: true + category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + - result.response.status == "FAILED" + fail_msg: "Create ISO_IMAGE using invalid source url failed " + success_msg: "Create ISO_IMAGE using invalid source url passed " + +################################################################################# + +- name: Create DISK_IMAGE using valid disk source + ntnx_images_v2: + state: present + type: DISK_IMAGE + name: "{{ disk_image_name }}" + description: image created from integration test + source: + 
vm_disk_source: + ext_id: "{{ disk_uuid }}" + category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ disk_image_name }}" + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + fail_msg: "Unable to Create DISK_IMAGE with valid disk source " + success_msg: "DISK_IMAGE with valid disk source is created successfully " + +- name: Add to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +################################################################################# + +- name: Retrieve an existing ISO image + ntnx_images_info_v2: + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ todelete[0] }}" + - result.response.type == "ISO_IMAGE" + - result.response.name == "{{ iso_image_name }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.cluster_location_ext_ids[0] == "{{ cluster.uuid }}" + fail_msg: "Unable to retrieve ISO image " + success_msg: "ISO image retrieved successfully " + +################################################################################### + +- name: Retrieve image that does not exist + ntnx_images_info_v2: + ext_id: 12345678-1234-1234-1324-123456789123 + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + - result.status == 404 + fail_msg: Retrieve image that 
does not exist failed + success_msg: Retrieve image that does not exist passed + +################################################################################# + +- name: Retrieve all images with limit 1 + ntnx_images_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: "Retrieve all images with limit 1 failed " + success_msg: "Retrieve all images with limit 1 passed " + +################################################################################# + +- name: Retrieve all images with limit 2 + ntnx_images_info_v2: + limit: 2 + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 2 + fail_msg: "Retrieve all images with limit 2 failed " + success_msg: "Retrieve all images with limit 2 passed " + +################################################################################# + +- name: Retrieve all images and filter for disk image + ntnx_images_info_v2: + filter: name eq '{{ disk_image_name }}' + register: result + ignore_errors: true + +- name: Reading Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].name == "{{ disk_image_name }}" + - result.response[0].type == "DISK_IMAGE" + fail_msg: "Unable to retrieve DISK image " + success_msg: "DISK image retrieved successfully " + +################################################################################# + +- name: Update DISK_IMAGE by changing its name and type - check mode is enabled + ntnx_images_v2: + state: present + ext_id: "{{ todelete[0] }}" + type: ISO_IMAGE + name: "{{ disk_image_name }}_updated" + cluster_location_ext_ids: "{{ cluster.uuid 
}}" + category_ext_ids: "{{ category_ext_id }}" + check_mode: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{ disk_image_name }}_updated" + - result.response.type == "ISO_IMAGE" + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.ext_id == "{{ todelete[0] }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + fail_msg: "Unable to update DISK_IMAGE by changing its name and type with check mode enabled " + success_msg: "DISK_IMAGE updated successfully by changing its name and type with check mode enabled " + +################################################################################## + +- name: Update ISO_IMAGE by changing its name and type - check mode is enabled + ntnx_images_v2: + state: present + ext_id: "{{ todelete[0] }}" + type: DISK_IMAGE + name: "{{ disk_image_name }}" + cluster_location_ext_ids: "{{ cluster.uuid }}" + category_ext_ids: "{{ category_ext_id }}" + check_mode: true + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.ext_id == "{{ todelete[0] }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + fail_msg: "Unable to update ISO_IMAGE by changing its name and type with check mode enabled " + success_msg: "ISO_IMAGE updated successfully by changing its name and type with check mode enabled " + +################################################################################## + +- name: Update DISK_IMAGE by changing its name and type + ntnx_images_v2: + state: present + ext_id: "{{ todelete[0] }}" + type: ISO_IMAGE + name: "{{ iso_image_name }}_updated" + 
category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ iso_image_name }}_updated" + - result.response.type == 'ISO_IMAGE' + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.ext_id == "{{ todelete[0] }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + fail_msg: "Unable to update DISK_IMAGE by changing its name and type " + success_msg: "DISK_IMAGE updated successfully by changing its name and type " + +################################################################################## + +- name: Update ISO_IMAGE by changing its name and type + ntnx_images_v2: + state: present + ext_id: "{{ todelete[0] }}" + type: DISK_IMAGE + name: "{{ disk_image_name }}" + category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ disk_image_name }}" + - result.response.type == 'DISK_IMAGE' + - result.response.category_ext_ids[0] == "{{ category_ext_id }}" + - result.response.ext_id == "{{ todelete[0] }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + fail_msg: "Unable to update ISO_IMAGE by changing its name and type " + success_msg: "ISO_IMAGE updated successfully by changing its name and type " + +################################################################################# + +- name: Update ISO_IMAGE by changing category + ntnx_images_v2: + state: present + ext_id: "{{ todelete[0] }}" + category_ext_ids: "{{ category_ext_id1 }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + 
that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ disk_image_name }}" + - result.response.type == 'DISK_IMAGE' + - result.response.category_ext_ids[0] == "{{ category_ext_id1 }}" + - result.response.ext_id == "{{ todelete[0] }}" + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + fail_msg: "Unable to update ISO_IMAGE by changing its category " + success_msg: "ISO_IMAGE updated successfully by changing its category " + +################################################################################# + +- name: Update Image that does not exist + ntnx_images_v2: + state: present + ext_id: 12345678-1234-1234-1324-123456789123 + type: DISK_IMAGE + name: "{{ disk_image_name }}" + category_ext_ids: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + fail_msg: Update Image that does not exist failed + success_msg: Update Image that does not exist passed + +################################################################################## + +- name: Delete images + ntnx_images_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete images " + success_msg: "Images deleted successfully " + loop: "{{ result.results }}" + +################################################################################# + +- name: Delete Image that does not exist + ntnx_images_v2: + state: absent + ext_id: 12345678-1234-1234-1324-123456789123 + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: 
+ - result.response is defined + - result.changed == false + - result.failed == true + - result.status == 404 + - result.changed == false + fail_msg: Delete Image that does not exist failed + success_msg: Delete Image that does not exist passed + +################################################################################## + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_images_v2/tasks/main.yml b/tests/integration/targets/ntnx_images_v2/tasks/main.yml new file mode 100644 index 000000000..1b1392132 --- /dev/null +++ b/tests/integration/targets/ntnx_images_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module_defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import images_operations.yml + ansible.builtin.import_tasks: images_operations.yml diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/meta/main.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/meta/main.yml index e4f447d3a..e0985ec29 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/meta/main.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_env diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml index bfbc770df..5bbde0ac4 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml +++ 
b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml @@ -1,35 +1,37 @@ --- -- debug: +- name: Start testing ntnx_karbon_clusters and ntnx_karbon_clusters_info + ansible.builtin.debug: msg: Start testing ntnx_karbon_clusters and ntnx_karbon_clusters_info -- set_fact: - karbon_name: "test-module21" +- name: Set karbon_name + ansible.builtin.set_fact: + karbon_name: test-module21 ############################# -- name: Verify check mode for PROD karbon cluster +- name: Verify check mode for PROD karbon cluster ntnx_karbon_clusters: cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - name: "{{network.dhcp.name}}" + name: "{{ network.dhcp.name }}" cluster_type: PROD cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Flannel storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Delete - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: ext4 - flash_mode: False + flash_mode: false control_plane_virtual_ip: 10.101.0.1 custom_node_configs: etcd: @@ -52,110 +54,110 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - - 
result.response.name == "{{karbon_name}}" - - result.response.cni_config.node_cidr_mask_size == {{cni.node_cidr_mask_size}} - - result.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" - - result.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" + - result.response.name == "{{ karbon_name }}" + - result.response.cni_config.node_cidr_mask_size == {{ cni.node_cidr_mask_size }} + - result.response.cni_config.pod_ipv4_cidr == "{{ cni.pod_ipv4_cidr }}" + - result.response.cni_config.service_ipv4_cidr == "{{ cni.service_ipv4_cidr }}" success_msg: "Success: check mode spec returned as expected" - fail_msg: "Check mode for PROD cluster failed" + fail_msg: Check mode for PROD cluster failed ############################# -- name: Verify check mode for ntnx_karbon_clusters +- name: Verify check mode for ntnx_karbon_clusters ntnx_karbon_clusters: cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - name: "{{network.dhcp.name}}" + name: "{{ network.dhcp.name }}" cluster_type: DEV cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Flannel storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Delete - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name 
}}" file_system: ext4 - flash_mode: False + flash_mode: false register: result check_mode: true ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - - result.response.name == "{{karbon_name}}" - - result.response.cni_config.node_cidr_mask_size == {{cni.node_cidr_mask_size}} - - result.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" - - result.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" + - result.response.name == "{{ karbon_name }}" + - result.response.cni_config.node_cidr_mask_size == {{ cni.node_cidr_mask_size }} + - result.response.cni_config.pod_ipv4_cidr == "{{ cni.pod_ipv4_cidr }}" + - result.response.cni_config.service_ipv4_cidr == "{{ cni.service_ipv4_cidr }}" success_msg: "Success: check mode spec returned as expected" - fail_msg: "Check mode for ntnx_karbon_clusters failed" + fail_msg: Check mode for ntnx_karbon_clusters failed ############################ -- name: create DEV cluster with Flannel network provider +- name: Create DEV cluster with Flannel network provider ntnx_karbon_clusters: cluster: - uuid: "{{cluster.uuid}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + uuid: "{{ cluster.uuid }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - name: "{{network.dhcp.name}}" + name: "{{ network.dhcp.name }}" cluster_type: DEV cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Flannel storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + 
nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Delete - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: ext4 - flash_mode: False + flash_mode: false register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - result.cluster_uuid is defined - - result.response.name == "{{karbon_name}}" - - result.response.cni_config.node_cidr_mask_size == {{cni.node_cidr_mask_size}} - - result.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" - - result.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" + - result.response.name == "{{ karbon_name }}" + - result.response.cni_config.node_cidr_mask_size == {{ cni.node_cidr_mask_size }} + - result.response.cni_config.pod_ipv4_cidr == "{{ cni.pod_ipv4_cidr }}" + - result.response.cni_config.service_ipv4_cidr == "{{ cni.service_ipv4_cidr }}" fail_msg: " Fail: unable to create DEV cluster with Flannel network provider" success_msg: " Pass: create DEV cluster with Flannel network provider successfully " ############################# -- name: delete dev cluster with Flannel network provider +- name: Delete dev cluster with Flannel network provider ntnx_karbon_clusters: state: absent - name: "{{result.response.name}}" + name: "{{ result.response.name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -164,19 +166,19 @@ fail_msg: " Fail: unable to delete dev cluster with Flannel network provider" success_msg: " Pass: delete dev cluster with Flannel network provider finished successfully" ############################# -- name: create DEV cluster with Calico network provider +- 
name: Create DEV cluster with Calico network provider ntnx_karbon_clusters: cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - uuid: "{{network.dhcp.uuid}}" + uuid: "{{ network.dhcp.uuid }}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Calico custom_node_configs: etcd: @@ -195,59 +197,59 @@ memory_gb: 8 disk_gb: 120 storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Retain - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: xfs flash_mode: true register: karbon_cluster ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - karbon_cluster.response is defined - karbon_cluster.changed == true - karbon_cluster.failed == false - karbon_cluster.cluster_uuid is defined - - karbon_cluster.response.name == "{{karbon_name}}" - - karbon_cluster.response.cni_config.node_cidr_mask_size == {{cni.node_cidr_mask_size}} - - karbon_cluster.response.cni_config.calico_config.ip_pool_configs[0].cidr == "{{cni.pod_ipv4_cidr}}" - - karbon_cluster.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" - - karbon_cluster.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" + - karbon_cluster.response.name == 
"{{ karbon_name }}" + - karbon_cluster.response.cni_config.node_cidr_mask_size == {{ cni.node_cidr_mask_size }} + - karbon_cluster.response.cni_config.calico_config.ip_pool_configs[0].cidr == "{{ cni.pod_ipv4_cidr }}" + - karbon_cluster.response.cni_config.service_ipv4_cidr == "{{ cni.service_ipv4_cidr }}" + - karbon_cluster.response.cni_config.pod_ipv4_cidr == "{{ cni.pod_ipv4_cidr }}" fail_msg: " Fail: unable to create DEV cluster with Calico network provider " success_msg: " Pass: create DEV cluster with Calico network provider finished successfully" ############################# -- name: test getting dev cluster using name +- name: Test getting dev cluster using name ntnx_karbon_clusters_info: - cluster_name: "{{karbon_cluster.response.name}}" + cluster_name: "{{ karbon_cluster.response.name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - - result.response.name == "{{karbon_cluster.response.name}}" + - result.response.name == "{{ karbon_cluster.response.name }}" fail_msg: " Fail: Unable to get particular Cluster " success_msg: " Pass: Cluster info obtained successfully " #################################################### -- name: test getting cluster with ssh config using cluster name +- name: Test getting cluster with ssh config using cluster name ntnx_karbon_clusters_info: - cluster_name: "{{karbon_cluster.response.name}}" + cluster_name: "{{ karbon_cluster.response.name }}" fetch_ssh_credentials: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -256,15 +258,15 @@ fail_msg: " Fail: Unable to get particular Cluster and it's ssh config " success_msg: " Pass: Cluster info obtained successfully with it's ssh config " #################################################### -- name: test getting cluster with 
kubeconfig config using cluster name +- name: Test getting cluster with kubeconfig config using cluster name ntnx_karbon_clusters_info: - cluster_name: "{{karbon_cluster.response.name}}" + cluster_name: "{{ karbon_cluster.response.name }}" fetch_kubeconfig: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -274,26 +276,29 @@ success_msg: " Pass: Cluster info obtained successfully with it's kube config " ############################# - name: Generate random node_pool name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=9,upper = false)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=9, upper=false)[0] }}" -- set_fact: - suffix_name: "ansible" +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: ansible -- set_fact: - node1_name: "{{random_name}}{{suffix_name}}1" - node2_name: "{{random_name}}{{suffix_name}}2" - node3_name: "{{random_name}}{{suffix_name}}3" +- name: Set node names + ansible.builtin.set_fact: + node1_name: "{{ random_name }}{{ suffix_name }}1" + node2_name: "{{ random_name }}{{ suffix_name }}2" + node3_name: "{{ random_name }}{{ suffix_name }}3" -- debug: +- name: Start testing ntnx_karbon_clusters_node_pools + ansible.builtin.debug: msg: Start testing ntnx_karbon_clusters_node_pools - name: Create node pool with subnet uuid ntnx_karbon_clusters_node_pools: node_subnet: - uuid: "{{network.dhcp.uuid}}" - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + uuid: "{{ network.dhcp.uuid }}" + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" pool_config: num_instances: 2 cpu: 4 @@ -303,21 +308,21 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - - 
result.cluster_name == "{{karbon_name}}" + - result.cluster_name == "{{ karbon_name }}" - result.response is defined - result.node_pool_name is defined - - result.node_pool_name=="{{node1_name}}" + - result.node_pool_name=="{{ node1_name }}" fail_msg: "Fail: Unable to Create node pool " success_msg: "Passed: Create node pool finished successfully " ################################# -- name: try to update node pool config with same values +- name: Try to update node pool config with same values ntnx_karbon_clusters_node_pools: - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" pool_config: num_instances: 2 cpu: 4 @@ -327,7 +332,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -335,95 +340,95 @@ fail_msg: "Fail: idempotecy check fail " success_msg: "Passed: Returned as expected " ################################# -- name: try to update node pool config with wrong labels +- name: Try to update node pool config with wrong labels ntnx_karbon_clusters_node_pools: - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" pool_config: num_instances: 2 cpu: 4 memory_gb: 8 # for etcd min 8 disk_gb: 120 add_labels: - propert.-+]y5: "string" - propert5: "string" - property4: "string+-.3-@" + propert.-+]y5: string + propert5: string + property4: string+-.3-@ register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == true fail_msg: "Fail: node pool updated with wrong labels " success_msg: "Passed: Returned as expected " ################################# -- name: update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels +- name: Update pool by increasing cpu,memory_gb,num_instances and try to add wrong 
labels ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + wait: true + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" pool_config: - cpu: 6 - memory_gb: 10 # for etcd min 8 - disk_gb: 150 - num_instances: 4 + cpu: 6 + memory_gb: 10 # for etcd min 8 + disk_gb: 150 + num_instances: 4 add_labels: - property1: "test-property1" - property2: "test-property2" - property3: "test-property3" - propert.-+]y5: "string" + property1: test-property1 + property2: test-property2 + property3: test-property3 + propert.-+]y5: string register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - - result.cluster_name == "{{karbon_name}}" + - result.cluster_name == "{{ karbon_name }}" - result.response is defined - result.node_pool_name is defined - - result.node_pool_name=="{{node1_name}}" + - result.node_pool_name=="{{ node1_name }}" - result.skipped == true fail_msg: "Fail: Unable to update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels " success_msg: "Passed: update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels finished successfully " # ################################# -- name: update pool by add labels +- name: Update pool by add labels ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + wait: true + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" add_labels: - property1: "test-property1" - property2: "test-property2" - property3: "test-property3" + property1: test-property1 + property2: test-property2 + property3: test-property3 register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - - result.cluster_name == "{{karbon_name}}" + - result.cluster_name == "{{ 
karbon_name }}" - result.response is defined - result.node_pool_name is defined - - result.node_pool_name=="{{node1_name}}" + - result.node_pool_name=="{{ node1_name }}" fail_msg: "Fail: Unable to update pool by add labels " success_msg: "Passed: update pool by add labels finished successfully " ################################# -- name: update pool by decreasing cpu,memory_gb,num_instances and add remove labels +- name: Update pool by decreasing cpu,memory_gb,num_instances and add remove labels ntnx_karbon_clusters_node_pools: - wait: True - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + wait: true + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" pool_config: - cpu: 5 - memory_gb: 9 # for etcd min 8 - disk_gb: 140 - num_instances: 3 + cpu: 5 + memory_gb: 9 # for etcd min 8 + disk_gb: 140 + num_instances: 3 remove_labels: - property2 - property3 @@ -431,27 +436,27 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - - result.cluster_name == "{{karbon_name}}" + - result.cluster_name == "{{ karbon_name }}" - result.response is defined - result.node_pool_name is defined - - result.node_pool_name=="{{node1_name}}" + - result.node_pool_name=="{{ node1_name }}" fail_msg: "Fail: Unable to update pool by decreasing cpu,memory_gb,num_instances and add remove labels " success_msg: "Passed: update pool by decreasing cpu,memory_gb,num_instances and add remove labels finished successfully " ################################ -- name: delete pool +- name: Delete pool ntnx_karbon_clusters_node_pools: state: absent - node_pool_name: "{{node1_name}}" - cluster_name: "{{karbon_name}}" + node_pool_name: "{{ node1_name }}" + cluster_name: "{{ karbon_name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' @@ -464,34 +469,34 @@ - 
name: Create node pool with subnet name with default values ntnx_karbon_clusters_node_pools: node_subnet: - name: "{{network.dhcp.name}}" - node_pool_name: "{{node2_name}}" - cluster_name: "{{karbon_name}}" + name: "{{ network.dhcp.name }}" + node_pool_name: "{{ node2_name }}" + cluster_name: "{{ karbon_name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - - result.cluster_name == "{{karbon_name}}" + - result.cluster_name == "{{ karbon_name }}" - result.response is defined - result.node_pool_name is defined - - result.node_pool_name=="{{node2_name}}" + - result.node_pool_name=="{{ node2_name }}" fail_msg: "Fail: Unable to Create node pool with subnet name with default values " success_msg: "Passed: Create node pool with subnet name with default values finished successfully " ################################# -- name: delete pool +- name: Delete pool ntnx_karbon_clusters_node_pools: state: absent - node_pool_name: "{{node2_name}}" - cluster_name: "{{karbon_name}}" + node_pool_name: "{{ node2_name }}" + cluster_name: "{{ karbon_name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' @@ -500,15 +505,15 @@ fail_msg: " Unable to delete node pool " success_msg: " node pool has been deleted successfully " ################################# -- name: delete dev cluster +- name: Delete dev cluster ntnx_karbon_clusters: state: absent - name: "{{karbon_cluster.response.name}}" + name: "{{ karbon_cluster.response.name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml index 
f7d047c91..f282456e1 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Ntnx_karbon_clusters_and_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "crud.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import Tasks + ansible.builtin.import_tasks: crud.yml + - name: Import Tasks + ansible.builtin.import_tasks: negative_scenarios.yml diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml index 3cc7c5541..f39574220 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml @@ -1,24 +1,26 @@ --- -- debug: +- name: Start negative test cases for ntnx_karbon_clusters + ansible.builtin.debug: msg: Start negative test cases for ntnx_karbon_clusters -- set_fact: - karbon_name: "test-module22" +- name: Set karbon_name + ansible.builtin.set_fact: + karbon_name: test-module22 ############################# -- name: create cluster with cpu less than minimum +- name: Create cluster with cpu less than minimum ntnx_karbon_clusters: cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - uuid: "{{network.dhcp.uuid}}" + uuid: "{{ network.dhcp.uuid 
}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Calico custom_node_configs: etcd: @@ -27,19 +29,19 @@ memory_gb: 8 disk_gb: 120 storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Retain - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: xfs flash_mode: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response == {} - result.changed == false @@ -49,19 +51,19 @@ fail_msg: " Fail: cluster creaeted with cpu less than minimum" success_msg: " Pass: Retunred as expected" ############################# -- name: create cluster with memory_gb less than minimum +- name: Create cluster with memory_gb less than minimum ntnx_karbon_clusters: cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - uuid: "{{network.dhcp.uuid}}" + uuid: "{{ network.dhcp.uuid }}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Calico 
custom_node_configs: etcd: @@ -70,19 +72,19 @@ memory_gb: 7 disk_gb: 120 storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Retain - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: xfs flash_mode: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response == {} - result.changed == false @@ -92,19 +94,19 @@ fail_msg: " Fail: cluster creaeted with memory_gb size less than minimum" success_msg: " Pass: Retunred as expected" ############################# -- name: create cluster with wrong num_instances for master nodes +- name: Create cluster with wrong num_instances for master nodes ntnx_karbon_clusters: cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - uuid: "{{network.dhcp.uuid}}" + uuid: "{{ network.dhcp.uuid }}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Calico custom_node_configs: masters: @@ -113,19 +115,19 @@ memory_gb: 8 disk_gb: 120 storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + 
nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Retain - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: xfs flash_mode: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response == {} - result.changed == false @@ -135,19 +137,19 @@ fail_msg: " Fail: cluster creaeted with wrong num_instances for master nodes" success_msg: " Pass: Retunred as expected" ############################# -- name: create cluster with wrong num_instances for etcd nodes +- name: Create cluster with wrong num_instances for etcd nodes ntnx_karbon_clusters: cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" + name: "{{ cluster.name }}" + name: "{{ karbon_name }}" + k8s_version: "{{ k8s_version }}" + host_os: "{{ host_os }}" node_subnet: - uuid: "{{network.dhcp.uuid}}" + uuid: "{{ network.dhcp.uuid }}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" + node_cidr_mask_size: "{{ cni.node_cidr_mask_size }}" + service_ipv4_cidr: "{{ cni.service_ipv4_cidr }}" + pod_ipv4_cidr: "{{ cni.pod_ipv4_cidr }}" network_provider: Calico custom_node_configs: etcd: @@ -156,19 +158,19 @@ memory_gb: 8 disk_gb: 120 storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True + nutanix_cluster_password: "{{ nutanix_cluster_password }}" + nutanix_cluster_username: "{{ nutanix_cluster_username }}" + default_storage_class: true name: test-storage-class reclaim_policy: Retain - storage_container: "{{storage_container.name}}" + storage_container: "{{ storage_container.name }}" file_system: xfs flash_mode: true register: result ignore_errors: true 
- name: Creation Status - assert: + ansible.builtin.assert: that: - result.response == {} - result.changed == false diff --git a/tests/integration/targets/ntnx_karbon_registries/aliases b/tests/integration/targets/ntnx_karbon_registries/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_karbon_registries/aliases +++ b/tests/integration/targets/ntnx_karbon_registries/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_karbon_registries/meta/main.yml b/tests/integration/targets/ntnx_karbon_registries/meta/main.yml index e4f447d3a..e0985ec29 100644 --- a/tests/integration/targets/ntnx_karbon_registries/meta/main.yml +++ b/tests/integration/targets/ntnx_karbon_registries/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_env diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml index cf88b97bf..a949f7abd 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml @@ -1,84 +1,84 @@ --- - -- debug: - msg: "start ntnx_karbon_registries tests" +- name: Start ntnx_karbon_registries tests + ansible.builtin.debug: + msg: start ntnx_karbon_registries tests - name: Generate random registry_name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" - -- set_fact: - registry_name: "{{random_name[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" +- name: Set registry_name + ansible.builtin.set_fact: + registry_name: "{{ random_name[0] }}" -- name: create registry with check_mode +- name: Create registry with check_mode ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" + name: "{{ registry_name }}" + url: "{{ url }}" register: result - 
ignore_errors: True + ignore_errors: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{registry_name}}" - - result.response.url == "{{url}}" - success_msg: ' Success: returned response as expected ' - fail_msg: ' Fail: create registry with check_mode ' + - result.response.name == "{{ registry_name }}" + - result.response.url == "{{ url }}" + success_msg: " Success: returned response as expected " + fail_msg: " Fail: create registry with check_mode " ################################################################ -- name: create registry +- name: Create registry ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" + name: "{{ registry_name }}" + url: "{{ url }}" port: 5000 register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{registry_name}}" + - result.response.name == "{{ registry_name }}" - result.response.uuid is defined fail_msg: "Fail: Unable to create registery" success_msg: "Pass: create registry finished successfully" ################################################################ -- name: delete registry +- name: Delete registry ntnx_karbon_registries: - name: "{{registry_name}}" + name: "{{ registry_name }}" state: absent register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.registry_name == "{{registry_name}}" + - result.response.registry_name == "{{ registry_name }}" fail_msg: "Fail: Unable to delete created registry" success_msg: 
"Pass: delete registry finished successfully" - ################################################################ +################################################################ -- name: create registry with username and password +- name: Create registry with username and password ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" + name: "{{ registry_name }}" + url: "{{ url }}" username: test password: test register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -87,19 +87,19 @@ fail_msg: "Fail: unable to create registry with username and password" success_msg: "Pass: create registry with username and password finished successfully" ############################################################### -- name: delete registry +- name: Delete registry ntnx_karbon_registries: - name: "{{registry_name}}" + name: "{{ registry_name }}" state: absent register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - "'{{registry_name}}' in result.response.registry_name" + - "'{{ registry_name }}' in result.response.registry_name" fail_msg: "Fail: unable to delete created registry with username and password" success_msg: "Pass: delete registry finished successfully" diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/main.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/main.yml index 709409a78..77d364131 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/main.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Ntnx_karbon_registries integration tests + module_defaults: group/nutanix.ncp.ntnx: - 
nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "negativ_scenarios.yml" + - name: Import Tasks + ansible.builtin.import_tasks: create.yml + - name: Import Tasks + ansible.builtin.import_tasks: negativ_scenarios.yml diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml index cbe281e4d..50b72a692 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml @@ -1,17 +1,18 @@ --- -- debug: +- name: Negative test cases for ntnx_karbon_registries + ansible.builtin.debug: msg: Start negative test cases for ntnx_karbon_registries -- name: create registry with wrong port number +- name: Create registry with wrong port number ntnx_karbon_registries: name: test_regitry - url: "{{url}}" + url: "{{ url }}" port: 501 register: result - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == true diff --git a/tests/integration/targets/ntnx_karbon_registries_info/aliases b/tests/integration/targets/ntnx_karbon_registries_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_karbon_registries_info/aliases +++ b/tests/integration/targets/ntnx_karbon_registries_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_karbon_registries_info/meta/main.yml b/tests/integration/targets/ntnx_karbon_registries_info/meta/main.yml index e4f447d3a..e0985ec29 100644 
--- a/tests/integration/targets/ntnx_karbon_registries_info/meta/main.yml +++ b/tests/integration/targets/ntnx_karbon_registries_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_env diff --git a/tests/integration/targets/ntnx_karbon_registries_info/tasks/info.yml b/tests/integration/targets/ntnx_karbon_registries_info/tasks/info.yml index 810e82abb..1fe9ce1bc 100644 --- a/tests/integration/targets/ntnx_karbon_registries_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_karbon_registries_info/tasks/info.yml @@ -1,71 +1,73 @@ --- -- debug: - msg: "start ntnx_karbon_registries_info tests" +- name: Start ntnx_karbon_registries_info tests + ansible.builtin.debug: + msg: start ntnx_karbon_registries_info tests - name: Generate random registry_name - set_fact: - random_name1: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" - random_name2: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name1: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" + random_name2: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - registry_name1: "{{random_name1[0]}}" - registry_name2: "{{random_name2[0]}}" +- name: Set registry_name + ansible.builtin.set_fact: + registry_name1: "{{ random_name1[0] }}" + registry_name2: "{{ random_name2[0] }}" -- name: create registries for tests +- name: Create registries for tests ntnx_karbon_registries: - name: "{{registry_name1}}" - url: "{{url}}" + name: "{{ registry_name1 }}" + url: "{{ url }}" register: r1 -- name: create registries for tests +- name: Create registries for tests ntnx_karbon_registries: - name: "{{registry_name2}}" - url: "{{url}}" + name: "{{ registry_name2 }}" + url: "{{ url }}" register: r2 ################################################################ -- name: test getting all registries +- name: Test getting 
all registries ntnx_karbon_registries_info: register: registries -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - registries.response is defined - registries.response | length > 0 - registries.failed == false - registries.changed == false - fail_msg: "Unable to list all registries" - success_msg: "registries listed successfully" + fail_msg: Unable to list all registries + success_msg: registries listed successfully ################################################################ -- name: test getting particular register using name +- name: Test getting particular register using name ntnx_karbon_registries_info: - registry_name: "{{ registries.response[1].name }}" + registry_name: "{{ registries.response[1].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.response.uuid == "{{ registries.response[1].uuid }}" - result.response.name == "{{ registries.response[1].name }}" - result.response.endpoint == "{{ registries.response[1].endpoint }}" - fail_msg: "Unable to get particular registry" - success_msg: "register info obtained successfully" + fail_msg: Unable to get particular registry + success_msg: register info obtained successfully ################################################################ -- name: delete registry entry +- name: Delete registry entry ntnx_karbon_registries: - name: "{{registry_name1}}" + name: "{{ registry_name1 }}" state: absent register: result - ignore_errors: True + ignore_errors: true -- name: delete registry +- name: Delete registry ntnx_karbon_registries: - name: "{{registry_name2}}" + name: "{{ registry_name2 }}" state: absent register: result - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/ntnx_karbon_registries_info/tasks/main.yml b/tests/integration/targets/ntnx_karbon_registries_info/tasks/main.yml index 3364b30c6..81bdd68ad 
100644 --- a/tests/integration/targets/ntnx_karbon_registries_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_karbon_registries_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_karbon_registries_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import Tasks + ansible.builtin.import_tasks: info.yml diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/aliases b/tests/integration/targets/ntnx_ndb_availability_databases/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/aliases +++ b/tests/integration/targets/ntnx_ndb_availability_databases/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml b/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml index d09f77ab1..a49453c43 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_availability_databases integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - 
nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "tests.yml" + - name: Import tasks + ansible.builtin.import_tasks: tests.yml diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml index 57efb4ece..097d4ad6a 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml @@ -5,347 +5,215 @@ # 2. Create HA postgres database instance with multicluster nodes # 3. Create HA postgres database instance with static IP and cluster IP assigments -- debug: - msg: "start ndb databases test flow for testing high availability databases" +- name: Start ndb databases test flow for testing high availability databases + ansible.builtin.debug: + msg: start ndb databases test flow for testing high availability databases - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}-ha" - db1_name_updated: "{{random_name[0]}}-updated" - cluster1_name: "{{random_name[0]}}-cluster" +- name: Define variables for db1_name, db1_name_updated and cluster1_name + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}-ha" + db1_name_updated: "{{ random_name[0] }}-updated" + cluster1_name: "{{ random_name[0] }}-cluster" -- name: create HA instance postgres database spec using check mode - check_mode: yes +- name: Create HA instance postgres database spec using check mode + check_mode: true ntnx_ndb_databases: wait: 
true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{postgres_ha_profiles.db_params_profile.name}}" + name: "{{ postgres_ha_profiles.db_params_profile.name }}" db_server_cluster: new_cluster: - name: "{{cluster1_name}}" + name: "{{ cluster1_name }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ postgres_ha_profiles.software_profile.name }}" network_profile: name: "{{ postgres_ha_profiles.static_network_profile.name }}" compute_profile: name: "{{ postgres_ha_profiles.compute_profile.name }}" - password: "temp_password" - pub_ssh_key: "test_key" + password: temp_password + pub_ssh_key: test_key vms: - - name: "{{cluster1_name}}-vm-1" - node_type: "database" - role: "Primary" - archive_log_destination: "/temp/" - - name: "{{cluster1_name}}-vm-2" - node_type: "database" - role: "Secondary" - archive_log_destination: "/temp/" - - name: "{{cluster1_name}}-vm-3" + - name: "{{ cluster1_name }}-vm-1" + node_type: database + role: Primary + archive_log_destination: /temp/ + - name: "{{ cluster1_name }}-vm-2" + node_type: database + role: Secondary + archive_log_destination: /temp/ + - name: "{{ cluster1_name }}-vm-3" cluster: - name: "{{cluster.cluster2.name}}" - node_type: "database" - role: "Secondary" - archive_log_destination: "/temp/" + name: "{{ cluster.cluster2.name }}" + node_type: database + role: Secondary + archive_log_destination: /temp/ compute_profile: - uuid: "test_compute_uuid" - - name: "{{cluster1_name}}-ha-vm" + uuid: test_compute_uuid + - name: "{{ cluster1_name }}-ha-vm" cluster: - name: "{{cluster.cluster2.name}}" - node_type: "haproxy" - + name: "{{ cluster.cluster2.name }}" + node_type: haproxy postgres: - type: "ha" + type: ha db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 listener_port: "9999" - patroni_cluster_name: "patroni_cluster" + patroni_cluster_name: 
patroni_cluster enable_synchronous_mode: true archive_wal_expire_days: 3 - post_create_script: "ls" - pre_create_script: "ls -a" + post_create_script: ls + pre_create_script: ls -a enable_peer_auth: true ha_proxy: provision_virtual_ip: true write_port: "9999" read_port: "8888" - time_machine: name: TM1 desc: TM-desc sla: name: "{{ sla.name }}" clusters: - - name: "{{cluster.cluster1.name}}" - - uuid: "test_uuid" + - name: "{{ cluster.cluster1.name }}" + - uuid: test_uuid automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - expected_response: { - "actionArguments": [ - { - "name": "cluster_name", - "value": "{{cluster1_name}}" - }, - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "allocate_pg_hugepage", - "value": false - }, - { - "name": "cluster_database", - "value": false - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_create_script", - "value": "ls -a" - }, - { - "name": "post_create_script", - "value": "ls" - }, - { - "name": "patroni_cluster_name", - "value": "patroni_cluster" - }, - { - "name": "archive_wal_expire_days", - "value": "3" - }, - { - "name": "enable_synchronous_mode", - "value": true - }, - { - "name": "enable_peer_auth", - "value": true - }, - { - "name": "node_type", - "value": "database" - }, - { - "name": "backup_policy", - "value": "primary_only" - }, - { - "name": "failover_mode", - "value": "Automatic" - }, - { - "name": "database_names", - "value": "testAnsible" - }, - { - "name": "database_size", - "value": "200" - }, - { - "name": "provision_virtual_ip", - "value": true - }, - { - "name": "proxy_write_port", - "value": "9999" - 
}, - { - "name": "proxy_read_port", - "value": "8888" - }, - { - "name": "deploy_haproxy", - "value": true - } - ], - "autoTuneStagingDrive": true, - "clustered": true, - "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", - "createDbserver": true, - "databaseDescription": "ansible-created-db-desc", - "databaseType": "postgres_database", - "dbParameterProfileId": "{{postgres_ha_profiles.db_params_profile.uuid}}", - "maintenanceTasks": { - "maintenanceWindowId": "{{ maintenance.window_uuid }}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - }, - "name": "{{db1_name}}", - "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", - "nodeCount": 4, - "nodes": [ - { - "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", - "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster1.uuid}}", - "properties": [ - { - "name": "role", - "value": "Primary" - }, - { - "name": "node_type", - "value": "database" - }, - { - "name": "remote_archive_destination", - "value": "/temp/" - } - ], - "vmName": "{{cluster1_name}}-vm-1" - }, - { - "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", - "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster1.uuid}}", - "properties": [ - { - "name": "role", - "value": "Secondary" - }, - { - "name": "node_type", - "value": "database" - }, - { - "name": "remote_archive_destination", - "value": "/temp/" - } - ], - "vmName": "{{cluster1_name}}-vm-2" - }, - { - "computeProfileId": "test_compute_uuid", - "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster2.uuid}}", - "properties": [ - { - 
"name": "role", - "value": "Secondary" - }, - { - "name": "node_type", - "value": "database" - }, - { - "name": "remote_archive_destination", - "value": "/temp/" - } - ], - "vmName": "{{cluster1_name}}-vm-3" - }, - { - "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", - "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster2.uuid}}", - "properties": [ - { - "name": "node_type", - "value": "haproxy" - } - ], - "vmName": "{{cluster1_name}}-ha-vm" - } - ], - "nxClusterId": "{{cluster.cluster1.uuid}}", - "softwareProfileId": "{{postgres_ha_profiles.software_profile.uuid}}", - "softwareProfileVersionId": "{{postgres_ha_profiles.software_profile.latest_version_id}}", - "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", - "tags": [], - "timeMachineInfo": { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": {}, - "slaDetails": { - "primarySla": { - "nxClusterIds": [ - "{{cluster.cluster1.uuid}}", - "test_uuid" - ], - "slaId": "{{sla.uuid}}" - } - } - }, - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } +- name: Set expected response + ansible.builtin.set_fact: + expected_response: + actionArguments: + - { name: cluster_name, value: "{{ cluster1_name }}" } + - { name: listener_port, value: "9999" } + - { name: allocate_pg_hugepage, value: false } + - { name: cluster_database, value: false } + - { name: db_password, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: pre_create_script, value: ls -a } + - { name: post_create_script, value: ls } + - { name: patroni_cluster_name, value: patroni_cluster } + - { name: archive_wal_expire_days, value: "3" } + - { name: enable_synchronous_mode, value: true } + - { name: enable_peer_auth, value: true } + - { name: node_type, value: database } + - { name: backup_policy, value: primary_only } + - { name: failover_mode, value: Automatic } + - { name: database_names, value: testAnsible } + - { name: 
database_size, value: "200" } + - { name: provision_virtual_ip, value: true } + - { name: proxy_write_port, value: "9999" } + - { name: proxy_read_port, value: "8888" } + - { name: deploy_haproxy, value: true } + autoTuneStagingDrive: true + clustered: true + computeProfileId: "{{ postgres_ha_profiles.compute_profile.uuid }}" + createDbserver: true + databaseDescription: ansible-created-db-desc + databaseType: postgres_database + dbParameterProfileId: "{{ postgres_ha_profiles.db_params_profile.uuid }}" + maintenanceTasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } + name: "{{ db1_name }}" + networkProfileId: "{{ postgres_ha_profiles.static_network_profile.uuid }}" + nodeCount: 4 + nodes: + - computeProfileId: "{{ postgres_ha_profiles.compute_profile.uuid }}" + networkProfileId: "{{ postgres_ha_profiles.static_network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster1.uuid }}" + properties: + - { name: role, value: Primary } + - { name: node_type, value: database } + - { name: remote_archive_destination, value: /temp/ } + vmName: "{{ cluster1_name }}-vm-1" + - computeProfileId: "{{ postgres_ha_profiles.compute_profile.uuid }}" + networkProfileId: "{{ postgres_ha_profiles.static_network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster1.uuid }}" + properties: + - { name: role, value: Secondary } + - { name: node_type, value: database } + - { name: remote_archive_destination, value: /temp/ } + vmName: "{{ cluster1_name }}-vm-2" + - computeProfileId: test_compute_uuid + networkProfileId: "{{ postgres_ha_profiles.static_network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster2.uuid }}" + properties: + - { name: role, value: Secondary } + - { name: node_type, value: database } + - { name: remote_archive_destination, value: /temp/ } + vmName: "{{ 
cluster1_name }}-vm-3" + - computeProfileId: "{{ postgres_ha_profiles.compute_profile.uuid }}" + networkProfileId: "{{ postgres_ha_profiles.static_network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster2.uuid }}" + properties: [{ name: node_type, value: haproxy }] + vmName: "{{ cluster1_name }}-ha-vm" + nxClusterId: "{{ cluster.cluster1.uuid }}" + softwareProfileId: "{{ postgres_ha_profiles.software_profile.uuid }}" + softwareProfileVersionId: "{{ postgres_ha_profiles.software_profile.latest_version_id }}" + sshPublicKey: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + tags: [] + timeMachineInfo: + autoTuneLogDrive: true + description: TM-desc + name: TM1 + schedule: {} + slaDetails: { primarySla: { nxClusterIds: ["{{ cluster.cluster1.uuid }}", test_uuid], slaId: "{{ sla.uuid }}" } } + vmPassword: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response == expected_response - fail_msg: "Unable to create ha instance database create spec" - success_msg: "HA instance postgres database spec created successfully" + fail_msg: Unable to create ha instance database create spec + success_msg: HA instance postgres database spec created successfully -- name: create HA instance postgres database with static IP assignments +- name: Create HA instance postgres database with static IP assignments ntnx_ndb_databases: wait: true timeout: 5400 - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{postgres_ha_profiles.db_params_profile.name}}" + name: "{{ postgres_ha_profiles.db_params_profile.name }}" db_server_cluster: new_cluster: - name: "{{cluster1_name}}" + name: "{{ cluster1_name }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" ips: - cluster: - name: "{{cluster.cluster1.name}}" - ip: "{{cluster_ips.vip}}" + name: "{{ cluster.cluster1.name 
}}" + ip: "{{ cluster_ips.vip }}" software_profile: name: "{{ postgres_ha_profiles.software_profile.name }}" @@ -353,39 +221,38 @@ name: "{{ postgres_ha_profiles.static_network_profile.name }}" compute_profile: name: "{{ postgres_ha_profiles.compute_profile.name }}" - password: "{{vm_password}}" - pub_ssh_key: "{{public_ssh_key}}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" vms: + - name: "{{ cluster1_name }}-vm-1" + node_type: database + role: Primary + ip: "{{ cluster_ips.ip1 }}" - - name: "{{cluster1_name}}-vm-1" - node_type: "database" - role: "Primary" - ip: "{{cluster_ips.ip1}}" - - - name: "{{cluster1_name}}-vm-2" - node_type: "database" - role: "Secondary" - ip: "{{cluster_ips.ip2}}" + - name: "{{ cluster1_name }}-vm-2" + node_type: database + role: Secondary + ip: "{{ cluster_ips.ip2 }}" - - name: "{{cluster1_name}}-vm-3" - node_type: "database" - role: "Secondary" - ip: "{{cluster_ips.ip3}}" + - name: "{{ cluster1_name }}-vm-3" + node_type: database + role: Secondary + ip: "{{ cluster_ips.ip3 }}" - - name: "{{cluster1_name}}-vm-ha-proxy1" - node_type: "haproxy" - ip: "{{cluster_ips.ip4}}" + - name: "{{ cluster1_name }}-vm-ha-proxy1" + node_type: haproxy + ip: "{{ cluster_ips.ip4 }}" - - name: "{{cluster1_name}}-vm-ha-proxy2" - node_type: "haproxy" - ip: "{{cluster_ips.ip5}}" + - name: "{{ cluster1_name }}-vm-ha-proxy2" + node_type: haproxy + ip: "{{ cluster_ips.ip5 }}" postgres: - type: "ha" + type: ha db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 - patroni_cluster_name: "patroni_cluster" + patroni_cluster_name: patroni_cluster ha_proxy: provision_virtual_ip: true @@ -402,34 +269,34 @@ log_catchup: 30 snapshots_per_day: 2 clusters: - - name: "{{cluster.cluster1.name}}" + - name: "{{ cluster.cluster1.name }}" tags: - ansible-databases: "ha-instance-dbs" + ansible-databases: ha-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - 
pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -439,85 +306,82 @@ - result.response.description == "ansible-created-db-desc" - properties["db_parameter_profile_id"] == postgres_ha_profiles.db_params_profile.uuid - properties["listener_port"] == "5432" - - properties["cluster_ip"] == "['{{cluster_ips.vip}}']" + - properties["cluster_ip"] == "['{{ cluster_ips.vip }}']" - result.response.databaseNodes | length == 3 - result.response.databaseNodes[0].status == "READY" - result.response.databaseNodes[1].status == "READY" - result.response.databaseNodes[2].status == "READY" - result.response.type == "postgres_database" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "ha-instance-dbs" - fail_msg: "Unable to provision postgres HA database instance" - success_msg: "postgres HA database instance provision successfully" + fail_msg: Unable to provision postgres HA database instance + success_msg: postgres HA database instance provision successfully - -- name: unregister db along with delete time machine and db server vms +- name: Unregister db along with delete time machine and db server vms 
ntnx_ndb_databases: - db_uuid: "{{result.db_uuid}}" - state: "absent" + db_uuid: "{{ result.db_uuid }}" + state: absent wait: true delete_time_machine: true - delete_db_server_vms: True + delete_db_server_vms: true register: result -- name: verify status of delete of database along with time machine delete - assert: +- name: Verify status of delete of database along with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" - + fail_msg: database delete failed + success_msg: database deleted successfully -- name: create HA instance postgres database with multicluster vms +- name: Create HA instance postgres database with multicluster vms ntnx_ndb_databases: timeout: 5400 wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{postgres_ha_profiles.db_params_profile.name}}" + name: "{{ postgres_ha_profiles.db_params_profile.name }}" db_server_cluster: new_cluster: - name: "{{cluster1_name}}" + name: "{{ cluster1_name }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ postgres_ha_profiles.software_profile.name }}" network_profile: name: "{{ postgres_ha_profiles.multicluster_network_profile.name }}" compute_profile: name: "{{ postgres_ha_profiles.compute_profile.name }}" - password: "{{vm_password}}" - pub_ssh_key: "{{public_ssh_key}}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" vms: + - name: "{{ cluster1_name }}-vm-1" + node_type: database + role: Primary - - name: "{{cluster1_name}}-vm-1" - node_type: "database" - role: "Primary" - - - name: "{{cluster1_name}}-vm-2" - node_type: "database" - role: "Secondary" + - name: "{{ cluster1_name }}-vm-2" + node_type: 
database + role: Secondary - - name: "{{cluster1_name}}-vm-3" + - name: "{{ cluster1_name }}-vm-3" cluster: - name: "{{cluster.cluster2.name}}" - node_type: "database" - role: "Secondary" + name: "{{ cluster.cluster2.name }}" + node_type: database + role: Secondary postgres: - type: "ha" + type: ha db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 - patroni_cluster_name: "patroni_cluster" + patroni_cluster_name: patroni_cluster time_machine: name: TM1 @@ -532,35 +396,35 @@ log_catchup: 30 snapshots_per_day: 2 clusters: - - name: "{{cluster.cluster1.name}}" - - uuid: "{{cluster.cluster2.uuid}}" + - name: "{{ cluster.cluster1.name }}" + - uuid: "{{ cluster.cluster2.uuid }}" tags: - ansible-databases: "ha-instance-dbs" + ansible-databases: ha-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -578,27 +442,27 @@ - result.response.databaseNodes[2].status == "READY" - result.response.type == "postgres_database" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - 
result.response.tags[0].value == "ha-instance-dbs" - fail_msg: "Unable to provision postgres HA database instance" - success_msg: "postgres HA database instance provision successfully" + fail_msg: Unable to provision postgres HA database instance + success_msg: postgres HA database instance provision successfully -- name: unregister db along with delete time machine and db server vms +- name: Unregister db along with delete time machine and db server vms ntnx_ndb_databases: - db_uuid: "{{result.db_uuid}}" - state: "absent" + db_uuid: "{{ result.db_uuid }}" + state: absent wait: true delete_time_machine: true - delete_db_server_vms: True + delete_db_server_vms: true register: result -- name: verify status of delete of database along with time machine delete - assert: +- name: Verify status of delete of database along with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_clones_info/aliases b/tests/integration/targets/ntnx_ndb_clones_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_clones_info/aliases +++ b/tests/integration/targets/ntnx_ndb_clones_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_clones_info/tasks/info.yml 
b/tests/integration/targets/ntnx_ndb_clones_info/tasks/info.yml index 61d21e97b..75573f713 100644 --- a/tests/integration/targets/ntnx_ndb_clones_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_clones_info/tasks/info.yml @@ -1,65 +1,65 @@ --- -- debug: +- name: Start testing ntnx_ndb_clones_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_clones_info - name: List all era clones ntnx_ndb_clones_info: register: clones -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - clones.response is defined - clones.failed == false - clones.changed == false - clones.response | length > 0 - fail_msg: "Unable to list all era clones" - success_msg: "era clones listed successfully" + fail_msg: Unable to list all era clones + success_msg: era clones listed successfully ################################################################ -- name: get era clones using it's name +- name: Get era clones using it's name ntnx_ndb_clones_info: - name: "{{clones.response[0].name}}" + name: "{{ clones.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{clones.response[0].name}}" + - result.response.name == "{{ clones.response[0].name }}" fail_msg: "Unable to get era clones using it's name " - success_msg: "get era clones using it's name successfully" + success_msg: get era clones using it's name successfully ################################################################ - name: List clones use id ntnx_ndb_clones_info: - uuid: "{{clones.response[0].id}}" + uuid: "{{ clones.response[0].id }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == 
"{{clones.response[0].name}}" + - result.response.name == "{{ clones.response[0].name }}" fail_msg: "Unable to get era clones using it's id " - success_msg: "get era clones using it's id successfully" + success_msg: get era clones using it's id successfully ################################################################ - -- name: get era clones with incorrect name +- name: Get era clones with incorrect name ntnx_ndb_clones_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given diff --git a/tests/integration/targets/ntnx_ndb_clones_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_clones_info/tasks/main.yml index da502fcc5..0fc5f70fe 100644 --- a/tests/integration/targets/ntnx_ndb_clones_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_clones_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_clones_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "info.yml" + - name: Import tasks + ansible.builtin.import_tasks: info.yml diff --git a/tests/integration/targets/ntnx_ndb_clusters/aliases b/tests/integration/targets/ntnx_ndb_clusters/aliases index 7a68b11da..87e7bdaae 100644 --- 
a/tests/integration/targets/ntnx_ndb_clusters/aliases +++ b/tests/integration/targets/ntnx_ndb_clusters/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml b/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml index 232112e75..1585668ad 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -1,136 +1,136 @@ --- -- debug: +- name: Start testing ntnx_ndb_clusters + ansible.builtin.debug: msg: Start testing ntnx_ndb_clusters - - name: Register cluster with prism_vlan in check mode ntnx_ndb_clusters: - name: "{{cluster.cluster3.name}}" - desc: "{{cluster.cluster3.desc}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" - cluster_ip: "{{cluster.cluster3.cluster_ip}}" - cluster_credentials: - username: "{{cluster.cluster3.cluster_credentials.username}}" - password: "{{cluster.cluster3.cluster_credentials.password}}" - agent_network: - dns_servers: - - "{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: 
"{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - storage_container: "{{cluster.cluster3.storage_container}}" + name: "{{ cluster.cluster3.name }}" + desc: "{{ cluster.cluster3.desc }}" + name_prefix: "{{ cluster.cluster3.name_prefix }}" + cluster_ip: "{{ cluster.cluster3.cluster_ip }}" + cluster_credentials: + username: "{{ cluster.cluster3.cluster_credentials.username }}" + password: "{{ cluster.cluster3.cluster_credentials.password }}" + agent_network: + dns_servers: + - "{{ cluster.cluster3.agent_network.dns_servers[0] }}" + - "{{ cluster.cluster3.agent_network.dns_servers[1] }}" + ntp_servers: + - "{{ cluster.cluster3.agent_network.ntp_servers[0] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[1] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[2] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[3] }}" + vlan_access: + prism_vlan: + vlan_name: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_name }}" + vlan_type: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_type }}" + static_ip: "{{ cluster.cluster3.vlan_access.prism_vlan.static_ip }}" + gateway: "{{ cluster.cluster3.vlan_access.prism_vlan.gateway }}" + subnet_mask: "{{ cluster.cluster3.vlan_access.prism_vlan.subnet_mask }}" + storage_container: "{{ cluster.cluster3.storage_container }}" register: result ignore_errors: true check_mode: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.clusterName == "{{cluster.cluster3.name}}" - - result.response.clusterDescription == "{{cluster.cluster3.desc}}" - - result.response.agentVMPrefix == "{{cluster.cluster3.name_prefix}}" - - result.response.clusterIP == "{{cluster.cluster3.cluster_ip}}" - - result.response.credentialsInfo[0].value == "{{cluster.cluster3.cluster_credentials.username}}" - - 
result.response.networksInfo[0].networkInfo[0].value == "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - - result.response.networksInfo[0].networkInfo[1].value == "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - - result.response.networksInfo[0].networkInfo[2].value == "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - - result.response.networksInfo[0].networkInfo[3].value == "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - - result.response.networksInfo[0].type== "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" + - result.response.clusterName == "{{ cluster.cluster3.name }}" + - result.response.clusterDescription == "{{ cluster.cluster3.desc }}" + - result.response.agentVMPrefix == "{{ cluster.cluster3.name_prefix }}" + - result.response.clusterIP == "{{ cluster.cluster3.cluster_ip }}" + - result.response.credentialsInfo[0].value == "{{ cluster.cluster3.cluster_credentials.username }}" + - result.response.networksInfo[0].networkInfo[0].value == "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_name }}" + - result.response.networksInfo[0].networkInfo[1].value == "{{ cluster.cluster3.vlan_access.prism_vlan.static_ip }}" + - result.response.networksInfo[0].networkInfo[2].value == "{{ cluster.cluster3.vlan_access.prism_vlan.gateway }}" + - result.response.networksInfo[0].networkInfo[3].value == "{{ cluster.cluster3.vlan_access.prism_vlan.subnet_mask }}" + - result.response.networksInfo[0].type== "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_type }}" fail_msg: "fail: Wring with check mode for registring cluster" success_msg: "pass: retunred as expected" - name: Register cluster with prism_vlan ntnx_ndb_clusters: - wait: true - name: "{{cluster.cluster3.name}}" - desc: "{{cluster.cluster3.desc}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" - cluster_ip: "{{cluster.cluster3.cluster_ip}}" - cluster_credentials: - username: "{{cluster.cluster3.cluster_credentials.username}}" - password: 
"{{cluster.cluster3.cluster_credentials.password}}" - agent_network: - dns_servers: - - "{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - storage_container: "{{cluster.cluster3.storage_container}}" + wait: true + name: "{{ cluster.cluster3.name }}" + desc: "{{ cluster.cluster3.desc }}" + name_prefix: "{{ cluster.cluster3.name_prefix }}" + cluster_ip: "{{ cluster.cluster3.cluster_ip }}" + cluster_credentials: + username: "{{ cluster.cluster3.cluster_credentials.username }}" + password: "{{ cluster.cluster3.cluster_credentials.password }}" + agent_network: + dns_servers: + - "{{ cluster.cluster3.agent_network.dns_servers[0] }}" + - "{{ cluster.cluster3.agent_network.dns_servers[1] }}" + ntp_servers: + - "{{ cluster.cluster3.agent_network.ntp_servers[0] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[1] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[2] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[3] }}" + vlan_access: + prism_vlan: + vlan_name: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_name }}" + vlan_type: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_type }}" + static_ip: "{{ cluster.cluster3.vlan_access.prism_vlan.static_ip }}" + gateway: "{{ cluster.cluster3.vlan_access.prism_vlan.gateway }}" + subnet_mask: "{{ cluster.cluster3.vlan_access.prism_vlan.subnet_mask }}" + 
storage_container: "{{ cluster.cluster3.storage_container }}" register: result ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{cluster.cluster3.name}}" - - result.response.description == "{{cluster.cluster3.desc}}" - - result.response.ipAddresses[0] == "{{cluster.cluster3.cluster_ip}}" + - result.response.name == "{{ cluster.cluster3.name }}" + - result.response.description == "{{ cluster.cluster3.desc }}" + - result.response.ipAddresses[0] == "{{ cluster.cluster3.cluster_ip }}" fail_msg: "fail: Unable to Register cluster with prisim_vlan" success_msg: "pass: Register cluster with prisim_vlan finished successfully" ################################################################ -- name: update cluster name , desc +- name: Update cluster name , desc ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - name: newname - desc: newdesc + uuid: "{{ result.cluster_uuid }}" + name: newname + desc: newdesc register: result ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response.name == "newname" - result.cluster_uuid is defined - result.response.description == "newdesc" fail_msg: "fail: Unable to update cluster name , desc" success_msg: "pass: update cluster name , desc finished successfully" -- set_fact: - todelete: "{{result.cluster_uuid}}" +- name: Define variable to delete + ansible.builtin.set_fact: + todelete: "{{ result.cluster_uuid }}" ################################################################ -- name: update cluster credeential in check_mode +- name: Update cluster credential in check_mode ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - cluster_credentials: - username: test - password: test + uuid: "{{ result.cluster_uuid }}" + cluster_credentials: + username: 
test + password: test register: result ignore_errors: true no_log: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -144,14 +144,14 @@ - name: Negative Secnarios update storage container ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - storage_container: "{{cluster.cluster3.storage_container}}" + uuid: "{{ result.cluster_uuid }}" + storage_container: "{{ cluster.cluster3.storage_container }}" register: out ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.changed == false - out.failed == true @@ -163,20 +163,20 @@ - name: Negative Secnarios update vlan access ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" + uuid: "{{ result.cluster_uuid }}" + vlan_access: + prism_vlan: + vlan_name: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_name }}" + vlan_type: "{{ cluster.cluster3.vlan_access.prism_vlan.vlan_type }}" + static_ip: "{{ cluster.cluster3.vlan_access.prism_vlan.static_ip }}" + gateway: "{{ cluster.cluster3.vlan_access.prism_vlan.gateway }}" + subnet_mask: "{{ cluster.cluster3.vlan_access.prism_vlan.subnet_mask }}" register: out ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.changed == false - out.failed == true @@ -188,22 +188,22 @@ - name: Negative Secnarios update agent network ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - agent_network: - dns_servers: - - 
"{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" + uuid: "{{ result.cluster_uuid }}" + agent_network: + dns_servers: + - "{{ cluster.cluster3.agent_network.dns_servers[0] }}" + - "{{ cluster.cluster3.agent_network.dns_servers[1] }}" + ntp_servers: + - "{{ cluster.cluster3.agent_network.ntp_servers[0] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[1] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[2] }}" + - "{{ cluster.cluster3.agent_network.ntp_servers[3] }}" register: out ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.changed == false - out.failed == true @@ -215,14 +215,14 @@ - name: Negative Secnarios update agent network ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" + uuid: "{{ result.cluster_uuid }}" + name_prefix: "{{ cluster.cluster3.name_prefix }}" register: out ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.changed == false - out.failed == true @@ -237,18 +237,18 @@ register: clusters no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - clusters.response is defined - clusters.failed == false - clusters.changed == false - clusters.response | length > 0 - fail_msg: "Unable to list all NDB clusters" - success_msg: "NDB clusters listed successfully" + fail_msg: Unable to list all NDB clusters + success_msg: NDB clusters listed successfully ################################################################ -- name: get ndb cluster with count_entities 
+- name: Get ndb cluster with count_entities ntnx_ndb_clusters_info: filters: count_entities: true @@ -256,88 +256,88 @@ ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].entityCounts is defined fail_msg: "Unable to get NDB clusters with count_entities " - success_msg: "get NDB clusters using with count_entities successfully" + success_msg: get NDB clusters using with count_entities successfully ################################################################ -- name: get NDB clusters using it's name +- name: Get NDB clusters using it's name ntnx_ndb_clusters_info: - name: "{{clusters.response[0].name}}" + name: "{{ clusters.response[0].name }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{clusters.response[0].name}}" + - result.response.name == "{{ clusters.response[0].name }}" fail_msg: "Unable to get NDB clusters using it's name " - success_msg: "get NDB clusters using it's name successfully" + success_msg: get NDB clusters using it's name successfully ################################################################ - name: List clusters use id ntnx_ndb_clusters_info: - uuid: "{{clusters.response[0].id}}" + uuid: "{{ clusters.response[0].id }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{clusters.response[0].name}}" + - result.response.name == "{{ clusters.response[0].name }}" fail_msg: "Unable to get NDB clusters using it's id " - success_msg: "get NDB clusters using it's id successfully" 
+ success_msg: get NDB clusters using its id successfully ################################################################ -- name: get NDB clusters with incorrect name +- name: Get NDB clusters with incorrect name ntnx_ndb_clusters_info: - name: "abcd" + name: abcd register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't error out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given ################################################################ -- name: delete cluster +- name: Delete cluster ntnx_ndb_clusters: - uuid: "{{todelete}}" - state: absent + uuid: "{{ todelete }}" + state: absent register: result ignore_errors: true no_log: true -- name: assert when status not complete - assert: +- name: Assert when status not complete + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - result.response.status == "5" - fail_msg: "Unable to delete custer" - success_msg: "cluster deleted successfully" - + fail_msg: Unable to delete cluster + success_msg: cluster deleted successfully -- set_fact: - todelete: [] +- name: Define to delete + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml index cbd87d175..defc003fc 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_clusters integration tests + module_defaults:
group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "CRUD.yml" + - name: Import tasks + ansible.builtin.import_tasks: CRUD.yml diff --git a/tests/integration/targets/ntnx_ndb_database_clones/aliases b/tests/integration/targets/ntnx_ndb_database_clones/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/aliases +++ b/tests/integration/targets/ntnx_ndb_database_clones/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml b/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml index 882a78bb5..712d54a76 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml @@ -7,39 +7,40 @@ # 4. Authorization and deauthorization of db server vm wrt to time machines # 5. 
Creation of clone on authorized db server vm -- debug: - msg: "start ndb database clone tests" +- name: Start ndb database clone tests + ansible.builtin.debug: + msg: start ndb database clone tests - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}" - clone_db1: "{{random_name[0]}}-clone" - vm1_name: "{{random_name[0]}}-vm" - tm1: "{{random_name[0]}}-time-machine" - snapshot_name: "{{random_name[0]}}-snapshot" +- name: Define variables + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}" + clone_db1: "{{ random_name[0] }}-clone" + vm1_name: "{{ random_name[0] }}-vm" + tm1: "{{ random_name[0] }}-time-machine" + snapshot_name: "{{ random_name[0] }}-snapshot" ############################################ setup db and its snapshot for clone tests ########################################### - -- name: create single instance postgres database on new db server vm +- name: Create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}-db" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: @@ -53,10 +54,10 @@ db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 - type: "single" + type: single time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" desc: TM-desc sla: name: "{{ sla.name }}" @@ -70,65 +71,68 @@ register: result -- 
set_fact: - db_uuid: "{{result.db_uuid}}" +- name: Define variable for db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" -- set_fact: - time_machine_uuid: "{{result.response.timeMachineId}}" +- name: Define variable for time machine uuid + ansible.builtin.set_fact: + time_machine_uuid: "{{ result.response.timeMachineId }}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' - result.db_uuid is defined - result.changed == true - fail_msg: "Unable to create single instance postgres database" - success_msg: "single instance postgres database created successfully" + fail_msg: Unable to create single instance postgres database + success_msg: single instance postgres database created successfully -- set_fact: - db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" +- name: Define variable for db server vm uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.response.databaseNodes[0].dbserverId }}" -- name: create manual snapshot of database +- name: Create manual snapshot of database ntnx_ndb_database_snapshots: - time_machine_uuid: "{{time_machine_uuid}}" - name: "{{snapshot_name}}" + time_machine_uuid: "{{ time_machine_uuid }}" + name: "{{ snapshot_name }}" register: result -- name: snapshot status - assert: +- name: Snapshot status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - result.response.status == "ACTIVE" - fail_msg: "unable to create snapshot of database" - success_msg: "snapshot for clone tests created successfully" + fail_msg: unable to create snapshot of database + success_msg: snapshot for clone tests created successfully -- set_fact: - snapshot_uuid: "{{result.snapshot_uuid}}" +- name: Define variable for snapshot uuid + ansible.builtin.set_fact: + snapshot_uuid: "{{ result.snapshot_uuid }}" ############################################ create clone on new db 
server vm tests ########################################### - -- name: create spec for clone of database created above on new db server vm - check_mode: yes +- name: Create spec for clone of database created above on new db server vm + check_mode: true ntnx_ndb_database_clones: - name: "{{clone_db1}}" - desc: "ansible-created-clone" + name: "{{ clone_db1 }}" + desc: ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -136,126 +140,91 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm_password}}" - pre_clone_cmd: "ls" - post_clone_cmd: "ls -a" + db_password: "{{ vm_password }}" + pre_clone_cmd: ls + post_clone_cmd: ls -a time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" pitr_timestamp: "2023-02-04 07:29:36" - timezone: "UTC" + timezone: UTC removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones register: result - - -- set_fact: - expected_response: { - "actionArguments": [ - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_clone_cmd", - "value": "ls" - }, - { - "name": "post_clone_cmd", - "value": "ls -a" - }, - { - "name": "dbserver_description", - "value": "vm for db server" - } - ], - "clustered": false, - "computeProfileId": "{{compute_profile.uuid}}", - "createDbserver": true, - "databaseParameterProfileId": "{{db_params_profile.uuid}}", - "description": "ansible-created-clone", - "latestSnapshot": false, - 
"lcmConfig": { - "databaseLCMConfig": { - "expiryDetails": { - "deleteDatabase": true, - "expireInDays": 2, - "expiryDateTimezone": "Asia/Calcutta", - "remindBeforeInDays": 1 - }, - "refreshDetails": { - "refreshDateTimezone": "Asia/Calcutta", - "refreshInDays": 2, - "refreshTime": "12:00:00" - } - } - }, - "name": "{{clone_db1}}", - "networkProfileId": "{{network_profile.uuid}}", - "nodeCount": 1, - "nodes": [ - { - "computeProfileId": "{{compute_profile.uuid}}", - "networkProfileId": "{{network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster1.uuid}}", - "properties": [], - "vmName": "{{vm1_name}}" - } - ], - "nxClusterId": "{{cluster.cluster1.uuid}}", - "snapshotId": null, - "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", - "tags": [ - { - "tagId": "{{tags.clones.uuid}}", - "tagName": "ansible-clones", - "value": "ansible-test-db-clones" - } - ], - "timeMachineId": "{{time_machine_uuid}}", - "timeZone": "UTC", - "userPitrTimestamp": "2023-02-04 07:29:36", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } +- name: Define variables for expected response + ansible.builtin.set_fact: + expected_response: + actionArguments: + - { name: db_password, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: pre_clone_cmd, value: ls } + - { name: post_clone_cmd, value: ls -a } + - { name: dbserver_description, value: vm for db server } + clustered: false + computeProfileId: "{{ compute_profile.uuid }}" + createDbserver: true + databaseParameterProfileId: "{{ db_params_profile.uuid }}" + description: ansible-created-clone + latestSnapshot: false + lcmConfig: + databaseLCMConfig: + expiryDetails: { deleteDatabase: true, expireInDays: 2, expiryDateTimezone: Asia/Calcutta, remindBeforeInDays: 1 } + refreshDetails: { refreshDateTimezone: Asia/Calcutta, refreshInDays: 2, refreshTime: "12:00:00" } + name: "{{ clone_db1 }}" + networkProfileId: "{{ network_profile.uuid }}" + nodeCount: 1 + nodes: + - computeProfileId: "{{ compute_profile.uuid }}" + 
networkProfileId: "{{ network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster1.uuid }}" + properties: [] + vmName: "{{ vm1_name }}" + nxClusterId: "{{ cluster.cluster1.uuid }}" + snapshotId: + sshPublicKey: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + tags: [{ tagId: "{{ tags.clones.uuid }}", tagName: ansible-clones, value: ansible-test-db-clones }] + timeMachineId: "{{ time_machine_uuid }}" + timeZone: UTC + userPitrTimestamp: "2023-02-04 07:29:36" + vmPassword: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response == expected_response - fail_msg: "Unable to create DB clone provision spec" - success_msg: "DB clone create spec generated successfully using check_mode" + fail_msg: Unable to create DB clone provision spec + success_msg: DB clone create spec generated successfully using check_mode -- name: create clone using snapshot of previously created database +- name: Create clone using snapshot of previously created database ntnx_ndb_database_clones: - name: "{{clone_db1}}" - desc: "ansible-created-clone" + name: "{{ clone_db1 }}" + desc: ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: @@ -263,22 +232,22 @@ pub_ssh_key: "{{ public_ssh_key }}" postgres: - db_password: "{{vm_password}}" + db_password: "{{ vm_password }}" time_machine: - name: "{{tm1}}" - snapshot_uuid: "{{snapshot_uuid}}" + name: "{{ tm1 }}" + snapshot_uuid: "{{ snapshot_uuid }}" removal_schedule: days: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true 
refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones @@ -286,15 +255,15 @@ # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Clone create status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True @@ -313,52 +282,53 @@ - result.response.lcmConfig.refreshDetails.refreshTime == "12:00:00" - properties["db_parameter_profile_id"] == db_params_profile.uuid - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.clones.name}}" + - result.response.tags[0].tagName == "{{ tags.clones.name }}" - result.response.tags[0].value == "ansible-test-db-clones" - result.response.sourceSnapshotId == snapshot_uuid - result.response.parentTimeMachineId == time_machine_uuid - fail_msg: "Unable to create clone" - success_msg: "Database clone created successfully" + fail_msg: Unable to create clone + success_msg: Database clone created successfully -- set_fact: - clone_uuid: "{{result.uuid}}" +- name: Define variable for clone uuid + ansible.builtin.set_fact: + clone_uuid: "{{ result.uuid }}" -- set_fact: - db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" +- name: Define variable for db server vm uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.response.databaseNodes[0].dbserverId }}" ############################################ clone update and removal/refresh schedules related tests ########################################### - -- name: update name, desc, tags and schedules +- name: Update name, desc, tags and schedules ntnx_ndb_database_clones: - uuid: 
"{{clone_uuid}}" - name: "{{clone_db1}}-updated" - desc: "ansible-created-clone-updated" + uuid: "{{ clone_uuid }}" + name: "{{ clone_db1 }}-updated" + desc: ansible-created-clone-updated removal_schedule: timestamp: "2023-02-10 07:29:36" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta remind_before_in_days: 4 delete_database: false refresh_schedule: days: 4 time: "14:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones-updated register: result - name: Clone update status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.uuid is defined - result.uuid == result.response.id - result.response.status == "READY" - - result.response.name == "{{clone_db1}}-updated" + - result.response.name == "{{ clone_db1 }}-updated" - result.response.description == "ansible-created-clone-updated" - result.response.lcmConfig.expiryDetails.expiryTimestamp == "2023-02-10 07:29:36" - result.response.lcmConfig.expiryDetails.remindBeforeInDays == 4 @@ -366,61 +336,56 @@ - result.response.lcmConfig.refreshDetails.refreshInDays == 4 - result.response.lcmConfig.refreshDetails.refreshTime == "14:00:00" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.clones.name}}" + - result.response.tags[0].tagName == "{{ tags.clones.name }}" - result.response.tags[0].value == "ansible-test-db-clones-updated" - fail_msg: "Unable to update clone" - success_msg: "Database clone updated succefully" + fail_msg: Unable to update clone + success_msg: Database clone updated succefully -- name: check idempotency +- name: Check idempotency ntnx_ndb_database_clones: - uuid: "{{clone_uuid}}" - name: "{{clone_db1}}-updated" - desc: "ansible-created-clone-updated" + uuid: "{{ clone_uuid }}" + name: "{{ clone_db1 }}-updated" + desc: ansible-created-clone-updated removal_schedule: timestamp: "2023-02-10 07:29:36" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta 
remind_before_in_days: 4 delete_database: false refresh_schedule: days: 4 time: "14:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones-updated register: result - - -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "database clone got updated" - success_msg: "database clone update got skipped due to no state changes" + fail_msg: database clone got updated + success_msg: database clone update got skipped due to no state changes - -- name: remove schedules +- name: Remove schedules ntnx_ndb_database_clones: - uuid: "{{clone_uuid}}" + uuid: "{{ clone_uuid }}" removal_schedule: - state: "absent" + state: absent refresh_schedule: - state: "absent" + state: absent register: result - - - name: Check schedule remove status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True @@ -428,218 +393,195 @@ - result.response.status == "READY" - result.response.lcmConfig.expiryDetails == None - result.response.lcmConfig.refreshDetails == None - fail_msg: "schedules update failed" - success_msg: "schedules removed succefully" + fail_msg: schedules update failed + success_msg: schedules removed successfully ########################################### refresh clone ########################################### - -- name: create spec for refresh clone to a pitr timestamp - check_mode: yes +- name: Create spec for refresh clone to a pitr timestamp + check_mode: true ntnx_ndb_database_clone_refresh: - uuid: "{{clone_uuid}}" + uuid: "{{ clone_uuid }}" pitr_timestamp: "2023-02-04 07:29:36" - timezone: "UTC" + timezone: UTC register: result - - name: Check refresh db with pitr spec - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response.userPitrTimestamp == "2023-02-04 07:29:36" -
result.response.timeZone == "UTC" - fail_msg: "creation refresh db clone spec failed" - success_msg: "refresh db clone spec created successfully" - + fail_msg: creation refresh db clone spec failed + success_msg: refresh db clone spec created successfully -- name: refresh db clone +- name: Refresh db clone ntnx_ndb_database_clone_refresh: - uuid: "{{clone_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + uuid: "{{ clone_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result - - - name: Check database refresh status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.uuid is defined - result.response.status == "READY" - fail_msg: "database refresh failed" - success_msg: "database refresh completed succefully" + fail_msg: database refresh failed + success_msg: database refresh completed successfully ########################################### delete clone tests########################################### - -- name: create soft remove spec - check_mode: yes +- name: Create soft remove spec + check_mode: true ntnx_ndb_database_clones: - state: "absent" - uuid: "{{clone_uuid}}" + state: absent + uuid: "{{ clone_uuid }}" soft_remove: true register: result - - -- name: verify soft remove spec - assert: +- name: Verify soft remove spec + ansible.builtin.assert: that: - result.changed == false - result.failed == false - result.response.delete == False - result.response.remove == False - result.response.softRemove == True - fail_msg: "creation of spec for soft remove failed" - success_msg: "spec for soft remove created successfully" - + fail_msg: creation of spec for soft remove failed + success_msg: spec for soft remove created successfully - -- name: create unregistration spec - check_mode: yes +- name: Create unregistration spec + check_mode: true ntnx_ndb_database_clones: - state: "absent" - uuid: "{{clone_uuid}}" + state: absent + uuid: "{{ clone_uuid }}" register: result - - -- name: verify unregistration spec -
assert: +- name: Verify unregistration spec + ansible.builtin.assert: that: - result.changed == false - result.failed == false - result.response.delete == False - result.response.remove == True - result.response.softRemove == False - fail_msg: "creation of spec for unregistration failed" - success_msg: "spec for unregistration created successfully" + fail_msg: creation of spec for unregistration failed + success_msg: spec for unregistration created successfully -- name: delete clone db +- name: Delete clone db ntnx_ndb_database_clones: - state: "absent" - uuid: "{{clone_uuid}}" + state: absent + uuid: "{{ clone_uuid }}" delete_from_vm: true register: result - - -- name: verify status of db clone delete - assert: +- name: Verify status of db clone delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully ########################################### authorize and deauthorize db server vms########################################### - -- name: authorize db server vms +- name: Authorize db server vms ntnx_ndb_authorize_db_server_vms: time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" db_server_vms: - - name: "{{vm1_name}}" + - name: "{{ vm1_name }}" register: result - - -- name: verify status of authorization of db server vms - assert: +- name: Verify status of authorization of db server vms + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "database authorization with time machine failed" - success_msg: "database authorization with time machine successfully" + fail_msg: database authorization with time machine failed + success_msg: database authorization with time machine successfully -- name: deauthorize db server vms +- name: Deauthorize db server vms 
ntnx_ndb_authorize_db_server_vms: - state: "absent" + state: absent time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" db_server_vms: - - name: "{{vm1_name}}" + - name: "{{ vm1_name }}" register: result - - -- name: verify status of deauthorization of db server vms - assert: +- name: Verify status of deauthorization of db server vms + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "database deauthorization with time machine failed" - success_msg: "database deauthorization with time machine went successfully" - + fail_msg: database deauthorization with time machine failed + success_msg: database deauthorization with time machine went successfully -- name: authorize db server vms for hosting clone +- name: Authorize db server vms for hosting clone ntnx_ndb_authorize_db_server_vms: time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" db_server_vms: - - name: "{{vm1_name}}" + - name: "{{ vm1_name }}" register: result - -- name: verify status of authorization of db server vms - assert: +- name: Verify status of authorization of db server vms + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "database authorization with time machine failed" - success_msg: "database authorization with time machine successfully" + fail_msg: database authorization with time machine failed + success_msg: database authorization with time machine successfully ############################################ clone on authorized db server vm ########################################### - -- set_fact: +- name: Define variable for timestamp + ansible.builtin.set_fact: timestamp: "2123-11-08 12:36:15" -- name: create clone using snapshot on authorized server +- name: Create clone using snapshot on authorized server ntnx_ndb_database_clones: - name: "{{clone_db1}}" - desc: "ansible-created-clone" + name: "{{ clone_db1 }}" + desc: 
ansible-created-clone db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: use_authorized_server: name: "{{ vm1_name }}" postgres: - db_password: "{{vm_password}}" + db_password: "{{ vm_password }}" time_machine: - uuid: "{{time_machine_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + uuid: "{{ time_machine_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" removal_schedule: - timestamp: "{{timestamp}}" - timezone: "Asia/Calcutta" + timestamp: "{{ timestamp }}" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones register: result - - - name: Clone create status on authorized db server vm - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True @@ -651,222 +593,215 @@ - result.response.clone == True - result.response.databaseNodes[0].dbserverId == db_server_uuid - result.response.parentTimeMachineId == time_machine_uuid - fail_msg: "Unable to create clone" - success_msg: "Database clone created succefully" + fail_msg: Unable to create clone + success_msg: Database clone created succefully -- set_fact: - delete_clone_uuid: "{{result.uuid}}" +- name: Define variable for delete clone + ansible.builtin.set_fact: + delete_clone_uuid: "{{ result.uuid }}" -- name: delete clone db +- name: Delete clone db ntnx_ndb_database_clones: - state: "absent" - uuid: "{{delete_clone_uuid}}" + state: absent + uuid: "{{ delete_clone_uuid }}" delete_from_vm: true register: result -- name: verify status of db clone delete - assert: +- name: Verify status of db clone delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: 
database deleted successfully ########################################### create clone from latest snapshot ################################## -- name: authorize again db server vm +- name: Authorize again db server vm ntnx_ndb_authorize_db_server_vms: time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" db_server_vms: - - name: "{{vm1_name}}" + - name: "{{ vm1_name }}" register: result - - -- name: verify status of authorization of db server vms - assert: +- name: Verify status of authorization of db server vms + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "database authorization with time machine failed" - success_msg: "database authorization with time machine successfully" + fail_msg: database authorization with time machine failed + success_msg: database authorization with time machine successfully -- name: create clone using latest snapshot on authorized server +- name: Create clone using latest snapshot on authorized server ntnx_ndb_database_clones: - name: "{{clone_db1}}fromLatestSnapshot" - desc: "ansible-created-clone from latest snapshot" + name: "{{ clone_db1 }}fromLatestSnapshot" + desc: ansible-created-clone from latest snapshot db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: use_authorized_server: name: "{{ vm1_name }}" postgres: - db_password: "{{vm_password}}" + db_password: "{{ vm_password }}" time_machine: - uuid: "{{time_machine_uuid}}" - latest_snapshot: True + uuid: "{{ time_machine_uuid }}" + latest_snapshot: true removal_schedule: - timestamp: "{{timestamp}}" - timezone: "Asia/Calcutta" + timestamp: "{{ timestamp }}" + timezone: Asia/Calcutta remind_before_in_days: 1 - delete_database: True + delete_database: true refresh_schedule: days: 2 time: "12:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta tags: ansible-clones: ansible-test-db-clones register: result - - - name: Clone create status on 
authorized db server vm - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.uuid is defined - result.uuid == result.response.id - result.response.status == "READY" - - result.response.name == "{{clone_db1}}fromLatestSnapshot" + - result.response.name == "{{ clone_db1 }}fromLatestSnapshot" - result.response.description == "ansible-created-clone from latest snapshot" - result.response.clone == True - result.response.databaseNodes[0].dbserverId == db_server_uuid - result.response.parentTimeMachineId == time_machine_uuid - fail_msg: "Unable to create clone from latest snapshot" - success_msg: "Database clone created from latest snapshot successfully" + fail_msg: Unable to create clone from latest snapshot + success_msg: Database clone created from latest snapshot successfully - - -- set_fact: - delete_clone_uuid: "{{result.uuid}}" +- name: Define variable for delete clone + ansible.builtin.set_fact: + delete_clone_uuid: "{{ result.uuid }}" ############################################ info module tests ################################## -- debug: +- name: Start testing ntnx_ndb_clones_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_clones_info - name: List all era clones ntnx_ndb_clones_info: register: clones -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - clones.response is defined - clones.failed == false - clones.changed == false - clones.response | length > 0 - fail_msg: "Unable to list all era clones" - success_msg: "era clones listed successfully" + fail_msg: Unable to list all era clones + success_msg: era clones listed successfully ################################################################ -- name: get era clones using it's name +- name: Get era clones using it's name ntnx_ndb_clones_info: - name: "{{clones.response[0].name}}" + name: "{{ clones.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check 
listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{clones.response[0].name}}" + - result.response.name == "{{ clones.response[0].name }}" fail_msg: "Unable to get era clones using it's name " - success_msg: "get era clones using it's name successfully" + success_msg: get era clones using it's name successfully ################################################################ - name: List clones use id ntnx_ndb_clones_info: - uuid: "{{clones.response[0].id}}" + uuid: "{{ clones.response[0].id }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{clones.response[0].name}}" + - result.response.name == "{{ clones.response[0].name }}" fail_msg: "Unable to get era clones using it's id " - success_msg: "get era clones using it's id successfully" + success_msg: get era clones using it's id successfully ################################################################ - -- name: get era clones with incorrect name +- name: Get era clones with incorrect name ntnx_ndb_clones_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" -############################################cleanup########################################### + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given +############################################ 
cleanup########################################### -- name: delete clone db +- name: Delete clone db ntnx_ndb_database_clones: - state: "absent" - uuid: "{{delete_clone_uuid}}" + state: absent + uuid: "{{ delete_clone_uuid }}" delete_from_vm: true register: result - -- name: verify status of db clone delete - assert: +- name: Verify status of db clone delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database delete failed" - success_msg: "database delete successfully" - + fail_msg: database delete failed + success_msg: database delete successfully -- name: delete db server vm +- name: Delete db server vm ntnx_ndb_db_server_vms: - state: "absent" - uuid: "{{db_server_uuid}}" + state: absent + uuid: "{{ db_server_uuid }}" delete_from_cluster: true register: result -- name: verify status of delete of db server vm used for clone - assert: +- name: Verify status of delete of db server vm used for clone + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "db server vm deleted failed" - success_msg: "db server vm deleted successfully" - + fail_msg: db server vm deleted failed + success_msg: db server vm deleted successfully -- name: delete database created earlier +- name: Delete database created earlier ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" delete_db_server_vms: true - delete_time_machine: True + delete_time_machine: true register: result -- name: verify status of delete of database along with time machine and db server vm delete - assert: +- name: Verify status of delete of database along with time machine and db server vm delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database delete failed" - 
success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml index 0d3a04a98..9442f3638 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_database_clones integration test + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "clones.yml" + - name: Import tasks + ansible.builtin.import_tasks: clones.yml diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/aliases b/tests/integration/targets/ntnx_ndb_databases_actions/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_actions/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index 7387bff35..5ec37683b 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ 
b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -9,38 +9,39 @@ # 6. Scale database # 7. Add/Remove linked databases -- debug: - msg: "start ndb database day2 actions tests" +- name: Start ndb database day2 actions tests + ansible.builtin.debug: + msg: start ndb database day2 actions tests - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}" - vm1_name: "{{random_name[0]}}-vm" - tm1: "{{random_name[0]}}-time-machine" - snapshot_name: "{{random_name[0]}}-snapshot" +- name: Define variables + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}" + vm1_name: "{{ random_name[0] }}-vm" + tm1: "{{ random_name[0] }}-time-machine" + snapshot_name: "{{ random_name[0] }}-snapshot" ############################################ setup db ########################################### - -- name: create single instance postgres database on new db server vm +- name: Create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}-db" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: @@ -54,10 +55,10 @@ db_name: testAnsible1 db_password: "{{ vm_password }}" db_size: 200 - type: "single" + type: single time_machine: - name: "{{tm1}}" + name: "{{ tm1 }}" desc: TM-desc sla: name: "{{ sla.name }}" @@ -71,553 +72,452 @@ register: 
result -- set_fact: - db_uuid: "{{result.db_uuid}}" +- name: Set db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" -- set_fact: - time_machine_uuid: "{{result.response.timeMachineId}}" +- name: Set time machine uuid + ansible.builtin.set_fact: + time_machine_uuid: "{{ result.response.timeMachineId }}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' - result.db_uuid is defined - result.changed == true - fail_msg: "Unable to create single instance postgres database" - success_msg: "single instance postgres database created successfully" - -- set_fact: - db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" + fail_msg: Unable to create single instance postgres database + success_msg: single instance postgres database created successfully +- name: Set db server uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.response.databaseNodes[0].dbserverId }}" ############################################ snapshots test ########################################### -- name: create snapshot create spec - check_mode: yes +- name: Create snapshot create spec + check_mode: true ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}" - time_machine_uuid: "{{time_machine_uuid}}" + name: "{{ snapshot_name }}" + time_machine_uuid: "{{ time_machine_uuid }}" clusters: - - name: "{{cluster.cluster1.name}}" - - uuid: "test_uuid2" - - uuid: "test_uuid3" + - name: "{{ cluster.cluster1.name }}" + - uuid: test_uuid2 + - uuid: test_uuid3 expiry_days: 4 register: result -- set_fact: - expected_response: { - "changed": false, - "error": null, - "failed": false, - "response": { - "lcmConfig": { - "snapshotLCMConfig": { - "expiryDetails": { - "expireInDays": 4, - } - } - }, - "name": "{{snapshot_name}}", - "replicateToClusterIds": [ - "{{cluster.cluster1.uuid}}", - "test_uuid2", - "test_uuid3" - ] - }, - "snapshot_uuid": null - } +- name: Set expected response + 
ansible.builtin.set_fact: + expected_response: + changed: false + error: + failed: false + response: + lcmConfig: { snapshotLCMConfig: { expiryDetails: { expireInDays: 4 } } } + name: "{{ snapshot_name }}" + replicateToClusterIds: ["{{ cluster.cluster1.uuid }}", test_uuid2, test_uuid3] + snapshot_uuid: - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response == expected_response.response - fail_msg: "Unable to create snapshot create spec" - success_msg: "Snapshot create spec generated successfully using check mode" + fail_msg: Unable to create snapshot create spec + success_msg: Snapshot create spec generated successfully using check mode - -- name: create snapshot with minimal spec +- name: Create snapshot with minimal spec ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}1" - time_machine_uuid: "{{time_machine_uuid}}" + name: "{{ snapshot_name }}1" + time_machine_uuid: "{{ time_machine_uuid }}" register: result - -- name: snapshot create status - assert: +- name: Snapshot create status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - - result.response.name == "{{snapshot_name}}1" + - result.response.name == "{{ snapshot_name }}1" - result.response.timeMachineId == time_machine_uuid - fail_msg: "Unable to create snapshot" - success_msg: "Snapshot created successfully" + fail_msg: Unable to create snapshot + success_msg: Snapshot created successfully -- name: create snapshot with expiry +- name: Create snapshot with expiry ntnx_ndb_database_snapshots: - name: "{{snapshot_name}}2" - time_machine_uuid: "{{time_machine_uuid}}" + name: "{{ snapshot_name }}2" + time_machine_uuid: "{{ time_machine_uuid }}" expiry_days: 4 register: result +- name: Set snapshot uuid + ansible.builtin.set_fact: + snapshot_uuid: "{{ result.snapshot_uuid }}" -- set_fact: - snapshot_uuid: "{{result.snapshot_uuid}}" - -- name: 
snapshot create status - assert: +- name: Snapshot create status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - - result.response.name == "{{snapshot_name}}2" + - result.response.name == "{{ snapshot_name }}2" - result.response.timeMachineId == time_machine_uuid - result.response.lcmConfig.expiryDetails.expireInDays == 4 - fail_msg: "Unable to create snapshot with expiry config" - success_msg: "Snapshot with expiry config created successfully" - + fail_msg: Unable to create snapshot with expiry config + success_msg: Snapshot with expiry config created successfully - -- name: rename snapshot +- name: Rename snapshot ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2-updated" + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2-updated" register: result -- name: check rename status - assert: +- name: Check rename status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - - result.response.name == "{{snapshot_name}}2-updated" - - fail_msg: "Unable to rename snapshot" - success_msg: "Snapshot renamed successfully" + - result.response.name == "{{ snapshot_name }}2-updated" + fail_msg: Unable to rename snapshot + success_msg: Snapshot renamed successfully - -- name: update expiry +- name: Update expiry ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + snapshot_uuid: "{{ snapshot_uuid }}" expiry_days: 5 register: result -- name: snapshot expiry update status - assert: +- name: Snapshot expiry update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - result.response.lcmConfig.expiryDetails.expireInDays == 5 - fail_msg: "Unable to update snapshot expiry" - success_msg: "snapshot expiry updated successfully" - + fail_msg: Unable to update snapshot expiry + success_msg: snapshot 
expiry updated successfully - -- name: remove expiry schedule +- name: Remove expiry schedule ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + snapshot_uuid: "{{ snapshot_uuid }}" remove_expiry: true register: result -- name: snapshot expiry update status - assert: +- name: Snapshot expiry update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - result.response.lcmConfig == None - fail_msg: "Unable to remove snapshot expiry schedule" - success_msg: "snapshot expiry schedule removed successfully" - + fail_msg: Unable to remove snapshot expiry schedule + success_msg: snapshot expiry schedule removed successfully - name: Add expiry schedule and rename ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" - name: "{{snapshot_name}}2" + snapshot_uuid: "{{ snapshot_uuid }}" + name: "{{ snapshot_name }}2" expiry_days: 6 register: result -- name: snapshot update status - assert: +- name: Snapshot update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.snapshot_uuid is defined - - result.response.name == "{{snapshot_name}}2" + - result.response.name == "{{ snapshot_name }}2" - result.response.timeMachineId == time_machine_uuid - result.response.lcmConfig.expiryDetails.expireInDays == 6 - fail_msg: "Unable to add expiry schedule and rename it" - success_msg: "Snapshot updated successfully" - + fail_msg: Unable to add expiry schedule and rename it + success_msg: Snapshot updated successfully - name: Idempotency check ntnx_ndb_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + snapshot_uuid: "{{ snapshot_uuid }}" expiry_days: 6 register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "snapshot got updated" - success_msg: "snapshot update got 
skipped due to no state changes" - + fail_msg: snapshot got updated + success_msg: snapshot update got skipped due to no state changes ############################################ log catchup ###################################### -- name: create spec for log catchup - check_mode: yes +- name: Create spec for log catchup + check_mode: true ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + time_machine_uuid: "{{ time_machine_uuid }}" register: result -- set_fact: - expected_response: { - "changed": false, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "preRestoreLogCatchup", - "value": false - }, - { - "name": "switch_log", - "value": true - } - ], - "forRestore": false - } - } - - - +- name: Set expected response + ansible.builtin.set_fact: + expected_response: + changed: false + error: + failed: false + response: { actionArguments: [{ name: preRestoreLogCatchup, value: false }, { name: switch_log, value: true }], forRestore: false } - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_response - fail_msg: "Unable to create log catcup spec" - success_msg: "log catchup spec created successfully" + fail_msg: Unable to create log catcup spec + success_msg: log catchup spec created successfully - -- name: create spec for log catchup for restore - check_mode: yes +- name: Create spec for log catchup for restore + check_mode: true ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result -- set_fact: - expected_response: { - "changed": false, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "preRestoreLogCatchup", - "value": True - }, - { - "name": "switch_log", - "value": true - } - ], - "forRestore": true - } - } +- name: Set expected response + ansible.builtin.set_fact: + expected_response: + changed: false + error: + failed: 
false + response: { actionArguments: [{ name: preRestoreLogCatchup, value: true }, { name: switch_log, value: true }], forRestore: true } - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_response - fail_msg: "Unable to create log catcup spec" - success_msg: "log catchup spec created successfully" - + fail_msg: Unable to create log catcup spec + success_msg: log catchup spec created successfully -- name: perform log catchup +- name: Perform log catchup ntnx_ndb_database_log_catchup: - time_machine_uuid: "{{time_machine_uuid}}" + time_machine_uuid: "{{ time_machine_uuid }}" for_restore: true register: result -- name: verify log catchup status - assert: +- name: Verify log catchup status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database log catchup failed" - success_msg: "database log catchup completed successfully" + fail_msg: database log catchup failed + success_msg: database log catchup completed successfully ########################################### restore ########################################### -- name: create restore database spec using pitr timestamp - check_mode: yes +- name: Create restore database spec using pitr timestamp + check_mode: true ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" + db_uuid: "{{ db_uuid }}" pitr_timestamp: "2023-01-02 11:02:22" - timezone: "UTC" + timezone: UTC register: result -- set_fact: - expected_result: { - "changed": false, - "db_uuid": null, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], - "latestSnapshot": null, - "snapshotId": null, - "timeZone": "UTC", - "userPitrTimestamp": "2023-01-02 11:02:22" - } - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + db_uuid: + error: + failed: false + response: + actionArguments: [{ name: sameLocation, value: true }] + 
latestSnapshot: + snapshotId: + timeZone: UTC + userPitrTimestamp: "2023-01-02 11:02:22" - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create restore using pitr timestamp spec" - success_msg: "Spec for databas restore using pitr timetsmap created successfully" + fail_msg: Unable to create restore using pitr timestamp spec + success_msg: Spec for databas restore using pitr timetsmap created successfully - -- name: create restore database spec with latest snapshot - check_mode: yes +- name: Create restore database spec with latest snapshot + check_mode: true ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" + db_uuid: "{{ db_uuid }}" register: result -- set_fact: - expected_result: { - "changed": false, - "db_uuid": null, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], - "latestSnapshot": true, - "snapshotId": null, - "timeZone": null, - "userPitrTimestamp": null - } - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + db_uuid: + error: + failed: false + response: + actionArguments: [{ name: sameLocation, value: true }] + latestSnapshot: true + snapshotId: + timeZone: + userPitrTimestamp: - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create restore using latest snapshot spec" - success_msg: "Spec for databas restore using latest snapshot created successfully" - + fail_msg: Unable to create restore using latest snapshot spec + success_msg: Spec for databas restore using latest snapshot created successfully - -- name: create restore database spec using snapshot uuid - check_mode: yes +- name: Create restore database spec using snapshot uuid + check_mode: true ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ 
snapshot_uuid }}" register: result -- set_fact: - expected_result: { - "changed": false, - "db_uuid": null, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], - "latestSnapshot": null, - "snapshotId": "{{snapshot_uuid}}", - "timeZone": null, - "userPitrTimestamp": null - } - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + db_uuid: + error: + failed: false + response: + actionArguments: [{ name: sameLocation, value: true }] + latestSnapshot: + snapshotId: "{{ snapshot_uuid }}" + timeZone: + userPitrTimestamp: - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create restore using snapshot uuid spec" - success_msg: "Spec for databas restore using snapshot uuid created successfully" - + fail_msg: Unable to create restore using snapshot uuid spec + success_msg: Spec for databas restore using snapshot uuid created successfully -- name: perform restore using latest snapshot +- name: Perform restore using latest snapshot ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result -- name: restore status - assert: +- name: Restore status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "Unable to restore database using latest snapshot" - success_msg: "database restored successfully using latest snapshot" + fail_msg: Unable to restore database using latest snapshot + success_msg: database restored successfully using latest snapshot - -- name: perform restore using snapshot uuid +- name: Perform restore using snapshot uuid ntnx_ndb_database_restore: - db_uuid: "{{db_uuid}}" - snapshot_uuid: "{{snapshot_uuid}}" + db_uuid: "{{ db_uuid }}" + snapshot_uuid: "{{ snapshot_uuid }}" register: result -- 
name: restore status - assert: +- name: Restore status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "Unable to restore database using snapshot uuid" - success_msg: "database restored successfully using snapshot uuid" + fail_msg: Unable to restore database using snapshot uuid + success_msg: database restored successfully using snapshot uuid ########################################### scaling ########################################### -- name: create spec for scaling - check_mode: yes +- name: Create spec for scaling + check_mode: true ntnx_ndb_database_scale: - db_uuid: "{{db_uuid}}" + db_uuid: "{{ db_uuid }}" storage_gb: 10 - pre_update_cmd: "ls" - post_update_cmd: "ls -a" + pre_update_cmd: ls + post_update_cmd: ls -a register: result -- set_fact: - expected_result: { - "changed": false, - "db_uuid": null, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "working_dir", - "value": "/tmp" - }, - { - "name": "data_storage_size", - "value": 10 - }, - { - "name": "pre_script_cmd", - "value": "ls" - }, - { - "name": "post_script_cmd", - "value": "ls -a" - } - ], - "applicationType": "postgres_database" - } - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + db_uuid: + error: + failed: false + response: + actionArguments: + - { name: working_dir, value: /tmp } + - { name: data_storage_size, value: 10 } + - { name: pre_script_cmd, value: ls } + - { name: post_script_cmd, value: ls -a } + applicationType: postgres_database - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create database scaling spec" - success_msg: "Spec for database scaling with pre post commands created successfully" - + fail_msg: Unable to create database scaling spec + success_msg: Spec for database scaling with pre post commands created successfully -- name: extend 
database storage for scaling database +- name: Extend database storage for scaling database ntnx_ndb_database_scale: - db_uuid: "{{db_uuid}}" + db_uuid: "{{ db_uuid }}" storage_gb: 2 - pre_update_cmd: "ls" - post_update_cmd: "ls -a" + pre_update_cmd: ls + post_update_cmd: ls -a register: result -- name: database scaling status - assert: +- name: Database scaling status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "Unable to extend database storage (scale)" - success_msg: "database storage extended (scaling) successfully" + fail_msg: Unable to extend database storage (scale) + success_msg: database storage extended (scaling) successfully ############################################ add / remove linked databases ########################################### - -- name: create databases in database instance - check_mode: yes +- name: Create databases in database instance + check_mode: true ntnx_ndb_linked_databases: - db_instance_uuid: "{{db_uuid}}" + db_instance_uuid: "{{ db_uuid }}" databases: - test1 - test2 - test3 register: result -- set_fact: - expected_result: { - "changed": false, - "db_instance_uuid": "{{db_uuid}}", - "error": null, - "failed": false, - "response": { - "databases": [ - { - "databaseName": "test1" - }, - { - "databaseName": "test2" - }, - { - "databaseName": "test3" - } - ] - } - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + db_instance_uuid: "{{ db_uuid }}" + error: + failed: false + response: { databases: [databaseName: test1, databaseName: test2, databaseName: test3] } - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create spec for adding databases in database instance" - success_msg: "Spec for adding databases in database instance created successfully" - + fail_msg: Unable to create spec for adding databases in database instance + success_msg: 
Spec for adding databases in database instance created successfully -- name: add databases in database instance +- name: Add databases in database instance ntnx_ndb_linked_databases: - db_instance_uuid: "{{db_uuid}}" + db_instance_uuid: "{{ db_uuid }}" databases: - test1 - test2 @@ -625,69 +525,66 @@ # {% raw %} -- name: create linked databases to its uuid map - set_fact: - linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" +- name: Create linked databases to its uuid map + ansible.builtin.set_fact: + linked_databases: "{{ linked_databases | default({}) | combine({item['name']: item['id']}) }}" loop: "{{result.response}}" no_log: true # {% endraw %} -- name: check linked database update status - assert: +- name: Check linked database update status + ansible.builtin.assert: that: - result.changed == true - result.db_instance_uuid is defined - "'test1' in linked_databases" - "'test2' in linked_databases" - fail_msg: "Unable to add database to database instance" - success_msg: "databases added to database instance successfully" + fail_msg: Unable to add database to database instance + success_msg: databases added to database instance successfully - -- name: remove databases in database instance +- name: Remove databases in database instance ntnx_ndb_linked_databases: - state: "absent" - db_instance_uuid: "{{db_uuid}}" - database_uuid: "{{linked_databases.test1}}" + state: absent + db_instance_uuid: "{{ db_uuid }}" + database_uuid: "{{ linked_databases.test1 }}" register: result # {% raw %} -- name: create linked database map - set_fact: - linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" +- name: Create linked database map + ansible.builtin.set_fact: + linked_databases: "{{ linked_databases | default({}) | combine({item['name']: item['id']}) }}" loop: "{{result.response}}" no_log: true # {% endraw %} -- name: check linked database update status - assert: +- name: Check 
linked database update status + ansible.builtin.assert: that: - result.changed == true - result.db_instance_uuid is defined - "'test2' in linked_databases" - fail_msg: "Unable to remove database from database instance" - success_msg: "linked database from database instance removed successfully" - + fail_msg: Unable to remove database from database instance + success_msg: linked database from database instance removed successfully ############################################ cleanup ########################################### - -- name: delete database created earlier +- name: Delete database created earlier ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" delete_db_server_vms: true - delete_time_machine: True + delete_time_machine: true register: result -- name: verify status of delete of database along with time machine and db server vm delete - assert: +- name: Verify status of delete of database along with time machine and db server vm delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml index 3525a5c66..d68010a4c 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_databases_actions integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + 
nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "all_actions.yml" + - name: Import the actions + ansible.builtin.import_tasks: all_actions.yml diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/aliases b/tests/integration/targets/ntnx_ndb_databases_sanity/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_databases_sanity/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml index d09f77ab1..b88c0e2fe 100644 --- a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_databases_sanity integration test + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "tests.yml" + - name: Get all databases + ansible.builtin.import_tasks: tests.yml diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml index 26cc67f06..e8447f1f8 100644 --- 
a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml @@ -1,58 +1,57 @@ --- # Summary: # This playbook will test basic database flows - - -- debug: - msg: "start ndb databases crud tests" +- name: Start ndb databases crud tests + ansible.builtin.debug: + msg: start ndb databases crud tests - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}" - db1_name_updated: "{{random_name[0]}}-updated" - vm1_name: "{{random_name[0]}}-vm" +- name: Define variables for db server vm + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}" + db1_name_updated: "{{ random_name[0] }}-updated" + vm1_name: "{{ random_name[0] }}-vm" ################################### Single instance postgres database tests ############################# - -- name: create spec for single instance postgres database on new db server vm - check_mode: yes +- name: Create spec for single instance postgres database on new db server vm + check_mode: true ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}" desc: vm for db server - password: "test_password" + password: test_password cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: name: "{{ compute_profile.name }}" - pub_ssh_key: "test_key" + pub_ssh_key: test_key postgres: listener_port: "9999" db_name: testAnsible - 
db_password: "test_password" + db_password: test_password db_size: 200 - type: "single" + type: single auto_tune_staging_drive: false allocate_pg_hugepage: true - pre_create_script: "ls" - post_create_script: "ls -a" + pre_create_script: ls + post_create_script: ls -a time_machine: name: TM1 @@ -66,169 +65,98 @@ quaterly: JANUARY log_catchup: 30 snapshots_per_day: 2 - auto_tune_staging_drive: False + auto_tune_staging_drive: false tags: - databases: "single-instance-dbs" + databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - expected_action_arguments: [ - { - "name": "dbserver_description", - "value": "vm for db server" - }, - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "auto_tune_staging_drive", - "value": false - }, - { - "name": "allocate_pg_hugepage", - "value": True - }, - { - "name": "cluster_database", - "value": false - }, - { - "name": "auth_method", - "value": "md5" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_create_script", - "value": "ls" - }, - { - "name": "post_create_script", - "value": "ls -a" - }, - { - "name": "database_names", - "value": "testAnsible" - }, - { - "name": "database_size", - "value": "200" - } - ] - -- set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - 
"snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": "{{sla.uuid}}" - } - -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } +- name: Define variables + ansible.builtin.set_fact: + expected_action_arguments: + - { name: dbserver_description, value: vm for db server } + - { name: listener_port, value: "9999" } + - { name: auto_tune_staging_drive, value: false } + - { name: allocate_pg_hugepage, value: true } + - { name: cluster_database, value: false } + - { name: auth_method, value: md5 } + - { name: db_password, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: pre_create_script, value: ls } + - { name: post_create_script, value: ls -a } + - { name: database_names, value: testAnsible } + - { name: database_size, value: "200" } + +- name: Set expected time machine info + ansible.builtin.set_fact: + expected_time_machine_info: + autoTuneLogDrive: true + description: TM-desc + name: TM1 + schedule: + continuousSchedule: { enabled: true, logBackupInterval: 30, snapshotsPerDay: 2 } + monthlySchedule: { dayOfMonth: 4, enabled: true } + quartelySchedule: { dayOfMonth: 4, enabled: true, startMonth: JANUARY } + snapshotTimeOfDay: { hours: 11, minutes: 10, seconds: 2 } + weeklySchedule: { dayOfWeek: WEDNESDAY, enabled: true } + slaId: "{{ sla.uuid }}" + +- name: Set maintenance tasks + ansible.builtin.set_fact: + mainetance_tasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls 
-F, preCommand: ls -l } }, taskType: DB_PATCHING } - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response.name == db1_name - result.response.databaseDescription == "ansible-created-db-desc" - result.response.actionArguments == expected_action_arguments - - result.response.computeProfileId == "{{compute_profile.uuid}}" - - result.response.networkProfileId == "{{network_profile.uuid}}" - - result.response.dbParameterProfileId == "{{db_params_profile.uuid}}" - - result.response.softwareProfileId == "{{software_profile.uuid}}" + - result.response.computeProfileId == "{{ compute_profile.uuid }}" + - result.response.networkProfileId == "{{ network_profile.uuid }}" + - result.response.dbParameterProfileId == "{{ db_params_profile.uuid }}" + - result.response.softwareProfileId == "{{ software_profile.uuid }}" - result.response.autoTuneStagingDrive == False - result.response.timeMachineInfo == expected_time_machine_info - result.response.nodes | length == 1 - result.response.nodeCount == 1 - - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.nodes[0].nxClusterId == "{{ cluster.cluster1.uuid }}" - result.response.maintenanceTasks == mainetance_tasks - result.response.createDbserver == True - fail_msg: "Unable to create single instance postgres database provision spec" - success_msg: "single instance postgres database provision spec created successfully" - - + fail_msg: Unable to create single instance postgres database provision spec + success_msg: single instance postgres database provision spec created successfully -- name: create single instance postgres database on new db server vm +- name: Create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" 
+ name: "{{ db_params_profile.name }}" db_vm: create_new_server: ip: "{{ vm_ip }}" name: "{{ vm1_name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: @@ -242,7 +170,7 @@ db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 - type: "single" + type: single time_machine: name: TM1 @@ -257,39 +185,40 @@ log_catchup: 30 snapshots_per_day: 2 tags: - databases: "single-instance-dbs" + databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - db_uuid: "{{result.db_uuid}}" +- name: Set db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" - name: Ensure properties is defined - set_fact: + ansible.builtin.set_fact: properties: {} when: properties is undefined # skip jekyll/Liquid syntax check # -- name: create properties map - set_fact: - properties: "{{ properties | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -304,22 +233,23 @@ - properties["auth"] == "md5" - result.response.databaseNodes[0].status == "READY" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - 
result.response.tags[0].value == "single-instance-dbs" - fail_msg: "Unable to create single instance postgres database" - success_msg: "single instance postgres database created successfully" + fail_msg: Unable to create single instance postgres database + success_msg: single instance postgres database created successfully -- set_fact: - db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" +- name: Set db server uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.response.databaseNodes[0].dbserverId }}" -- name: get vm details associated to the database instance created above and verify +- name: Get vm details associated to the database instance created above and verify ntnx_ndb_db_servers_info: - uuid: "{{db_server_uuid}}" + uuid: "{{ db_server_uuid }}" register: result - name: Verify DB server VM status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' @@ -327,48 +257,47 @@ - result.response.name == vm1_name - result.response.nxClusterId == cluster.cluster1.uuid - result.response.description == "vm for db server" - fail_msg: "Unable to verify db server vm" - success_msg: "db server vm created by database instance creation verified successfully" + fail_msg: Unable to verify db server vm + success_msg: db server vm created by database instance creation verified successfully ################################### update tests ############################# - -- name: update database with check mode - check_mode: yes +- name: Update database with check mode + check_mode: true ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: "ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - databases: "single-instance-dbs-updated" + databases: single-instance-dbs-updated register: result -- name: check mode status - assert: +- name: Check mode status + ansible.builtin.assert: 
that: - result.response is defined - result.changed == False - result.response.name == db1_name_updated - result.response.description == "ansible-created-db-desc-updated" - fail_msg: "Unable to create single instance postgres database update spec" - success_msg: "single instance postgres database update spec generated successfully" + fail_msg: Unable to create single instance postgres database update spec + success_msg: single instance postgres database update spec generated successfully -- name: update database +- name: Update database ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: "ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - databases: "single-instance-dbs-updated" + databases: single-instance-dbs-updated register: result -- name: update status - assert: +- name: Update status + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -377,70 +306,66 @@ - result.response.name == db1_name_updated - result.response.description == "ansible-created-db-desc-updated" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs-updated" + fail_msg: Unable to update single instance postgres database + success_msg: single instance postgres database updated successfully - fail_msg: "Unable to update single instance postgres database" - success_msg: "single instance postgres database updated successfully" - - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: "ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - databases: 
"single-instance-dbs-updated" + databases: single-instance-dbs-updated register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "database got updated" - success_msg: "database update skipped successfully due to no changes in spec" + fail_msg: database got updated + success_msg: database update skipped successfully due to no changes in spec ################################### delete tests ############################# -- name: create spec for delete db from vm - check_mode: yes +- name: Create spec for delete db from vm + check_mode: true ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true delete_db_from_vm: true register: result -- name: verify delete check mode spec - assert: +- name: Verify delete check mode spec + ansible.builtin.assert: that: - result.changed == false - result.failed == false - result.response.delete == True - result.response.remove == False - result.response.deleteTimeMachine == False - fail_msg: "creation of spec for delete db from vm failed" - success_msg: "spec for delete db from vm created successfully" - - + fail_msg: creation of spec for delete db from vm failed + success_msg: spec for delete db from vm created successfully -- name: create spec for soft remove - check_mode: yes +- name: Create spec for soft remove + check_mode: true ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true soft_delete: true delete_time_machine: true register: result -- name: verify soft remove check mode spec - assert: +- name: Verify soft remove check mode spec + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -448,13 +373,13 @@ - result.response.remove == False - result.response.softRemove == True - 
result.response.deleteTimeMachine == True - fail_msg: "creation of spec for soft remove with time machine delete failed" - success_msg: "spec for soft remove with time machine delete created successfully" + fail_msg: creation of spec for soft remove with time machine delete failed + success_msg: spec for soft remove with time machine delete created successfully +##################################### INFO Module tests####################################################### -#####################################INFO Module tests####################################################### - -- debug: +- name: Start testing ntnx_ndb_databases_info based on created database + ansible.builtin.debug: msg: Start testing ntnx_ndb_databases_info based on created database - name: List ndb databases @@ -462,125 +387,122 @@ register: databases no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - databases.response is defined - databases.failed == false - databases.changed == false - databases.response | length > 0 - fail_msg: "Unable to list all era databases" - success_msg: "era databases listed successfully" + fail_msg: Unable to list all era databases + success_msg: era databases listed successfully ################################################################ - name: Get era databases using its name ntnx_ndb_databases_info: - name: "{{databases.response[0].name}}" + name: "{{ databases.response[0].name }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{databases.response[0].id}}" - fail_msg: "Unable to Get era databases using its name" - success_msg: "Get era databases using its name finished successfully" + - result.response.id == "{{ databases.response[0].id }}" + fail_msg: Unable to Get era databases using its name + 
success_msg: Get era databases using its name finished successfully ################################################################ - name: Get era databases using its id ntnx_ndb_databases_info: - uuid: "{{databases.response[0].id}}" + uuid: "{{ databases.response[0].id }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{databases.response[0].name}}" - fail_msg: "Unable to Get era databases using its id" - success_msg: "Get era databases using its id finished successfully" + - result.response.name == "{{ databases.response[0].name }}" + fail_msg: Unable to Get era databases using its id + success_msg: Get era databases using its id finished successfully ################################################################ - name: Get era databases using its id and detailed response ntnx_ndb_databases_info: filters: - detailed: True - uuid: "{{databases.response[0].id}}" + detailed: true + uuid: "{{ databases.response[0].id }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{databases.response[0].name}}" + - result.response.name == "{{ databases.response[0].name }}" - result.response.timeMachine is defined - fail_msg: "Unable to Get era databases using its id" - success_msg: "Get era databases using its id finished successfully" - + fail_msg: Unable to Get era databases using its id + success_msg: Get era databases using its id finished successfully ################################################################ -- name: get era database with incorrect name +- name: Get era database with incorrect name ntnx_ndb_databases_info: - name: "xxxxxxx" + name: xxxxxxx register: result - 
ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given ############################################################################################ - -- name: unregister db along with delete time machine +- name: Unregister db along with delete time machine ntnx_ndb_databases: - db_uuid: "{{db_uuid}}" - state: "absent" + db_uuid: "{{ db_uuid }}" + state: absent wait: true delete_time_machine: true register: result -- name: verify status of delete of database along with time machine delete - assert: +- name: Verify status of delete of database along with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully - -- name: delete db server vm +- name: Delete db server vm ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.response.status == "5" - fail_msg: "Unable to delete db server vm" - success_msg: "DB server VM deleted successfully" + 
fail_msg: Unable to delete db server vm + success_msg: DB server VM deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml index d09f77ab1..26378aab3 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_databases_single_instance_1 integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "tests.yml" + - name: Import tasks + ansible.builtin.import_tasks: tests.yml diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml index 
464f71e61..9ccabf236 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml @@ -6,56 +6,57 @@ # 3. Unregister database instance # 4. Register the database created in point 1 from the db server vm created earlier -- debug: - msg: "start ndb databases test flow 1" +- name: Start ndb databases test flow 1 + ansible.builtin.debug: + msg: start ndb databases test flow 1 - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}" - db1_name_updated: "{{random_name[0]}}-updated" - vm1_name: "{{random_name[0]}}-vm" +- name: Define variables for db and vm names + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}" + db1_name_updated: "{{ random_name[0] }}-updated" + vm1_name: "{{ random_name[0] }}-vm" ################################### Single instance postgres database tests ############################# - -- name: create spec for single instance postgres database on new db server vm - check_mode: yes +- name: Create spec for single instance postgres database on new db server vm + check_mode: true ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: name: "{{ vm1_name }}" desc: vm for db server - password: "test_password" + password: test_password cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: name: "{{ network_profile.name }}" compute_profile: name: "{{ compute_profile.name 
}}" - pub_ssh_key: "test_key" + pub_ssh_key: test_key postgres: listener_port: "9999" db_name: testAnsible - db_password: "test_password" + db_password: test_password db_size: 200 - type: "single" + type: single auto_tune_staging_drive: false allocate_pg_hugepage: true - pre_create_script: "ls" - post_create_script: "ls -a" + pre_create_script: ls + post_create_script: ls -a time_machine: name: TM1 @@ -69,169 +70,98 @@ quaterly: JANUARY log_catchup: 30 snapshots_per_day: 2 - auto_tune_staging_drive: False + auto_tune_staging_drive: false tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - expected_action_arguments: [ - { - "name": "dbserver_description", - "value": "vm for db server" - }, - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "auto_tune_staging_drive", - "value": false - }, - { - "name": "allocate_pg_hugepage", - "value": True - }, - { - "name": "cluster_database", - "value": false - }, - { - "name": "auth_method", - "value": "md5" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_create_script", - "value": "ls" - }, - { - "name": "post_create_script", - "value": "ls -a" - }, - { - "name": "database_names", - "value": "testAnsible" - }, - { - "name": "database_size", - "value": "200" - } - ] - -- set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - 
"dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": "{{sla.uuid}}" - } - -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } +- name: Set expected action arguments + ansible.builtin.set_fact: + expected_action_arguments: + - { name: dbserver_description, value: vm for db server } + - { name: listener_port, value: "9999" } + - { name: auto_tune_staging_drive, value: false } + - { name: allocate_pg_hugepage, value: true } + - { name: cluster_database, value: false } + - { name: auth_method, value: md5 } + - { name: db_password, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: pre_create_script, value: ls } + - { name: post_create_script, value: ls -a } + - { name: database_names, value: testAnsible } + - { name: database_size, value: "200" } + +- name: Set expected time machine info + ansible.builtin.set_fact: + expected_time_machine_info: + autoTuneLogDrive: true + description: TM-desc + name: TM1 + schedule: + continuousSchedule: { enabled: true, logBackupInterval: 30, snapshotsPerDay: 2 } + monthlySchedule: { dayOfMonth: 4, enabled: true } + quartelySchedule: { dayOfMonth: 4, enabled: true, startMonth: JANUARY } + snapshotTimeOfDay: { hours: 11, minutes: 10, seconds: 2 } + weeklySchedule: { dayOfWeek: WEDNESDAY, enabled: true } + slaId: "{{ sla.uuid }}" + +- name: Set maintenance tasks + ansible.builtin.set_fact: + mainetance_tasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - 
{ payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response.name == db1_name - result.response.databaseDescription == "ansible-created-db-desc" - result.response.actionArguments == expected_action_arguments - - result.response.computeProfileId == "{{compute_profile.uuid}}" - - result.response.networkProfileId == "{{network_profile.uuid}}" - - result.response.dbParameterProfileId == "{{db_params_profile.uuid}}" - - result.response.softwareProfileId == "{{software_profile.uuid}}" + - result.response.computeProfileId == "{{ compute_profile.uuid }}" + - result.response.networkProfileId == "{{ network_profile.uuid }}" + - result.response.dbParameterProfileId == "{{ db_params_profile.uuid }}" + - result.response.softwareProfileId == "{{ software_profile.uuid }}" - result.response.autoTuneStagingDrive == False - result.response.timeMachineInfo == expected_time_machine_info - result.response.nodes | length == 1 - result.response.nodeCount == 1 - - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.nodes[0].nxClusterId == "{{ cluster.cluster1.uuid }}" - result.response.maintenanceTasks == mainetance_tasks - result.response.createDbserver == True - fail_msg: "Unable to create single instance postgres database provision spec" - success_msg: "single instance postgres database provision spec created successfully" + fail_msg: Unable to create single instance postgres database provision spec + success_msg: single instance postgres database provision spec created successfully - - -- name: create single instance postgres database on new db server vm +- name: Create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - 
desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: create_new_server: ip: "{{ vm_ip }}" name: "{{ vm1_name }}" - desc: "vm for db server" + desc: vm for db server password: "{{ vm_password }}" cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" software_profile: name: "{{ software_profile.name }}" network_profile: @@ -245,7 +175,7 @@ db_name: testAnsible db_password: "{{ vm_password }}" db_size: 200 - type: "single" + type: single time_machine: name: TM1 @@ -260,34 +190,35 @@ log_catchup: 30 snapshots_per_day: 2 tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - db_uuid: "{{result.db_uuid}}" +- name: Set db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -302,22 +233,23 @@ - properties["auth"] == "md5" - result.response.databaseNodes[0].status == "READY" - result.response.tags | length == 1 - - result.response.tags[0].tagName == 
"{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs" - fail_msg: "Unable to create single instance postgres database" - success_msg: "single instance postgres database created successfully" + fail_msg: Unable to create single instance postgres database + success_msg: single instance postgres database created successfully -- set_fact: - db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" +- name: Set db server uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.response.databaseNodes[0].dbserverId }}" -- name: get vm details associated to the database instance created above and verify +- name: Get vm details associated to the database instance created above and verify ntnx_ndb_db_servers_info: - uuid: "{{db_server_uuid}}" + uuid: "{{ db_server_uuid }}" register: result - name: Verify DB server VM status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' @@ -325,48 +257,47 @@ - result.response.name == vm1_name - result.response.nxClusterId == cluster.cluster1.uuid - result.response.description == "vm for db server" - fail_msg: "Unable to verify db server vm" - success_msg: "db server vm created by database instance creation verified successfully" + fail_msg: Unable to verify db server vm + success_msg: db server vm created by database instance creation verified successfully ################################### update tests ############################# - -- name: update database with check mode - check_mode: yes +- name: Update database with check mode + check_mode: true ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: "ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - ansible-databases: "single-instance-dbs-updated" + ansible-databases: 
single-instance-dbs-updated register: result -- name: check mode status - assert: +- name: Check mode status + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.response.name == db1_name_updated - result.response.description == "ansible-created-db-desc-updated" - fail_msg: "Unable to create single instance postgres database update spec" - success_msg: "single instance postgres database update spec generated successfully" + fail_msg: Unable to create single instance postgres database update spec + success_msg: single instance postgres database update spec generated successfully -- name: update database +- name: Update database ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: "ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - ansible-databases: "single-instance-dbs-updated" + ansible-databases: single-instance-dbs-updated register: result -- name: update status - assert: +- name: Update status + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -375,70 +306,66 @@ - result.response.name == db1_name_updated - result.response.description == "ansible-created-db-desc-updated" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs-updated" + fail_msg: Unable to update single instance postgres database + success_msg: single instance postgres database updated successfully - fail_msg: "Unable to update single instance postgres database" - success_msg: "single instance postgres database updated successfully" - - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_databases: wait: true - db_uuid: "{{db_uuid}}" - name: "{{db1_name_updated}}" - desc: 
"ansible-created-db-desc-updated" + db_uuid: "{{ db_uuid }}" + name: "{{ db1_name_updated }}" + desc: ansible-created-db-desc-updated tags: - ansible-databases: "single-instance-dbs-updated" + ansible-databases: single-instance-dbs-updated register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "database got updated" - success_msg: "database update skipped successfully due to no changes in spec" + fail_msg: database got updated + success_msg: database update skipped successfully due to no changes in spec ################################### delete tests ############################# -- name: create spec for delete db from vm - check_mode: yes +- name: Create spec for delete db from vm + check_mode: true ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true delete_db_from_vm: true register: result -- name: verify delete check mode spec - assert: +- name: Verify delete check mode spec + ansible.builtin.assert: that: - result.changed == false - result.failed == false - result.response.delete == True - result.response.remove == False - result.response.deleteTimeMachine == False - fail_msg: "creation of spec for delete db from vm failed" - success_msg: "spec for delete db from vm created successfully" + fail_msg: creation of spec for delete db from vm failed + success_msg: spec for delete db from vm created successfully - - -- name: create spec for soft remove - check_mode: yes +- name: Create spec for soft remove + check_mode: true ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true soft_delete: true delete_time_machine: true register: result -- name: verify soft remove check mode spec - assert: +- name: Verify soft remove check mode spec + ansible.builtin.assert: that: - 
result.changed == false - result.failed == false @@ -446,48 +373,46 @@ - result.response.remove == False - result.response.softRemove == True - result.response.deleteTimeMachine == True - fail_msg: "creation of spec for soft remove with time machine delete failed" - success_msg: "spec for soft remove with time machine delete created successfully" - + fail_msg: creation of spec for soft remove with time machine delete failed + success_msg: spec for soft remove with time machine delete created successfully -- name: unregister db along with delete time machine +- name: Unregister db along with delete time machine ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true delete_time_machine: true register: result -- name: verify status of unregister of database with time machine delete - assert: +- name: Verify status of unregister of database with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database unregistration failed" - success_msg: "database unregistered successfully" + fail_msg: database unregistration failed + success_msg: database unregistered successfully ################################### single instance postgres database registration tests ############################# - -- name: create spec for registering previously unregistered database from previously created VM's ip - check_mode: yes +- name: Create spec for registering previously unregistered database from previously created VM's ip + check_mode: true ntnx_ndb_register_database: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" - auto_tune_staging_drive: False + name: "{{ db1_name }}" + desc: ansible-created-db-desc + auto_tune_staging_drive: false db_vm: registered: - ip: "10.10.10.10" + ip: 10.10.10.10 postgres: listener_port: "9999" db_name: testAnsible1 - db_password: "{{vm_password}}" - software_path: 
"{{postgres.software_home}}" - db_user: "postgres" + db_password: "{{ vm_password }}" + software_path: "{{ postgres.software_home }}" + db_user: postgres time_machine: name: TM1 @@ -503,105 +428,54 @@ snapshots_per_day: 2 tags: - ansible-databases: "single-instance-dbs" - working_directory: "/check" + ansible-databases: single-instance-dbs + working_directory: /check automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - expected_action_arguments: [ - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "db_name", - "value": "testAnsible1" - }, - { - "name": "db_user", - "value": "postgres" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "postgres_software_home", - "value": "{{postgres.software_home}}" - } - ] - -- set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": "{{sla.uuid}}" - } - -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls 
-F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } +- name: Set expected action arguments + ansible.builtin.set_fact: + expected_action_arguments: + - { name: listener_port, value: "9999" } + - { name: db_name, value: testAnsible1 } + - { name: db_user, value: postgres } + - { name: db_password, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: postgres_software_home, value: "{{ postgres.software_home }}" } + +- name: Set expected time machine info + ansible.builtin.set_fact: + expected_time_machine_info: + autoTuneLogDrive: true + description: TM-desc + name: TM1 + schedule: + continuousSchedule: { enabled: true, logBackupInterval: 30, snapshotsPerDay: 2 } + monthlySchedule: { dayOfMonth: 4, enabled: true } + quartelySchedule: { dayOfMonth: 4, enabled: true, startMonth: JANUARY } + snapshotTimeOfDay: { hours: 11, minutes: 10, seconds: 2 } + weeklySchedule: { dayOfWeek: WEDNESDAY, enabled: true } + slaId: "{{ sla.uuid }}" + +- name: Set maintenance tasks + ansible.builtin.set_fact: + mainetance_tasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -614,25 +488,24 @@ - result.response.vmIp == "10.10.10.10" - result.response.maintenanceTasks == mainetance_tasks - result.response.workingDirectory == "/check" - fail_msg: "Unable to create register database spec" - success_msg: "single instance postgres database register spec created successfully" - + fail_msg: Unable to create register database spec + success_msg: single instance postgres database register spec created successfully -- name: regsiter previously unregistered database from previously created VM +- name: Regsiter previously 
unregistered database from previously created VM ntnx_ndb_register_database: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_vm: registered: - name: "{{vm1_name}}" + name: "{{ vm1_name }}" postgres: db_name: testAnsible1 - db_password: "{{vm_password}}" - software_path: "{{postgres.software_home}}" + db_password: "{{ vm_password }}" + software_path: "{{ postgres.software_home }}" time_machine: name: TM1 @@ -648,32 +521,32 @@ snapshots_per_day: 2 tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -688,18 +561,19 @@ - result.response.databaseNodes[0].dbserverId == db_server_uuid - result.response.type == "postgres_database" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs" - fail_msg: "Unable to register single instance postgres database" - success_msg: "single instance postgres database 
registered successfully" - + fail_msg: Unable to register single instance postgres database + success_msg: single instance postgres database registered successfully -- set_fact: - db_uuid: "{{result.db_uuid}}" -#####################################INFO Module tests####################################################### +- name: Set db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" +##################################### INFO Module tests####################################################### -- debug: +- name: Start testing ntnx_ndb_databases_info based on created database + ansible.builtin.debug: msg: Start testing ntnx_ndb_databases_info based on created database - name: List ndb databases @@ -707,125 +581,122 @@ register: databases no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - databases.response is defined - databases.failed == false - databases.changed == false - databases.response | length > 0 - fail_msg: "Unable to list all era databases" - success_msg: "era databases listed successfully" + fail_msg: Unable to list all era databases + success_msg: era databases listed successfully ################################################################ - name: Get era databases using its name ntnx_ndb_databases_info: - name: "{{databases.response[0].name}}" + name: "{{ databases.response[0].name }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{databases.response[0].id}}" - fail_msg: "Unable to Get era databases using its name" - success_msg: "Get era databases using its name finished successfully" + - result.response.id == "{{ databases.response[0].id }}" + fail_msg: Unable to Get era databases using its name + success_msg: Get era databases using its name finished successfully 
################################################################ - name: Get era databases using its id ntnx_ndb_databases_info: - uuid: "{{databases.response[0].id}}" + uuid: "{{ databases.response[0].id }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{databases.response[0].name}}" - fail_msg: "Unable to Get era databases using its id" - success_msg: "Get era databases using its id finished successfully" + - result.response.name == "{{ databases.response[0].name }}" + fail_msg: Unable to Get era databases using its id + success_msg: Get era databases using its id finished successfully ################################################################ - name: Get era databases using its id and detailed response ntnx_ndb_databases_info: filters: - detailed: True - uuid: "{{databases.response[0].id}}" + detailed: true + uuid: "{{ databases.response[0].id }}" register: result no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{databases.response[0].name}}" + - result.response.name == "{{ databases.response[0].name }}" - result.response.timeMachine is defined - fail_msg: "Unable to Get era databases using its id" - success_msg: "Get era databases using its id finished successfully" - + fail_msg: Unable to Get era databases using its id + success_msg: Get era databases using its id finished successfully ################################################################ -- name: get era database with incorrect name +- name: Get era database with incorrect name ntnx_ndb_databases_info: - name: "xxxxxxx" + name: xxxxxxx register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check 
listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given ############################################################################################ - -- name: unregister db along with delete time machine +- name: Unregister db along with delete time machine ntnx_ndb_databases: - db_uuid: "{{db_uuid}}" - state: "absent" + db_uuid: "{{ db_uuid }}" + state: absent wait: true delete_time_machine: true register: result -- name: verify status of delete of database along with time machine delete - assert: +- name: Verify status of delete of database along with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" - + fail_msg: database delete failed + success_msg: database deleted successfully -- name: delete db server vm +- name: Delete db server vm ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.response.status == "5" - fail_msg: "Unable to delete db server vm" - success_msg: "DB server VM deleted successfully" + fail_msg: Unable to delete db server vm + success_msg: DB server VM deleted 
successfully diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml index d09f77ab1..c713a8006 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_databases_single_instance_2 integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "tests.yml" + - name: Import tasks + ansible.builtin.import_tasks: tests.yml diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml index f213c1b8d..74fcc1f01 100644 --- 
a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml @@ -6,23 +6,25 @@ # 3. Unregister database instance (db1) and db server vm (vm1) # 4. Register db1 from unregistered vm1 -- debug: - msg: "start ndb databases test flow 2" +- name: Start ndb databases test flow 2 + ansible.builtin.debug: + msg: start ndb databases test flow 2 - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - db1_name: "{{random_name[0]}}" - db1_name_updated: "{{random_name[0]}}-updated" - vm1_name: "{{random_name[0]}}-vm" +- name: Set db1_name, db1_name_updated and vm1_name + ansible.builtin.set_fact: + db1_name: "{{ random_name[0] }}" + db1_name_updated: "{{ random_name[0] }}-updated" + vm1_name: "{{ random_name[0] }}-vm" -- name: create db server vm using software profile +- name: Create db server vm using software profile ntnx_ndb_db_server_vms: - wait: True + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -33,35 +35,36 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' - result.uuid is defined - result.changed == true - fail_msg: "Unable to create db server vm using software profile" - success_msg: "DB server VM created successfully" + fail_msg: Unable to create db server vm using software profile + success_msg: DB 
server VM created successfully -- set_fact: +- name: Set db server uuid + ansible.builtin.set_fact: db_server_uuid: "{{ result.uuid }}" -- set_fact: +- name: Set vm ip + ansible.builtin.set_fact: _vm_ip: "{{ result.response.ipAddresses[0] }}" - -- name: create new single instance postgres database on vm created earlier +- name: Create new single instance postgres database on vm created earlier ntnx_ndb_databases: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_params_profile: - name: "{{db_params_profile.name}}" + name: "{{ db_params_profile.name }}" db_vm: use_registered_server: @@ -86,24 +89,25 @@ snapshots_per_day: 2 tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs register: result -- set_fact: - db_uuid: "{{result.db_uuid}}" +- name: Set db uuid + ansible.builtin.set_fact: + db_uuid: "{{ result.db_uuid }}" # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -117,7 +121,7 @@ - result.response.databaseNodes[0].status == "READY" - result.response.databaseNodes[0].dbserverId == db_server_uuid - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs" - result.response.timeMachine is defined - result.response.timeMachine.name == "TM1" @@ -125,51 +129,49 @@ - result.response.timeMachine.sla is defined - 
result.response.timeMachine.sla.id == sla.uuid - fail_msg: "Unable to create single instance postgres database" - success_msg: "single instance postgres database created successfully" - + fail_msg: Unable to create single instance postgres database + success_msg: single instance postgres database created successfully -- name: unregister db along with delete time machine and unregister db servr vm +- name: Unregister db along with delete time machine and unregister db servr vm ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db_uuid}}" + state: absent + db_uuid: "{{ db_uuid }}" wait: true delete_time_machine: true - unregister_db_server_vms: True + unregister_db_server_vms: true register: result -- name: verify status of unregister of database with time machine delete - assert: +- name: Verify status of unregister of database with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database unregistration failed" - success_msg: "database unregistered successfully" + fail_msg: database unregistration failed + success_msg: database unregistered successfully - -- name: create spec for registering previously unregistered DB from previously unregistered DB server vm - check_mode: yes +- name: Create spec for registering previously unregistered DB from previously unregistered DB server vm + check_mode: true ntnx_ndb_register_database: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_vm: unregistered: - ip: "{{_vm_ip}}" - username: "{{vm_username}}" - password: "{{vm_password}}" - desc: "vm-desc-updated" + ip: "{{ _vm_ip }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" + desc: vm-desc-updated reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: 
db_name: testAnsible1 - db_password: "{{vm_password}}" - software_path: "{{postgres.software_home}}" + db_password: "{{ vm_password }}" + software_path: "{{ postgres.software_home }}" time_machine: name: TM1 @@ -178,86 +180,45 @@ name: "{{ sla.name }}" tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - -- set_fact: - expected_action_arguments: [ - { - "name": "vmIp", - "value": "{{_vm_ip}}" - }, - { - "name": "listener_port", - "value": "5432" - }, - { - "name": "db_name", - "value": "testAnsible1" - }, - { - "name": "db_user", - "value": "postgres" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "postgres_software_home", - "value": "{{postgres.software_home}}" - } - ] - -- set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": {}, - "slaId": "{{sla.uuid}}" - } - -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } +- name: Set expected action arguments + ansible.builtin.set_fact: + expected_action_arguments: + - { name: vmIp, value: "{{ _vm_ip }}" } + - { name: listener_port, value: "5432" } + - { name: db_name, value: testAnsible1 } + - { name: db_user, value: postgres } + - { name: db_password, value: 
VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + - { name: postgres_software_home, value: "{{ postgres.software_home }}" } + +- name: Set expected time machine info + ansible.builtin.set_fact: + expected_time_machine_info: { autoTuneLogDrive: true, description: TM-desc, name: TM1, schedule: {}, slaId: "{{ sla.uuid }}"} + +- name: Set maintenance tasks + ansible.builtin.set_fact: + mainetance_tasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -274,32 +235,30 @@ - result.response.nxClusterId == cluster.cluster1.uuid - result.response.maintenanceTasks == mainetance_tasks - result.response.workingDirectory == "/tmp" - fail_msg: "Unable to create register database spec" - success_msg: "single instance postgres database register spec created successfully" - - + fail_msg: Unable to create register database spec + success_msg: single instance postgres database register spec created successfully -- name: register previously unregistered DB from previously unregistered DB server vm +- name: Register previously unregistered DB from previously unregistered DB server vm ntnx_ndb_register_database: wait: true - name: "{{db1_name}}" - desc: "ansible-created-db-desc" + name: "{{ db1_name }}" + desc: ansible-created-db-desc db_vm: unregistered: - ip: "{{_vm_ip}}" - username: "{{vm_username}}" - password: "{{vm_password}}" - desc: "vm-desc-updated" - reset_desc_in_ntnx_cluster: True + ip: "{{ _vm_ip }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" + desc: vm-desc-updated + reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: db_name: testAnsible1 - db_password: 
"{{vm_password}}" - software_path: "{{postgres.software_home}}" - db_user: "postgres" + db_password: "{{ vm_password }}" + software_path: "{{ postgres.software_home }}" + db_user: postgres time_machine: name: TM1 @@ -315,33 +274,33 @@ snapshots_per_day: 2 tags: - ansible-databases: "single-instance-dbs" + ansible-databases: single-instance-dbs automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'READY' @@ -355,7 +314,7 @@ - result.response.databaseNodes[0].status == "READY" - result.response.type == "postgres_database" - result.response.tags | length == 1 - - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].tagName == "{{ tags.databases.name }}" - result.response.tags[0].value == "single-instance-dbs" - result.response.timeMachine is defined - result.response.timeMachine.name == "TM1" @@ -363,24 +322,24 @@ - result.response.timeMachine.sla is defined - result.response.timeMachine.sla.id == sla.uuid - fail_msg: "Unable to register single instance postgres database" - success_msg: "single instance postgres database registered successfully" + fail_msg: Unable to register single instance postgres 
database + success_msg: single instance postgres database registered successfully -- name: delete db along with delete time machine and db server vms +- name: Delete db along with delete time machine and db server vms ntnx_ndb_databases: - db_uuid: "{{result.db_uuid}}" - state: "absent" + db_uuid: "{{ result.db_uuid }}" + state: absent wait: true delete_time_machine: true - unregister_db_server_vms: True + unregister_db_server_vms: true register: result -- name: verify status of delete of database along with time machine delete - assert: +- name: Verify status of delete of database along with time machine delete + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - result.response.db_server_vms_delete_status.status == "5" - fail_msg: "database delete failed" - success_msg: "database deleted successfully" + fail_msg: database delete failed + success_msg: database deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/aliases b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/aliases +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml index 7f3bf3844..be6212741 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml @@ -1,151 
+1,108 @@ --- - -- debug: +- name: "Start ntnx_ndb_db_server_vms, ntnx_ndb_register_db_server_vm, ntnx_ndb_db_servers_info and ntnx_ndb_maintenance_tasks tests. Approx Time: < 30 mins" + ansible.builtin.debug: msg: "start ntnx_ndb_db_server_vms, ntnx_ndb_register_db_server_vm, ntnx_ndb_db_servers_info and ntnx_ndb_maintenance_tasks tests. Approx Time: < 30 mins" - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - vm1_name: "{{random_name[0]}}" - vm1_name_updated: "{{random_name[0]}}-updated" +- name: Define variables + ansible.builtin.set_fact: + vm1_name: "{{ random_name[0] }}" + vm1_name_updated: "{{ random_name[0] }}-updated" ################################### DB server VM Provision tests ############################# -- name: create ndb vm using time machine and check mode - check_mode: yes +- name: Create ndb vm using time machine and check mode + check_mode: true ntnx_ndb_db_server_vms: - wait: True - name: "ansible-created-vm1-from-time-machine" - desc: "ansible-created-vm1-from-time-machine-time-machine" + wait: true + name: ansible-created-vm1-from-time-machine + desc: ansible-created-vm1-from-time-machine-time-machine time_machine: - uuid: "test_uuid" - snapshot_uuid: "test_snapshot_uuid" + uuid: test_uuid + snapshot_uuid: test_snapshot_uuid compute_profile: - uuid: "test_compute_uuid" + uuid: test_compute_uuid network_profile: - uuid: "test_network_uuid" + uuid: test_network_uuid cluster: - uuid: "test_cluster_uuid" - password: "test_password" - pub_ssh_key: "test_public_key" - database_type: "postgres_database" + uuid: test_cluster_uuid + password: test_password + pub_ssh_key: test_public_key + database_type: postgres_database automated_patching: maintenance_window: - uuid: "test_window_uuid" + uuid: test_window_uuid 
tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: check_mode_result # skip jekyll/Liquid syntax check # {% raw %} -- name: create action_arguments map - set_fact: - action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create action_arguments map + ansible.builtin.set_fact: + action_arguments: "{{ action_arguments | default({}) | combine({item['name']: item['value']}) }}" loop: "{{check_mode_result.response.actionArguments}}" no_log: true # {% endraw %} -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "test_window_uuid", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } - -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "vm_name", - "value": "ansible-created-vm1-from-time-machine" - }, - { - "name": "client_public_key", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } - ], - "computeProfileId": "test_compute_uuid", - "databaseType": "postgres_database", - "description": "ansible-created-vm1-from-time-machine-time-machine", - "latestSnapshot": false, - "maintenanceTasks": { - "maintenanceWindowId": "test_window_uuid", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - }, - "networkProfileId": "test_network_uuid", 
- "nxClusterId": "test_cluster_uuid", - "snapshotId": "test_snapshot_uuid", - "softwareProfileId": "", - "softwareProfileVersionId": "", - "timeMachineId": "test_uuid", - "timeZone": "Asia/Calcutta", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - "uuid": null - } +- name: Set mainetance_tasks + ansible.builtin.set_fact: + mainetance_tasks: + maintenanceWindowId: test_window_uuid + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } + +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: + actionArguments: + - { name: vm_name, value: ansible-created-vm1-from-time-machine } + - { name: client_public_key, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER } + computeProfileId: test_compute_uuid + databaseType: postgres_database + description: ansible-created-vm1-from-time-machine-time-machine + latestSnapshot: false + maintenanceTasks: + maintenanceWindowId: test_window_uuid + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } + networkProfileId: test_network_uuid + nxClusterId: test_cluster_uuid + snapshotId: test_snapshot_uuid + softwareProfileId: "" + softwareProfileVersionId: "" + timeMachineId: test_uuid + timeZone: Asia/Calcutta + vmPassword: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + uuid: - name: Check mode Status - assert: + ansible.builtin.assert: that: - check_mode_result == expected_result - fail_msg: "Unable to generate create db server vm spec with time machine as source" - success_msg: "DB server VM spec created successfully" - + fail_msg: Unable to generate create db server vm spec with time machine as source + success_msg: DB server VM spec created successfully -- 
name: create spec for db server vm using software profile and names of profile - check_mode: yes +- name: Create spec for db server vm using software profile and names of profile + check_mode: true ntnx_ndb_db_server_vms: - wait: True + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -156,86 +113,57 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "actionArguments": [ - { - "name": "vm_name", - "value": "{{ vm1_name }}" - }, - { - "name": "client_public_key", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } - ], - "computeProfileId": "{{ compute_profile.uuid }}", - "databaseType": "postgres_database", - "description": "ansible-created-vm1-desc", - "latestSnapshot": false, - "maintenanceTasks": { - "maintenanceWindowId": "{{ maintenance.window_uuid }}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - }, - "networkProfileId": "{{ network_profile.uuid }}", - "nxClusterId": "{{ cluster.cluster1.uuid }}", - "softwareProfileId": "{{ software_profile.uuid }}", - 
"softwareProfileVersionId": "{{ software_profile.latest_version_id }}", - "timeZone": "UTC", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - "uuid": null - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: + actionArguments: [{ name: vm_name, value: "{{ vm1_name }}" }, { name: client_public_key, value: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER }] + computeProfileId: "{{ compute_profile.uuid }}" + databaseType: postgres_database + description: ansible-created-vm1-desc + latestSnapshot: false + maintenanceTasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } + networkProfileId: "{{ network_profile.uuid }}" + nxClusterId: "{{ cluster.cluster1.uuid }}" + softwareProfileId: "{{ software_profile.uuid }}" + softwareProfileVersionId: "{{ software_profile.latest_version_id }}" + timeZone: UTC + vmPassword: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + uuid: - name: Check mode Status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to generate create db server vm spec with time machine as source and given names of profile" - success_msg: "DB server VM spec created successfully" + fail_msg: Unable to generate create db server vm spec with time machine as source and given names of profile + success_msg: DB server VM spec created successfully - -- name: create db server vm using software profile +- name: Create db server vm using software profile ntnx_ndb_db_server_vms: - wait: True + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -246,31 +174,31 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" 
pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties: "{{ properties | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' @@ -283,35 +211,36 @@ - result.response.vmTimeZone == "UTC" - result.response.nxClusterId == cluster.cluster1.uuid - fail_msg: "Unable to create db server vm using software profile" - success_msg: "DB server VM created successfully" + fail_msg: Unable to create db server vm using software profile + success_msg: DB server VM created successfully -- set_fact: +- name: Set db server uuid + ansible.builtin.set_fact: db_server_uuid: "{{ result.uuid }}" -- set_fact: +- name: Set vm ip + ansible.builtin.set_fact: vm_ip: "{{ result.response.ipAddresses[0] }}" - ################################### DB server VM update Tests ############################# -- name: update db server vm name, desc, credentials, tags +- name: Update db server vm name, desc, credentials, tags ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - reset_name_in_ntnx_cluster: True - 
reset_desc_in_ntnx_cluster: True + wait: true + uuid: "{{ db_server_uuid }}" + name: "{{ vm1_name_updated }}" + desc: ansible-created-vm1-updated-desc + reset_name_in_ntnx_cluster: true + reset_desc_in_ntnx_cluster: true update_credentials: - - username: "{{vm_username}}" - password: "{{vm_password}}" + - username: "{{ vm_username }}" + password: "{{ vm_password }}" tags: ansible-db-server-vms: ansible-updated register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' @@ -325,41 +254,40 @@ - result.response.tags[0].tagId == tags.db_server_vm.uuid - result.response.tags[0].value == "ansible-updated" - fail_msg: "Unable to update db server vm" - success_msg: "DB server VM updated successfully" + fail_msg: Unable to update db server vm + success_msg: DB server VM updated successfully -- name: check idempotency +- name: Check idempotency ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - tags: - ansible-db-server-vms: "ansible-updated" + wait: true + uuid: "{{ db_server_uuid }}" + name: "{{ vm1_name_updated }}" + desc: ansible-created-vm1-updated-desc + tags: + ansible-db-server-vms: ansible-updated register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "db server vm got updated" - success_msg: "db server vm update skipped successfully due to no changes in state" - + fail_msg: db server vm got updated + success_msg: db server vm update skipped successfully due to no changes in state -- name: update db server vm name with check mode and check defaults - check_mode: yes +- name: Update db server vm name with check mode and check defaults + check_mode: true ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" + 
wait: true + uuid: "{{ db_server_uuid }}" update_credentials: - - username: "user" - password: "pass" + - username: user + password: pass register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -370,8 +298,8 @@ - result.response.resetDescriptionInNxCluster == False - result.response.resetTags == False - fail_msg: "Unable to generate check mode spec for update" - success_msg: "DB server VM update spec generated successfully" + fail_msg: Unable to generate check mode spec for update + success_msg: DB server VM update spec generated successfully ################################################################ Info module tests ############################################################# @@ -379,364 +307,326 @@ ntnx_ndb_db_servers_info: register: db_servers - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - db_servers.response is defined - db_servers.failed == false - db_servers.changed == false - db_servers.response | length > 0 - fail_msg: "Unable to list all NDB db_servers" - success_msg: "NDB db_servers listed successfully" + fail_msg: Unable to list all NDB db_servers + success_msg: NDB db_servers listed successfully ################################################################ -- name: get NDB db_servers using it's name +- name: Get NDB db_servers using it's name ntnx_ndb_db_servers_info: filters: load_metrics: true - load_databases: True + load_databases: true value_type: name - value: "{{db_servers.response[0].name}}" + value: "{{ db_servers.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response[0].id == "{{db_servers.response[0].id}}" + - result.response[0].id == "{{ db_servers.response[0].id }}" - 
result.response[0].databases is defined - - result.response[0].name == "{{db_servers.response[0].name}}" + - result.response[0].name == "{{ db_servers.response[0].name }}" - result.response[0].metric is defined fail_msg: "Unable to get NDB db_servers using it's name and filters " - success_msg: "get NDB db_server using it's name and filters finished successfully" + success_msg: get NDB db_server using it's name and filters finished successfully ################################################################ -- name: get NDB db_servers using incorrect name +- name: Get NDB db_servers using incorrect name ntnx_ndb_db_servers_info: filters: load_metrics: true - load_databases: True + load_databases: true value_type: name - value: "xxxxxxxxxxxxxx" + value: xxxxxxxxxxxxxx register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response | length == 0 - fail_msg: "listing ndb db servers using incorrect name didn't failed" - success_msg: "Got empty response for incorrect name successfully" + fail_msg: listing ndb db servers using incorrect name didn't failed + success_msg: Got empty response for incorrect name successfully ################################################################ -- name: get NDB db_servers using it's ip +- name: Get NDB db_servers using it's ip ntnx_ndb_db_servers_info: filters: value_type: ip - value: "{{db_servers.response[0].ipAddresses[0]}}" + value: "{{ db_servers.response[0].ipAddresses[0] }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response[0].id == "{{db_servers.response[0].id}}" - - result.response[0].ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + - result.response[0].id == "{{ db_servers.response[0].id 
}}" + - result.response[0].ipAddresses[0] == "{{ db_servers.response[0].ipAddresses[0] }}" fail_msg: "Unable to get NDB db_servers using it's ip " - success_msg: "get NDB db_server using it's ip finished successfully" + success_msg: get NDB db_server using it's ip finished successfully ################################################################ -- name: get NDB db_servers using it's name +- name: Get NDB db_servers using it's name ntnx_ndb_db_servers_info: - name: "{{db_servers.response[0].name}}" + name: "{{ db_servers.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{db_servers.response[0].id}}" - - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + - result.response.id == "{{ db_servers.response[0].id }}" + - result.response.ipAddresses[0] == "{{ db_servers.response[0].ipAddresses[0] }}" fail_msg: "Unable to get NDB db_servers using it's name " - success_msg: "get NDB db_server using it's name finished successfully" + success_msg: get NDB db_server using it's name finished successfully ################################################################ -- name: get NDB db_servers using it's id +- name: Get NDB db_servers using it's id ntnx_ndb_db_servers_info: - uuid: "{{db_servers.response[0].id}}" + uuid: "{{ db_servers.response[0].id }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{db_servers.response[0].name}}" - - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + - result.response.name == "{{ db_servers.response[0].name }}" + - result.response.ipAddresses[0] == "{{ db_servers.response[0].ipAddresses[0] }}" fail_msg: 
"Unable to get NDB db_servers using it's id " - success_msg: "get NDB db_server using it's id finished successfully" + success_msg: get NDB db_server using it's id finished successfully ################################################################ -- name: get NDB db_servers using ip +- name: Get NDB db_servers using ip ntnx_ndb_db_servers_info: - server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + server_ip: "{{ db_servers.response[0].ipAddresses[0] }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{db_servers.response[0].name}}" - - result.response.id == "{{db_servers.response[0].id}}" + - result.response.name == "{{ db_servers.response[0].name }}" + - result.response.id == "{{ db_servers.response[0].id }}" fail_msg: "Unable to get NDB db_servers using it's ip " - success_msg: "get NDB db_server using it's ip finished successfully" + success_msg: get NDB db_server using it's ip finished successfully ################################################################ -- name: get NDB db_servers with incorrect name +- name: Get NDB db_servers with incorrect name ntnx_ndb_db_servers_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" - + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given ################################### maintenance tasks update tests ############################# -- name: create spec 
for adding maintenance window tasks to db server vm - check_mode: yes +- name: Create spec for adding maintenance window tasks to db server vm + check_mode: true ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" - - uuid: "test_vm_1" + - name: "{{ vm1_name_updated }}" + - uuid: test_vm_1 db_server_clusters: - - uuid: "test_cluter_1" - - uuid: "test_cluter_2" + - uuid: test_cluter_1 + - uuid: test_cluter_2 maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "entities": { - "ERA_DBSERVER": [ - "{{db_server_uuid}}", - "test_vm_1" - ], - "ERA_DBSERVER_CLUSTER": [ - "test_cluter_1", - "test_cluter_2" - ] - }, - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls", - "preCommand": "ls -a" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls", - "preCommand": "ls -a" - } - }, - "taskType": "DB_PATCHING" - } - ] - }, - "uuid": "{{maintenance.window_uuid}}" - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: + entities: { ERA_DBSERVER: ["{{ db_server_uuid }}", test_vm_1], ERA_DBSERVER_CLUSTER: [test_cluter_1, test_cluter_2] } + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls, preCommand: ls -a } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls, preCommand: ls -a } }, taskType: DB_PATCHING } + uuid: "{{ 
maintenance.window_uuid }}" - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create spec for adding maintenance tasks for db server vm" - success_msg: "spec for adding maintenance tasks for db server vm created successfully" + fail_msg: Unable to create spec for adding maintenance tasks for db server vm + success_msg: spec for adding maintenance tasks for db server vm created successfully - -- name: create spec for removing maintenance window tasks from above created vm - check_mode: yes +- name: Create spec for removing maintenance window tasks from above created vm + check_mode: true ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "entities": { - "ERA_DBSERVER": [ - "{{db_server_uuid}}" - ] - }, - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [], - }, - "uuid": "{{maintenance.window_uuid}}" - } +- name: Set expected result + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: { entities: { ERA_DBSERVER: ["{{ db_server_uuid }}"] }, maintenanceWindowId: "{{ maintenance.window_uuid }}", tasks: [] } + uuid: "{{ maintenance.window_uuid }}" - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create spec for removing maintenance tasks for db server vm" - success_msg: "spec for removing maintenance tasks for db server vm created successfully" - + fail_msg: Unable to create spec for removing maintenance tasks for db server vm + success_msg: spec for removing maintenance tasks for db server vm created successfully -- name: db server vm already contains some tasks so remove maintenance 
window tasks from above created vm +- name: Db server vm already contains some tasks so remove maintenance window tasks from above created vm ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result -- set_fact: - tasks: [] - entity_tasks_associations: "{{result.response.entityTaskAssoc}}" +- name: Define variables + ansible.builtin.set_fact: + tasks_list: [] + entity_tasks_associations: "{{ result.response.entityTaskAssoc }}" -- name: entity_tasks_associations can be null so converting it to list - set_fact: +- name: Entity_tasks_associations can be null so converting it to list + ansible.builtin.set_fact: entity_tasks_associations: [] when: entity_tasks_associations == "" -- name: creating list of tasks associated to db server vm - set_fact: - tasks: "{{ tasks | default([]) | union ([item]]) }}" - loop: "{{entity_tasks_associations}}" +- name: Creating list of tasks associated to db server vm + ansible.builtin.set_fact: + tasks_list: "{{ tasks_list | default([]) | union([item]) }}" + loop: "{{ entity_tasks_associations }}" when: item['entity']['id'] == db_server_uuid no_log: true - name: Check update status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - tasks | length == 0 - fail_msg: "Unable to remove maintenance tasks for given db server vm" - success_msg: "maintenance tasks for given db server vm removed successfully" - + fail_msg: Unable to remove maintenance tasks for given db server vm + success_msg: maintenance tasks for given db server vm removed successfully - name: Add maitenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" + - name: "{{ vm1_name_updated }}" maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - 
pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result -- set_fact: - tasks: [] +- name: Set tasks fact + ansible.builtin.set_fact: + tasks_list: [] -- set_fact: - entity_tasks_associations: "{{result.response.entityTaskAssoc}}" +- name: Define variable for entity tasks associations + ansible.builtin.set_fact: + entity_tasks_associations: "{{ result.response.entityTaskAssoc }}" -- name: entity_tasks_associations can be null so converting it to list - set_fact: +- name: Entity_tasks_associations can be null so converting it to list + ansible.builtin.set_fact: entity_tasks_associations: [] when: entity_tasks_associations == None -- name: creating list of tasks associated with db server vm - set_fact: - tasks: "{{ tasks | default([]) | union ([item]) }}" +- name: Creating list of tasks associated with db server vm + ansible.builtin.set_fact: + tasks_list: "{{ tasks_list | default([]) | union([item]) }}" loop: "{{entity_tasks_associations}}" when: item['entity']['id'] == db_server_uuid no_log: true - name: Check update status - assert: + ansible.builtin.assert: that: - result.changed == true - result.failed == false - tasks | length == 2 - fail_msg: "Unable to add maintenance tasks for given db server vm" - success_msg: "maintenance tasks for given db server vm added successfully" + fail_msg: Unable to add maintenance tasks for given db server vm + success_msg: maintenance tasks for given db server vm added successfully - name: Remove maintenance window tasks from above created vm ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - name: Check update status - assert: + 
ansible.builtin.assert: that: - result.changed == true - result.failed == false - fail_msg: "Unable to remove maintenance tasks for given db server vm" - success_msg: "maintenance tasks for given db server vm removed successfully" - + fail_msg: Unable to remove maintenance tasks for given db server vm + success_msg: maintenance tasks for given db server vm removed successfully ################################### DB server VM unregistration tests ############################# -- name: generate check mode spec for unregister with default values - check_mode: yes +- name: Generate check mode spec for unregister with default values + check_mode: true ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + state: absent + wait: true + uuid: "{{ db_server_uuid }}" register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -746,22 +636,21 @@ - result.response.softRemove == False - result.response.remove == True - fail_msg: "Unable to generate check mode spec for unregister" - success_msg: "DB server VM unregister spec generated successfully" - + fail_msg: Unable to generate check mode spec for unregister + success_msg: DB server VM unregister spec generated successfully -- name: genereate check mode spec for delete vm with vgs and snapshots - check_mode: yes +- name: Generate check mode spec for delete vm with vgs and snapshots + check_mode: true ntnx_ndb_db_server_vms: - state: "absent" - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + state: absent + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -771,98 +660,80 @@ - result.response.softRemove == 
False - result.response.remove == False - fail_msg: "Unable to generate check mode spec for unregister" - success_msg: "DB server VM update spec generated successfully" - + fail_msg: Unable to generate check mode spec for unregister + success_msg: DB server VM update spec generated successfully -- name: unregister vm +- name: Unregister vm ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: False - soft_remove: True - delete_vgs: True - delete_vm_snapshots: True + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + delete_from_cluster: false + soft_remove: true + delete_vgs: true + delete_vm_snapshots: true register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - fail_msg: "Unable to soft remove db server vm" - success_msg: "DB server VM removed successfully" + fail_msg: Unable to soft remove db server vm + success_msg: DB server VM removed successfully ################################### DB server VM Registration tests ############################# - -- name: generate spec for registeration of the previous unregistered vm using check mode - check_mode: yes +- name: Generate spec for registration of the previous unregistered vm using check mode + check_mode: true ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + ip: "{{ vm_ip }}" + desc: register-vm-desc reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: - software_path: "{{postgres.software_home}}" - private_ssh_key: "check-key" - username: "{{vm_username}}" + software_path: "{{ postgres.software_home }}" + private_ssh_key: check-key + username: "{{ vm_username }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: 
"DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" - working_directory: "/check" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F + working_directory: /check register: result -- set_fact: +- name: Create action_arguments map + ansible.builtin.set_fact: action_arguments: {} # skip jekyll/Liquid syntax check # {% raw %} -- name: create action_arguments map - set_fact: - action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create action_arguments map + ansible.builtin.set_fact: + action_arguments: "{{ action_arguments | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.actionArguments}}" no_log: true # {% endraw %} -- set_fact: - maintenance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } +- name: Define variables for maintenance tasks + ansible.builtin.set_fact: + maintenance_tasks: + maintenanceWindowId: "{{ maintenance.window_uuid }}" + tasks: + - { payload: { prePostCommand: { postCommand: ls -a, preCommand: ls } }, taskType: OS_PATCHING } + - { payload: { prePostCommand: { postCommand: ls -F, preCommand: ls -l } }, taskType: DB_PATCHING } - name: Check mode status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -876,45 +747,44 @@ - action_arguments["postgres_software_home"] == postgres.software_home - result.response.maintenanceTasks == maintenance_tasks - fail_msg: "Unable to create spec for db server vm registration" - success_msg: "DB server VM registration spec generated successfully" - + fail_msg: Unable to create spec for db server 
vm registration + success_msg: DB server VM registration spec generated successfully -- name: register the previous unregistered vm +- name: Register the previous unregistered vm ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + ip: "{{ vm_ip }}" + desc: register-vm-desc cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: listener_port: 5432 - software_path: "{{postgres.software_home}}" - username: "{{vm_username}}" - password: "{{vm_password}}" + software_path: "{{ postgres.software_home }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result # skip jekyll/Liquid syntax check # {% raw %} -- name: create properties map - set_fact: - properties1: "{{ properties1 | default({}) | combine ({ item['name'] : item['value'] }) }}" +- name: Create properties map + ansible.builtin.set_fact: + properties1: "{{ properties1 | default({}) | combine({item['name']: item['value']}) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'UP' @@ -924,39 +794,38 @@ - result.response.description == "register-vm-desc" - result.response.ipAddresses | length > 0 - result.response.ipAddresses[0] == vm_ip - - properties1["era_user"] == "{{vm_username}}" + - properties1["era_user"] == "{{ vm_username }}" - properties1["listener_port"] == "5432" - properties1["postgres_software_home"] == postgres.software_home - properties1["working_dir"] == "/tmp" - properties1["application_type"] == "postgres_database" - 
result.response.nxClusterId == cluster.cluster1.uuid - fail_msg: "Unable to create db server vm using software profile" - success_msg: "DB server VM created successfully" + fail_msg: Unable to create db server vm using software profile + success_msg: DB server VM created successfully - -- set_fact: - db_server_uuid: "{{result.uuid}}" +- name: Define variable for db server uuid + ansible.builtin.set_fact: + db_server_uuid: "{{ result.uuid }}" ################################### DB server VM Delete test ############################# - -- name: unregister db server vm +- name: Unregister db server vm ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + state: absent + wait: true + uuid: "{{ db_server_uuid }}" delete_from_cluster: false - delete_vgs: True - delete_vm_snapshots: True + delete_vgs: true + delete_vm_snapshots: true register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.response.status == "5" - fail_msg: "Unable to delete db server vm" - success_msg: "DB server VM deleted successfully" + fail_msg: Unable to delete db server vm + success_msg: DB server VM deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml index 5216bd0e2..36399a848 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_db_server_vms integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - 
import_tasks: "crud.yml" + - name: Import tasks + ansible.builtin.import_tasks: crud.yml diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml index efaa5eb49..de5996936 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml @@ -1,75 +1,69 @@ --- - -- debug: - msg: "start ndb database maintenance winndow tests" +- name: Ntnx_ndb_maintenance_window integration tests + ansible.builtin.debug: + msg: start ndb database maintenance window tests - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - window1_name: "{{random_name[0]}}1" - window2_name: "{{random_name[0]}}2" +- name: Define variables for window names + ansible.builtin.set_fact: + window1_name: "{{ random_name[0] }}1" + window2_name: "{{ random_name[0] }}2" ############################################## create tests 
#################################### -- name: create spec for maintenance window - check_mode: yes +- name: Create spec for maintenance window + check_mode: true ntnx_ndb_maintenance_window: - name: "{{window1_name}}" - desc: "anisble-created-window" + name: "{{ window1_name }}" + desc: anisble-created-window schedule: - recurrence: "weekly" + recurrence: weekly duration: 2 - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta start_time: "11:00:00" - day_of_week: "tuesday" + day_of_week: tuesday register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "description": "anisble-created-window", - "name": "{{window1_name}}", - "schedule": { - "dayOfWeek": "TUESDAY", - "duration": 2, - "recurrence": "WEEKLY", - "startTime": "11:00:00", - "weekOfMonth": null - }, - "timezone": "Asia/Calcutta" - }, - "uuid": null - } +- name: Define variables for expected result + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: + description: anisble-created-window + name: "{{ window1_name }}" + schedule: { dayOfWeek: TUESDAY, duration: 2, recurrence: WEEKLY, startTime: "11:00:00", weekOfMonth: !!null "" } + timezone: Asia/Calcutta + uuid: - name: Check mode status - assert: + ansible.builtin.assert: that: - result == expected_result - fail_msg: "Unable to create spec for creating window" - success_msg: "spec for maintenance window generated successfully" - + fail_msg: Unable to create spec for creating window + success_msg: spec for maintenance window generated successfully -- name: create window with weekly schedule +- name: Create window with weekly schedule ntnx_ndb_maintenance_window: - name: "{{window1_name}}" - desc: "anisble-created-window" + name: "{{ window1_name }}" + desc: anisble-created-window schedule: - recurrence: "weekly" + recurrence: weekly duration: 2 start_time: "11:00:00" - day_of_week: "tuesday" - timezone: "UTC" + day_of_week: tuesday + timezone: 
UTC register: result -- set_fact: - window1_uuid: "{{result.uuid}}" +- name: Define variable for window uuid + ansible.builtin.set_fact: + window1_uuid: "{{ result.uuid }}" -- name: create status - assert: +- name: Create status + ansible.builtin.assert: that: - result.response is defined - result.changed == True @@ -85,29 +79,29 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 2 - fail_msg: "Unable to create maintenance window with weekly schedule" - success_msg: "maintenance window with weekly schedule created successfully" - + fail_msg: Unable to create maintenance window with weekly schedule + success_msg: maintenance window with weekly schedule created successfully -- name: create window with monthly schedule +- name: Create window with monthly schedule ntnx_ndb_maintenance_window: - name: "{{window2_name}}" - desc: "anisble-created-window" + name: "{{ window2_name }}" + desc: anisble-created-window schedule: - recurrence: "monthly" + recurrence: monthly duration: 2 start_time: "11:00:00" - day_of_week: "tuesday" + day_of_week: tuesday week_of_month: 2 - timezone: "UTC" + timezone: UTC register: result -- set_fact: - window2_uuid: "{{result.uuid}}" +- name: Define variable for window uuid + ansible.builtin.set_fact: + window2_uuid: "{{ result.uuid }}" -- name: create status - assert: +- name: Create status + ansible.builtin.assert: that: - result.response is defined - result.changed == True @@ -123,20 +117,19 @@ - result.response.schedule.weekOfMonth == 2 - result.response.schedule.duration == 2 - - fail_msg: "Unable to create maintenance window with monthly schedule" - success_msg: "maintenance window with monthly schedule created successfully" + fail_msg: Unable to create maintenance window with monthly schedule + success_msg: maintenance window with monthly schedule created successfully ############################################## info module tests #################################### - name: Info module check 
ntnx_ndb_maintenance_windows_info: - uuid: "{{window2_uuid}}" + uuid: "{{ window2_uuid }}" register: result - name: Info module status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -144,49 +137,48 @@ - result.response.name == window2_name - result.response.id == window2_uuid - result.uuid == window2_uuid - fail_msg: "Unable to fetch window info" - success_msg: "maintenance window info obtained successfully" + fail_msg: Unable to fetch window info + success_msg: maintenance window info obtained successfully -- name: get all windows +- name: Get all windows ntnx_ndb_maintenance_windows_info: register: result - name: Info module status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False - result.failed == False - result.response | length > 1 - fail_msg: "Unable to fetch all windows" - success_msg: "all maintenance window info obtained successfully" + fail_msg: Unable to fetch all windows + success_msg: all maintenance window info obtained successfully ############################################## update tests #################################### - -- name: update window schedule +- name: Update window schedule ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" - name: "{{window2_name}}-updated" - desc: "anisble-created-window-updated" + uuid: "{{ window2_uuid }}" + name: "{{ window2_name }}-updated" + desc: anisble-created-window-updated schedule: - recurrence: "monthly" + recurrence: monthly duration: 3 start_time: "12:00:00" - timezone: "UTC" - day_of_week: "wednesday" + timezone: UTC + day_of_week: wednesday week_of_month: 3 register: result -- name: update status - assert: +- name: Update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.failed == False - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - - result.response.name == "{{window2_name}}-updated" + 
- result.response.name == "{{ window2_name }}-updated" - result.response.description == "anisble-created-window-updated" - result.response.schedule.dayOfWeek == "WEDNESDAY" - result.response.schedule.recurrence == "MONTHLY" @@ -195,31 +187,30 @@ - result.response.schedule.weekOfMonth == 3 - result.response.schedule.duration == 3 + fail_msg: Unable to update maintenance window + success_msg: maintenance window updated successfully - fail_msg: "Unable to update maintenance window" - success_msg: "maintenance window updated successfully" - -- name: update schedule type +- name: Update schedule type ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" + uuid: "{{ window2_uuid }}" schedule: - recurrence: "weekly" + recurrence: weekly duration: 3 start_time: "12:00:00" - day_of_week: "wednesday" - timezone: "UTC" + day_of_week: wednesday + timezone: UTC register: result -- name: create status - assert: +- name: Create status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.failed == False - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - - result.response.name == "{{window2_name}}-updated" + - result.response.name == "{{ window2_name }}-updated" - result.response.description == "anisble-created-window-updated" - result.response.schedule.dayOfWeek == "WEDNESDAY" - result.response.schedule.recurrence == "WEEKLY" @@ -228,51 +219,49 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 3 + fail_msg: Unable to update maintenance window + success_msg: maintenance window updated successfully - fail_msg: "Unable to update maintenance window" - success_msg: "maintenance window updated successfully" - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" - name: "{{window2_name}}-updated" - desc: "anisble-created-window-updated" + uuid: "{{ window2_uuid }}" + name: "{{ window2_name 
}}-updated" + desc: anisble-created-window-updated schedule: - recurrence: "weekly" + recurrence: weekly duration: 3 start_time: "12:00:00" - day_of_week: "wednesday" - timezone: "UTC" + day_of_week: wednesday + timezone: UTC register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "window got updated" - success_msg: "window update got skipped due to no state changes" + fail_msg: window got updated + success_msg: window update got skipped due to no state changes -- name: updated day of week +- name: Updated day of week ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" + uuid: "{{ window2_uuid }}" schedule: - day_of_week: "saturday" + day_of_week: saturday register: result - -- name: update status - assert: +- name: Update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.failed == False - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - - result.response.name == "{{window2_name}}-updated" + - result.response.name == "{{ window2_name }}-updated" - result.response.schedule.dayOfWeek == "SATURDAY" - result.response.schedule.recurrence == "WEEKLY" - result.response.schedule.startTime == "12:00:00" @@ -280,28 +269,27 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 3 + fail_msg: Unable to update maintenance window + success_msg: maintenance window updated successfully - fail_msg: "Unable to update maintenance window" - success_msg: "maintenance window updated successfully" - -- name: just update start time +- name: Just update start time ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" + uuid: "{{ window2_uuid }}" schedule: start_time: "11:00:00" - timezone: "Asia/Calcutta" + timezone: Asia/Calcutta register: result -- name: update status - 
assert: +- name: Update status + ansible.builtin.assert: that: - result.response is defined - result.changed == True - result.failed == False - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - - result.response.name == "{{window2_name}}-updated" + - result.response.name == "{{ window2_name }}-updated" - result.response.schedule.dayOfWeek == "SATURDAY" - result.response.schedule.recurrence == "WEEKLY" - result.response.schedule.startTime == "05:30:00" @@ -309,39 +297,37 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 3 - fail_msg: "Unable to update maintenance window" - success_msg: "maintenance window updated successfully" - + fail_msg: Unable to update maintenance window + success_msg: maintenance window updated successfully ############################################## delete tests #################################### -- name: delete window 1 +- name: Delete window 1 ntnx_ndb_maintenance_window: - uuid: "{{window1_uuid}}" - state: "absent" + uuid: "{{ window1_uuid }}" + state: absent register: result -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "unable to delete window" - success_msg: "window deleted successfully" + fail_msg: unable to delete window + success_msg: window deleted successfully -- name: delete window 2 +- name: Delete window 2 ntnx_ndb_maintenance_window: - uuid: "{{window2_uuid}}" - state: "absent" + uuid: "{{ window2_uuid }}" + state: absent register: result - -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "success" - fail_msg: "unable to delete window" - success_msg: "window deleted successfully" + fail_msg: unable to delete window + success_msg: window deleted 
successfully diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml index abf2644d3..72571841f 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_maintenance_windows integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: False + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "crud.yml" + - name: Import tasks + ansible.builtin.import_tasks: crud.yml diff --git a/tests/integration/targets/ntnx_ndb_profiles/aliases b/tests/integration/targets/ntnx_ndb_profiles/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/aliases +++ b/tests/integration/targets/ntnx_ndb_profiles/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml b/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml index 081e0e8f9..6b3890058 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml @@ -1,36 +1,38 @@ --- -- debug: +- name: Start testing ntnx_ndb_profiles + ansible.builtin.debug: msg: Start testing ntnx_ndb_profiles - 
################################################################ - name: Generate random profile_name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" -- set_fact: - suffix_name: "ansible-role-mapping" +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: ansible-role-mapping -- set_fact: - profile1_name: "{{random_name}}{{suffix_name}}1" - profile2_name: "{{random_name}}{{suffix_name}}2" +- name: Set profile names + ansible.builtin.set_fact: + profile1_name: "{{ random_name }}{{ suffix_name }}1" + profile2_name: "{{ random_name }}{{ suffix_name }}2" ################################################################ - name: Verify creation of compute profile with defaults in check mode ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc type: compute register: result ignore_errors: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - result.response.type == "Compute" - result.response.properties[0].name=="CPUS" @@ -42,26 +44,26 @@ fail_msg: "Fail: unable to Verify creation of compute profile with defaults in check mode " success_msg: "Pass: Verify creation of compute profile with defaults in check mode " ################################################################ -- name: verify creation of compute profile +- name: Verify creation of compute profile ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc type: compute compute: - vcpus: 2 - 
cores_per_cpu: 4 - memory: 8 + vcpus: 2 + cores_per_cpu: 4 + memory: 8 register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - result.profile_uuid is defined - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - result.response.type == "Compute" - result.response.versions[0].propertiesMap.CORE_PER_CPU == "4" @@ -71,15 +73,15 @@ fail_msg: "Fail: create compute profile finished successfully " success_msg: "Pass: Unable to create compute profile" - -- set_fact: - todelete: "{{ todelete + [ result.profile_uuid ] }}" +- name: Define variable todelete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.profile_uuid] }}" ################################################################ -- name: verify update of params in compute profile and publish profile +- name: Verify update of params in compute profile and publish profile ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "newdesc" - profile_uuid: "{{result.profile_uuid}}" + name: "{{ profile2_name }}" + desc: newdesc + profile_uuid: "{{ result.profile_uuid }}" type: compute compute: vcpus: 6 @@ -89,15 +91,14 @@ register: result ignore_errors: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - result.profile_uuid is defined - - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.name == "{{ profile2_name }}" - result.response.profile.description == "newdesc" - result.response.profile.type == "Compute" - result.response.version.propertiesMap.CORE_PER_CPU == "4" @@ -107,16 +108,16 @@ fail_msg: "Fail: unable to verify update of params in compute profile and publish profile" success_msg: "Pass: 
verify update of params in compute profile and publish profile finished successfully" ################################################################ -- name: verify idempotency check in compute profile +- name: Verify idempotency check in compute profile ntnx_ndb_profiles: - profile_uuid: "{{result.profile_uuid}}" - name: "{{profile2_name}}" - desc: "newdesc" + profile_uuid: "{{ result.profile_uuid }}" + name: "{{ profile2_name }}" + desc: newdesc register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -125,9 +126,9 @@ fail_msg: "Fail: unable to verify idempotency check in compute profile" success_msg: "Pass: verify idempotency check in compute profile finished successfully" ################################################################ -- name: verify unpublish flow in compute profile +- name: Verify unpublish flow in compute profile ntnx_ndb_profiles: - profile_uuid: "{{result.profile_uuid}}" + profile_uuid: "{{ result.profile_uuid }}" type: compute compute: @@ -135,9 +136,8 @@ register: result ignore_errors: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -153,16 +153,17 @@ profile_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true - result.msg == "All items completed" - fail_msg: "unable to delete all created compute profiles" - success_msg: "All compute profiles deleted successfully" + fail_msg: unable to delete all created compute profiles + success_msg: All compute profiles deleted successfully -- set_fact: +- name: Define variable todelete + ansible.builtin.set_fact: todelete: [] diff --git 
a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml index 69c8634a8..00d1009ac 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml @@ -1,9 +1,11 @@ +--- - name: Generate random profile_name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" -- set_fact: - suffix_name: "ansible-role-mapping" +- name: Define variables + ansible.builtin.set_fact: + suffix_name: ansible-role-mapping max_connections: 50 max_replication_slots: 5 max_locks_per_transaction: 32 @@ -17,101 +19,102 @@ max_worker_processes: 4 checkpoint_timeout: 55 autovacuum: "off" - checkpoint_completion_target: 0.7 + checkpoint_completion_target: !!float "0.7" autovacuum_freeze_max_age: 100000000 autovacuum_vacuum_threshold: 40 - autovacuum_vacuum_scale_factor: 0.3 + autovacuum_vacuum_scale_factor: !!float "0.3" autovacuum_work_mem: 1 autovacuum_max_workers: 2 - autovacuum_vacuum_cost_delay: 22 + autovacuum_vacuum_cost_delay: 22 wal_buffers: 1 synchronous_commit: local random_page_cost: 3 -- set_fact: - profile1_name: "{{random_name}}{{suffix_name}}1" - profile2_name: "{{random_name}}{{suffix_name}}2" - profile3_name: "{{random_name}}{{suffix_name}}3" - profile4_name: "{{random_name}}{{suffix_name}}4" +- name: Define variables for profile names + ansible.builtin.set_fact: + profile1_name: "{{ random_name }}{{ suffix_name }}1" + profile2_name: "{{ random_name }}{{ suffix_name }}2" + profile3_name: "{{ random_name }}{{ suffix_name }}3" + profile4_name: "{{ random_name }}{{ suffix_name }}4" ################################################################ - name: Verify creation of db params profile ntnx_ndb_profiles: - name: 
"{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc type: database_parameter database_type: postgres database_parameter: postgres: - max_connections: "{{max_connections}}" - max_replication_slots: "{{max_replication_slots}}" - max_locks_per_transaction: "{{max_locks_per_transaction}}" - effective_io_concurrency: "{{effective_io_concurrency}}" - timezone: "{{timezone}}" - max_prepared_transactions: "{{max_prepared_transactions}}" - max_wal_senders: "{{max_wal_senders}}" - min_wal_size: "{{min_wal_size}}" - max_wal_size: "{{max_wal_size}}" - wal_keep_segments: "{{wal_keep_segments}}" - max_worker_processes: "{{max_worker_processes}}" - checkpoint_timeout: "{{checkpoint_timeout}}" - autovacuum: "{{autovacuum}}" - checkpoint_completion_target: "{{checkpoint_completion_target}}" - autovacuum_freeze_max_age: "{{autovacuum_freeze_max_age}}" - autovacuum_vacuum_threshold: "{{autovacuum_vacuum_threshold}}" - autovacuum_vacuum_scale_factor: "{{autovacuum_vacuum_scale_factor}}" - autovacuum_work_mem: "{{autovacuum_work_mem}}" - autovacuum_max_workers: "{{autovacuum_max_workers}}" - autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" - wal_buffers: "{{wal_buffers}}" - synchronous_commit: "{{synchronous_commit}}" - random_page_cost: "{{random_page_cost}}" + max_connections: "{{ max_connections }}" + max_replication_slots: "{{ max_replication_slots }}" + max_locks_per_transaction: "{{ max_locks_per_transaction }}" + effective_io_concurrency: "{{ effective_io_concurrency }}" + timezone: "{{ timezone }}" + max_prepared_transactions: "{{ max_prepared_transactions }}" + max_wal_senders: "{{ max_wal_senders }}" + min_wal_size: "{{ min_wal_size }}" + max_wal_size: "{{ max_wal_size }}" + wal_keep_segments: "{{ wal_keep_segments }}" + max_worker_processes: "{{ max_worker_processes }}" + checkpoint_timeout: "{{ checkpoint_timeout }}" + autovacuum: "{{ autovacuum }}" + checkpoint_completion_target: "{{ checkpoint_completion_target }}" + 
autovacuum_freeze_max_age: "{{ autovacuum_freeze_max_age }}" + autovacuum_vacuum_threshold: "{{ autovacuum_vacuum_threshold }}" + autovacuum_vacuum_scale_factor: "{{ autovacuum_vacuum_scale_factor }}" + autovacuum_work_mem: "{{ autovacuum_work_mem }}" + autovacuum_max_workers: "{{ autovacuum_max_workers }}" + autovacuum_vacuum_cost_delay: "{{ autovacuum_vacuum_cost_delay }}" + wal_buffers: "{{ wal_buffers }}" + synchronous_commit: "{{ synchronous_commit }}" + random_page_cost: "{{ random_page_cost }}" register: result ignore_errors: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - result.response.type == "Database_Parameter" - - result.response.versions[0].propertiesMap.autovacuum == "{{autovacuum}}" - - result.response.versions[0].propertiesMap.autovacuum_freeze_max_age == "{{autovacuum_freeze_max_age}}" - - result.response.versions[0].propertiesMap.autovacuum_max_workers == "{{autovacuum_max_workers}}" - - result.response.versions[0].propertiesMap.autovacuum_vacuum_cost_delay == "{{autovacuum_vacuum_cost_delay}}ms" - - result.response.versions[0].propertiesMap.autovacuum_vacuum_scale_factor == "{{autovacuum_vacuum_scale_factor}}" - - result.response.versions[0].propertiesMap.autovacuum_vacuum_threshold == "{{autovacuum_vacuum_threshold}}" - - result.response.versions[0].propertiesMap.autovacuum_work_mem == "{{autovacuum_work_mem}}" - - result.response.versions[0].propertiesMap.checkpoint_completion_target == "{{checkpoint_completion_target}}" - - result.response.versions[0].propertiesMap.checkpoint_timeout == "{{checkpoint_timeout}}min" - - result.response.versions[0].propertiesMap.effective_io_concurrency == "{{effective_io_concurrency}}" - - 
result.response.versions[0].propertiesMap.max_connections == "{{max_connections}}" - - result.response.versions[0].propertiesMap.max_locks_per_transaction == "{{max_locks_per_transaction}}" - - result.response.versions[0].propertiesMap.max_prepared_transactions == "{{max_prepared_transactions}}" - - result.response.versions[0].propertiesMap.max_replication_slots == "{{max_replication_slots}}" - - result.response.versions[0].propertiesMap.max_wal_senders == "{{max_wal_senders}}" - - result.response.versions[0].propertiesMap.max_wal_size == "{{max_wal_size}}GB" - - result.response.versions[0].propertiesMap.max_worker_processes == "{{max_worker_processes}}" - - result.response.versions[0].propertiesMap.min_wal_size == "{{min_wal_size}}MB" - - result.response.versions[0].propertiesMap.random_page_cost == "{{random_page_cost}}" - - result.response.versions[0].propertiesMap.synchronous_commit == "{{synchronous_commit}}" - - result.response.versions[0].propertiesMap.timezone == "{{timezone}}" - - result.response.versions[0].propertiesMap.wal_buffers == "{{wal_buffers}}" - - result.response.versions[0].propertiesMap.wal_keep_segments == "{{wal_keep_segments}}" + - result.response.versions[0].propertiesMap.autovacuum == "{{ autovacuum }}" + - result.response.versions[0].propertiesMap.autovacuum_freeze_max_age == "{{ autovacuum_freeze_max_age }}" + - result.response.versions[0].propertiesMap.autovacuum_max_workers == "{{ autovacuum_max_workers }}" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_cost_delay == "{{ autovacuum_vacuum_cost_delay }}ms" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_scale_factor == "{{ autovacuum_vacuum_scale_factor }}" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_threshold == "{{ autovacuum_vacuum_threshold }}" + - result.response.versions[0].propertiesMap.autovacuum_work_mem == "{{ autovacuum_work_mem }}" + - result.response.versions[0].propertiesMap.checkpoint_completion_target == "{{ 
checkpoint_completion_target }}" + - result.response.versions[0].propertiesMap.checkpoint_timeout == "{{ checkpoint_timeout }}min" + - result.response.versions[0].propertiesMap.effective_io_concurrency == "{{ effective_io_concurrency }}" + - result.response.versions[0].propertiesMap.max_connections == "{{ max_connections }}" + - result.response.versions[0].propertiesMap.max_locks_per_transaction == "{{ max_locks_per_transaction }}" + - result.response.versions[0].propertiesMap.max_prepared_transactions == "{{ max_prepared_transactions }}" + - result.response.versions[0].propertiesMap.max_replication_slots == "{{ max_replication_slots }}" + - result.response.versions[0].propertiesMap.max_wal_senders == "{{ max_wal_senders }}" + - result.response.versions[0].propertiesMap.max_wal_size == "{{ max_wal_size }}GB" + - result.response.versions[0].propertiesMap.max_worker_processes == "{{ max_worker_processes }}" + - result.response.versions[0].propertiesMap.min_wal_size == "{{ min_wal_size }}MB" + - result.response.versions[0].propertiesMap.random_page_cost == "{{ random_page_cost }}" + - result.response.versions[0].propertiesMap.synchronous_commit == "{{ synchronous_commit }}" + - result.response.versions[0].propertiesMap.timezone == "{{ timezone }}" + - result.response.versions[0].propertiesMap.wal_buffers == "{{ wal_buffers }}" + - result.response.versions[0].propertiesMap.wal_keep_segments == "{{ wal_keep_segments }}" fail_msg: "Fail: Unable to create db params profile " success_msg: "Pass: Creation of db params profile finished successfully " -- set_fact: - todelete: "{{ todelete + [ result.profile_uuid ] }}" +- name: Define variable to delete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.profile_uuid] }}" ################################################################ -- name: verify update of params in database_parameter profile and publish profile +- name: Verify update of params in database_parameter profile and publish profile 
ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "newdesc" - profile_uuid: "{{result.profile_uuid}}" + name: "{{ profile2_name }}" + desc: newdesc + profile_uuid: "{{ result.profile_uuid }}" type: database_parameter database_type: postgres database_parameter: @@ -124,14 +127,14 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - result.profile_uuid is defined - - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.name == "{{ profile2_name }}" - result.response.profile.description == "newdesc" - result.response.version.published == true - result.response.profile.versions[0].propertiesMap.max_connections == "1" @@ -141,9 +144,9 @@ fail_msg: "Fail: unable to verify update of params in database_parameter profile and publish profile " success_msg: "Pass: verify update of params in database_parameter profile and publish profile finished successfully" ################################################################ -- name: verify unpublish flow in database_parameter profile +- name: Verify unpublish flow in database_parameter profile ntnx_ndb_profiles: - profile_uuid: "{{result.profile_uuid}}" + profile_uuid: "{{ result.profile_uuid }}" database_parameter: publish: false type: database_parameter @@ -151,9 +154,8 @@ register: result ignore_errors: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -162,47 +164,48 @@ fail_msg: "Fail: verify unpublish flow in database_parameter profile " success_msg: "Pass: verify unpublish flow in database_parameter profile finished successfully " ################################################################ -- name: verify creatition of db params profile with defaults +- name: Verify creatition of db params profile with 
defaults ntnx_ndb_profiles: - name: "{{profile3_name}}" - desc: "testdesc" + name: "{{ profile3_name }}" + desc: testdesc type: database_parameter database_type: postgres register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{profile3_name}}" + - result.response.name == "{{ profile3_name }}" - result.response.description == "testdesc" - result.response.type == "Database_Parameter" - result.response.versions is defined fail_msg: "Fail: Unable to verify creatition of db params profile with defaults " success_msg: "Pass: verify creatition of db params profile with defaults finished successfully " -- set_fact: - todelete: "{{ todelete + [ result.profile_uuid ] }}" +- name: Define variable to delete created profile + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.profile_uuid] }}" ################################################################ -- name: verify idempotency check +- name: Verify idempotency check ntnx_ndb_profiles: - name: "{{profile3_name}}" - desc: "testdesc" - profile_uuid: "{{result.profile_uuid}}" + name: "{{ profile3_name }}" + desc: testdesc + profile_uuid: "{{ result.profile_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.profile_uuid is defined - - result.response.profile.name == "{{profile3_name}}" + - result.response.profile.name == "{{ profile3_name }}" - result.response.profile.description == "testdesc" fail_msg: "Fail: Unable to verify idempotency check " success_msg: "Pass: verify idempotency check finished successfully" @@ -213,16 +216,17 @@ profile_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true 
-- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true - result.msg == "All items completed" - fail_msg: "unable to delete all created Database_Parameter profiles" - success_msg: "All Database_Parameter profiles deleted successfully" + fail_msg: unable to delete all created Database_Parameter profiles + success_msg: All Database_Parameter profiles deleted successfully -- set_fact: +- name: Define variable to delete + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml index 43729cff4..ff3e62e52 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Ntnx_ndb_profiles integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "compute.yml" - - import_tasks: "db_params.yml" - - import_tasks: "network_profile.yml" + - name: Import tasks + ansible.builtin.import_tasks: compute.yml + - name: Import tasks + ansible.builtin.import_tasks: db_params.yml + - name: Import tasks + ansible.builtin.import_tasks: network_profile.yml diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml index 6ed797ebc..4e0e79490 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml @@ -1,88 +1,91 @@ +--- - name: Generate random profile_name - set_fact: - 
random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" -- set_fact: - suffix_name: "ansible-role-mapping" +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: ansible-role-mapping -- set_fact: +- name: Define variables for profile names & todelete + ansible.builtin.set_fact: todelete: [] - profile1_name: "{{random_name}}{{suffix_name}}1" - profile2_name: "{{random_name}}{{suffix_name}}2" - profile3_name: "{{random_name}}{{suffix_name}}3" + profile1_name: "{{ random_name }}{{ suffix_name }}1" + profile2_name: "{{ random_name }}{{ suffix_name }}2" + profile3_name: "{{ random_name }}{{ suffix_name }}3" ################################################################ -- name: verify create of single cluster network profile +- name: Verify create of single cluster network profile ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc type: network database_type: postgres network: topology: single vlans: - - - cluster: - name: "{{network_profile.single.cluster.name}}" - vlan_name: "{{network_profile.single.vlan_name}}" + - cluster: + name: "{{ network_profile.single.cluster.name }}" + vlan_name: "{{ network_profile.single.vlan_name }}" enable_ip_address_selection: true register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - result.response.type == "Network" - result.response.topology == "single" - result.response.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "true" - - 
result.response.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" + - result.response.versions[0].propertiesMap.VLAN_NAME == "{{ network_profile.single.vlan_name }}" - result.response.versions[0].published == false fail_msg: "Fail: unable to verify create of single cluster network profile" success_msg: "Pass: verify create of single cluster network profile finished successfully " ################################################################ -- name: update the profile for single cluster by name , desc , publish +- name: Update the profile for single cluster by name , desc , publish ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "testdesc2" + name: "{{ profile2_name }}" + desc: testdesc2 network: publish: true - profile_uuid: "{{result.profile_uuid}}" + profile_uuid: "{{ result.profile_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.name == "{{ profile2_name }}" - result.response.profile.description == "testdesc2" - result.response.profile.type == "Network" - result.response.profile.topology == "single" - result.response.profile.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "true" - - result.response.profile.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" + - result.response.profile.versions[0].propertiesMap.VLAN_NAME == "{{ network_profile.single.vlan_name }}" - result.response.profile.versions[0].published == true fail_msg: "Fail: unable to update the profile for single cluster by name , desc , publish " success_msg: "Pass: update the profile for single cluster by name , desc , publish finished successfully " -- set_fact: - todelete: "{{ todelete + [ result.profile_uuid ] }}" +- name: Define variable todelete + 
ansible.builtin.set_fact: + todelete: "{{ todelete + [result.profile_uuid] }}" ################################################################ -- name: verify idempotency check +- name: Verify idempotency check ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "testdesc2" - profile_uuid: "{{result.profile_uuid}}" + name: "{{ profile2_name }}" + desc: testdesc2 + profile_uuid: "{{ result.profile_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -113,7 +116,7 @@ # ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.failed == false @@ -130,7 +133,7 @@ # fail_msg: "Fail: unable to verify create of multiple cluster network profile " # success_msg: "Pass: verify create of multiple cluster network profile finished sucessfully" -# - set_fact: +# - ansible.builtin.set_fact: # todelete: "{{ todelete + [ result.profile_uuid ] }}" ################################################################ # - name: update the profile for multiple cluster by subnets, publish @@ -153,7 +156,7 @@ # ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.failed == false @@ -177,10 +180,10 @@ profile_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true @@ -188,5 +191,6 @@ fail_msg: "unable to delete all created network profiles" success_msg: "All network profiles deleted successfully" -- set_fact: +- name: Reset todelete + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/aliases 
b/tests/integration/targets/ntnx_ndb_profiles_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/aliases +++ b/tests/integration/targets/ntnx_ndb_profiles_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml index 8d6837be7..b11ef2a42 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml @@ -1,5 +1,6 @@ --- -- debug: +- name: Start testing ntnx_ndb_profiles_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_profiles_info - name: List profiles @@ -7,16 +8,15 @@ register: profiles ignore_errors: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - profiles.response is defined - profiles.failed == false - profiles.changed == false - profiles.response | length > 0 - fail_msg: "Unable to list all NDB profile" - success_msg: "NDB profiles listed successfully" + fail_msg: Unable to list all NDB profile + success_msg: NDB profiles listed successfully ################################################################ - name: List profiles with postgres database engine ntnx_ndb_profiles_info: @@ -25,15 +25,15 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - 
result.response[0].engineType == "postgres_database" - fail_msg: "Unable to list all NDB profile with postgres database engine" - success_msg: "NDB profiles with postgres database engine listed successfully" + fail_msg: Unable to list all NDB profile with postgres database engine + success_msg: NDB profiles with postgres database engine listed successfully ################################################################ - name: List Database_Parameter profiles ntnx_ndb_profiles_info: @@ -42,15 +42,15 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].type == "Database_Parameter" - fail_msg: "Unable to list all Database_Parameter NDB profile" - success_msg: "Database_Parameter NDB profiles listed successfully" + fail_msg: Unable to list all Database_Parameter NDB profile + success_msg: Database_Parameter NDB profiles listed successfully ################################################################ - name: List Network profiles ntnx_ndb_profiles_info: @@ -59,53 +59,53 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].type == "Network" - fail_msg: "Unable to list all Network NDB profile" - success_msg: "Network NDB profiles listed successfully" + fail_msg: Unable to list all Network NDB profile + success_msg: Network NDB profiles listed successfully ################################################################ -- name: get network profile with available IPs +- name: Get network profile with available IPs ntnx_ndb_profiles_info: - name: "{{static_network_profile.name}}" + name: "{{ static_network_profile.name }}" include_available_ips: true register: result ignore_errors: true -- name: 
check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response.available_ips | length > 0 - - result.response.id == "{{static_network_profile.uuid}}" + - result.response.id == "{{ static_network_profile.uuid }}" - fail_msg: "Unable to list network profile with available IPs" - success_msg: "Network NDB profiles along with available IPs obtained successfully" + fail_msg: Unable to list network profile with available IPs + success_msg: Network NDB profiles along with available IPs obtained successfully -- name: get network profile with available IPs +- name: Get network profile with available IPs ntnx_ndb_profiles_info: - uuid: "{{static_network_profile.uuid}}" + uuid: "{{ static_network_profile.uuid }}" include_available_ips: true register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response.available_ips | length > 0 - - result.response.id == "{{static_network_profile.uuid}}" - fail_msg: "Unable to list network profile with available IPs" - success_msg: "Network NDB profiles along with available IPs obtained successfully" + - result.response.id == "{{ static_network_profile.uuid }}" + fail_msg: Unable to list network profile with available IPs + success_msg: Network NDB profiles along with available IPs obtained successfully ################################################################ - name: List Compute profiles ntnx_ndb_profiles_info: @@ -114,15 +114,15 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].type == "Compute" - fail_msg: "Unable to list all Compute NDB profile" 
- success_msg: "Compute NDB profiles listed successfully" + fail_msg: Unable to list all Compute NDB profile + success_msg: Compute NDB profiles listed successfully ################################################################ - name: List Software profiles ntnx_ndb_profiles_info: @@ -131,63 +131,62 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].type == "Software" - fail_msg: "Unable to list all Software NDB profile" - success_msg: "Software NDB profiles listed successfully" + fail_msg: Unable to list all Software NDB profile + success_msg: Software NDB profiles listed successfully ################################################################ -- name: get NDB profile using NDB profile name +- name: Get NDB profile using NDB profile name ntnx_ndb_profiles_info: - name: "{{profiles.response[0].name}}" + name: "{{ profiles.response[0].name }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{profiles.response[0].id}}" - fail_msg: "Unable to get NDB profile using NDB profile name" - success_msg: "get NDB profile using NDB profile name finished successfully" + - result.response.id == "{{ profiles.response[0].id }}" + fail_msg: Unable to get NDB profile using NDB profile name + success_msg: get NDB profile using NDB profile name finished successfully ################################################################ - name: List profiles ntnx_ndb_profiles_info: - uuid: "{{profiles.response[0].id}}" + uuid: "{{ profiles.response[0].id }}" latest_version: true register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + 
ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{profiles.response[0].name}}" - fail_msg: "Unable to get NDB profile using NDB profile id" - success_msg: "get NDB profile using NDB profile id finished successfully" + - result.response.name == "{{ profiles.response[0].name }}" + fail_msg: Unable to get NDB profile using NDB profile id + success_msg: get NDB profile using NDB profile id finished successfully ################################################################ - -- name: get NDB profiles with incorrect name +- name: Get NDB profiles with incorrect name ntnx_ndb_profiles_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/main.yml index da502fcc5..8a5b37f01 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_profiles_info integration testsw + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - 
import_tasks: "info.yml" + - name: Import tasks + ansible.builtin.import_tasks: info.yml diff --git a/tests/integration/targets/ntnx_ndb_slas/aliases b/tests/integration/targets/ntnx_ndb_slas/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_slas/aliases +++ b/tests/integration/targets/ntnx_ndb_slas/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_slas/meta/main.yml b/tests/integration/targets/ntnx_ndb_slas/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_slas/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_slas/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml index 48bc70291..90ac36283 100644 --- a/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml @@ -1,18 +1,21 @@ --- -- debug: +- name: Start testing ntnx_ndb_slas and ntnx_ndb_slas_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_slas and ntnx_ndb_slas_info - name: Generate random profile_name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" -- set_fact: - suffix_name: "ansible-role-mapping" +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: ansible-role-mapping -- set_fact: +- name: Define variables for create flow + ansible.builtin.set_fact: todelete: [] - profile1_name: "{{random_name}}{{suffix_name}}1" - profile2_name: "{{random_name}}{{suffix_name}}2" + profile1_name: "{{ random_name }}{{ suffix_name }}1" + profile2_name: "{{ random_name }}{{ suffix_name }}2" frequency: logs_retention: 4 snapshots_retention: @@ -23,70 +26,72 @@ 
################################################################ create flow ######################################### - name: Verify creation of slas with check mode ntnx_ndb_slas: - name: "{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc frequency: - logs_retention: "{{frequency.logs_retention}}" + logs_retention: "{{ frequency.logs_retention }}" snapshots_retention: - daily: "{{frequency.snapshots_retention.daily}}" - weekly: "{{frequency.snapshots_retention.weekly}}" - monthly: "{{frequency.snapshots_retention.monthly}}" - quarterly: "{{frequency.snapshots_retention.quarterly}}" + daily: "{{ frequency.snapshots_retention.daily }}" + weekly: "{{ frequency.snapshots_retention.weekly }}" + monthly: "{{ frequency.snapshots_retention.monthly }}" + quarterly: "{{ frequency.snapshots_retention.quarterly }}" register: result ignore_errors: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - - result.response.continuousRetention == {{frequency.logs_retention}} - - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} - - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} - - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} - - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.response.continuousRetention == {{ frequency.logs_retention }} + - result.response.dailyRetention == {{ frequency.snapshots_retention.daily }} + - result.response.monthlyRetention == {{ frequency.snapshots_retention.monthly }} + - result.response.quarterlyRetention == {{ frequency.snapshots_retention.quarterly }} + - result.response.weeklyRetention == {{ 
frequency.snapshots_retention.weekly }} fail_msg: "Fail: Verify creation of slas with check mode failed " success_msg: "Pass: Verify creation of slas with check mode finished successfully " ################################################################ - name: Verify creation of slas ntnx_ndb_slas: - name: "{{profile1_name}}" - desc: "testdesc" + name: "{{ profile1_name }}" + desc: testdesc frequency: - logs_retention: "{{frequency.logs_retention}}" + logs_retention: "{{ frequency.logs_retention }}" snapshots_retention: - daily: "{{frequency.snapshots_retention.daily}}" - weekly: "{{frequency.snapshots_retention.weekly}}" - monthly: "{{frequency.snapshots_retention.monthly}}" - quarterly: "{{frequency.snapshots_retention.quarterly}}" + daily: "{{ frequency.snapshots_retention.daily }}" + weekly: "{{ frequency.snapshots_retention.weekly }}" + monthly: "{{ frequency.snapshots_retention.monthly }}" + quarterly: "{{ frequency.snapshots_retention.quarterly }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{profile1_name}}" + - result.response.name == "{{ profile1_name }}" - result.response.description == "testdesc" - - result.response.continuousRetention == {{frequency.logs_retention}} - - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} - - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} - - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} - - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.response.continuousRetention == {{ frequency.logs_retention }} + - result.response.dailyRetention == {{ frequency.snapshots_retention.daily }} + - result.response.monthlyRetention == {{ frequency.snapshots_retention.monthly }} + - 
result.response.quarterlyRetention == {{ frequency.snapshots_retention.quarterly }} + - result.response.weeklyRetention == {{ frequency.snapshots_retention.weekly }} - result.sla_uuid is defined fail_msg: "Fail: Unable to create sla " success_msg: "Pass: sla is created successfully " -- set_fact: - todelete: "{{ todelete + [ result.sla_uuid ] }}" +- name: Define variable todelete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.sla_uuid] }}" ################################################################ -- set_fact: +- name: Define variables for update flow + ansible.builtin.set_fact: frequency: logs_retention: 10 snapshots_retention: @@ -95,88 +100,88 @@ monthly: 13 quarterly: 14 -- name: verify slas update flow +- name: Verify slas update flow ntnx_ndb_slas: - sla_uuid: "{{result.sla_uuid}}" - name: "{{profile2_name}}" - desc: "newdesc" + sla_uuid: "{{ result.sla_uuid }}" + name: "{{ profile2_name }}" + desc: newdesc frequency: - logs_retention: "{{frequency.logs_retention}}" + logs_retention: "{{ frequency.logs_retention }}" snapshots_retention: - daily: "{{frequency.snapshots_retention.daily}}" - weekly: "{{frequency.snapshots_retention.weekly}}" - monthly: "{{frequency.snapshots_retention.monthly}}" - quarterly: "{{frequency.snapshots_retention.quarterly}}" + daily: "{{ frequency.snapshots_retention.daily }}" + weekly: "{{ frequency.snapshots_retention.weekly }}" + monthly: "{{ frequency.snapshots_retention.monthly }}" + quarterly: "{{ frequency.snapshots_retention.quarterly }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - - result.response.name == "{{profile2_name}}" + - result.response.name == "{{ profile2_name }}" - result.response.description == "newdesc" - - result.response.continuousRetention == {{frequency.logs_retention}} - - result.response.dailyRetention == 
{{frequency.snapshots_retention.daily}} - - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} - - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} - - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.response.continuousRetention == {{ frequency.logs_retention }} + - result.response.dailyRetention == {{ frequency.snapshots_retention.daily }} + - result.response.monthlyRetention == {{ frequency.snapshots_retention.monthly }} + - result.response.quarterlyRetention == {{ frequency.snapshots_retention.quarterly }} + - result.response.weeklyRetention == {{ frequency.snapshots_retention.weekly }} - result.sla_uuid is defined fail_msg: "Fail: Unable to update sla " success_msg: "Pass: verify slas update flow finished successfully" ################################################################ update flow ######################################### -- name: verify slas update flow with check mode +- name: Verify slas update flow with check mode ntnx_ndb_slas: - sla_uuid: "{{result.sla_uuid}}" - name: "{{profile2_name}}" - desc: "newdesc" + sla_uuid: "{{ result.sla_uuid }}" + name: "{{ profile2_name }}" + desc: newdesc frequency: - logs_retention: "{{frequency.logs_retention}}" + logs_retention: "{{ frequency.logs_retention }}" snapshots_retention: - daily: "{{frequency.snapshots_retention.daily}}" - weekly: "{{frequency.snapshots_retention.weekly}}" - monthly: "{{frequency.snapshots_retention.monthly}}" - quarterly: "{{frequency.snapshots_retention.quarterly}}" + daily: "{{ frequency.snapshots_retention.daily }}" + weekly: "{{ frequency.snapshots_retention.weekly }}" + monthly: "{{ frequency.snapshots_retention.monthly }}" + quarterly: "{{ frequency.snapshots_retention.quarterly }}" register: result ignore_errors: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - 
result.failed == false - result.changed == false - - result.response.name == "{{profile2_name}}" + - result.response.name == "{{ profile2_name }}" - result.response.description == "newdesc" - - result.response.continuousRetention == {{frequency.logs_retention}} - - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} - - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} - - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} - - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.response.continuousRetention == {{ frequency.logs_retention }} + - result.response.dailyRetention == {{ frequency.snapshots_retention.daily }} + - result.response.monthlyRetention == {{ frequency.snapshots_retention.monthly }} + - result.response.quarterlyRetention == {{ frequency.snapshots_retention.quarterly }} + - result.response.weeklyRetention == {{ frequency.snapshots_retention.weekly }} - result.sla_uuid is defined fail_msg: "Fail: verify slas update flow with check mode " success_msg: "Pass: verify slas update flow with check mode finished successfully" ################################################################ -- name: verify idempotency +- name: Verify idempotency ntnx_ndb_slas: - sla_uuid: "{{result.sla_uuid}}" - name: "{{profile2_name}}" - desc: "newdesc" + sla_uuid: "{{ result.sla_uuid }}" + name: "{{ profile2_name }}" + desc: newdesc frequency: - logs_retention: "{{frequency.logs_retention}}" + logs_retention: "{{ frequency.logs_retention }}" snapshots_retention: - daily: "{{frequency.snapshots_retention.daily}}" - weekly: "{{frequency.snapshots_retention.weekly}}" - monthly: "{{frequency.snapshots_retention.monthly}}" - quarterly: "{{frequency.snapshots_retention.quarterly}}" + daily: "{{ frequency.snapshots_retention.daily }}" + weekly: "{{ frequency.snapshots_retention.weekly }}" + monthly: "{{ frequency.snapshots_retention.monthly }}" + quarterly: "{{ 
frequency.snapshots_retention.quarterly }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.msg == "Nothing to change." - result.failed == false @@ -186,88 +191,89 @@ ######################################################################## Info module tests ################################################# -- debug: +- name: Start testing ntnx_ndb_slas_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_slas_info - name: List all era slas ntnx_ndb_slas_info: register: slas -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - slas.response is defined - slas.failed == false - slas.changed == false - slas.response | length > 0 - fail_msg: "Unable to list all era slas" - success_msg: "era slas listed successfully" + fail_msg: Unable to list all era slas + success_msg: era slas listed successfully ################################################################ -- name: get era slas using it's name +- name: Get era slas using it's name ntnx_ndb_slas_info: - name: "{{slas.response[0].name}}" + name: "{{ slas.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{slas.response[0].name}}" + - result.response.name == "{{ slas.response[0].name }}" fail_msg: "Unable to get era slas using it's name " - success_msg: "get era slas using it's name successfully" + success_msg: get era slas using it's name successfully ################################################################ - name: List slas use id ntnx_ndb_slas_info: - uuid: "{{slas.response[0].id}}" + uuid: "{{ slas.response[0].id }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is 
defined - result.failed == false - result.changed == false - - result.response.name == "{{slas.response[0].name}}" + - result.response.name == "{{ slas.response[0].name }}" fail_msg: "Unable to get era slas using it's id " - success_msg: "get era slas using it's id successfully" + success_msg: get era slas using it's id successfully ################################################################ - -- name: get era slas with incorrect name +- name: Get era slas with incorrect name ntnx_ndb_slas_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given ######################################################################## Delete flow ################################################# -- name: verify slas delete flow +- name: Verify slas delete flow ntnx_ndb_slas: state: absent sla_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true - result.msg == "All items completed" - fail_msg: "unable to delete all created slas" - success_msg: "All slas deleted successfully" + fail_msg: unable to delete all created slas + success_msg: All slas deleted successfully -- set_fact: +- name: Define variable todelete + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml 
b/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml index cbd87d175..305b8272c 100644 --- a/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_slas integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "CRUD.yml" + - name: Import tasks + ansible.builtin.import_tasks: CRUD.yml diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/aliases b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_snapshots_info/aliases +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml index 43463a9c7..818745506 100644 --- a/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml @@ -1,100 +1,101 @@ --- -- debug: +- name: Start testing ntnx_ndb_snapshots_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_snapshots_info - name: List all NDB snapshots ntnx_ndb_snapshots_info: register: snapshots -- name: check listing 
status - assert: +- name: Check listing status + ansible.builtin.assert: that: - snapshots.response is defined - snapshots.failed == false - snapshots.changed == false - snapshots.response | length > 0 - fail_msg: "Unable to list all NDB snapshots" - success_msg: "NDB snapshots listed successfully" + fail_msg: Unable to list all NDB snapshots + success_msg: NDB snapshots listed successfully ################################################################ -- name: get NDB snapshots using it's UTC time_zone +- name: Get NDB snapshots using it's UTC time_zone ntnx_ndb_snapshots_info: filters: time_zone: UTC register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].timeZone == "UTC" fail_msg: "Unable to get NDB snapshots with UTC time_zone " - success_msg: "get NDB snapshots using with utc time_zone" + success_msg: get NDB snapshots using with utc time_zone ################################################################ -- name: get NDB snapshots with time-machine id +- name: Get NDB snapshots with time-machine id ntnx_ndb_snapshots_info: filters: - value: "{{snapshots.response[0].timeMachineId}}" + value: "{{ snapshots.response[0].timeMachineId }}" value_type: time-machine register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response[0].timeMachineId == "{{snapshots.response[0].timeMachineId}}" + - result.response[0].timeMachineId == "{{ snapshots.response[0].timeMachineId }}" fail_msg: "Unable to get NDB snapshots with time-machine id " - success_msg: "get NDB snapshots using with time-machine id" + success_msg: get NDB snapshots using with time-machine id 
################################################################ -- name: get NDB snapshots using it's uuid +- name: Get NDB snapshots using it's uuid ntnx_ndb_snapshots_info: - uuid: "{{snapshots.response[0].id}}" + uuid: "{{ snapshots.response[0].id }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{snapshots.response[0].id}}" + - result.response.id == "{{ snapshots.response[0].id }}" fail_msg: "Unable to get NDB snapshots using it's uuid " - success_msg: "get NDB snapshots using it's uuid successfully" + success_msg: get NDB snapshots using it's uuid successfully # ################################################################ - name: List snapshots use uuid and get snapshot files ntnx_ndb_snapshots_info: - uuid: "{{snapshots.response[0].id}}" + uuid: "{{ snapshots.response[0].id }}" get_files: true register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response[0].fileList is defined - - result.snapshot_uuid == "{{snapshots.response[0].id}}" + - result.snapshot_uuid == "{{ snapshots.response[0].id }}" fail_msg: "Unable to get NDB snapshots using it's id and get files " - success_msg: "get NDB snapshots using it's id and get files successfully" + success_msg: get NDB snapshots using it's id and get files successfully # ################################################################ -- name: get NDB snapshots with incorrect uuid +- name: Get NDB snapshots with incorrect uuid ntnx_ndb_snapshots_info: - uuid: "abcd" + uuid: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: 
that: - result.error is defined - result.failed == true diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml index da502fcc5..f830fc463 100644 --- a/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_snapshots_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "info.yml" + - name: Import tasks + ansible.builtin.import_tasks: info.yml diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/aliases b/tests/integration/targets/ntnx_ndb_software_profiles/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/aliases +++ b/tests/integration/targets/ntnx_ndb_software_profiles/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml b/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index aef0c0daf..d54fb114f 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -8,127 +8,104 @@ # 5. 
Replicate profiles to multi clusters # 6. Delete of profile -- debug: - msg: "start ndb software profile tests" +- name: Start ndb software profile tests + ansible.builtin.debug: + msg: start ndb software profile tests - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - profile1_name: "{{random_name[0]}}" - profile1_name_updated: "{{random_name[0]}}-updated" - profile2_name: "{{random_name[0]}}2" +- name: Define variables for profile names + ansible.builtin.set_fact: + profile1_name: "{{ random_name[0] }}" + profile1_name_updated: "{{ random_name[0] }}-updated" + profile2_name: "{{ random_name[0] }}2" - - -- name: create software profile create spec - check_mode: yes +- name: Create software profile create spec + check_mode: true ntnx_ndb_profiles: - name: "{{profile1_name}}" - desc: "{{profile1_name}}-desc" - type: "software" - database_type: "postgres" + name: "{{ profile1_name }}" + desc: "{{ profile1_name }}-desc" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" clusters: - - name: "{{cluster.cluster1.name}}" - - uuid: "{{cluster.cluster2.uuid}}" + - name: "{{ cluster.cluster1.name }}" + - uuid: "{{ cluster.cluster2.uuid }}" register: result - - -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "profile_uuid": null, - "response": { - "availableClusterIds": [ - "{{cluster.cluster1.uuid}}", - "{{cluster.cluster2.uuid}}" - ], - "description": "{{profile1_name}}-desc", - "engineType": "postgres_database", - "name": 
"{{profile1_name}}", - "properties": [ - { - "name": "BASE_PROFILE_VERSION_NAME", - "value": "v1.0" - }, - { - "name": "BASE_PROFILE_VERSION_DESCRIPTION", - "value": "v1.0-desc" - }, - { - "name": "OS_NOTES", - "value": "os_notes" - }, - { - "name": "DB_SOFTWARE_NOTES", - "value": "db_notes" - }, - { - "name": "SOURCE_DBSERVER_ID", - "value": "{{db_server_vm.uuid}}" - } - ], - "systemProfile": false, - "topology": "cluster", - "type": "Software" - } - } - -- name: check spec for creating software profile - assert: +- name: Set expected result for profile create + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + profile_uuid: + response: + availableClusterIds: ["{{ cluster.cluster1.uuid }}", "{{ cluster.cluster2.uuid }}"] + description: "{{ profile1_name }}-desc" + engineType: postgres_database + name: "{{ profile1_name }}" + properties: + - { name: BASE_PROFILE_VERSION_NAME, value: v1.0 } + - { name: BASE_PROFILE_VERSION_DESCRIPTION, value: v1.0-desc } + - { name: OS_NOTES, value: os_notes } + - { name: DB_SOFTWARE_NOTES, value: db_notes } + - { name: SOURCE_DBSERVER_ID, value: "{{ db_server_vm.uuid }}" } + systemProfile: false + topology: cluster + type: Software + +- name: Check spec for creating software profile + ansible.builtin.assert: that: - result == expected_result fail_msg: "Fail: Unable to create spec for software profile create" success_msg: "Pass: Spec for creating software profile generated successfully" -- name: create software profile with base version and cluster instance topology. Replicated to multiple clusters +- name: Create software profile with base version and cluster instance topology. 
Replicated to multiple clusters ntnx_ndb_profiles: - name: "{{profile1_name}}-replicated" - desc: "{{profile1_name}}-desc-replicated" - type: "software" - database_type: "postgres" + name: "{{ profile1_name }}-replicated" + desc: "{{ profile1_name }}-desc-replicated" + type: software + database_type: postgres software: - topology: "cluster" - name: "v1.0" - desc: "v1.0-desc" + topology: cluster + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "{{cluster.cluster1.name}}" - - uuid: "{{cluster.cluster2.uuid}}" + - name: "{{ cluster.cluster1.name }}" + - uuid: "{{ cluster.cluster2.uuid }}" register: result +- name: Define variable for clusters + ansible.builtin.set_fact: + clusters: ["{{ cluster.cluster1.uuid }}", "{{ cluster.cluster2.uuid }}"] - -- set_fact: - clusters: ["{{cluster.cluster1.uuid}}", "{{cluster.cluster2.uuid}}"] - -- name: check status of creation - assert: +- name: Check status of creation + ansible.builtin.assert: that: - result.changed == True - result.failed == False - result.response is defined - result.profile_uuid is defined - - result.response.name == "{{profile1_name}}-replicated" - - result.response.description == "{{profile1_name}}-desc-replicated" + - result.response.name == "{{ profile1_name }}-replicated" + - result.response.description == "{{ profile1_name }}-desc-replicated" - result.response.clusterAvailability[0].nxClusterId in clusters - result.response.clusterAvailability[1].nxClusterId in clusters - result.response.engineType == "postgres_database" @@ -137,187 +114,160 @@ - result.response.type == "Software" - result.response.versions[0].name == "v1.0" - result.response.versions[0].description == "v1.0-desc" - - result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - 
result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{ db_server_vm.uuid }}" fail_msg: "Fail: Unable to create software profile with base version and cluster instance topology with replicating to multiple clusters." success_msg: "Pass: Software profile with base version, cluster instance topology and replicated to multiple clusters created successfully" - -- name: create software profile with base version and single instance topology +- name: Create software profile with base version and single instance topology ntnx_ndb_profiles: - name: "{{profile2_name}}" - desc: "{{profile2_name}}-desc" - type: "software" - database_type: "postgres" + name: "{{ profile2_name }}" + desc: "{{ profile2_name }}-desc" + type: software + database_type: postgres software: - topology: "single" - name: "v1.0" - desc: "v1.0-desc" + topology: single + name: v1.0 + desc: v1.0-desc notes: - os: "os_notes" - db_software: "db_notes" + os: os_notes + db_software: db_notes db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" clusters: - - name: "{{cluster.cluster1.name}}" + - name: "{{ cluster.cluster1.name }}" register: result - - -- name: check status of creation - assert: +- name: Check status of creation + ansible.builtin.assert: that: - result.changed == True - result.failed == False - result.response is defined - result.profile_uuid is defined - - result.response.name == "{{profile2_name}}" - - result.response.description == "{{profile2_name}}-desc" - - result.response.clusterAvailability[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.name == "{{ profile2_name }}" + - result.response.description == "{{ profile2_name }}-desc" + - result.response.clusterAvailability[0].nxClusterId == "{{ cluster.cluster1.uuid }}" - result.response.engineType == "postgres_database" - result.response.status == "READY" - result.response.topology == "single" - result.response.type == "Software" - result.response.versions[0].name == "v1.0" - 
result.response.versions[0].description == "v1.0-desc" - - result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{ db_server_vm.uuid }}" fail_msg: "Fail: Unable to create software profile with base version and single instance topology" success_msg: "Pass: Software profile with base version and single instance topology created successfully" +- name: Define variable for profile uuid + ansible.builtin.set_fact: + profile_uuid: "{{ result.profile_uuid }}" -- set_fact: - profile_uuid: "{{result.profile_uuid}}" - -- name: update software profile +- name: Update software profile ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - name: "{{profile1_name}}-updated1" - desc: "{{profile1_name}}-desc-updated" + profile_uuid: "{{ profile_uuid }}" + name: "{{ profile1_name }}-updated1" + desc: "{{ profile1_name }}-desc-updated" register: result - - -- name: check status of creation - assert: +- name: Check status of creation + ansible.builtin.assert: that: - result.changed == True - result.failed == False - result.response is defined - result.profile_uuid is defined - result.response.profile is defined - - result.response.profile.name == "{{profile1_name}}-updated1" - - result.response.profile.description == "{{profile1_name}}-desc-updated" + - result.response.profile.name == "{{ profile1_name }}-updated1" + - result.response.profile.description == "{{ profile1_name }}-desc-updated" fail_msg: "Fail: Unable to update software profile" success_msg: "Pass: Software profile updated successfully" - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - name: "{{profile1_name}}-updated1" - desc: "{{profile1_name}}-desc-updated" + profile_uuid: "{{ profile_uuid }}" + name: "{{ profile1_name }}-updated1" + desc: "{{ profile1_name }}-desc-updated" register: result - - -- name: check status of creation - assert: +- name: 
Check status of creation + ansible.builtin.assert: that: - result.changed == False - result.failed == False - result.response is defined - result.profile_uuid is defined - result.response.profile is defined - - result.response.profile.name == "{{profile1_name}}-updated1" - - result.response.profile.description == "{{profile1_name}}-desc-updated" + - result.response.profile.name == "{{ profile1_name }}-updated1" + - result.response.profile.description == "{{ profile1_name }}-desc-updated" fail_msg: "Fail: Update didnt get skipped due to no state changes" success_msg: "Pass: Update skipped successfully due to no state changes" -- name: create software profile version spec - check_mode: yes +- name: Create software profile version spec + check_mode: true ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - name: "{{db_server_vm.name}}" + name: "{{ db_server_vm.name }}" register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "profile_type": "software", - "profile_uuid": "{{profile_uuid}}", - "response": { - "profile": { - "description": "{{profile1_name}}-desc-updated", - "engineType": "postgres_database", - "name": "{{profile1_name}}-updated1" - }, - "version": { - "description": "v2.0-desc", - "engineType": "postgres_database", - "name": "v2.0", - "properties": [ - { - "name": "OS_NOTES", - "value": "os_notes for v2" - }, - { - "name": "DB_SOFTWARE_NOTES", - "value": "db_notes for v2" - }, - { - "name": "SOURCE_DBSERVER_ID", - "value": "{{db_server_vm.uuid}}" - } - ], - "systemProfile": false, - "topology": null, - "type": "Software" - } - } - } - -- name: check spec for creating spec for software profile version - 
assert: +- name: Set expected result for version create + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + profile_type: software + profile_uuid: "{{ profile_uuid }}" + response: + profile: { description: "{{ profile1_name }}-desc-updated", engineType: postgres_database, name: "{{ profile1_name }}-updated1" } + version: + description: v2.0-desc + engineType: postgres_database + name: v2.0 + properties: + - { name: OS_NOTES, value: os_notes for v2 } + - { name: DB_SOFTWARE_NOTES, value: db_notes for v2 } + - { name: SOURCE_DBSERVER_ID, value: "{{ db_server_vm.uuid }}" } + systemProfile: false + topology: + type: Software + +- name: Check spec for creating spec for software profile version + ansible.builtin.assert: that: - result == expected_result fail_msg: "Fail: Unable to create spec for software profile version create" success_msg: "Pass: Spec for creating software profile version generated successfully" - -- name: create software profile version +- name: Create software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - name: "v2.0" - desc: "v2.0-desc" + name: v2.0 + desc: v2.0-desc notes: - os: "os_notes for v2" - db_software: "db_notes for v2" + os: os_notes for v2 + db_software: db_notes for v2 db_server_vm: - uuid: "{{db_server_vm.uuid}}" + uuid: "{{ db_server_vm.uuid }}" register: result - - -- name: check status of version create - assert: +- name: Check status of version create + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -328,31 +278,30 @@ - result.response.version.type == "Software" - result.response.version.name == "v2.0" - result.response.version.description == "v2.0-desc" - - result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{ db_server_vm.uuid }}" 
- result.response.version.published == false fail_msg: "Fail: Unable to create software profile version" success_msg: "Pass: Software profile version created successfully" -- set_fact: - version_uuid: "{{result.version_uuid}}" +- name: Define variable for version uuid + ansible.builtin.set_fact: + version_uuid: "{{ result.version_uuid }}" -- name: create spec for update software profile version - check_mode: yes +- name: Create spec for update software profile version + check_mode: true ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - -- name: check status of spec - assert: +- name: Check status of spec + ansible.builtin.assert: that: - result.changed == False - result.failed == False @@ -366,22 +315,19 @@ fail_msg: "Fail: Unable to create spec for updating software profile version" success_msg: "Pass: Spec for updating software profile version created successfully" - -- name: update software profile version +- name: Update software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - database_type: "postgres" + profile_uuid: "{{ profile_uuid }}" + database_type: postgres software: - version_uuid: "{{result.version_uuid}}" - name: "v2.0-updated" - desc: "v2.0-desc-updated" + version_uuid: "{{ result.version_uuid }}" + name: v2.0-updated + desc: v2.0-desc-updated register: result - - -- name: check status of update - assert: +- name: Check status of update + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -395,28 +341,26 @@ - result.response.version.name == "v2.0-updated" - result.response.version.description == "v2.0-desc-updated" - result.response.version.status == "READY" - - 
result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{ db_server_vm.uuid }}" - result.response.version.published == false fail_msg: "Fail: Unable to update software profile version" success_msg: "Pass: Software profile version updated successfully" +- name: Define variable for version uuid + ansible.builtin.set_fact: + version_uuid: "{{ result.version_uuid }}" -- set_fact: - version_uuid: "{{result.version_uuid}}" - -- name: publish software profile version +- name: Publish software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - publish: True + version_uuid: "{{ version_uuid }}" + publish: true register: result - - -- name: check status of update - assert: +- name: Check status of update + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -431,16 +375,16 @@ fail_msg: "Fail: Unable to publish software profile version" success_msg: "Pass: Software profile version published successfully" -- name: unpublish software profile version +- name: Unpublish software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" + version_uuid: "{{ version_uuid }}" publish: false register: result -- name: check status of update - assert: +- name: Check status of update + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -455,19 +399,16 @@ fail_msg: "Fail: Unable to unpublish software profile version" success_msg: "Pass: Software version unpublished successfully" - -- name: deprecate software profile version +- name: Deprecate software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - deprecate: True + version_uuid: 
"{{ version_uuid }}" + deprecate: true register: result - - -- name: check status of update - assert: +- name: Check status of update + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -482,19 +423,16 @@ fail_msg: "Fail: Unable to deprecate software profile version" success_msg: "Pass: Software version deprecated successfully" - - -- name: delete software profile version +- name: Delete software profile version ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + profile_uuid: "{{ profile_uuid }}" software: - version_uuid: "{{version_uuid}}" - state: "absent" + version_uuid: "{{ version_uuid }}" + state: absent register: result - -- name: check status of update - assert: +- name: Check status of update + ansible.builtin.assert: that: - result.changed == True - result.failed == False @@ -506,54 +444,52 @@ fail_msg: "Fail: Unable to delete software profile version" success_msg: "Pass: Software version deleted successfully" - -- name: replicate software profile +- name: Replicate software profile ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" + profile_uuid: "{{ profile_uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" register: result -- name: wait for 3 minutes for replication to finish from source cluster to cluster2 +- name: Wait for 3 minutes for replication to finish from source cluster to cluster2 ansible.builtin.pause: minutes: 3 - -- set_fact: +- name: Build clusters map + ansible.builtin.set_fact: clusters: {} # skip jekyll/Liquid syntax check # {% raw %} -- name: create clusters status map - set_fact: - clusters: "{{ clusters | default({}) | combine ({ item['nxClusterId'] : item['status'] }) }}" +- name: Create clusters status map + ansible.builtin.set_fact: + clusters: "{{ clusters | default({}) | combine({item['nxClusterId']: item['status']}) }}" loop: "{{result.response.profile.clusterAvailability}}" - no_log: True + no_log: true # {% endraw %} -- name: check 
status of replication - assert: +- name: Check status of replication + ansible.builtin.assert: that: - result.changed == True - result.failed == False - result.response is defined - result.profile_uuid is defined - - clusters["{{cluster.cluster1.uuid}}"] == "INACTIVE" - - clusters["{{cluster.cluster2.uuid}}"] == "ACTIVE" + - clusters["{{ cluster.cluster1.uuid }}"] == "INACTIVE" + - clusters["{{ cluster.cluster2.uuid }}"] == "ACTIVE" - result.response.profile.status == "READY" fail_msg: "Fail: Unable to replicate software profile" success_msg: "Pass: Software profile replicated successfully" -- name: delete software profile +- name: Delete software profile ntnx_ndb_profiles: - profile_uuid: "{{profile_uuid}}" - state: "absent" + profile_uuid: "{{ profile_uuid }}" + state: absent register: result - -- name: check status of delete - assert: +- name: Check status of delete + ansible.builtin.assert: that: - result.changed == True - result.failed == False diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml index b25157ea7..8f7305245 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml @@ -1,10 +1,12 @@ --- -- module_defaults: +- name: Ntnx_ndb_software_profiles integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "crud.yml" + - name: Import tasks + ansible.builtin.import_tasks: crud.yml diff --git a/tests/integration/targets/ntnx_ndb_tags/aliases b/tests/integration/targets/ntnx_ndb_tags/aliases index 7a68b11da..87e7bdaae 100644 --- 
a/tests/integration/targets/ntnx_ndb_tags/aliases +++ b/tests/integration/targets/ntnx_ndb_tags/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_tags/meta/main.yml b/tests/integration/targets/ntnx_ndb_tags/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_tags/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_tags/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml index b719db279..bd2b5bc6b 100644 --- a/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml @@ -1,437 +1,423 @@ --- - -- debug: - msg: "start ntnx_ndb_tags" +- name: Start ntnx_ndb_tags + ansible.builtin.debug: + msg: start ntnx_ndb_tags - name: Generate random name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" -- set_fact: - tag_name: "{{random_name[0]}}" - tag_name_updated: "{{random_name[0]}}-updated" +- name: Define variables for tag name + ansible.builtin.set_fact: + tag_name: "{{ random_name[0] }}" + tag_name_updated: "{{ random_name[0] }}-updated" ############################# Create tests ######################## -- name: check mode for creation - check_mode: yes +- name: Check mode for creation + check_mode: true ntnx_ndb_tags: - name: "{{tag_name}}-timemachine" + name: "{{ tag_name }}-timemachine" desc: tag-created-by-ansible - tag_value_required: True + tag_value_required: true entity_type: TIME_MACHINE register: result -- set_fact: - expected_response: { - "description": "tag-created-by-ansible", - "entityType": "TIME_MACHINE", - "name": "{{tag_name}}-timemachine", - "required": true - } 
+- name: Define variable for expected response + ansible.builtin.set_fact: + expected_response: { description: tag-created-by-ansible, entityType: TIME_MACHINE, name: "{{ tag_name }}-timemachine", required: true } - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.response == expected_response - fail_msg: "Unable to create spec for tag" - success_msg: "Spec generated successfully for tag creation" - + fail_msg: Unable to create spec for tag + success_msg: Spec generated successfully for tag creation -- name: create tags for clone +- name: Create tags for clone ntnx_ndb_tags: - name: "{{tag_name}}-clone" + name: "{{ tag_name }}-clone" desc: tag-created-by-ansible - tag_value_required: True + tag_value_required: true entity_type: CLONE register: result - name: Check create status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name}}-clone" + - result.response.name == "{{ tag_name }}-clone" - result.response.entityType == "CLONE" - result.response.status == "ENABLED" - result.response.description == "tag-created-by-ansible" - result.response.required == true - fail_msg: "Tag for clone create failed" - success_msg: "Tag for clone created successfully" + fail_msg: Tag for clone create failed + success_msg: Tag for clone created successfully -- set_fact: - clone_tag_uuid: "{{result.uuid}}" +- name: Define variable for clone tag + ansible.builtin.set_fact: + clone_tag_uuid: "{{ result.uuid }}" -- name: create tags for database server +- name: Create tags for database server ntnx_ndb_tags: - name: "{{tag_name}}-database-server" + name: "{{ tag_name }}-database-server" desc: tag-created-by-ansible - tag_value_required: True + tag_value_required: true entity_type: DATABASE_SERVER register: result - name: Check create status - assert: + ansible.builtin.assert: that: - result.response is 
defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name}}-database-server" + - result.response.name == "{{ tag_name }}-database-server" - result.response.entityType == "DATABASE_SERVER" - result.response.status == "ENABLED" - result.response.description == "tag-created-by-ansible" - result.response.required == true - fail_msg: "Tag for database server create failed" - success_msg: "Tag for clone created successfully" + fail_msg: Tag for database server create failed + success_msg: Tag for clone created successfully -- set_fact: - db_server_tag_uuid: "{{result.uuid}}" +- name: Define variable for database server tag + ansible.builtin.set_fact: + db_server_tag_uuid: "{{ result.uuid }}" -- name: create tags for time machine +- name: Create tags for time machine ntnx_ndb_tags: - name: "{{tag_name}}-time-machine" + name: "{{ tag_name }}-time-machine" desc: tag-created-by-ansible - tag_value_required: True + tag_value_required: true entity_type: TIME_MACHINE register: result - name: Check create status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name}}-time-machine" + - result.response.name == "{{ tag_name }}-time-machine" - result.response.entityType == "TIME_MACHINE" - result.response.status == "ENABLED" - result.response.description == "tag-created-by-ansible" - result.response.required == true - fail_msg: "Tag for time machine create failed" - success_msg: "Tag for time machine created successfully" + fail_msg: Tag for time machine create failed + success_msg: Tag for time machine created successfully -- set_fact: - time_machine_tag_uuid: "{{result.uuid}}" +- name: Define variable for time machine tag + ansible.builtin.set_fact: + time_machine_tag_uuid: "{{ result.uuid }}" -- name: create tags for databases +- name: Create tags for databases ntnx_ndb_tags: - name: "{{tag_name}}-database" + name: "{{ tag_name 
}}-database" desc: tag-created-by-ansible - tag_value_required: False + tag_value_required: false entity_type: DATABASE register: result -- set_fact: - database_tag_uuid: "{{result.uuid}}" +- name: Define variable for database tag + ansible.builtin.set_fact: + database_tag_uuid: "{{ result.uuid }}" -- name: check create status - assert: +- name: Check create status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name}}-database" + - result.response.name == "{{ tag_name }}-database" - result.response.required == false - result.response.entityType == "DATABASE" - result.response.status == "ENABLED" - fail_msg: "Tag create for databases failed" - success_msg: "Tag created successfully" + fail_msg: Tag create for databases failed + success_msg: Tag created successfully -- name: create another tag for databases +- name: Create another tag for databases ntnx_ndb_tags: - name: "{{tag_name}}-database2" + name: "{{ tag_name }}-database2" desc: tag-created-by-ansible - tag_value_required: False + tag_value_required: false entity_type: DATABASE register: result -- set_fact: - database_tag2_uuid: "{{result.uuid}}" +- name: Define variable for database tag + ansible.builtin.set_fact: + database_tag2_uuid: "{{ result.uuid }}" -- name: check create status - assert: +- name: Check create status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name}}-database2" + - result.response.name == "{{ tag_name }}-database2" - result.response.required == false - result.response.entityType == "DATABASE" - result.response.status == "ENABLED" - fail_msg: "Tag create for databases failed" - success_msg: "Tag created successfully" - + fail_msg: Tag create for databases failed + success_msg: Tag created successfully ################################# Info Module tests ########################## -- name: get 
all tags +- name: Get all tags ntnx_ndb_tags_info: register: result - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response | length > 1 - fail_msg: "Fetching all tags failed" - success_msg: "All tags obtained successfully" + fail_msg: Fetching all tags failed + success_msg: All tags obtained successfully -- name: get tag based on uuid +- name: Get tag based on uuid ntnx_ndb_tags_info: - uuid: "{{database_tag_uuid}}" + uuid: "{{ database_tag_uuid }}" register: result - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response.entityType == "DATABASE" - - result.response.id == "{{database_tag_uuid}}" - fail_msg: "get tag based on uuid failed" - success_msg: "tag based on uuid obtained successfully" + - result.response.id == "{{ database_tag_uuid }}" + fail_msg: get tag based on uuid failed + success_msg: tag based on uuid obtained successfully -- name: get all tags based on DATABASE entity type +- name: Get all tags based on DATABASE entity type ntnx_ndb_tags_info: filters: - entity_type: "DATABASE" + entity_type: DATABASE register: result - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response | length > 1 - result.response[0].entityType == "DATABASE" - fail_msg: "get all tags based on DATABASE entity type failed" - success_msg: "all tags based on DATABASE entity type obtained successfully" + fail_msg: get all tags based on DATABASE entity type failed + success_msg: all tags based on DATABASE entity type obtained successfully -- name: get all tags based on CLONE entity type +- name: Get all tags based on CLONE entity type ntnx_ndb_tags_info: filters: - entity_type: "CLONE" + entity_type: CLONE register: result - name: Check info status - assert: 
+ ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response | length > 0 - result.response[0].entityType == "CLONE" - fail_msg: "get all tags based on CLONE entity type failed" - success_msg: "all tags based on CLONE entity type obtained successfully" + fail_msg: get all tags based on CLONE entity type failed + success_msg: all tags based on CLONE entity type obtained successfully -- name: get all tags based on TIME_MACHINE entity type +- name: Get all tags based on TIME_MACHINE entity type ntnx_ndb_tags_info: filters: - entity_type: "TIME_MACHINE" + entity_type: TIME_MACHINE register: result - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response | length > 0 - result.response[0].entityType == "TIME_MACHINE" - fail_msg: "get all tags based on TIME_MACHINE entity type failed" - success_msg: "all tags based on TIME_MACHINE entity type obtained successfully" + fail_msg: get all tags based on TIME_MACHINE entity type failed + success_msg: all tags based on TIME_MACHINE entity type obtained successfully - -- name: get all tags based on DATABASE_SERVER entity type +- name: Get all tags based on DATABASE_SERVER entity type ntnx_ndb_tags_info: filters: - entity_type: "DATABASE_SERVER" + entity_type: DATABASE_SERVER register: result - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response | length > 0 - result.response[0].entityType == "DATABASE_SERVER" - fail_msg: "get all tags based on DATABASE_SERVER entity type failed" - success_msg: "all tags based on DATABASE_SERVER entity type obtained successfully" - + fail_msg: get all tags based on DATABASE_SERVER entity type failed + success_msg: all tags based on DATABASE_SERVER entity type obtained successfully -- name: get tag based on 
DATABASE entity type and name +- name: Get tag based on DATABASE entity type and name ntnx_ndb_tags_info: filters: - entity_type: "DATABASE" - name: "{{tag_name}}-database2" + entity_type: DATABASE + name: "{{ tag_name }}-database2" register: result - - name: Check info status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response.entityType == "DATABASE" - - result.response.name == "{{tag_name}}-database2" - fail_msg: "get tag based on DATABASE entity type and name failed" - success_msg: "tags based on DATABASE entity type and name obtained successfully" + - result.response.name == "{{ tag_name }}-database2" + fail_msg: get tag based on DATABASE entity type and name failed + success_msg: tags based on DATABASE entity type and name obtained successfully ################################## Update tests ######################## -- set_fact: - tag_uuid: "{{database_tag_uuid}}" +- name: Define variable for tag update + ansible.builtin.set_fact: + tag_uuid: "{{ database_tag_uuid }}" -- name: update tag +- name: Update tag ntnx_ndb_tags: - uuid: "{{tag_uuid}}" - name: "{{tag_name_updated}}" + uuid: "{{ tag_uuid }}" + name: "{{ tag_name_updated }}" desc: tag-created-by-ansible-updated tag_value_required: true - status: "DEPRECATED" + status: DEPRECATED register: result - name: Check update status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.uuid is defined - - result.response.name == "{{tag_name_updated}}" + - result.response.name == "{{ tag_name_updated }}" - result.response.required == true - result.response.entityType == "DATABASE" - result.response.status == "DEPRECATED" - fail_msg: "tag update failed" - success_msg: "tag updated successfully" + fail_msg: tag update failed + success_msg: tag updated successfully - - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_tags: - uuid: "{{tag_uuid}}" - name: 
"{{tag_name_updated}}" + uuid: "{{ tag_uuid }}" + name: "{{ tag_name_updated }}" desc: tag-created-by-ansible-updated tag_value_required: true - status: "DEPRECATED" + status: DEPRECATED register: result - - -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "tag got updated" - success_msg: "tag update skipped due to no state changes" - + fail_msg: tag got updated + success_msg: tag update skipped due to no state changes -- name: enable tag +- name: Enable tag ntnx_ndb_tags: - uuid: "{{tag_uuid}}" + uuid: "{{ tag_uuid }}" tag_value_required: true - status: "ENABLED" + status: ENABLED register: result - - -- name: check status changes - assert: +- name: Check status changes + ansible.builtin.assert: that: - result.response is defined - result.changed == true - - result.uuid == "{{tag_uuid}}" + - result.uuid == "{{ tag_uuid }}" - result.response.status == "ENABLED" - fail_msg: "Enabling tag failed" - success_msg: "Tag enabled successfully" + fail_msg: Enabling tag failed + success_msg: Tag enabled successfully ############################################################### delete tests ######################################## - -- name: delete the database based tag +- name: Delete the database based tag ntnx_ndb_tags: - state: "absent" - uuid: "{{database_tag_uuid}}" + state: absent + uuid: "{{ database_tag_uuid }}" register: result - - name: Check delete status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted successfully" + fail_msg: Unable to delete tag + success_msg: tag deleted successfully -- name: delete the database based tag +- name: Delete the database based tag ntnx_ndb_tags: - state: "absent" - uuid: "{{database_tag2_uuid}}" + state: absent + uuid: "{{ 
database_tag2_uuid }}" register: result - name: Check delete status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted successfully" + fail_msg: Unable to delete tag + success_msg: tag deleted successfully - -- name: delete the clone tag +- name: Delete the clone tag ntnx_ndb_tags: - state: "absent" - uuid: "{{clone_tag_uuid}}" + state: absent + uuid: "{{ clone_tag_uuid }}" register: result - name: Check delete status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted successfully" - + fail_msg: Unable to delete tag + success_msg: tag deleted successfully -- name: delete the time machine based tag +- name: Delete the time machine based tag ntnx_ndb_tags: - state: "absent" - uuid: "{{time_machine_tag_uuid}}" + state: absent + uuid: "{{ time_machine_tag_uuid }}" register: result - name: Check delete status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted successfully" - + fail_msg: Unable to delete tag + success_msg: tag deleted successfully -- name: delete the database server based tag +- name: Delete the database server based tag ntnx_ndb_tags: - state: "absent" - uuid: "{{db_server_tag_uuid}}" + state: absent + uuid: "{{ db_server_tag_uuid }}" register: result - name: Check delete status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted successfully" + fail_msg: Unable to delete tag + success_msg: tag deleted successfully diff --git a/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml 
b/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml index 5216bd0e2..c49fe91d5 100644 --- a/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_tags integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ndb_ip }}" - nutanix_username: "{{ ndb_username }}" - nutanix_password: "{{ ndb_password }}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "crud.yml" + - name: Import tasks + ansible.builtin.import_tasks: crud.yml diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/aliases b/tests/integration/targets/ntnx_ndb_time_machines_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/aliases +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml index c76966670..63aba2b50 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml @@ -1,104 +1,104 @@ --- -- debug: +- name: Start testing ntnx_ndb_time_machines_info + ansible.builtin.debug: msg: Start testing ntnx_ndb_time_machines_info - name: List all NDB tms 
ntnx_ndb_time_machines_info: register: tms -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - tms.response is defined - tms.failed == false - tms.changed == false - tms.response | length > 0 - fail_msg: "Unable to list all NDB tms" - success_msg: "NDB tms listed successfully" + fail_msg: Unable to list all NDB tms + success_msg: NDB tms listed successfully ################################################################ - name: List all NDB tms using filter ntnx_ndb_time_machines_info: filters: value_type: name - value: "{{tms.response[0].name}}" + value: "{{ tms.response[0].name }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response | length > 0 - - result.response[0].id == "{{tms.response[0].id}}" - fail_msg: "Unable to list all NDB tms using filter" - success_msg: "NDB tms listed successfully using filter" + - result.response[0].id == "{{ tms.response[0].id }}" + fail_msg: Unable to list all NDB tms using filter + success_msg: NDB tms listed successfully using filter ################################################################ -- name: get NDB tms using it's name +- name: Get NDB tms using it's name ntnx_ndb_time_machines_info: - name: "{{tms.response[0].name}}" + name: "{{ tms.response[0].name }}" register: result - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{tms.response[0].id}}" + - result.response.id == "{{ tms.response[0].id }}" fail_msg: "Unable to get NDB tms using it's name " - success_msg: "get NDB tms using it's name successfully" + success_msg: get NDB tms using it's name successfully ################################################################ - name: List tms use 
id ntnx_ndb_time_machines_info: - uuid: "{{tms.response[0].id}}" + uuid: "{{ tms.response[0].id }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{tms.response[0].name}}" + - result.response.name == "{{ tms.response[0].name }}" fail_msg: "Unable to get NDB tms using it's id " - success_msg: "get NDB tms using it's id successfully" + success_msg: get NDB tms using it's id successfully ################################################################ - name: List tms use id and load database as well ntnx_ndb_time_machines_info: - uuid: "{{tms.response[0].id}}" + uuid: "{{ tms.response[0].id }}" filters: load_database: true register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{tms.response[0].name}}" + - result.response.name == "{{ tms.response[0].name }}" - result.response.database is defined - fail_msg: "Unable to get NDB tms using it's id" - success_msg: "get NDB tms using it's id successfully" + fail_msg: Unable to get NDB tms using it's id + success_msg: get NDB tms using it's id successfully ################################################################ -- name: get NDB timemachine with incorrect name +- name: Get NDB timemachine with incorrect name ntnx_ndb_time_machines_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" + 
fail_msg: module didn't errored out correctly when incorrect name is given + success_msg: module errored out correctly when incorrect name is given diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/main.yml index da502fcc5..f4ad7a36b 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_time_machines_info integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "info.yml" + - name: Import tasks + ansible.builtin.import_tasks: info.yml diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml 
b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml index 57ced74b3..93a5a58f8 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml @@ -1,19 +1,19 @@ --- -- debug: +- name: Start testing ntnx_ndb_time_machine_clusters + ansible.builtin.debug: msg: Start testing ntnx_ndb_time_machine_clusters -- name: create data access instance with cluster name and sla name +- name: Create data access instance with cluster name and sla name ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" + time_machine_uuid: "{{ time_machine.uuid }}" cluster: - name: "{{cluster.cluster2.name}}" + name: "{{ cluster.cluster2.name }}" sla: - name: "{{sla.name}}" + name: "{{ sla.name }}" register: out - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.response is defined - out.time_machine_uuid is defined @@ -23,19 +23,17 @@ fail_msg: "fail: Unable create data access instance with cluster name and sla name" success_msg: "pass: create data access instance with cluster name and sla name finished successfully" ####################### -- name: update data access instance with new sla name +- name: Update data access instance with new sla name ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" - cluster: - name: "{{cluster.cluster2.name}}" - sla: - name: "{{sla2.name}}" + time_machine_uuid: "{{ time_machine.uuid }}" + cluster: + name: "{{ cluster.cluster2.name }}" + sla: + name: "{{ sla2.name }}" register: result - - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.time_machine_uuid is defined @@ -46,166 +44,144 @@ fail_msg: "fail: Unable to 
update data access instance with new sla name" success_msg: "pass: update data access instance with new sla name finished successfully" - -- name: idempotency checks +- name: Idempotency checks ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" - cluster: - name: "{{cluster.cluster2.name}}" - sla: - name: "{{sla2.name}}" + time_machine_uuid: "{{ time_machine.uuid }}" + cluster: + name: "{{ cluster.cluster2.name }}" + sla: + name: "{{ sla2.name }}" register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false - "'Nothing to change' in result.msg" - fail_msg: "clusters in time machine go updated" - success_msg: "update of clusters in time machine skipped due to no state changes" - + fail_msg: clusters in time machine go updated + success_msg: update of clusters in time machine skipped due to no state changes ############################################## multicluster snapshots and replication tests ######################### # cluster1: primary cluster # cluster2: secondary cluster -- name: create snapshot on cluster2 +- name: Create snapshot on cluster2 ntnx_ndb_database_snapshots: - name: "ansible-created-snapshot-on-{{cluster.cluster2.name}}" - time_machine_uuid: "{{time_machine.uuid}}" + name: ansible-created-snapshot-on-{{ cluster.cluster2.name }} + time_machine_uuid: "{{ time_machine.uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" register: result -- name: check snapshot status - assert: +- name: Check snapshot status + ansible.builtin.assert: that: - result.response is defined - result.snapshot_uuid is defined - result.changed == true - - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + - result.response.name == "ansible-created-snapshot-on-{{ cluster.cluster2.name }}" - result.failed == false - result.response.status == "PENDING" fail_msg: 
"fail: Unable to create snapshot for secondary cluster" success_msg: "pass: snapshot created successfully for secondary cluster" - -- name: wait for 2 minutes for replication to finish from source cluster to cluster2 +- name: Wait for 2 minutes for replication to finish from source cluster to cluster2 ansible.builtin.pause: minutes: 2 -- name: check the status of post of replication if snapshot is active +- name: Check the status of post of replication if snapshot is active ntnx_ndb_snapshots_info: - uuid: "{{result.snapshot_uuid}}" + uuid: "{{ result.snapshot_uuid }}" register: result -- name: check snapshot status - assert: +- name: Check snapshot status + ansible.builtin.assert: that: - - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + - result.response.name == "ansible-created-snapshot-on-{{ cluster.cluster2.name }}" - result.response.status == "ACTIVE" fail_msg: "fail: Unable to check snapshot status post internal replication" success_msg: "pass: snapshot replicated successfully on secondary cluster" - - -- name: create a snapshot on cluster1 +- name: Create a snapshot on cluster1 ntnx_ndb_database_snapshots: - name: "ansible-created-snapshot-on-{{cluster.cluster1.name}}" - time_machine_uuid: "{{time_machine.uuid}}" + name: ansible-created-snapshot-on-{{ cluster.cluster1.name }} + time_machine_uuid: "{{ time_machine.uuid }}" clusters: - - uuid: "{{cluster.cluster1.uuid}}" + - uuid: "{{ cluster.cluster1.uuid }}" register: result -- name: check snapshot status on cluster2 - assert: +- name: Check snapshot status on cluster2 + ansible.builtin.assert: that: - result.response is defined - result.snapshot_uuid is defined - result.changed == true - - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster1.name}}" + - result.response.name == "ansible-created-snapshot-on-{{ cluster.cluster1.name }}" - result.failed == false - result.response.nxClusterId == cluster.cluster1.uuid fail_msg: "fail: Unable to create snapshot 
on primary cluster" success_msg: "pass: snapshot created successfully on primary cluster" -- name: setting snapshot uuid for replication - set_fact: - snapshot_uuid: "{{result.snapshot_uuid}}" +- name: Setting snapshot uuid for replication + ansible.builtin.set_fact: + snapshot_uuid: "{{ result.snapshot_uuid }}" -- name: create spec for replicating snapshot from cluster1 on cluster2 - check_mode: yes +- name: Create spec for replicating snapshot from cluster1 on cluster2 + check_mode: true ntnx_ndb_replicate_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + snapshot_uuid: "{{ snapshot_uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" expiry_days: 20 register: result -- set_fact: - expected_result: { - "changed": false, - "error": null, - "failed": false, - "response": { - "lcmConfig": { - "snapshotLCMConfig": { - "expiryDetails": { - "expireInDays": 20 - } - } - }, - "nxClusterIds": [ - "{{cluster.cluster2.uuid}}" - ] - }, - "snapshot_uuid": "{{snapshot_uuid}}" - } - - - -- name: check snapshot replication spec - assert: +- name: Setting expected result for snapshot replication + ansible.builtin.set_fact: + expected_result: + changed: false + error: + failed: false + response: { lcmConfig: { snapshotLCMConfig: { expiryDetails: { expireInDays: 20 } } }, nxClusterIds: ["{{ cluster.cluster2.uuid }}"] } + snapshot_uuid: "{{ snapshot_uuid }}" +- name: Check snapshot replication spec + ansible.builtin.assert: that: - result == expected_result fail_msg: "fail: Unable to create snapshot replication snapshot" success_msg: "pass: snapshot replication spec created successfully" - -- name: replicate snapshot on cluster2 +- name: Replicate snapshot on cluster2 ntnx_ndb_replicate_database_snapshots: - snapshot_uuid: "{{snapshot_uuid}}" + snapshot_uuid: "{{ snapshot_uuid }}" clusters: - - name: "{{cluster.cluster2.name}}" + - name: "{{ cluster.cluster2.name }}" expiry_days: 20 register: result - - -- name: verify status of 
snapshot replication - assert: +- name: Verify status of snapshot replication + ansible.builtin.assert: that: - result.changed == True - result.failed == false - result.response.status == "5" - fail_msg: "snapshot replication failed" - success_msg: "snapshot replicated successfully" + fail_msg: snapshot replication failed + success_msg: snapshot replicated successfully ####################### -- name: delete time machine +- name: Delete time machine ntnx_ndb_time_machine_clusters: - state: absent - time_machine_uuid: "{{time_machine.uuid}}" - cluster: - uuid: "{{cluster.cluster2.uuid}}" + state: absent + time_machine_uuid: "{{ time_machine.uuid }}" + cluster: + uuid: "{{ cluster.cluster2.uuid }}" register: result -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.response is defined - result.time_machine_uuid is defined @@ -214,22 +190,22 @@ fail_msg: "fail: Unable to remove cluster from time machine" success_msg: "pass: cluster from time machine removed successfully" -- name: wait for 2 minutes for internal cleanup to finish +- name: Wait for 2 minutes for internal cleanup to finish ansible.builtin.pause: minutes: 5 ##################### -- name: create data access instance with cluster uuid and sla uuid +- name: Create data access instance with cluster uuid and sla uuid ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" - cluster: - uuid: "{{cluster.cluster2.uuid}}" - sla: - uuid: "{{sla.uuid}}" + time_machine_uuid: "{{ time_machine.uuid }}" + cluster: + uuid: "{{ cluster.cluster2.uuid }}" + sla: + uuid: "{{ sla.uuid }}" register: out -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - out.response is defined - out.time_machine_uuid is defined @@ -239,17 +215,17 @@ fail_msg: "fail: Unable create data access instance with cluster uuid and sla uuid" success_msg: "pass: create data access instance with cluster uuid and sla uuid 
finished successfully" ####################### -- name: update data access instance with sla uuid +- name: Update data access instance with sla uuid ntnx_ndb_time_machine_clusters: - time_machine_uuid: "{{time_machine.uuid}}" - cluster: - uuid: "{{cluster.cluster2.uuid}}" - sla: - uuid: "{{sla2.uuid}}" + time_machine_uuid: "{{ time_machine.uuid }}" + cluster: + uuid: "{{ cluster.cluster2.uuid }}" + sla: + uuid: "{{ sla2.uuid }}" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.time_machine_uuid is defined @@ -260,16 +236,16 @@ fail_msg: "fail: Unable to update data access instance with sla uuid" success_msg: "pass: update data access instance with sla uuid finished successfully" -- name: delete time machine +- name: Delete time machine ntnx_ndb_time_machine_clusters: - state: absent - time_machine_uuid: "{{result.time_machine_uuid}}" - cluster: - uuid: "{{cluster.cluster2.uuid}}" + state: absent + time_machine_uuid: "{{ result.time_machine_uuid }}" + cluster: + uuid: "{{ cluster.cluster2.uuid }}" register: result -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.response is defined - result.time_machine_uuid is defined diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml index df1211a7a..3d6488810 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Ntnx_ndb_time_machines_with_multi_clusters integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + 
nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "data_access_management_and_snapshots.yml" + - name: Import tasks + ansible.builtin.import_tasks: data_access_management_and_snapshots.yml diff --git a/tests/integration/targets/ntnx_ndb_vlans/aliases b/tests/integration/targets/ntnx_ndb_vlans/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/aliases +++ b/tests/integration/targets/ntnx_ndb_vlans/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml b/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml index ea2e9da19..c8424a0e9 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml index adcdcc300..09eb1929c 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml @@ -1,70 +1,67 @@ --- -- debug: +- name: Start testing ntnx_ndb_vlans + ansible.builtin.debug: msg: Start testing ntnx_ndb_vlans -- name: create Dhcp ndb vlan +- name: Create Dhcp ndb vlan ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ ndb_vlan.name }}" vlan_type: DHCP cluster: - uuid: "{{cluster.cluster2.uuid}}" + uuid: "{{ cluster.cluster2.uuid }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.vlan_uuid is defined - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ 
ndb_vlan.name }}" - result.response.type == "DHCP" - result.response.managed == false - - result.response.clusterId == "{{cluster.cluster2.uuid}}" + - result.response.clusterId == "{{ cluster.cluster2.uuid }}" fail_msg: "fail: Unable to create Dhcp ndb vlan" success_msg: "pass: create Dhcp ndb vlan finished successfully" ################################################################ - -- name: update ndb vlan type for static +- name: Update ndb vlan type for static ntnx_ndb_vlans: - vlan_uuid: "{{result.vlan_uuid}}" + vlan_uuid: "{{ result.vlan_uuid }}" vlan_type: Static - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - - - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: result - ignore_errors: True + ignore_errors: true no_log: true - -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.vlan_uuid is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "Static" - result.response.managed == false - - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" - - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" - - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" - - 
result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" - - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ ndb_vlan.dns_domain }}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ ndb_vlan.gateway }}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ ndb_vlan.primary_dns }}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ ndb_vlan.secondary_dns }}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ ndb_vlan.subnet_mask }}" - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" @@ -72,77 +69,79 @@ fail_msg: "fail: unable to update ndb vlan type for static" success_msg: "pass: update ndb vlan type for static finished successfully" -- set_fact: +- name: Define variable todelete + ansible.builtin.set_fact: todelete: [] -- set_fact: - todelete: "{{ todelete + [ result.vlan_uuid ] }}" +- name: Define variable todelete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.vlan_uuid] }}" ################################################################ - name: List all NDB vlans ntnx_ndb_vlans_info: register: vlans no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - vlans.response is defined - vlans.failed == false - vlans.changed == false - vlans.response | length > 0 - fail_msg: "Unable to list all NDB vlans" - success_msg: "NDB vlans listed successfully" + fail_msg: Unable to list all NDB vlans + success_msg: NDB vlans listed successfully ################################################################ -- name: get NDB vlans using it's name +- name: Get NDB vlans using it's name ntnx_ndb_vlans_info: - name: "{{vlans.response[0].name}}" + name: "{{ 
vlans.response[0].name }}" register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.id == "{{vlans.response[0].id}}" + - result.response.id == "{{ vlans.response[0].id }}" fail_msg: "Unable to get NDB vlans using it's name " - success_msg: "get NDB vlans using it's name successfully" + success_msg: get NDB vlans using it's name successfully ################################################################ - name: List vlans use id ntnx_ndb_vlans_info: - uuid: "{{vlans.response[0].id}}" + uuid: "{{ vlans.response[0].id }}" register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - - result.response.name == "{{vlans.response[0].name}}" + - result.response.name == "{{ vlans.response[0].name }}" fail_msg: "Unable to get NDB vlans using it's id " - success_msg: "get NDB vlans using it's id successfully" + success_msg: get NDB vlans using it's id successfully ################################################################ -- name: get NDB vlans with incorrect name +- name: Get NDB vlans with incorrect name ntnx_ndb_vlans_info: - name: "abcd" + name: abcd register: result no_log: true - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.error is defined - result.failed == true @@ -154,122 +153,121 @@ - name: Delete created vlan's ntnx_ndb_vlans: state: absent - vlan_uuid: "{{ todelete[0]}}" + vlan_uuid: "{{ todelete[0] }}" register: result no_log: true - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Define variable todelete + 
ansible.builtin.set_fact: todelete: [] ################################################################ -- name: create static ndb vlan +- name: Create static ndb vlan ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ ndb_vlan.name }}" vlan_type: Static cluster: - uuid: "{{cluster.cluster2.uuid}}" - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + uuid: "{{ cluster.cluster2.uuid }}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - - - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.vlan_uuid is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "Static" - result.response.managed == false - - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" - - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" - - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" - - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" - - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ ndb_vlan.dns_domain }}" + - 
result.response.propertiesMap.VLAN_GATEWAY == "{{ ndb_vlan.gateway }}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ ndb_vlan.primary_dns }}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ ndb_vlan.secondary_dns }}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ ndb_vlan.subnet_mask }}" - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" - result.response.ipPools[1].startIP == "{{ndb_vlan.ip_pools.1.start_ip}}" fail_msg: "fail: unable to create static ndb vlan" success_msg: "pass: create static ndb vlan finished successfully" -- set_fact: - todelete: "{{ todelete + [ result.vlan_uuid ] }}" +- name: Define variable todelete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.vlan_uuid] }}" ################################################################ -- name: update ndb vlan by removing ip pool +- name: Update ndb vlan by removing ip pool ntnx_ndb_vlans: - vlan_uuid: "{{result.vlan_uuid}}" + vlan_uuid: "{{ result.vlan_uuid }}" remove_ip_pools: - - "{{result.response.ipPools[0].id}}" - - "{{result.response.ipPools[1].id}}" + - "{{ result.response.ipPools[0].id }}" + - "{{ result.response.ipPools[1].id }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.vlan_uuid is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "Static" - result.response.managed == false - - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" - - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" - - result.response.propertiesMap.VLAN_PRIMARY_DNS == 
"{{ndb_vlan.primary_dns}}" - - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" - - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ ndb_vlan.dns_domain }}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ ndb_vlan.gateway }}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ ndb_vlan.primary_dns }}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ ndb_vlan.secondary_dns }}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ ndb_vlan.subnet_mask }}" - result.response.ipPools == [] fail_msg: "fail: unable to update ndb vlan by removing ip pool" success_msg: "pass: update ndb vlan by removing ip pool finished successfully" ################################################################ -- name: update ndb vlan by adding a pool +- name: Update ndb vlan by adding a pool ntnx_ndb_vlans: - vlan_uuid: "{{result.vlan_uuid}}" + vlan_uuid: "{{ result.vlan_uuid }}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.ipPools is defined - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "Static" - result.response.managed == false - - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" - - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" - - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" - - 
result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" - - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ ndb_vlan.dns_domain }}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ ndb_vlan.gateway }}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ ndb_vlan.primary_dns }}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ ndb_vlan.secondary_dns }}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ ndb_vlan.subnet_mask }}" - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" fail_msg: "fail: unable to update ndb vlan by adding a pool " @@ -277,51 +275,51 @@ ################################################################ -- name: update ndb vLAN Configuration +- name: Update ndb vLAN Configuration ntnx_ndb_vlans: - vlan_uuid: "{{result.vlan_uuid}}" - gateway: "{{ndb_vlan.updated_gateway}}" - subnet_mask: "{{ndb_vlan.updated_subnet_mask}}" - primary_dns: "{{ndb_vlan.updated_primary_dns}}" - secondary_dns: "{{ndb_vlan.updated_secondary_dns}}" - dns_domain: "{{ndb_vlan.updated_dns_domain}}" + vlan_uuid: "{{ result.vlan_uuid }}" + gateway: "{{ ndb_vlan.updated_gateway }}" + subnet_mask: "{{ ndb_vlan.updated_subnet_mask }}" + primary_dns: "{{ ndb_vlan.updated_primary_dns }}" + secondary_dns: "{{ ndb_vlan.updated_secondary_dns }}" + dns_domain: "{{ ndb_vlan.updated_dns_domain }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.managed == false - - result.response.propertiesMap.VLAN_DNS_DOMAIN == 
"{{ndb_vlan.updated_dns_domain}}" - - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.updated_gateway}}" - - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.updated_primary_dns}}" - - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.updated_secondary_dns}}" - - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.updated_subnet_mask}}" + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ ndb_vlan.updated_dns_domain }}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ ndb_vlan.updated_gateway }}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ ndb_vlan.updated_primary_dns }}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ ndb_vlan.updated_secondary_dns }}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ ndb_vlan.updated_subnet_mask }}" fail_msg: "fail: unable to update ndb vLAN Configuration" success_msg: "pass: update ndb vLAN Configuration finished successfully" ################################################################ -- name: update ndb vlan type +- name: Update ndb vlan type ntnx_ndb_vlans: - vlan_uuid: "{{result.vlan_uuid}}" + vlan_uuid: "{{ result.vlan_uuid }}" vlan_type: DHCP register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "DHCP" - result.response.managed == false - result.vlan_uuid is defined @@ -340,10 +338,10 @@ # - "{{st_vlan.vlans_subnet1[1]}}" # register: result # ignore_errors: true -# - debug: +# - ansible.builtin.debug: # var: result # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == true @@ -356,12 +354,11 @@ # fail_msg: "fail: Unable to create 
ndb_stretched vlan" # success_msg: "pass: create ndb_stretched vlan finished successfully" -# - set_fact: +# - ansible.builtin.set_fact: # todelete: "{{ todelete + [ result.vlan_uuid ] }}" # ################################################################ - # - name: update ndb_stretched vlan name, desc and vlans # ntnx_ndb_stretched_vlans: # name: newname @@ -372,10 +369,10 @@ # - "{{st_vlan.vlans_subnet2[1]}}" # register: result # ignore_errors: true -# - debug: +# - ansible.builtin.debug: # var: result # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == true @@ -397,10 +394,10 @@ # subnet_mask: "{{st_vlan.subnet_mask}}" # register: result # ignore_errors: true -# - debug: +# - ansible.builtin.debug: # var: result # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == true @@ -419,16 +416,17 @@ vlan_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true - result.msg == "All items completed" - fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted successfully" -- set_fact: + fail_msg: unable to delete all created vlan's + success_msg: All vlan'sdeleted successfully +- name: Define variable todelete + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml index 2ffefe7dc..6a0aeeb96 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Ntnx_ndb_vlans integration tests + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ndb_ip}}" - 
nutanix_username: "{{ndb_username}}" - nutanix_password: "{{ndb_password}}" - validate_certs: false + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false block: - - import_tasks: "create_vlans.yml" - - import_tasks: "negativ_scenarios.yml" + - name: Import tasks + ansible.builtin.import_tasks: create_vlans.yml + - name: Import tasks + ansible.builtin.import_tasks: negativ_scenarios.yml diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml index ad41fd7eb..cf34abc92 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml @@ -1,26 +1,26 @@ --- -- debug: +- name: Start negative secanrios ntnx_ndb_vlans + ansible.builtin.debug: msg: Start negative secanrios ntnx_ndb_vlans -- name: create Dhcp ndb vlan with static Configuration +- name: Create Dhcp ndb vlan with static Configuration ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ ndb_vlan.name }}" vlan_type: DHCP - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == false - result.failed == true @@ -28,17 +28,17 @@ fail_msg: "fail: 
create Dhcp ndb vlan with static Configuration finished successfully" success_msg: "pass: Returnerd error as expected" # ############################### -- name: create static ndb vlan with missing Configuration +- name: Create static ndb vlan with missing Configuration ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ ndb_vlan.name }}" vlan_type: Static - gateway: "{{ndb_vlan.gateway}}" + gateway: "{{ ndb_vlan.gateway }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == false - result.failed == true @@ -47,54 +47,53 @@ success_msg: "pass: Returnerd error as expected" ########### -- name: create Dhcp ndb vlan +- name: Create Dhcp ndb vlan ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ ndb_vlan.name }}" vlan_type: DHCP cluster: - uuid: "{{cluster.cluster2.uuid}}" + uuid: "{{ cluster.cluster2.uuid }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true - result.vlan_uuid is defined - result.failed == false - - result.response.name == "{{ndb_vlan.name}}" + - result.response.name == "{{ ndb_vlan.name }}" - result.response.type == "DHCP" - result.response.managed == false - - result.response.clusterId == "{{cluster.cluster2.uuid}}" + - result.response.clusterId == "{{ cluster.cluster2.uuid }}" fail_msg: "fail: Unable to create Dhcp ndb vlan" success_msg: "pass: create Dhcp ndb vlan finished successfully" -- set_fact: - todelete: "{{ todelete + [ result.vlan_uuid ] }}" +- name: Define variable todelete + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.vlan_uuid] }}" ###################### -- name: update dhcp ndb vlan with static Configuration +- name: Update dhcp ndb vlan with static Configuration ntnx_ndb_vlans: - 
vlan_uuid: "{{result.vlan_uuid}}" - gateway: "{{ndb_vlan.gateway}}" - subnet_mask: "{{ndb_vlan.subnet_mask}}" + vlan_uuid: "{{ result.vlan_uuid }}" + gateway: "{{ ndb_vlan.gateway }}" + subnet_mask: "{{ ndb_vlan.subnet_mask }}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - - - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" - primary_dns: "{{ndb_vlan.primary_dns}}" - secondary_dns: "{{ndb_vlan.secondary_dns}}" - dns_domain: "{{ndb_vlan.dns_domain}}" + primary_dns: "{{ ndb_vlan.primary_dns }}" + secondary_dns: "{{ ndb_vlan.secondary_dns }}" + dns_domain: "{{ ndb_vlan.dns_domain }}" register: result - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == false - result.failed == true @@ -110,17 +109,18 @@ vlan_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true no_log: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true - result.msg == "All items completed" - fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted successfully" + fail_msg: unable to delete all created vlan's + success_msg: All vlan'sdeleted successfully -- set_fact: +- name: Define variable todelete + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_operations_info_v2/aliases b/tests/integration/targets/ntnx_operations_info_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_operations_info_v2/meta/main.yml b/tests/integration/targets/ntnx_operations_info_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- 
/dev/null +++ b/tests/integration/targets/ntnx_operations_info_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_operations_info_v2/tasks/main.yml b/tests/integration/targets/ntnx_operations_info_v2/tasks/main.yml new file mode 100644 index 000000000..3ac529ed6 --- /dev/null +++ b/tests/integration/targets/ntnx_operations_info_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import permissions_info.yml + ansible.builtin.import_tasks: permissions_info.yml diff --git a/tests/integration/targets/ntnx_operations_info_v2/tasks/permissions_info.yml b/tests/integration/targets/ntnx_operations_info_v2/tasks/permissions_info.yml new file mode 100644 index 000000000..804bbc6ba --- /dev/null +++ b/tests/integration/targets/ntnx_operations_info_v2/tasks/permissions_info.yml @@ -0,0 +1,82 @@ +--- +- name: Start testing ntnx_operations_info_v2 + ansible.builtin.debug: + msg: start testing ntnx_operations_info_v2 + +- name: List all operations + ntnx_operations_info_v2: + register: result + ignore_errors: true + +- name: List all operations Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + fail_msg: Unable to list all operations + success_msg: operations info obtained successfully + +- name: Set test permission name + ansible.builtin.set_fact: + test_permission_name: "{{ result.response[0].display_name }}" + +################################################## + +- name: List operations using filter criteria + ntnx_operations_info_v2: + filter: displayName eq '{{ test_permission_name }}' + register: result + ignore_errors: true + +- name: List operations using filter criteria Status + 
ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].display_name == "{{ test_permission_name }}" + - result.response | length == 1 + fail_msg: Unable to list operations using filter + success_msg: permission info obtained successfully + +- name: Set test permission uuid + ansible.builtin.set_fact: + test_permission_uuid: "{{result.response.0.ext_id}}" + +################################################## + +- name: List permission using ext_id criteria + ntnx_operations_info_v2: + ext_id: "{{ test_permission_uuid }}" + register: result + ignore_errors: true + +- name: List permission using ext_id criteria Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.display_name == "{{ test_permission_name }}" + fail_msg: Unable to list permission using ext_id + success_msg: permission info obtained successfully + +################################################## + +- name: List operations using limit + ntnx_operations_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List operations using limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: Unable to list operations using limit + success_msg: operations listed successfully limit diff --git a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml index 685690e4a..116bc50ac 100644 --- a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml +++ b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml @@ -1,73 +1,74 @@ -- debug: +- name: Start testing create ova for vm + ansible.builtin.debug: msg: Start testing create ova for vm - name: VM with minimum requiremnts ntnx_vms: - state: present - name: integration_test_ova_vm - cluster: - 
name: "{{ cluster.name }}" + state: present + name: integration_test_ova_vm + cluster: + name: "{{ cluster.name }}" register: vm ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts ' - success_msg: 'Success: VM with minimum requiremnts created successfully ' + fail_msg: "Fail: Unable to create VM with minimum requiremnts " + success_msg: "Success: VM with minimum requiremnts created successfully " ######################################### -- name: create_ova_image with check mode +- name: Create_ova_image with check mode ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_VMDK_ova - file_format: VMDK + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_VMDK_ova + file_format: VMDK register: result ignore_errors: true - check_mode: yes + check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.task_uuid != "" - success_msg: ' Success: returned as expected ' - fail_msg: ' Fail: create_ova_image with check mode ' + success_msg: " Success: returned as expected " + fail_msg: " Fail: create_ova_image with check mode " ######################################### -- name: create QCOW2 ova_image +- name: Create QCOW2 ova_image ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_QCOW2_ova - file_format: QCOW2 + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_QCOW2_ova + file_format: QCOW2 register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create QCOW2 ova_image ' - success_msg: 'Success: create QCOW2 ova_image successfully ' + fail_msg: "Fail: Unable to create QCOW2 ova_image " + success_msg: 
"Success: create QCOW2 ova_image successfully " ######################################### -- name: create VMDK ova_image +- name: Create VMDK ova_image ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_VMDK_ova - file_format: VMDK + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_VMDK_ova + file_format: VMDK register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VMDK ova_image ' - success_msg: 'Success: create VMDK ova_image successfully ' + fail_msg: "Fail: Unable to create VMDK ova_image " + success_msg: "Success: create VMDK ova_image successfully " ######################################### - name: Delete all Created VMs ntnx_vms: diff --git a/tests/integration/targets/ntnx_ova/tasks/main.yml b/tests/integration/targets/ntnx_ova/tasks/main.yml index da181200b..3560275ce 100644 --- a/tests/integration/targets/ntnx_ova/tasks/main.yml +++ b/tests/integration/targets/ntnx_ova/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_ova.yml" + - name: Import create_ova.yml + ansible.builtin.import_tasks: "create_ova.yml" diff --git a/tests/integration/targets/ntnx_pbrs_v2/aliases b/tests/integration/targets/ntnx_pbrs_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_pbrs_v2/meta/main.yml b/tests/integration/targets/ntnx_pbrs_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ 
b/tests/integration/targets/ntnx_pbrs_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_pbrs_v2/tasks/all_operation.yml b/tests/integration/targets/ntnx_pbrs_v2/tasks/all_operation.yml new file mode 100644 index 000000000..e7ec170fe --- /dev/null +++ b/tests/integration/targets/ntnx_pbrs_v2/tasks/all_operation.yml @@ -0,0 +1,808 @@ +--- +- name: Start testing ntnx_pbrs_v2, ntnx_pbrs_info_v2 + ansible.builtin.debug: + msg: Start testing ntnx_pbrs_v2, ntnx_pbrs_info_v2 + +- name: Generate random pbr name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: "ansible-pbr-" + +- name: Set pbr name + ansible.builtin.set_fact: + pbr_name: "{{ random_name }}{{ suffix_name }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create min VPC + ntnx_vpcs_v2: + state: present + name: "{{random_name}}vpc" + register: result + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.name == "{{random_name}}vpc" + - result.response.ext_id is defined + - result.changed == true + - result.failed == false + fail_msg: " Unable to create min VPC " + success_msg: " Min VPC for pbr testing created successfully " + +- name: Set vpc uuid + ansible.builtin.set_fact: + pbr: + vpc: + uuid: "{{ result.ext_id }}" +############################################################################################################################### +- name: Create PBR with vpc , source network, external destination, with PERMIT action and icmp with check_mode + ntnx_pbrs_v2: + state: present + name: "{{pbr_name}}1" + priority: "{{ priority.0 }}" + vpc_ext_id: "{{pbr.vpc.uuid}}" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + 
value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + destination: + address_type: EXTERNAL + protocol_type: ICMP + protocol_parameters: + icmp: + icmp_type: 25 + icmp_code: 1 + register: result + ignore_errors: true + check_mode: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}1" + - result.response.policies.0.policy_action.action_type == "PERMIT" + - result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "ICMP" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.priority == {{ priority.0 }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_code == 1 + - result.response.policies.0.policy_match.protocol_parameters.icmp_type == 25 + - result.changed == false + - result.failed == false + fail_msg: " Unable to Create PBR with vpc , source network, external destination, with PERMIT action and icmp with check mode" + success_msg: "Returned as expected in check mode" +############################################################################################################################### +- name: Create PBR with vpc , source network, external destination, with PERMIT action and icmp + ntnx_pbrs_v2: + state: present + name: "{{pbr_name}}1" + priority: "{{ priority.0 }}" + vpc_ext_id: "{{pbr.vpc.uuid}}" + description: "test description" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + destination: + address_type: EXTERNAL + protocol_type: ICMP + protocol_parameters: + icmp: + icmp_type: 25 + icmp_code: 1 + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.ext_id == result.ext_id + - result.response.description == "test description" + - result.response.name == "{{pbr_name}}1" + - result.response.policies.0.policy_action.action_type == "PERMIT" + - result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "ICMP" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.priority == {{ priority.0 }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_code == 1 + - result.response.policies.0.policy_match.protocol_parameters.icmp_type == 25 + - result.changed == true + - result.failed == false + fail_msg: " Unable to Create PBR with vpc , source network, external destination, with PERMIT action and icmp" + success_msg: " Create PBR with vpc , source network, external destination, with PERMIT action and icmp created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +############################################################################################################################### +- name: Check idempotency + ntnx_pbrs_v2: + state: present + name: "{{pbr_name}}1" + ext_id: "{{ result.ext_id }}" + priority: "{{ priority.0 }}" + vpc_ext_id: "{{pbr.vpc.uuid}}" + register: result + ignore_errors: true + +- name: Check idempotency status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ fail_msg: " Unable to check idempotency " + success_msg: " idempotency check finished successfully" +############################################################################################################################### +- name: List pbr using priority and vpcExtId filter criteria + ntnx_pbrs_info_v2: + filter: "priority eq {{priority.0}} and vpcExtId eq '{{pbr.vpc.uuid}}' " + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response[0].ext_id == todelete[-1] + - result.response.0.priority == {{ priority.0 }} + - result.response[0].vpc_ext_id == '{{pbr.vpc.uuid}}' + - result.response[0].name == "{{pbr_name}}1" + - result.response.0.policies.0.policy_action.action_type == "PERMIT" + - result.response.0.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.0.policies.0.policy_match.protocol_type == "ICMP" + - result.response.0.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.0.policies.0.policy_match.protocol_parameters.icmp_code == 1 + - result.response.0.policies.0.policy_match.protocol_parameters.icmp_type == 25 + - result.changed == false + - result.failed == false + fail_msg: " Unable to list pbrs using priority and vpcExtId filter criteria " + success_msg: " pbrs listed successfully using priority and vpcExtId filter criteria" +############################################################################################################################### +- name: List pbrs using ext_id + ntnx_pbrs_info_v2: + ext_id: "{{todelete[-1]}}" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.ext_id == "{{todelete[-1]}}" + - result.response.name == "{{pbr_name}}1" + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.policies.0.policy_action.action_type == "PERMIT" + - 
result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "ICMP" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.policies.0.policy_match.protocol_parameters.icmp_code == 1 + - result.response.policies.0.policy_match.protocol_parameters.icmp_type == 25 + - result.changed == false + - result.failed == false + fail_msg: " Unable to list pbrs using ext_id " + success_msg: " pbrs listed successfully using ext_id" +############################################################################################################################### +- name: Create PBR with vpc, any source, any destination, any protocol and permit action + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.1 }}" + name: "{{pbr_name}}2" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: ANY + destination: + address_type: ANY + protocol_type: ANY + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}2" + - result.response.policies.0.policy_action.action_type == "PERMIT" + - result.response.policies.0.policy_match.destination.address_type == "ANY" + - result.response.policies.0.policy_match.protocol_type == "ANY" + - result.response.policies.0.policy_match.source.address_type == "ANY" + - result.response.priority == {{ priority.1}} + fail_msg: " Unable to create PBR with vpc, any source, any destination, any protocol and PERMIT action " + success_msg: " PBR with vpc, any source, any destination, any protocol and PERMIT action created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] 
}}" +############################################################################################## +- name: Check idempotency + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.1 }}" + ext_id: "{{ result.ext_id }}" + name: "{{pbr_name}}2" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: ANY + destination: + address_type: ANY + protocol_type: ANY + register: result + ignore_errors: true + +- name: Check idempotency status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." + fail_msg: " Unable to check idempotency " + success_msg: " Idempotency check finished successfully" +############################################################################################################################### +- name: List pbrs using limit + ntnx_pbrs_info_v2: + limit: 1 + register: pbrs + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - pbrs.response is defined + - pbrs.response | length == 1 + fail_msg: " Unable to list pbrs using limit " + success_msg: " PBRs listed successfully using limit" +############################################################################################################################### +- name: Create PBR with vpc, any source, external destination and deny action with protocol number + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.2 }}" + name: "{{pbr_name}}3" + description: "Test desc" + policies: + - policy_action: + action_type: DENY + policy_match: + source: + address_type: ANY + destination: + address_type: EXTERNAL + protocol_type: PROTOCOL_NUMBER + protocol_parameters: + protocol_number: + protocol_number: "{{ protocol.number }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - 
result.changed == true + - result.failed == false + - result.response.description == "Test desc" + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}3" + - result.response.policies.0.policy_action.action_type == "DENY" + - result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "PROTOCOL_NUMBER" + - result.response.policies.0.policy_match.protocol_parameters.protocol_number == {{ protocol.number }} + - result.response.priority == {{ priority.2 }} + fail_msg: " Unable to create PBR with vpc, any source, external destination and deny action with protocol number" + success_msg: " Create PBR with vpc, any source, external destination and deny action with protocol number created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +###################################################################################################### +- name: Update PBR name ,description, priority + ntnx_pbrs_v2: + state: present + ext_id: "{{ result.ext_id }}" + priority: "{{ priority.8 }}" + name: "{{pbr_name}}3_new" + description: "Updated Test desc" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.changed == true + - result.failed == false + - result.response.description == "Updated Test desc" + - result.response.name == "{{pbr_name}}3_new" + - result.response.priority == {{ priority.8 }} + fail_msg: " Unable to update PBR name ,description, priority " + success_msg: " PBR name ,description, priority updated successfully " +###################################################################################################### +- name: Update PBR policy_action, policy_match, protocol_parameters + ntnx_pbrs_v2: 
+ state: present + ext_id: "{{ result.ext_id }}" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: ANY + destination: + address_type: ANY + protocol_type: ANY + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.changed == true + - result.failed == false + - result.response.policies.0.policy_action.action_type == "PERMIT" + - result.response.policies.0.policy_match.destination.address_type == "ANY" + - result.response.policies.0.policy_match.protocol_type == "ANY" + - result.response.policies.0.policy_match.source.address_type == "ANY" + + fail_msg: " Unable to update PBR name ,description, priority " + success_msg: " PBR name ,description, priority updated successfully " +###################################################################################################### +- name: Create PBR with vpc with source external and destination network with reroute action and with tcp protocol + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.3 }}" + name: "{{pbr_name}}4" + policies: + - policy_action: + action_type: REROUTE + reroute_params: + - reroute_fallback_action: NO_ACTION + service_ip: + ipv4: + value: 10.0.0.15 + prefix_length: 32 + policy_match: + source: + address_type: EXTERNAL + destination: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + protocol_type: TCP + protocol_parameters: + tcp: + source_port_ranges: + - start_port: "{{ tcp.port }}" + end_port: "{{ tcp.port }}" + destination_port_ranges: + - start_port: "{{ tcp.port_rangelist.0 }}" + end_port: "{{ tcp.port_rangelist.1 }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is 
defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}4" + - result.response.policies.0.policy_action.action_type == "REROUTE" + - result.response.policies.0.policy_match.destination.address_type == "SUBNET" + - result.response.policies.0.policy_match.source.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "TCP" + - result.response.policies.0.policy_match.destination.subnet_prefix.ipv4.ip.value == "{{ network.ip }}" + - result.response.policies.0.policy_match.destination.subnet_prefix.ipv4.ip.prefix_length == 32 + - result.response.priority == {{ priority.3}} + - result.response.policies.0.policy_action.reroute_params.0.reroute_fallback_action == "NO_ACTION" + - result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.prefix_length == 32 + - result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.value == "10.0.0.15" + - result.response.policies.0.policy_match.protocol_parameters.destination_port_ranges.0.end_port == {{ tcp.port_rangelist.1 }} + - result.response.policies.0.policy_match.protocol_parameters.destination_port_ranges.0.start_port == {{ tcp.port_rangelist.0 }} + - result.response.policies.0.policy_match.protocol_parameters.source_port_ranges.0.end_port == {{ tcp.port }} + - result.response.policies.0.policy_match.protocol_parameters.source_port_ranges.0.start_port == {{ tcp.port }} + fail_msg: " Unable to create PBR with vpc with source external and destination network with reroute action and tcp port rangelist " + success_msg: " PBR with vpc with source external and destination network with reroute action and tcp port rangelist created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" + 
+########################################################################################################## +- name: Create PBR with vpc with source external and destination network with reroute action and any tcp port rangelist + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.4 }}" + name: "{{pbr_name}}5" + policies: + - policy_action: + action_type: PERMIT + policy_match: + source: + address_type: EXTERNAL + destination: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + protocol_type: TCP + protocol_parameters: + tcp: + source_port_ranges: [] + destination_port_ranges: [] + + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}5" + - result.response.policies.0.policy_action.action_type == "PERMIT" + - result.response.policies.0.policy_match.destination.address_type == "SUBNET" + - result.response.policies.0.policy_match.source.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "TCP" + - result.response.policies.0.policy_match.destination.subnet_prefix.ipv4.ip.value == "{{ network.ip }}" + - result.response.policies.0.policy_match.destination.subnet_prefix.ipv4.ip.prefix_length ==32 + - result.response.priority == {{ priority.4}} + fail_msg: " Unable to create PBR with vpc with source external and destination network with reroute action and any tcp ports rangelist " + success_msg: " PBR with vpc with source external and destination network with reroute action and any tcp ports rangelist created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" 
+########################################################################################################### +- name: Create PBR with vpc, custom source network, external destination, reroute action and udp port rangelist + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.5 }}" + name: "{{pbr_name}}6" + policies: + - policy_action: + action_type: REROUTE + reroute_params: + - reroute_fallback_action: NO_ACTION + service_ip: + ipv4: + value: 10.0.0.16 + prefix_length: 32 + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + destination: + address_type: ANY + + protocol_type: UDP + protocol_parameters: + udp: + source_port_ranges: + - start_port: "{{ udp.port }}" + end_port: "{{ udp.port }}" + destination_port_ranges: + - start_port: "{{ udp.port_rangelist.0 }}" + end_port: "{{ udp.port_rangelist.1 }}" + + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}6" + - result.response.policies.0.policy_action.action_type == "REROUTE" + - result.response.policies.0.policy_match.destination.address_type == "ANY" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.policies.0.policy_match.protocol_type == "UDP" + - result.response.policies.0.policy_match.source.subnet_prefix.ipv4.ip.value == "{{ network.ip }}" + - result.response.policies.0.policy_match.source.subnet_prefix.ipv4.ip.prefix_length == 32 + - result.response.priority == {{ priority.5}} + - result.response.policies.0.policy_action.reroute_params.0.reroute_fallback_action == "NO_ACTION" + - result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.prefix_length == 32 + - 
result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.value == "10.0.0.16" + - result.response.policies.0.policy_match.protocol_parameters.destination_port_ranges.0.end_port == {{ udp.port_rangelist.1 }} + - result.response.policies.0.policy_match.protocol_parameters.destination_port_ranges.0.start_port == {{ udp.port_rangelist.0 }} + - result.response.policies.0.policy_match.protocol_parameters.source_port_ranges.0.end_port == {{ udp.port }} + - result.response.policies.0.policy_match.protocol_parameters.source_port_ranges.0.start_port == {{ udp.port }} + fail_msg: " Unable to create PBR with vpc with source network and destination external with reroute action and udp port rangelist" + success_msg: " PBR with vpc with source network and destination external with reroute action and udp port rangelist created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +############################################################################################################## +- name: Create PBR with vpc name, custom source network, external destination, reroute action and any udp ports + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.6 }}" + name: "{{pbr_name}}7" + policies: + - policy_action: + action_type: REROUTE + reroute_params: + - reroute_fallback_action: NO_ACTION + service_ip: + ipv4: + value: 10.0.0.17 + prefix_length: 32 + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + destination: + address_type: ANY + protocol_type: UDP + protocol_parameters: + udp: + source_port_ranges: [] + destination_port_ranges: [] + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + - result.failed == 
false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}7" + - result.response.policies.0.policy_action.action_type == "REROUTE" + - result.response.policies.0.policy_match.destination.address_type == "ANY" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.policies.0.policy_match.protocol_type == "UDP" + - result.response.policies.0.policy_match.source.subnet_prefix.ipv4.ip.value == "{{ network.ip }}" + - result.response.policies.0.policy_match.source.subnet_prefix.ipv4.ip.prefix_length == 32 + - result.response.priority == {{ priority.6}} + - result.response.policies.0.policy_action.reroute_params.0.reroute_fallback_action == "NO_ACTION" + - result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.prefix_length == 32 + - result.response.policies.0.policy_action.reroute_params.0.service_ip.ipv4.value == "10.0.0.17" + fail_msg: " Unable to create PBR with vpc with source network and destination external with reroute action and and udp port " + success_msg: " PBR with vpc with source network and destination external with reroute action and any udp ports created successfully " + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################################################################## +- name: Create PBR with vpc, source network, external destination, with reroute action and icmp + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.7 }}" + name: "{{pbr_name}}8" + policies: + - policy_action: + action_type: REROUTE + reroute_params: + - service_ip: + ipv4: + value: "{{reroute.ip}}" + prefix_length: "{{reroute.prefix_length}}" + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + destination: + 
address_type: EXTERNAL + protocol_type: ICMP + protocol_parameters: + icmp: + icmp_type: "{{ icmp.type }}" + icmp_code: "{{ icmp.code }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}8" + - result.response.policies.0.policy_action.action_type == "REROUTE" + - result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "ICMP" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.priority == {{ priority.7 }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_code == {{ icmp.code }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_type == {{ icmp.type }} + fail_msg: "Unable to Create PBR with vpc, source network, external destination, with reroute action and icmp" + success_msg: "Create PBR with vpc, source network, external destination, with reroute action and icmp finished successfully" + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################################################################## +- name: Create PBR with vpc, source network, external destination, with forward action and icmp + ntnx_pbrs_v2: + vpc_ext_id: "{{pbr.vpc.uuid}}" + state: present + priority: "{{ priority.9 }}" + name: "{{pbr_name}}9" + policies: + - policy_action: + action_type: FORWARD + nexthop_ip_address: + ipv4: + value: "{{reroute.ip}}" + prefix_length: "{{reroute.prefix_length}}" + + policy_match: + source: + address_type: SUBNET + subnet_prefix: + ipv4: + ip: + value: "{{ network.ip }}" + prefix_length: "{{ network.prefix }}" + 
destination: + address_type: EXTERNAL + protocol_type: ICMP + protocol_parameters: + icmp: + icmp_type: "{{ icmp.type }}" + icmp_code: "{{ icmp.code }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + - result.failed == false + - result.response.vpc_ext_id == "{{pbr.vpc.uuid}}" + - result.response.name == "{{pbr_name}}9" + - result.response.policies.0.policy_action.action_type == "FORWARD" + - result.response.policies.0.policy_action.nexthop_ip_address.ipv4.value == "{{reroute.ip}}" + - result.response.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.policies.0.policy_match.protocol_type == "ICMP" + - result.response.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.priority == {{ priority.9 }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_code == {{ icmp.code }} + - result.response.policies.0.policy_match.protocol_parameters.icmp_type == {{ icmp.type }} + fail_msg: "Unable to Create PBR with vpc, source network, external destination, with forward action and icmp" + success_msg: "Create PBR with vpc, source network, external destination, with forward action and icmp finished successfully" + +- name: Adding PBR external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################################################################## +- name: List pbrs using name filter criteria + ntnx_pbrs_info_v2: + filter: "name eq '{{pbr_name}}9'" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].ext_id == "{{ todelete[-1] }}" + - result.response[0].name == "{{pbr_name}}9" + - result.response[0].vpc_ext_id == "{{pbr.vpc.uuid}}" 
+ - result.response.0.policies.0.policy_action.action_type == "FORWARD" + - result.response.0.policies.0.policy_action.nexthop_ip_address.ipv4.value == "{{reroute.ip}}" + - result.response.0.policies.0.policy_match.destination.address_type == "EXTERNAL" + - result.response.0.policies.0.policy_match.protocol_type == "ICMP" + - result.response.0.policies.0.policy_match.source.address_type == "SUBNET" + - result.response.0.priority == {{ priority.9 }} + - result.response.0.policies.0.policy_match.protocol_parameters.icmp_code == {{ icmp.code }} + - result.response.0.policies.0.policy_match.protocol_parameters.icmp_type == {{ icmp.type }} + fail_msg: " Unable to list pbrs " + success_msg: " PBRs listed successfully " +############################################################################################################################### +- name: Delete all created pbrs + ntnx_pbrs_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.skipped == false + - result.changed == true + - result.msg == "All items completed" + fail_msg: "Unable to delete all pbrs" + success_msg: "All pbrs have been deleted successfully" +############################################################################################################################### +- name: Delete vpc + ntnx_vpcs_v2: + state: absent + ext_id: "{{ pbr.vpc.uuid }}" + register: result + ignore_errors: true + +- name: Delete status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete vpc" + success_msg: "Vpc has been deleted successfully" + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_pbrs_v2/tasks/main.yml b/tests/integration/targets/ntnx_pbrs_v2/tasks/main.yml new file mode 100644 index 000000000..a0bac9880 --- /dev/null +++ 
b/tests/integration/targets/ntnx_pbrs_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_operation.yml + ansible.builtin.import_tasks: "all_operation.yml" diff --git a/tests/integration/targets/ntnx_pbrs_v2/vars/main.yml b/tests/integration/targets/ntnx_pbrs_v2/vars/main.yml new file mode 100644 index 000000000..d6cd4d54b --- /dev/null +++ b/tests/integration/targets/ntnx_pbrs_v2/vars/main.yml @@ -0,0 +1,19 @@ +--- +priority: [205, 206, 207, 208, 209, 210, 211, 212, 213, 214] +network: + ip: 192.168.2.0 + prefix: 24 +protocol: + number: 80 +reroute: + ip: 10.1.3.2 + prefix_length: 32 +tcp: + port: 80 + port_rangelist: [100, 120] +udp: + port: 69 + port_rangelist: [150, 170] +icmp: + code: 3 + type: 3 diff --git a/tests/integration/targets/ntnx_permissions_info/meta/main.yml b/tests/integration/targets/ntnx_permissions_info/meta/main.yml index e4f447d3a..e0985ec29 100644 --- a/tests/integration/targets/ntnx_permissions_info/meta/main.yml +++ b/tests/integration/targets/ntnx_permissions_info/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - prepare_env diff --git a/tests/integration/targets/ntnx_permissions_info/tasks/main.yml b/tests/integration/targets/ntnx_permissions_info/tasks/main.yml index 2280180dd..94d926a05 100644 --- a/tests/integration/targets/ntnx_permissions_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_permissions_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ 
validate_certs }}" block: - - import_tasks: "permissions_info.yml" + - name: Import permissions_info.yml + ansible.builtin.import_tasks: "permissions_info.yml" diff --git a/tests/integration/targets/ntnx_permissions_info/tasks/permissions_info.yml b/tests/integration/targets/ntnx_permissions_info/tasks/permissions_info.yml index 0a91ee46d..8270e8289 100644 --- a/tests/integration/targets/ntnx_permissions_info/tasks/permissions_info.yml +++ b/tests/integration/targets/ntnx_permissions_info/tasks/permissions_info.yml @@ -1,14 +1,15 @@ -- debug: - msg: start testing ntnx_permissions_info +- name: Start testing ntnx_permissions_info + ansible.builtin.debug: + msg: Start testing ntnx_permissions_info ################################################## - name: List all permissions ntnx_permissions_info: register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -19,7 +20,8 @@ ################################################## -- set_fact: +- name: Setting permission name + ansible.builtin.set_fact: test_permission_name: "Create_Playbook" - name: List permissions using filter criteria @@ -27,10 +29,10 @@ filter: name: "{{ test_permission_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -41,7 +43,8 @@ fail_msg: "Unable to list permissions using filter" success_msg: "permission info obtained successfully" -- set_fact: +- name: Setting permission uuid + ansible.builtin.set_fact: test_permission_uuid: "{{result.response.entities.0.metadata.uuid}}" ################################################## @@ -50,10 +53,10 @@ ntnx_permissions_info: permission_uuid: "{{ test_permission_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + 
ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -71,10 +74,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false diff --git a/tests/integration/targets/ntnx_power_actions_v2/aliases b/tests/integration/targets/ntnx_power_actions_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_power_actions_v2/meta/main.yml b/tests/integration/targets/ntnx_power_actions_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_power_actions_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_power_actions_v2/tasks/main.yml b/tests/integration/targets/ntnx_power_actions_v2/tasks/main.yml new file mode 100644 index 000000000..4abcdf345 --- /dev/null +++ b/tests/integration/targets/ntnx_power_actions_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import power_actions.yml + ansible.builtin.import_tasks: "power_actions.yml" diff --git a/tests/integration/targets/ntnx_power_actions_v2/tasks/power_actions.yml b/tests/integration/targets/ntnx_power_actions_v2/tasks/power_actions.yml new file mode 100644 index 000000000..410e798e2 --- /dev/null +++ b/tests/integration/targets/ntnx_power_actions_v2/tasks/power_actions.yml @@ -0,0 +1,861 @@ +--- +- name: Start ntnx_power_actions_v2 tests + ansible.builtin.debug: + msg: Start ntnx_power_actions_v2 tests + +- name: Set random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- 
name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Create VM to test the power actions + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + memory_size_bytes: 4294967296 + disks: + - backing_info: + vm_disk: + disk_size_bytes: 4294967296 + data_source: + reference: + image_reference: + image_ext_id: "{{ ngt_config.image_uuid }}" + disk_address: + bus_type: SCSI + index: 0 + cd_roms: + - disk_address: + bus_type: IDE + index: 0 + nics: + - network_info: + nic_type: "NORMAL_NIC" + vlan_mode: "ACCESS" + subnet: + ext_id: "{{ network.dhcp.uuid }}" + boot_config: + legacy_boot: + boot_order: ["CDROM", "DISK", "NETWORK"] + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.memory_size_bytes == 4294967296 + - result.response.disks[0].backing_info.data_source.reference.image_ext_id == "{{ ngt_config.image_uuid }}" + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 0 + - result.response.cd_roms[0].disk_address.bus_type == "IDE" + - result.response.cd_roms[0].disk_address.index == 0 + - result.response.nics[0].network_info.nic_type == "NORMAL_NIC" + - result.response.nics[0].network_info.vlan_mode == "ACCESS" + - result.response.nics[0].network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + - result.response.boot_config.boot_order[0] == "CDROM" + - result.response.boot_config.boot_order[1] == "DISK" + - result.response.boot_config.boot_order[2] == "NETWORK" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Get VM UUID + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + 
+################################################################################ + +- name: Power off VM + ntnx_vms_power_actions_v2: + state: power_off + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power off Status + ansible.builtin.assert: + that: + - result.changed == false + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' + - result.skipped == true + fail_msg: "Unable to Power off VM " + success_msg: "VM is powered off successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +############################################################################### + +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: 
Get VM info and check if IP address is assigned + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + retries: 60 + delay: 5 + until: result.response.nics[0].network_info.ipv4_info.learned_ip_addresses[0].value is defined + +- name: Get VM info and check if IP address is assigned Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.nics[0].network_info.ipv4_info.learned_ip_addresses[0].value is defined + fail_msg: "Unable to get VM info and check if IP address is assigned " + success_msg: "VM info is retrieved successfully and IP address is assigned " + +# ################################################################################# + +- name: Install NGT in VM + ntnx_vms_ngt_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - "VSS_SNAPSHOT" + - "SELF_SERVICE_RESTORE" + credential: + username: "{{ VM_username }}" + password: "{{ VM_password }}" + reboot_preference: + schedule_type: "IMMEDIATE" + register: result + ignore_errors: true + +- name: Install NGT in VM Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - "'VSS_SNAPSHOT' in result.response.capabilities" + - "'SELF_SERVICE_RESTORE' in result.response.capabilities" + - result.response.capabilities | length == 2 + - result.task_ext_id is defined + fail_msg: "Unable to install NGT in VM " + success_msg: "NGT is installed successfully " + +################################################################################# + +- name: Sleep for 2 minutes until NGT is installed completely + ansible.builtin.pause: + minutes: 2 + +- name: Power off VM + ntnx_vms_power_actions_v2: + state: power_off + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power off Status + ansible.builtin.assert: + 
that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power off VM " + success_msg: "VM is powered off successfully " + +- name: Sleep for 15 seconds until VM is powered off + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +################################################################################ + +- name: Shutdown VM using acpi + ntnx_vms_power_actions_v2: + state: shutdown + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Shutdown Status + ansible.builtin.assert: + that: + - result.changed == false + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' 
+ - result.skipped == true + fail_msg: "Unable to shutdown VM using acpi " + success_msg: "VM is shutdown successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not shutdown " + success_msg: "VM is shutdown successfully " + +################################################################################ + +- name: Shutdown VM using ngt + ntnx_vms_power_actions_v2: + state: guest_shutdown + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Shutdown Status + ansible.builtin.assert: + that: + - result.changed == false + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' + - result.skipped == true + fail_msg: "Unable to shutdown VM using ngt " + success_msg: "VM is shutdown successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not shutdown " + success_msg: "VM is shutdown successfully " + +################################################################################ + +- name: Generate spec for rebooting VM using ngt with check mode + ntnx_vms_power_actions_v2: + state: guest_reboot + ext_id: "0005a7b8-0b0b-4b3b-0000-000000000000" + guest_power_state_transition_config: + should_enable_script_exec: true + should_fail_on_script_failure: false + register: result + ignore_errors: true + check_mode: true + +- name: Generate spec for rebooting VM using ngt with check mode Status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == false + - result.failed == false + - result.response.guest_power_state_transition_config.should_enable_script_exec == true + - result.response.guest_power_state_transition_config.should_fail_on_script_failure == false + fail_msg: "Unable to generate spec for rebooting VM using ngt with check mode " + success_msg: "Spec for rebooting VM using ngt with check mode is generated successfully " + +- name: Rebooting VM using ngt + ntnx_vms_power_actions_v2: + state: guest_reboot + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reboot Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' + - result.skipped == true + fail_msg: "Unable to reboot VM using ngt " + success_msg: "VM is rebooted successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not rebooted " + success_msg: "VM is rebooted successfully " + +################################################################################ + +- name: Rebooting VM using acpi + ntnx_vms_power_actions_v2: + state: reboot + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reboot Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' 
+ - result.skipped == true + fail_msg: "Unable to reboot VM using acpi " + success_msg: "VM is rebooted successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not rebooted " + success_msg: "VM is rebooted successfully " + +################################################################################ + +- name: Force power cycle + ntnx_vms_power_actions_v2: + state: force_power_cycle + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Force power cycle Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' + - result.skipped == true + fail_msg: "Unable to perform force power cycle on VM " + success_msg: "Force power cycle on VM is completed successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not power cycled " + success_msg: "VM is power cycled successfully " + +################################################################################ + +- name: Resetting VM + ntnx_vms_power_actions_v2: + state: reset + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reset Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == 'Nothing to change.' 
+ - result.skipped == true + fail_msg: "Unable to reset VM " + success_msg: "Reset VM is completed successfully " + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not reset " + success_msg: "VM is reset successfully " + +################################################################################ + +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Sleep for 30 seconds before rebooting VM + ansible.builtin.pause: + seconds: 30 + +- name: Rebooting VM using ngt + ntnx_vms_power_actions_v2: + state: guest_reboot + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reboot Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 
'SUCCEEDED' + fail_msg: "Failed to reboot VM using ngt" + success_msg: "Reboot VM using ngt is completed successfully" + +- name: Sleep for 15 seconds until VM is rebooted + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Sleep for 30 seconds before rebooting VM + ansible.builtin.pause: + seconds: 30 + +- name: Rebooting VM using acpi + ntnx_vms_power_actions_v2: + state: reboot + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reboot Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Failed to reboot VM using acpi" + success_msg: "Reboot VM using acpi is completed successfully" + +- name: Sleep for 15 seconds until VM is rebooted + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Resetting VM + ntnx_vms_power_actions_v2: + state: reset + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Reset Status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Failed to reset VM" + success_msg: "Reset VM is completed successfully" + +- name: Sleep for 15 seconds until VM is reset + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Force power cycle + ntnx_vms_power_actions_v2: + state: force_power_cycle + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Force power cycle Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Failed to perform force power cycle on VM" + success_msg: "Force power cycle on VM is completed successfully" + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Sleep for 30 seconds before shutting down VM + ansible.builtin.pause: + seconds: 30 + +- name: Shutdown VM using acpi + ntnx_vms_power_actions_v2: + state: shutdown 
+ ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Shutdown Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Failed to shutdown VM using acpi" + success_msg: "Shutdown VM using acpi is completed successfully" + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + until: result.response.power_state == 'OFF' + retries: 60 + delay: 1 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +################################################################################ + +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Sleep for 20 seconds until VM is powered on + ansible.builtin.pause: + seconds: 20 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not 
powered on " + success_msg: "VM is powered on successfully " + +############################################################################### + +- name: Sleep for 30 seconds before shutting down VM + ansible.builtin.pause: + seconds: 30 + +- name: Generate spec for shutting down VM using ngt with check mode + ntnx_vms_power_actions_v2: + state: guest_shutdown + ext_id: "0005a7b8-0b0b-4b3b-1234-847530000000" + guest_power_state_transition_config: + should_enable_script_exec: false + should_fail_on_script_failure: true + register: result + ignore_errors: true + check_mode: true + +- name: Generate spec for shutting down VM using ngt with check mode Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.guest_power_state_transition_config.should_enable_script_exec == false + - result.response.guest_power_state_transition_config.should_fail_on_script_failure == true + fail_msg: "Unable to generate spec for shutting down VM using ngt with check mode " + success_msg: "Spec for shutting down VM using ngt with check mode is generated successfully " + +- name: Shutdown VM using ngt + ntnx_vms_power_actions_v2: + state: guest_shutdown + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Shutdown Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Failed to shutdown VM using ngt" + success_msg: "Shutdown VM using ngt is completed successfully" + +- name: Sleep for 15 seconds until VM is powered off + ansible.builtin.pause: + seconds: 15 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - 
result.response.power_state == 'OFF' + fail_msg: "VM is not powered off " + success_msg: "VM is powered off successfully " + +################################################################################ + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_projects/tasks/create_project.yml b/tests/integration/targets/ntnx_projects/tasks/create_project.yml index 220cc38b1..f7fb0bce3 100644 --- a/tests/integration/targets/ntnx_projects/tasks/create_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/create_project.yml @@ -1,19 +1,20 @@ -- name: - debug: +- name: Start ntnx_project create tests + ansible.builtin.debug: msg: "Start ntnx_project create tests" - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Define suffix name variable + ansible.builtin.set_fact: suffix_name: "ansible-role-mapping" -- set_fact: +- name: Define project name variables + ansible.builtin.set_fact: project1_name: "{{random_name}}{{suffix_name}}1" project2_name: "{{random_name}}{{suffix_name}}2" - - name: Create Project with minimal spec ntnx_projects: name: "{{project1_name}}" @@ -21,7 +22,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -30,13 +31,14 @@ fail_msg: "Unable to create project with minimal spec" success_msg: "Project with minimal spec created successfully" -- set_fact: +- name: Add project uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ 
result.project_uuid ] }}" ################################################################# - name: Create Project with check mode - check_mode: yes + check_mode: true ntnx_projects: name: "{{project2_name}}" desc: desc-123 @@ -57,7 +59,7 @@ ignore_errors: true - name: Check mode Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -104,12 +106,14 @@ register: result ignore_errors: true -- set_fact: - expected_subnets: ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] +- name: Set expected subnets and users + ansible.builtin.set_fact: + expected_subnets: + ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] expected_users: ["{{ users[0] }}", "{{ users[1] }}"] - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -132,7 +136,8 @@ fail_msg: "Unable to create project with all specifications" success_msg: "Project with all specifications created successfully" -- set_fact: +- name: Add project uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.project_uuid ] }}" ################################################################# @@ -143,7 +148,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - "'Project with given name already exists' in result.msg" @@ -157,7 +162,8 @@ project_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_projects/tasks/delete_project.yml b/tests/integration/targets/ntnx_projects/tasks/delete_project.yml index e1fc273be..6fc65d686 100644 --- a/tests/integration/targets/ntnx_projects/tasks/delete_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/delete_project.yml @@ -1,5 +1,5 @@ -- 
name: - debug: +- name: Start ntnx_project delete tests + ansible.builtin.debug: msg: "Start ntnx_project delete tests" - name: Create Project for delete @@ -9,7 +9,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -22,10 +22,10 @@ project_uuid: "{{ result.project_uuid }}" wait: true register: result - ignore_errors: True + ignore_errors: true - name: Delete Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' diff --git a/tests/integration/targets/ntnx_projects/tasks/main.yml b/tests/integration/targets/ntnx_projects/tasks/main.yml index eddf0a46d..82acf4be3 100644 --- a/tests/integration/targets/ntnx_projects/tasks/main.yml +++ b/tests/integration/targets/ntnx_projects/tasks/main.yml @@ -1,12 +1,17 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_project.yml" - - import_tasks: "delete_project.yml" - - import_tasks: "update_project.yml" - - import_tasks: "projects_with_role_mappings.yml" + - name: Import create_project.yml + ansible.builtin.import_tasks: "create_project.yml" + - name: Import delete_project.yml + ansible.builtin.import_tasks: "delete_project.yml" + - name: Import update_project.yml + ansible.builtin.import_tasks: "update_project.yml" + - name: Import projects_with_role_mappings.yml + ansible.builtin.import_tasks: "projects_with_role_mappings.yml" diff --git a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml 
b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml index 2dd21fe6a..a88a43d6d 100644 --- a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml +++ b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml @@ -1,40 +1,40 @@ -- name: - debug: +- name: Start ntnx_project tests with role mappings + ansible.builtin.debug: msg: "Start ntnx_project tests with role mappings" - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Define suffix name variable + ansible.builtin.set_fact: suffix_name: "ansible-role-mapping" -- set_fact: +- name: Define project name variable + ansible.builtin.set_fact: project1_name: "{{random_name}}{{suffix_name}}1" project2_name: "{{random_name}}{{suffix_name}}2" project3_name: "{{random_name}}{{suffix_name}}3" -- set_fact: - ignore_errors: false - - name: Create Project with min spec ntnx_projects: name: "{{project1_name}}" desc: "project with role mappings" - collaboration: True + collaboration: true role_mappings: - role: - name: "{{roles[0]}}" + name: "{{role_mappings[0]}}" user: uuid: "{{users[0]}}" register: result - ignore_errors: "{{ignore_errors}}" + ignore_errors: true -- set_fact: +- name: Set response acp + ansible.builtin.set_fact: response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -44,17 +44,18 @@ - result.response.status.access_control_policy_list_status | length == 1 - response_acp.filter_list.context_list[0].entity_filter_expression_list[0].left_hand_side.entity_type == "ALL" - response_acp.filter_list.context_list | length == 3 - - response_acp.role_reference.name == "{{roles[0]}}" + - response_acp.role_reference.name 
== "{{role_mappings[0]}}" - response_acp.user_reference_list[0].uuid == "{{users[0]}}" fail_msg: "Unable to create project with minimal spec of role mappings" success_msg: "Project with minimal spec of role mappings created successfully" -- set_fact: +- name: Add project uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.project_uuid ] }}" ################################################################ -- name: Creat project with all specs +- name: Create project with all specs ntnx_projects: name: "{{project2_name}}" desc: desc-123 @@ -78,29 +79,31 @@ - user: uuid: "{{users[0]}}" role: - name: "{{roles[0]}}" + name: "{{role_mappings[0]}}" - user: uuid: "{{users[1]}}" role: - name: "{{roles[1]}}" + name: "{{role_mappings[1]}}" - user: principal_name: "{{new_user}}" directory_service_uuid: "{{directory_service_uuid}}" role: - name: "{{roles[2]}}" + name: "{{role_mappings[2]}}" - user_group: distinguished_name: "{{new_user_group}}" role: - name: "{{roles[3]}}" + name: "{{role_mappings[3]}}" register: result - ignore_errors: "{{ignore_errors}}" + ignore_errors: true -- set_fact: - expected_subnets: ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] +- name: Set expected subnets + ansible.builtin.set_fact: + expected_subnets: + ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -124,14 +127,14 @@ fail_msg: "Unable to create project with all specifications" success_msg: "Project with all specifications created successfully" - -- set_fact: +- name: Add project uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.project_uuid ] }}" -- set_fact: +- name: Set user_group_to_delete + ansible.builtin.set_fact: 
user_group_to_delete: "{{result.response.status.project_status.resources.external_user_group_reference_list[0].uuid}}" - - name: Update Project role mappings and subnets and quotas ntnx_projects: project_uuid: "{{result.project_uuid}}" @@ -148,7 +151,7 @@ limit: 2147483648 - resource_type: MEMORY limit: 2147483648 - collaboration: True + collaboration: true role_mappings: - role: name: "{{acp.role.name}}" @@ -164,54 +167,69 @@ uuid: "{{user_groups[0]}}" register: result - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Set expected subnets + ansible.builtin.set_fact: expected_subnets: ["{{ network.dhcp.uuid }}", "{{ static.uuid }}"] -- set_fact: +- name: Set acp_users + ansible.builtin.set_fact: response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" -- set_fact: - acp_users: ["{{response_acp.user_reference_list[0].uuid}}", "{{response_acp.user_reference_list[1].uuid}}"] -- set_fact: - sorted_acp_users: '{{ acp_users | sort() }}' -- set_fact: +- name: Set acp_users + ansible.builtin.set_fact: + acp_users: + [ + "{{response_acp.user_reference_list[0].uuid}}", + "{{response_acp.user_reference_list[1].uuid}}", + ] +- name: Set sorted acp_users + ansible.builtin.set_fact: + sorted_acp_users: "{{ acp_users | sort() }}" +- name: Set expected_users + ansible.builtin.set_fact: expected_users: ["{{users[0]}}", "{{users[1]}}"] -- set_fact: - expected_users_sorted: '{{ expected_users | sort() }}' -- set_fact: - project_user_reference_list: ["{{result.response.status.project_status.resources.user_reference_list[0].uuid}}", "{{result.response.status.project_status.resources.user_reference_list[1].uuid}}"] -- set_fact: - project_user_references_sorted: '{{ project_user_reference_list|sort() }}' -- set_fact: - expected_quotas: [ - { - "limit": 5, - "resource_type": "VCPUS", - "units": "COUNT", - "value": 0 - }, - { - "limit": 2147483648, - "resource_type": "STORAGE", - "units": "BYTES", - "value": 0 - 
}, - { - "limit": 2147483648, - "resource_type": "MEMORY", - "units": "BYTES", - "value": 0 - } - ] -- set_fact: +- name: Set expected_users_sorted + ansible.builtin.set_fact: + expected_users_sorted: "{{ expected_users | sort() }}" +- name: Set project_user_reference_list + ansible.builtin.set_fact: + project_user_reference_list: + [ + "{{result.response.status.project_status.resources.user_reference_list[0].uuid}}", + "{{result.response.status.project_status.resources.user_reference_list[1].uuid}}", + ] +- name: Set project_user_references_sorted + ansible.builtin.set_fact: + project_user_references_sorted: "{{ project_user_reference_list|sort() }}" +- name: Set expected quotas + ansible.builtin.set_fact: + expected_quotas: + [ + { "limit": 5, "resource_type": "VCPUS", "units": "COUNT", "value": 0 }, + { + "limit": 2147483648, + "resource_type": "STORAGE", + "units": "BYTES", + "value": 0, + }, + { + "limit": 2147483648, + "resource_type": "MEMORY", + "units": "BYTES", + "value": 0, + }, + ] +- name: Set quotas + ansible.builtin.set_fact: quotas: "{{result.response.status.project_status.resources.resource_domain.resources}}" -- set_fact: +- name: Sort quotas and expected quotas + ansible.builtin.set_fact: sorted_quotas: "{{ quotas| sort(attribute='resource_type') }}" sorted_expected_quotas: "{{ expected_quotas | sort(attribute='resource_type') }}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' @@ -254,7 +272,7 @@ limit: 2147483648 - resource_type: MEMORY limit: 2147483648 - collaboration: True + collaboration: true role_mappings: - role: name: "{{acp.role.name}}" @@ -272,18 +290,17 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - "'Nothing to update' in result.msg" fail_msg: "Project update didnt got skipped for update spec same as existing project" success_msg: "Project got skipped successfully for no 
change in spec" - - name: Create project with existing name ntnx_projects: name: "{{project3_name}}" - collaboration: True + collaboration: true role_mappings: - role: name: "{{acp.role.name}}" @@ -296,9 +313,8 @@ register: result ignore_errors: true - - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - "'Project with given name already exists' in result.msg" @@ -312,20 +328,21 @@ project_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] -- name: delete user group +- name: Delete user group ntnx_user_groups: state: absent user_group_uuid: "{{user_group_to_delete}}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false diff --git a/tests/integration/targets/ntnx_projects/tasks/update_project.yml b/tests/integration/targets/ntnx_projects/tasks/update_project.yml index 4d88442d1..4192a6a75 100644 --- a/tests/integration/targets/ntnx_projects/tasks/update_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/update_project.yml @@ -1,21 +1,22 @@ -- name: - debug: +- name: Start ntnx_project update tests + ansible.builtin.debug: msg: "Start ntnx_project update tests" - name: Generate random project_name - set_fact: - random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" -- set_fact: +- name: Define suffix name variable + ansible.builtin.set_fact: suffix_name: "ansible-role-mapping" -- set_fact: - project1_name: "{{random_name}}{{suffix_name}}1" - +- name: Define project name variable + ansible.builtin.set_fact: + project1_name: "{{ random_name }}{{ suffix_name }}1" - 
name: Create Project ntnx_projects: - name: "{{project1_name}}" + name: "{{ project1_name }}" desc: desc-123 subnets: - name: "{{ network.dhcp.name }}" @@ -30,19 +31,20 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: "Unable to create project" success_msg: "Project created successfully" -- set_fact: - todelete: "{{ todelete + [ result.project_uuid ] }}" +- name: Add project uuid to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.project_uuid] }}" ################################################################# - name: Check check mode for update - check_mode: yes + check_mode: true ntnx_projects: project_uuid: "{{ result.project_uuid }}" desc: desc-123-updated @@ -63,7 +65,7 @@ ignore_errors: true - name: Check mode Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -99,14 +101,14 @@ - resource_type: STORAGE limit: 2046 accounts: - - uuid: "{{accounts[0].uuid}}" + - uuid: "{{ accounts[0].uuid }}" vpcs: - - uuid: "{{vpc.uuid}}" + - uuid: "{{ vpc.uuid }}" register: result ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -143,14 +145,14 @@ - resource_type: STORAGE limit: 2046 accounts: - - name: "{{accounts[0].name}}" + - name: "{{ accounts[0].name }}" vpcs: - - uuid: "{{vpc.uuid}}" + - uuid: "{{ vpc.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.changed == false - "'Nothing to update' in result.msg" @@ -165,7 +167,8 @@ project_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_projects_info/tasks/main.yml 
b/tests/integration/targets/ntnx_projects_info/tasks/main.yml index 63f4b1464..82c408051 100644 --- a/tests/integration/targets/ntnx_projects_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_projects_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "projects_info.yml" + - name: Import projects_info.yml + ansible.builtin.import_tasks: "projects_info.yml" diff --git a/tests/integration/targets/ntnx_projects_info/tasks/projects_info.yml b/tests/integration/targets/ntnx_projects_info/tasks/projects_info.yml index bba5652d0..b3cff961d 100644 --- a/tests/integration/targets/ntnx_projects_info/tasks/projects_info.yml +++ b/tests/integration/targets/ntnx_projects_info/tasks/projects_info.yml @@ -1,19 +1,20 @@ -- name: - debug: +- name: Start ntnx_project_info tests + ansible.builtin.debug: msg: "Start ntnx_project_info tests" - name: Generate random project_name - set_fact: + ansible.builtin.set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" -- set_fact: +- name: Set suffix name + ansible.builtin.set_fact: suffix_name: "ansible-role-mapping" -- set_fact: +- name: Set project names + ansible.builtin.set_fact: project1_name: "{{random_name}}{{suffix_name}}1" project2_name: "{{random_name}}{{suffix_name}}2" - - name: Create Project with minimal spec ntnx_projects: name: "{{project1_name}}" @@ -23,19 +24,21 @@ - name: Create Project with role mappings ntnx_projects: name: "{{project2_name}}" - collaboration: True + collaboration: true role_mappings: - role: - name: "{{roles[0]}}" + name: "{{role_mappings[0]}}" user: uuid: 
"{{users[0]}}" register: project_2 ignore_errors: true -- set_fact: +- name: Adding project uuid to todelete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ project_1.project_uuid ] }}" -- set_fact: +- name: Adding project uuid to todelete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ project_2.project_uuid ] }}" ################################################## @@ -45,10 +48,10 @@ filter: name: "{{project1_name}}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.entities[0].status.name == "{{project1_name}}" @@ -60,10 +63,10 @@ - name: List all projects ntnx_projects_info: register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 2 @@ -76,10 +79,10 @@ ntnx_projects_info: project_uuid: "{{ project_2.project_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.name == "{{ project2_name }}" @@ -92,12 +95,12 @@ - name: List project using project uuid criteria including acps ntnx_projects_info: project_uuid: "{{ project_2.project_uuid }}" - include_acps: True + include_acps: true register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.name == "{{ project2_name }}" @@ -113,10 +116,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: "Unable to list projects using length and offset" @@ -129,10 +132,10 @@ sort_attribute: "name" kind: project register: result - ignore_errors: True + ignore_errors: 
true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: "Unable to list projects" @@ -145,7 +148,8 @@ project_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_protection_rules/aliases b/tests/integration/targets/ntnx_protection_rules/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_protection_rules/aliases +++ b/tests/integration/targets/ntnx_protection_rules/aliases @@ -1 +1 @@ -disabled +disabled diff --git a/tests/integration/targets/ntnx_protection_rules/tasks/main.yml b/tests/integration/targets/ntnx_protection_rules/tasks/main.yml index f3dd0eaf9..aaca6047f 100644 --- a/tests/integration/targets/ntnx_protection_rules/tasks/main.yml +++ b/tests/integration/targets/ntnx_protection_rules/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "protection_rules.yml" + - name: Import protection_rules.yml + ansible.builtin.import_tasks: "protection_rules.yml" diff --git a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml index 0c2d9e7ce..f07342c67 100644 --- a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml +++ b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml @@ -1,13 +1,14 @@ --- -- debug: +- name: Start testing protection policies crud tests + 
ansible.builtin.debug: msg: Start testing protection policies crud tests -############################################################### CREATE Protection Policy tests ########################################################################################### +############################################ CREATE Protection Policy tests ######################### - name: Create protection rule with synchronous schedule and check mode - check_mode: yes + check_mode: true ntnx_protection_rules: state: present - wait: True + wait: true name: test-ansible desc: test-ansible-desc protected_categories: @@ -32,7 +33,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -58,7 +59,7 @@ - name: Create protection rule with sync schedule ntnx_protection_rules: state: present - wait: True + wait: true name: test-ansible desc: test-ansible-desc protected_categories: @@ -83,7 +84,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.rule_uuid == result.response.metadata.uuid @@ -106,22 +107,19 @@ fail_msg: "Unable to create protection rule with synchronous schedule" success_msg: "Protection policy with with synchronous schedule created successfully" - - name: Delete created protection policy inorder to avoid conflict in further tests ntnx_protection_rules: state: absent - wait: True + wait: true rule_uuid: "{{ result.rule_uuid }}" register: result - ########################################################################################################################################################## - - name: Create protection rule with async schedule ntnx_protection_rules: state: present - wait: True + wait: true name: test-ansible desc: test-ansible-desc protected_categories: @@ -132,7 +130,7 @@ availability_zone_url: "{{dr.primary_az_url}}" schedules: - source: - availability_zone_url: "{{dr.primary_az_url}}" + 
availability_zone_url: "{{dr.primary_az_url}}" destination: availability_zone_url: "{{dr.recovery_az_url}}" protection_type: ASYNC @@ -161,7 +159,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.rule_uuid == result.response.metadata.uuid @@ -176,26 +174,30 @@ - result.response.status.resources.availability_zone_connectivity_list[0]["source_availability_zone_index"] == 0 - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["snapshot_type"] == "CRASH_CONSISTENT" - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["recovery_point_objective_secs"] == 3600 - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["snapshot_interval_type"] == "HOURLY" - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["multiple"] == 2 + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["snapshot_interval_type"] == "HOURLY" + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["multiple"] == 2 - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["snapshot_type"] == "CRASH_CONSISTENT" - 
result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["recovery_point_objective_secs"] == 3600 - - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 2 - - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["num_snapshots"] == 1 + - result.response.status.resources.availability_zone_connectivity_list[1]\ + ["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 2 + - result.response.status.resources.availability_zone_connectivity_list[1]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["num_snapshots"] == 1 - result.response.status.resources.availability_zone_connectivity_list[1]["destination_availability_zone_index"] == 0 - result.response.status.resources.availability_zone_connectivity_list[1]["source_availability_zone_index"] == 1 fail_msg: "Unable to create protection rule with asynchronous schedule" success_msg: "Protection policy with with asynchronous schedule created successfully" - ############################################################## UPDATE Protection Policy Tests ################################################################## - name: Update previously created protection policy ntnx_protection_rules: state: present - wait: True + wait: true rule_uuid: "{{result.rule_uuid}}" name: test-ansible-updated desc: test-ansible-desc-updated @@ -206,7 +208,7 @@ availability_zone_url: "{{dr.primary_az_url}}" schedules: - source: - availability_zone_url: "{{dr.primary_az_url}}" + availability_zone_url: "{{dr.primary_az_url}}" destination: availability_zone_url: "{{dr.recovery_az_url}}" protection_type: ASYNC @@ -235,7 +237,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.rule_uuid == 
result.response.metadata.uuid @@ -250,25 +252,29 @@ - result.response.status.resources.availability_zone_connectivity_list[0]["source_availability_zone_index"] == 0 - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["snapshot_type"] == "APPLICATION_CONSISTENT" - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["recovery_point_objective_secs"] == 172800 - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["snapshot_interval_type"] == "YEARLY" - - result.response.status.resources.availability_zone_connectivity_list[0]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["multiple"] == 2 + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["snapshot_interval_type"] == "YEARLY" + - result.response.status.resources.availability_zone_connectivity_list[0]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["rollup_retention_policy"]["multiple"] == 2 - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["snapshot_type"] == "APPLICATION_CONSISTENT" - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["recovery_point_objective_secs"] == 172800 - - 
result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 - - result.response.status.resources.availability_zone_connectivity_list[1]["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["num_snapshots"] == 2 + - result.response.status.resources.availability_zone_connectivity_list[1]\ + ["snapshot_schedule_list"][0]["local_snapshot_retention_policy"]["num_snapshots"] == 1 + - result.response.status.resources.availability_zone_connectivity_list[1]\ + ["snapshot_schedule_list"][0]["remote_snapshot_retention_policy"]["num_snapshots"] == 2 - result.response.status.resources.availability_zone_connectivity_list[1]["destination_availability_zone_index"] == 0 - result.response.status.resources.availability_zone_connectivity_list[1]["source_availability_zone_index"] == 1 - fail_msg: "Unable to update protection rule with asynchronous schedule" success_msg: "Protection policy with with asynchronous schedule updated successfully" -################################################################################################################################################################## +#################################################################################### - name: Idempotency Check ntnx_protection_rules: state: present - wait: True + wait: true rule_uuid: "{{result.rule_uuid}}" name: test-ansible-updated desc: test-ansible-desc-updated @@ -279,7 +285,7 @@ availability_zone_url: "{{dr.primary_az_url}}" schedules: - source: - availability_zone_url: "{{dr.primary_az_url}}" + availability_zone_url: "{{dr.primary_az_url}}" destination: availability_zone_url: "{{dr.recovery_az_url}}" protection_type: ASYNC @@ -307,8 +313,8 @@ num_snapshots: 2 register: temp_result -- name: idempotency check status - assert: +- name: Idempotency check status + ansible.builtin.assert: that: - temp_result.changed == False - temp_result.failed == False @@ -317,21 +323,20 @@ 
fail_msg: "Idempotency check failed" success_msg: "Idempotency check passed" -################################################################################################################################################################## - +#################################################################################### - name: Check Mode while update - check_mode: yes + check_mode: true ntnx_protection_rules: state: present - wait: True + wait: true rule_uuid: "{{result.rule_uuid}}" name: test-ansible-updated-check-mode desc: test-ansible-desc-updated register: temp_result - name: Check mode Status - assert: + ansible.builtin.assert: that: - temp_result.response is defined - temp_result.changed == false @@ -339,18 +344,17 @@ fail_msg: "Unable to generate update spec using check mode" success_msg: "Protection policy update spec generated successfully" - ############################################################## DELETE Protection Policy Tests ################################################################## - name: Delete created protection policy ntnx_protection_rules: state: absent - wait: True + wait: true rule_uuid: "{{ result.rule_uuid }}" register: result -- name: delete Status - assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - result.changed == True diff --git a/tests/integration/targets/ntnx_protection_rules_info/aliases b/tests/integration/targets/ntnx_protection_rules_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_protection_rules_info/aliases +++ b/tests/integration/targets/ntnx_protection_rules_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_protection_rules_info/tasks/main.yml b/tests/integration/targets/ntnx_protection_rules_info/tasks/main.yml index 255ab0f69..9ca06bf3e 100644 --- a/tests/integration/targets/ntnx_protection_rules_info/tasks/main.yml +++ 
b/tests/integration/targets/ntnx_protection_rules_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "rules_info.yml" + - name: Import rules_info.yml + ansible.builtin.import_tasks: "rules_info.yml" diff --git a/tests/integration/targets/ntnx_protection_rules_info/tasks/rules_info.yml b/tests/integration/targets/ntnx_protection_rules_info/tasks/rules_info.yml index f4eb5e680..ae1fc3f71 100644 --- a/tests/integration/targets/ntnx_protection_rules_info/tasks/rules_info.yml +++ b/tests/integration/targets/ntnx_protection_rules_info/tasks/rules_info.yml @@ -1,11 +1,12 @@ -- debug: - msg: start testing ntnx_protection_rules_info +- name: Start testing ntnx_protection_rules_info + ansible.builtin.debug: + msg: Start testing ntnx_protection_rules_info ################################################## - name: Create protection rule with async schedule ntnx_protection_rules: state: present - wait: True + wait: true name: test-ansible-info-1 desc: test-ansible-desc-1 protected_categories: @@ -15,7 +16,7 @@ availability_zone_url: "{{dr.primary_az_url}}" schedules: - source: - availability_zone_url: "{{dr.primary_az_url}}" + availability_zone_url: "{{dr.primary_az_url}}" destination: availability_zone_url: "{{dr.recovery_az_url}}" protection_type: ASYNC @@ -46,7 +47,7 @@ - name: Create protection rule with async schedule ntnx_protection_rules: state: present - wait: True + wait: true name: test-ansible-info-2 desc: test-ansible-desc-2 protected_categories: @@ -56,7 +57,7 @@ availability_zone_url: "{{dr.primary_az_url}}" schedules: - source: - availability_zone_url: "{{dr.primary_az_url}}" + 
availability_zone_url: "{{dr.primary_az_url}}" destination: availability_zone_url: "{{dr.recovery_az_url}}" protection_type: ASYNC @@ -89,10 +90,10 @@ - name: List all Protection rules ntnx_protection_rules_info: register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -100,9 +101,9 @@ fail_msg: "Unable to list all protection rules" success_msg: "protection rule info obtained successfully" -- set_fact: +- name: Set test rule name and uuid + ansible.builtin.set_fact: test_rule_name: "{{result.response.entities.1.status.name}}" -- set_fact: test_rule_uuid: "{{result.response.entities.1.metadata.uuid}}" ################################################## @@ -111,10 +112,10 @@ ntnx_protection_rules_info: rule_uuid: "{{ test_rule_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -133,10 +134,10 @@ filter: name: "{{ test_rule_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -154,10 +155,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -171,13 +172,13 @@ - name: Delete created protection policy ntnx_protection_rules: state: absent - wait: True + wait: true rule_uuid: "{{ rule_1.rule_uuid }}" register: result - name: Delete created protection policy ntnx_protection_rules: state: absent - wait: True + wait: true rule_uuid: "{{ rule_2.rule_uuid }}" register: result diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs/aliases 
b/tests/integration/targets/ntnx_recovery_plans_and_jobs/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs/aliases +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml index 6a35bc2c2..4669f2ae0 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml @@ -1,173 +1,172 @@ --- -- debug: +- name: Start testing recovery plan and recovery plan jobs + ansible.builtin.debug: msg: Start testing recovery plan and recovery plan jobs -############################################################### CREATE Recovery Plan ########################################################################################### - -- set_fact: - expected_availability_zone_list: [ +################################################## CREATE Recovery Plan ####################################### + +- name: Define variables for recovery plan + ansible.builtin.set_fact: + expected_availability_zone_list: + [ + { "availability_zone_url": "{{dr.primary_az_url}}" }, + { "availability_zone_url": "{{dr.recovery_az_url}}" }, + ] + expected_network_mapping_list_for_check_mode: + [ + { + "are_networks_stretched": true, + "availability_zone_network_mapping_list": + [ + { + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": { "name": "{{network.dhcp.name}}" }, + "test_network": { "name": "{{network.dhcp.name}}" }, + }, + { + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + expected_network_mapping_list: + [ + { + "are_networks_stretched": false, + 
"availability_zone_network_mapping_list": + [ { - "availability_zone_url": "{{dr.primary_az_url}}" + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip2}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "recovery_network": + { + "name": "{{network.dhcp.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip1}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "test_network": + { + "name": "{{network.dhcp.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, }, { - "availability_zone_url": "{{dr.recovery_az_url}}" - } - ] - expected_network_mapping_list_for_check_mode: [ - { - "are_networks_stretched": True, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{network.dhcp.name}}" - }, - "test_network": { - "name": "{{network.dhcp.name}}" - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - expected_network_mapping_list: [ - { - "are_networks_stretched": False, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip2}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "recovery_network": { - "name": 
"{{network.dhcp.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip1}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "test_network": { - "name": "{{network.dhcp.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip2}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "recovery_network": { - "name": "{{dr.recovery_site_network}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip1}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "test_network": { - "name": "{{dr.recovery_site_network}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - } - } - ] - } - ] - expected_stage_work_0: { - "recover_entities": { - "entity_info_list": [ - { - "any_entity_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - }, - "script_list": [ - { - "enable_script_exec": true - } - ] - } - ] - } - } + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip2}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + 
}, + }, + ], + "recovery_network": + { + "name": "{{dr.recovery_site_network}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip1}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "test_network": + { + "name": "{{dr.recovery_site_network}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + }, + ], + }, + ] + expected_stage_work_0: + { + "recover_entities": + { + "entity_info_list": + [ + { + "any_entity_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + "script_list": [{ "enable_script_exec": true }], + }, + ], + }, + } - name: Create checkmode spec for recovery plan with networks and 2 stage - check_mode: yes + check_mode: true ntnx_recovery_plans: state: "present" name: test-integration-rp @@ -195,9 +194,8 @@ name: "{{dr.recovery_site_network}}" register: result - - name: Checkmode spec assert - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed is false @@ -209,8 +207,8 @@ - result.response.spec.resources.stage_list[0]["stage_work"] == expected_stage_work_0 - result.response.spec.resources.parameters.availability_zone_list == expected_availability_zone_list - result.response.spec.resources.parameters.network_mapping_list == expected_network_mapping_list_for_check_mode - fail_msg: 'Unable to create recovery plan check mode spec' - success_msg: 'Recovery plan check mode spec created successfully' + fail_msg: "Unable to create recovery plan check mode spec" + success_msg: "Recovery plan check mode spec created successfully" - name: Create recovery plan with networks and 2 stage ntnx_recovery_plans: @@ -265,11 +263,12 @@ register: result 
-- set_fact: +- name: Set plan_uuid + ansible.builtin.set_fact: plan_uuid: "{{result.plan_uuid}}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed is true @@ -284,138 +283,128 @@ - result.response.status.resources.stage_list[0]["stage_work"] == expected_stage_work_0 - result.response.status.resources.parameters.availability_zone_list == expected_availability_zone_list - result.response.status.resources.parameters.network_mapping_list == expected_network_mapping_list - fail_msg: 'Unable to create recovery plans' - success_msg: 'Recovery plan created successfully' - -############################################################### Update Recovery Plan ########################################################################################### - -- set_fact: - expected_availability_zone_list: [ + fail_msg: "Unable to create recovery plans" + success_msg: "Recovery plan created successfully" + +############# Update Recovery Plan ########################### + +- name: Define variables for update recovery plan + ansible.builtin.set_fact: + expected_availability_zone_list: + [ + { "availability_zone_url": "{{dr.primary_az_url}}" }, + { "availability_zone_url": "{{dr.recovery_az_url}}" }, + ] + expected_network_mapping_list_in_check_mode: + [ + { + "are_networks_stretched": false, + "availability_zone_network_mapping_list": + [ + { + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + }, + { + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + 
expected_network_mapping_list: + [ + { + "are_networks_stretched": false, + "availability_zone_network_mapping_list": + [ { - "availability_zone_url": "{{dr.primary_az_url}}" + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, }, { - "availability_zone_url": "{{dr.recovery_az_url}}" - } - ] - expected_network_mapping_list_in_check_mode: [ - { - "are_networks_stretched": false, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - expected_network_mapping_list: [ - { - "are_networks_stretched": false, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": 
"{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - exepected_stage_work_0: { - "recover_entities": { - "entity_info_list": [ - { - "any_entity_reference": { - "kind": "vm", - "name": "{{dr_vm.name}}", - "uuid": "{{dr_vm.uuid}}" - }, - "script_list": [ - { - "enable_script_exec": true - } - ] - }, - { - "categories": { - "Environment": "Staging" - }, - "script_list": [ - { - "enable_script_exec": true - } - ] - } - ] - } - } - exepected_stage_work_1: { - "recover_entities": { - "entity_info_list": [ - { - "categories": { - "Environment": "Dev" - } - } - ] - } - } + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + exepected_stage_work_0: + { + "recover_entities": + { + "entity_info_list": + [ + { + "any_entity_reference": + { + "kind": "vm", + "name": "{{dr_vm.name}}", + "uuid": "{{dr_vm.uuid}}", + }, + "script_list": [{ "enable_script_exec": true }], + }, + { + "categories": { "Environment": "Staging" }, + "script_list": [{ "enable_script_exec": true }], + }, + ], + }, + } + exepected_stage_work_1: + { + "recover_entities": + { "entity_info_list": [{ "categories": { "Environment": "Dev" } }] }, + } - name: Checkmode spec for Update recovery plan. Update networks and stages. 
- check_mode: yes + check_mode: true ntnx_recovery_plans: plan_uuid: "{{plan_uuid}}" state: "present" @@ -456,8 +445,8 @@ register: result -- name: check mode spec for Update status - assert: +- name: Check mode spec for Update status + ansible.builtin.assert: that: - result.response is defined - result.changed is false @@ -470,9 +459,8 @@ - result.response.spec.resources.stage_list[1]["stage_work"] == exepected_stage_work_1 - result.response.spec.resources.stage_list[0]["delay_time_secs"] == 2 - fail_msg: 'Unable to create update recovery plan checkmode spec' - success_msg: 'Recovery plan update spec created successfully' - + fail_msg: "Unable to create update recovery plan checkmode spec" + success_msg: "Recovery plan update spec created successfully" - name: Update recovery plan. Add another stage, vm and update networks. ntnx_recovery_plans: @@ -515,7 +503,7 @@ register: recovery_plan - name: Update status - assert: + ansible.builtin.assert: that: - recovery_plan.response is defined - recovery_plan.changed is true @@ -530,9 +518,8 @@ - recovery_plan.response.status.resources.stage_list[1]["stage_work"] == exepected_stage_work_1 - recovery_plan.response.status.resources.stage_list[0]["delay_time_secs"] == 2 - fail_msg: 'Unable to updae recovery plans' - success_msg: 'Recovery plan updated successfully' - + fail_msg: "Unable to updae recovery plans" + success_msg: "Recovery plan updated successfully" - name: Idempotency Check ntnx_recovery_plans: @@ -575,8 +562,8 @@ register: result -- name: idempotency check status - assert: +- name: Idempotency check status + ansible.builtin.assert: that: - result.changed == False - result.failed == False @@ -585,8 +572,7 @@ fail_msg: "Idempotency check failed" success_msg: "Idempotency check passed" -############################################################### Run Recovery Plan Jobs########################################################################################### - +######################################### Run 
Recovery Plan Jobs############################################ - name: Run Test Failover with validation errors for checking negative scenario. It will fail in validation phase ntnx_recovery_plan_jobs: @@ -605,8 +591,8 @@ register: result -- name: assert job status - assert: +- name: Assert job status + ansible.builtin.assert: that: - result.changed == false - result.failed == true @@ -632,9 +618,8 @@ register: test_failover_job - -- name: assert job status - assert: +- name: Assert job status + ansible.builtin.assert: that: - test_failover_job.changed == true - test_failover_job.failed == false @@ -649,7 +634,6 @@ fail_msg: "Test failover job failed" success_msg: "Test failover job run successfully" - - name: Run Cleanup ntnx_recovery_plan_jobs: job_uuid: "{{test_failover_job.job_uuid}}" @@ -658,9 +642,8 @@ action: CLEANUP register: result - -- name: assert job status - assert: +- name: Assert job status + ansible.builtin.assert: that: - result.changed == true - result.failed == false @@ -672,7 +655,7 @@ fail_msg: "Cleanup job failed" success_msg: "Cleanup job run successfully" -############################################################### Delete Recovery Plan Test########################################################################################### +################################## Delete Recovery Plan Test######################################## - name: Delete recovery plan ntnx_recovery_plans: @@ -680,8 +663,8 @@ state: "absent" register: result -- name: delete Status - assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - result.changed == True diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/main.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/main.yml index 2369b4b0b..075f510f4 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/main.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/main.yml @@ -1,9 +1,11 @@ --- -- 
module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "crud.yml" + - name: Import crud.yml + ansible.builtin.import_tasks: "crud.yml" diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/aliases b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/aliases index 7a68b11da..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/aliases +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/aliases @@ -1 +1 @@ -disabled +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/info.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/info.yml index 654b50050..f5bd86be6 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/info.yml @@ -1,5 +1,6 @@ -- debug: - msg: start testing ntnx_recovery_plans_info +- name: Start testing ntnx_recovery_plans_info + ansible.builtin.debug: + msg: Start testing ntnx_recovery_plans_info ################################################## - name: Create recovery plan with network @@ -67,10 +68,10 @@ - name: List all Recovery plans ntnx_recovery_plans_info: register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -78,9 +79,9 @@ fail_msg: "Unable to list all recovery plan" success_msg: "recovery plans info obtained successfully" -- set_fact: +- name: Setting recovery plan name and uuid + ansible.builtin.set_fact: 
test_plan_name: "{{result.response.entities.1.status.name}}" -- set_fact: test_plan_uuid: "{{result.response.entities.1.metadata.uuid}}" ################################################## @@ -89,10 +90,10 @@ ntnx_recovery_plans_info: plan_uuid: "{{ test_plan_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -111,10 +112,10 @@ filter: name: "{{ test_plan_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -132,10 +133,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -146,8 +147,9 @@ ################################################## Recovery plan jobs info test ######################################################### -- debug: - msg: start testing ntnx_recovery_plan_jobs_info +- name: Start testing ntnx_recovery_plan_jobs_info + ansible.builtin.debug: + msg: Start testing ntnx_recovery_plan_jobs_info - name: Run validation job, eventually will fail due to some tech issues but can be used for info tests. 
ntnx_recovery_plan_jobs: @@ -161,6 +163,7 @@ recovery_site: url: "{{dr.recovery_az_url}}" action: VALIDATE + register: result ignore_errors: true no_log: true @@ -186,10 +189,10 @@ ntnx_recovery_plan_jobs_info: nutanix_host: "{{recovery_site_ip}}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -197,11 +200,12 @@ fail_msg: "Unable to list all recovery plan jobs" success_msg: "recovery plans jobs info obtained successfully" -- set_fact: +- name: Setting recovery plan job name and uuid + ansible.builtin.set_fact: test_job_name: "{{result.response.entities.1.status.name}}" -- set_fact: test_job_uuid: "{{result.response.entities.1.metadata.uuid}}" -- set_fact: +- name: Setting recovery plan job name + ansible.builtin.set_fact: test_job_name_1: "{{ test_job.error.status.name }}" ################################################## @@ -211,10 +215,10 @@ nutanix_host: "{{recovery_site_ip}}" job_uuid: "{{ test_job_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -233,10 +237,10 @@ filter: name: "{{ test_job_name_1 }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -255,10 +259,10 @@ length: 2 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -267,19 +271,18 @@ fail_msg: "Unable to list recovery plan jobs using length and offset" success_msg: "recovery plan jobs listed successfully using length and offset" - ################################################## Delete recovery plans 
######################################################### - name: Delete created recovery plans ntnx_recovery_plans: state: absent - wait: True + wait: true plan_uuid: "{{ recovery_plan1.plan_uuid }}" register: result - name: Delete created recovery plans ntnx_recovery_plans: state: absent - wait: True + wait: true plan_uuid: "{{ recovery_plan2.plan_uuid }}" register: result diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/main.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/main.yml index 3364b30c6..74e773fce 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_recovery_points_v2/aliases b/tests/integration/targets/ntnx_recovery_points_v2/aliases new file mode 100644 index 000000000..87e7bdaae --- /dev/null +++ b/tests/integration/targets/ntnx_recovery_points_v2/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_recovery_points_v2/meta/main.yml b/tests/integration/targets/ntnx_recovery_points_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_recovery_points_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_recovery_points_v2/tasks/main.yml 
b/tests/integration/targets/ntnx_recovery_points_v2/tasks/main.yml new file mode 100644 index 000000000..f890c32f1 --- /dev/null +++ b/tests/integration/targets/ntnx_recovery_points_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import recovery_points.yml + ansible.builtin.import_tasks: recovery_points.yml diff --git a/tests/integration/targets/ntnx_recovery_points_v2/tasks/recovery_points.yml b/tests/integration/targets/ntnx_recovery_points_v2/tasks/recovery_points.yml new file mode 100644 index 000000000..431b5f7a5 --- /dev/null +++ b/tests/integration/targets/ntnx_recovery_points_v2/tasks/recovery_points.yml @@ -0,0 +1,1085 @@ +--- +- name: Start recovery points related tests + ansible.builtin.debug: + msg: Start recovery points related tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set recovery point name suffix + ansible.builtin.set_fact: + prefix_name: ansible_test + +- name: Set recovery and VM names and todelete lists + ansible.builtin.set_fact: + vm_todelete: [] + recovery_point_todelete: [] + vg_todelete: [] + recovery_point_name: "{{ prefix_name }}_{{ random_name }}" + vm_name: "{{ prefix_name }}_{{ random_name }}_vm" + vg_name: "{{ prefix_name }}_{{ random_name }}_vg" + +- name: Get current time in ISO-8601 format and set expiration time after a month + ansible.builtin.set_fact: + expiration_time: '{{ lookup(''pipe'', ''date -d "+1 month" +%Y-%m-%dT%H:%M:%S%:z'') }}' + +######################################################################################################## + +- name: Create first VM + ntnx_vms_v2: + name: "{{ vm_name }}_1" + description: ansible test + cluster: 
+ ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}_1" + - result.response.description == "ansible test" + fail_msg: " Unable to Create first VM " + success_msg: "first VM is created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + vm_todelete: '{{ vm_todelete + [result["ext_id"]] }}' + vm_ext_id_1: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create second VM + ntnx_vms_v2: + name: "{{ vm_name }}_2" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}_2" + - result.response.description == "ansible test" + fail_msg: " Unable to Create second VM " + success_msg: "second VM is created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + vm_todelete: '{{ vm_todelete + [result["ext_id"]] }}' + vm_ext_id_2: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create first Volume group + ntnx_volume_groups_v2: + name: "{{ vg_name }}_1" + description: "Volume group" + cluster_reference: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Verify creation of VG + ansible.builtin.assert: + that: + - result.error == None + - result.ext_id is defined + - result.task_ext_id is defined + - result.response is defined + - result.changed == true + - 
result.ext_id == result.response.ext_id + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.name == "{{ vg_name }}_1" + - result.response.description == "Volume group" + - result.response.should_load_balance_vm_attachments == false + - result.response.sharing_status == None + fail_msg: "Unable to create VG" + success_msg: "VG created successfully" + +- name: Adding VG to todelete list + ansible.builtin.set_fact: + vg_todelete: '{{ vg_todelete + [result["ext_id"]] }}' + vg_ext_id_1: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create second Volume group + ntnx_volume_groups_v2: + name: "{{ vg_name }}_2" + description: "Volume group" + cluster_reference: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Verify creation of VG + ansible.builtin.assert: + that: + - result.error == None + - result.ext_id is defined + - result.task_ext_id is defined + - result.response is defined + - result.changed == true + - result.ext_id == result.response.ext_id + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.name == "{{ vg_name }}_2" + - result.response.description == "Volume group" + - result.response.should_load_balance_vm_attachments == false + - result.response.sharing_status == None + fail_msg: "Unable to create VG" + success_msg: "VG created successfully" + +- name: Adding VG to todelete list + ansible.builtin.set_fact: + vg_todelete: '{{ vg_todelete + [result["ext_id"]] }}' + vg_ext_id_2: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create VM recovery point with check mode enabled + ntnx_recovery_points_v2: + name: "Recovery Point" + expiration_time: "2024-09-30T14:15:22+00:00" + recovery_point_type: "APPLICATION_CONSISTENT" + vm_recovery_points: + - vm_ext_id: 15a06f9e-1371-4486-99e5-0d27855ee7ba 
+ application_consistent_properties: + application_consistent_properties_spec: + backup_type: "FULL_BACKUP" + should_include_writers: true + writers: + - 0f95b402-67aa-431c-9eab-bf0907a99345 + - 87e74c5f-2a51-48f5-a74a-ab121923f6d8 + should_store_vss_metadata: true + volume_group_recovery_points: + - volume_group_ext_id: 6aeec7b5-6ab6-4eb6-acf9-cf1e8b14a0b8 + check_mode: true + register: result + ignore_errors: true + +- name: Create VM recovery point with check mode enabled status + ansible.builtin.assert: + that: + - result.changed == false + - result.response is defined + - result.response.name == "Recovery Point" + - result.response.expiration_time == "2024-09-30T14:15:22+00:00" + - result.response.recovery_point_type == "APPLICATION_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id == "15a06f9e-1371-4486-99e5-0d27855ee7ba" + - result.response.vm_recovery_points[0].application_consistent_properties.backup_type == "FULL_BACKUP" + - result.response.vm_recovery_points[0].application_consistent_properties.should_include_writers == true + - > + result.response.vm_recovery_points[0].application_consistent_properties.writers == + ["0f95b402-67aa-431c-9eab-bf0907a99345", "87e74c5f-2a51-48f5-a74a-ab121923f6d8"] + - result.response.vm_recovery_points[0].application_consistent_properties.should_store_vss_metadata == true + - result.response.volume_group_recovery_points[0].volume_group_ext_id == "6aeec7b5-6ab6-4eb6-acf9-cf1e8b14a0b8" + fail_msg: "Unable to create VM recovery point with check mode enabled" + success_msg: "VM recovery point created successfully with check mode enabled" + +######################################################################################################## + +- name: Create VM recovery point + ntnx_recovery_points_v2: + name: "{{ recovery_point_name }}_1" + expiration_time: "{{ expiration_time }}" + recovery_point_type: "CRASH_CONSISTENT" + vm_recovery_points: + - vm_ext_id: "{{ vm_ext_id_1 }}" + register: result + 
ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Create VM recovery point status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.response is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{ recovery_point_name }}_1" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id == "{{ vm_ext_id_1 }}" + fail_msg: "Unable to create VM recovery point" + success_msg: "VM recovery point created successfully" + +- name: Add recovery point to todelete list + ansible.builtin.set_fact: + recovery_point_todelete: '{{ recovery_point_todelete + [result["ext_id"]] }}' + recovery_point_ext_id_1: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create VM recovery point with multiple VMs + ntnx_recovery_points_v2: + name: "{{ recovery_point_name }}_2" + expiration_time: "{{ expiration_time }}" + recovery_point_type: "CRASH_CONSISTENT" + vm_recovery_points: + - vm_ext_id: "{{ vm_ext_id_1 }}" + - vm_ext_id: "{{ vm_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Create VM recovery point with multiple VMs status + ansible.builtin.assert: + that: + - result.changed == true + - 
result.ext_id is defined + - result.response is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{ recovery_point_name }}_2" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id in "{{ vm_todelete[0:2] }}" + - result.response.vm_recovery_points[1].vm_ext_id in "{{ vm_todelete[0:2] }}" + fail_msg: "Unable to create VM recovery point with multiple VMs" + success_msg: "VM recovery point created successfully with multiple VMs" + +- name: Add recovery point to todelete list + ansible.builtin.set_fact: + recovery_point_todelete: '{{ recovery_point_todelete + [result["ext_id"]] }}' + recovery_point_ext_id_2: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create VG recovery point + ntnx_recovery_points_v2: + name: "{{ recovery_point_name }}_3" + expiration_time: "{{ expiration_time }}" + recovery_point_type: "CRASH_CONSISTENT" + volume_group_recovery_points: + - volume_group_ext_id: "{{ vg_ext_id_1 }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Create VG recovery point status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.response is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{ recovery_point_name }}_3" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - 
result.response.volume_group_recovery_points[0].volume_group_ext_id == "{{ vg_ext_id_1 }}" + fail_msg: "Unable to create VG recovery point" + success_msg: "VG recovery point created successfully" + +- name: Add recovery point to todelete list + ansible.builtin.set_fact: + recovery_point_todelete: '{{ recovery_point_todelete + [result["ext_id"]] }}' + recovery_point_ext_id_3: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create VG recovery point with multiple VGs + ntnx_recovery_points_v2: + name: "{{ recovery_point_name }}_4" + expiration_time: "{{ expiration_time }}" + recovery_point_type: "CRASH_CONSISTENT" + volume_group_recovery_points: + - volume_group_ext_id: "{{ vg_ext_id_1 }}" + - volume_group_ext_id: "{{ vg_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Create VG recovery point with multiple VGs status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.response is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{ recovery_point_name }}_4" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.volume_group_recovery_points[0].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + - result.response.volume_group_recovery_points[1].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + fail_msg: "Unable to create VG recovery point with multiple VGs" + success_msg: "VG recovery point created successfully with multiple VGs" + +- name: Add recovery point to todelete 
list + ansible.builtin.set_fact: + recovery_point_todelete: '{{ recovery_point_todelete + [result["ext_id"]] }}' + recovery_point_ext_id_4: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Create recovery point with multiple VMs and VGs + ntnx_recovery_points_v2: + name: "{{ recovery_point_name }}_5" + expiration_time: "{{ expiration_time }}" + recovery_point_type: "CRASH_CONSISTENT" + vm_recovery_points: + - vm_ext_id: "{{ vm_ext_id_1 }}" + - vm_ext_id: "{{ vm_ext_id_2 }}" + volume_group_recovery_points: + - volume_group_ext_id: "{{ vg_ext_id_1 }}" + - volume_group_ext_id: "{{ vg_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Create recovery point with multiple VMs and VGs status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id is defined + - result.response is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{ recovery_point_name }}_5" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id in "{{ vm_todelete[0:2] }}" + - result.response.vm_recovery_points[1].vm_ext_id in "{{ vm_todelete[0:2] }}" + - result.response.volume_group_recovery_points[0].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + - result.response.volume_group_recovery_points[1].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + - result.task_ext_id is defined + fail_msg: "Unable to create recovery point with multiple VMs and VGs" + success_msg: "Recovery point created successfully 
with multiple VMs and VGs" + +- name: Add recovery point to todelete list + ansible.builtin.set_fact: + recovery_point_todelete: '{{ recovery_point_todelete + [result["ext_id"]] }}' + recovery_point_ext_id_5: '{{ result["ext_id"] }}' + +######################################################################################################## + +- name: Get current time in ISO-8601 format and set updated expiration time after two months + ansible.builtin.set_fact: + expiration_time_updated: '{{ lookup(''pipe'', ''date -d "+2 month" +%Y-%m-%dT%H:%M:%S%:z'') }}' + +- name: Update recovery point expiration time with check mode enabled + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + expiration_time: "2024-09-30T14:15:22+00:00" + check_mode: true + register: result + ignore_errors: true + +- name: Update recovery point expiration time with check mode enabled status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response.expiration_time == "2024-09-30T14:15:22+00:00" + fail_msg: "Unable to update recovery point expiration time with check mode enabled" + success_msg: "Recovery point expiration time updated successfully with check mode enabled" + +######################################################################################################## + +- name: Update recovery point expiration time + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + expiration_time: "{{ expiration_time_updated }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time_updated ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Update recovery 
point expiration time status + ansible.builtin.assert: + that: + - result.changed == true + - result.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_1 }}" + - actual_expiration_time == expected_expiration_time + fail_msg: "Unable to update recovery point expiration time" + success_msg: "Recovery point expiration time updated successfully" + +####################################################################################################### + +- name: Check idempotency by updating recovery point expiration time with the same value + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + expiration_time: "{{ expiration_time_updated }}" + register: result + ignore_errors: true + +- name: Check idempotency by updating recovery point expiration time with the same value status + ansible.builtin.assert: + that: + - result.skipped == true + - result.response is defined + - result.msg == "Nothing to change." 
+ fail_msg: "Idempotency check failed" + success_msg: "Idempotency check passed" + +####################################################################################################### + +- name: Get current time in ISO-8601 format and set updated expiration time after three months + ansible.builtin.set_fact: + expiration_time_updated: '{{ lookup(''pipe'', ''date -d "+3 month" +%Y-%m-%dT%H:%M:%S%:z'') }}' + +- name: Check if expiration time is updated but name is the same + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + expiration_time: "{{ expiration_time_updated }}" + name: "{{ recovery_point_name }}_updated" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time_updated ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Check if expiration time is updated but name is the same status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response.ext_id == "{{ recovery_point_ext_id_1 }}" + - actual_expiration_time == expected_expiration_time + - result.response.name == "{{ recovery_point_name }}_1" + - result.warning == "Only Expiration time Updation is allowed. Can't update other fields."
+ fail_msg: "Check if expiration time is updated but name is the same failed" + success_msg: "Check if expiration time is updated but name is the same passed" + +####################################################################################################### + +- name: Check that name is changed but expiration time is the same + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + expiration_time: "{{ expiration_time_updated }}" + name: "{{ recovery_point_name }}_updated" + register: result + ignore_errors: true + +- name: Check that name is changed but expiration time is the same status + ansible.builtin.assert: + that: + - result.changed == false + - result.msg == "Update of other operations is not supported. Only updation of Expiration time is allowed." + fail_msg: "Updated name even though expiration time is the same" + success_msg: "Did not update name as expiration time is the same" + +####################################################################################################### + +- name: Check if Expiration time is not passed for update + ntnx_recovery_points_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + register: result + ignore_errors: true + +- name: Check if Expiration time is not passed for update status + ansible.builtin.assert: + that: + - result.changed == false + - result.error == "Expiration time is required for updating recovery point and other fields can't be updated."
+ - result.msg == "Expiration time is required" + fail_msg: "Expiration time is not passed for update test failed" + success_msg: "Expiration time is not passed for update test passed" + +####################################################################################################### + +- name: Get recovery point details and set recovery point restore external ID for one VM + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + register: result + ignore_errors: true + +- name: Get recovery point details status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response.name == "{{ recovery_point_name }}_1" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id == "{{ vm_ext_id_1 }}" + - result.response.vm_recovery_points[0].ext_id is defined + fail_msg: "Unable to get recovery point details" + success_msg: "Recovery point details fetched successfully" + +- name: Set recovery point restore external ID + ansible.builtin.set_fact: + vm_restore_ext_id: "{{ result.response.vm_recovery_points[0].ext_id }}" + +- name: Restore recovery point for one VM + ntnx_recovery_point_restore_v2: + ext_id: "{{ recovery_point_ext_id_1 }}" + cluster_ext_id: "{{ cluster.uuid }}" + vm_recovery_point_restore_overrides: + - vm_recovery_point_ext_id: "{{ vm_restore_ext_id }}" + register: result + ignore_errors: true + +- name: Add Newly created VM to todelete list + ansible.builtin.set_fact: + vm_todelete: '{{ vm_todelete + [result["vms_ext_ids"]] }}' + +- name: Restore recovery point for one VM status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response.completion_details | length == 1 + - result.response.entities_affected | 
length == 1 + - result.response.entities_affected[0].ext_id == "{{ recovery_point_ext_id_1 }}" + - result.response.status == "SUCCEEDED" + - result.task_ext_id is defined + fail_msg: "Unable to restore VM recovery point for one VM" + success_msg: "recovery point for one VM is restored successfully" + +######################################################################################################## +# Testing the restore module for multiple VMs +- name: Get recovery point details and set recovery point restore external ID for multiple VMs + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Get recovery point details status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_2 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_2 }}" + - result.response.name == "{{ recovery_point_name }}_2" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points[0].vm_ext_id in "{{ vm_todelete[0:2] }}" + - result.response.vm_recovery_points[1].vm_ext_id in "{{ vm_todelete[0:2] }}" + - result.response.vm_recovery_points[0].ext_id is defined + - result.response.vm_recovery_points[1].ext_id is defined + fail_msg: "Unable to get recovery point details" + success_msg: "Recovery point details fetched successfully" + +- name: Set recovery point restore external ID + ansible.builtin.set_fact: + vm_restore_ext_id_1: "{{ result.response.vm_recovery_points[0].ext_id }}" + vm_restore_ext_id_2: "{{ result.response.vm_recovery_points[1].ext_id }}" + +- name: Restore recovery point for multiple VMs + ntnx_recovery_point_restore_v2: + ext_id: "{{ recovery_point_ext_id_2 }}" + cluster_ext_id: "{{ cluster.uuid }}" + vm_recovery_point_restore_overrides: + - vm_recovery_point_ext_id: "{{ vm_restore_ext_id_1 }}" + - vm_recovery_point_ext_id: "{{ vm_restore_ext_id_2 }}" + register: 
result + ignore_errors: true + +- name: Define new VMs ext ids + ansible.builtin.set_fact: + vms_ext_ids: "{{ result.vms_ext_ids.split(',') }}" + +- name: Add Newly created VMs to todelete list + ansible.builtin.set_fact: + vm_todelete: "{{ vm_todelete + vms_ext_ids }}" + +- name: Restore recovery point for multiple VMs status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_2 }}" + - result.vms_ext_ids.split(',') | length == 2 + - result.response.status == "SUCCEEDED" + - result.task_ext_id is defined + fail_msg: "Unable to restore VM recovery point for multiple VMs" + success_msg: "recovery point for multiple VMs is restored successfully" + +######################################################################################################## + +- name: Get recovery point details and set recovery point restore external ID for one VG + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_3 }}" + register: result + ignore_errors: true + +- name: Get recovery point details status + + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_3 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_3 }}" + - result.response.name == "{{ recovery_point_name }}_3" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.volume_group_recovery_points[0].volume_group_ext_id == "{{ vg_ext_id_1 }}" + - result.response.volume_group_recovery_points[0].ext_id is defined + fail_msg: "Unable to get recovery point details" + success_msg: "Recovery point details fetched successfully" + +- name: Set recovery point restore external ID + ansible.builtin.set_fact: + vg_restore_ext_id: "{{ result.response.volume_group_recovery_points[0].ext_id }}" + +- name: Restore recovery point for one VG + ntnx_recovery_point_restore_v2: + ext_id: "{{ recovery_point_ext_id_3 }}" + 
cluster_ext_id: "{{ cluster.uuid }}" + volume_group_recovery_point_restore_overrides: + - volume_group_recovery_point_ext_id: "{{ vg_restore_ext_id }}" + register: result + ignore_errors: true + +- name: Add Newly created VG to todelete list + ansible.builtin.set_fact: + vg_todelete: '{{ vg_todelete + [result["vgs_ext_ids"]] }}' + +- name: Restore recovery point for one VG status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_3 }}" + - result.response.completion_details | length == 1 + - result.response.entities_affected | length == 1 + - result.response.entities_affected[0].ext_id == "{{ recovery_point_ext_id_3 }}" + - result.response.status == "SUCCEEDED" + - result.task_ext_id is defined + fail_msg: "Unable to restore VG recovery point for one VG" + success_msg: "recovery point for one VG is restored successfully" + +######################################################################################################## + +- name: Get recovery point details and set recovery point restore external ID for multiple VGs + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_4 }}" + register: result + ignore_errors: true + +- name: Get recovery point details status + ansible.builtin.assert: + that: + - result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_4 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_4 }}" + - result.response.name == "{{ recovery_point_name }}_4" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.volume_group_recovery_points[0].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + - result.response.volume_group_recovery_points[1].volume_group_ext_id in "{{ vg_todelete[0:2] }}" + - result.response.volume_group_recovery_points[0].ext_id is defined + - result.response.volume_group_recovery_points[1].ext_id is defined + fail_msg: "Unable to get recovery point 
details" + success_msg: "Recovery point details fetched successfully" + +- name: Set recovery point restore external ID + ansible.builtin.set_fact: + vg_restore_ext_id_1: "{{ result.response.volume_group_recovery_points[0].ext_id }}" + vg_restore_ext_id_2: "{{ result.response.volume_group_recovery_points[1].ext_id }}" + +- name: Restore recovery point for multiple VGs + ntnx_recovery_point_restore_v2: + ext_id: "{{ recovery_point_ext_id_4 }}" + cluster_ext_id: "{{ cluster.uuid }}" + volume_group_recovery_point_restore_overrides: + - volume_group_recovery_point_ext_id: "{{ vg_restore_ext_id_1 }}" + - volume_group_recovery_point_ext_id: "{{ vg_restore_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Define new VGs ext ids + ansible.builtin.set_fact: + vgs_ext_ids: "{{ result.vgs_ext_ids.split(',') }}" + +- name: Add Newly created VGs to todelete list + ansible.builtin.set_fact: + vg_todelete: "{{ vg_todelete + vgs_ext_ids }}" + +- name: Restore recovery point for multiple VGs status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_4 }}" + - result.response.completion_details[0].value.split(',') | length == 2 + - result.response.entities_affected | length == 1 + - result.response.entities_affected[0].ext_id == "{{ recovery_point_ext_id_4 }}" + - result.response.status == "SUCCEEDED" + - result.task_ext_id is defined + fail_msg: "Unable to restore VG recovery point for multiple VGs" + success_msg: "recovery point for multiple VGs is restored successfully" + +######################################################################################################## + +- name: Get recovery point details and set recovery point restore external ID for multiple VMs and VGs + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_5 }}" + register: result + ignore_errors: true + +- name: Get recovery point details status + ansible.builtin.assert: + that: + - 
result.changed == false + - result.ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response.name == "{{ recovery_point_name }}_5" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points | length == 2 + - result.response.volume_group_recovery_points | length == 2 + - result.response.vm_recovery_points[0].vm_ext_id in "{{ vm_todelete }}" + - result.response.vm_recovery_points[1].vm_ext_id in "{{ vm_todelete }}" + - result.response.volume_group_recovery_points[0].volume_group_ext_id in "{{ vg_todelete }}" + - result.response.volume_group_recovery_points[1].volume_group_ext_id in "{{ vg_todelete }}" + - result.response.vm_recovery_points[0].ext_id is defined + - result.response.vm_recovery_points[1].ext_id is defined + - result.response.volume_group_recovery_points[0].ext_id is defined + - result.response.volume_group_recovery_points[1].ext_id is defined + fail_msg: "Unable to get recovery point details" + success_msg: "Recovery point details fetched successfully" + +- name: Set recovery point restore external ID + ansible.builtin.set_fact: + vm_restore_ext_id_1: "{{ result.response.vm_recovery_points[0].ext_id }}" + vm_restore_ext_id_2: "{{ result.response.vm_recovery_points[1].ext_id }}" + vg_restore_ext_id_1: "{{ result.response.volume_group_recovery_points[0].ext_id }}" + vg_restore_ext_id_2: "{{ result.response.volume_group_recovery_points[1].ext_id }}" + +- name: Restore recovery point for multiple VMs and VGs + ntnx_recovery_point_restore_v2: + ext_id: "{{ recovery_point_ext_id_5 }}" + cluster_ext_id: "{{ cluster.uuid }}" + vm_recovery_point_restore_overrides: + - vm_recovery_point_ext_id: "{{ vm_restore_ext_id_1 }}" + - vm_recovery_point_ext_id: "{{ vm_restore_ext_id_2 }}" + volume_group_recovery_point_restore_overrides: + - volume_group_recovery_point_ext_id: "{{ vg_restore_ext_id_1 }}" + - 
volume_group_recovery_point_ext_id: "{{ vg_restore_ext_id_2 }}" + register: result + ignore_errors: true + +- name: Define new VMs ext ids + ansible.builtin.set_fact: + vms_ext_ids: "{{ result.vms_ext_ids.split(',') }}" + +- name: Define new VGs ext ids + ansible.builtin.set_fact: + vgs_ext_ids: "{{ result.vgs_ext_ids.split(',') }}" + +- name: Add Newly created VMs to todelete list + ansible.builtin.set_fact: + vm_todelete: "{{ vm_todelete + vms_ext_ids }}" + +- name: Add Newly created VGs to todelete list + ansible.builtin.set_fact: + vg_todelete: "{{ vg_todelete + vgs_ext_ids }}" + +- name: Restore recovery point for multiple VMs and VGs status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response.completion_details | length == 2 + - result.response.entities_affected | length == 1 + - result.response.entities_affected[0].ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response.status == "SUCCEEDED" + - result.task_ext_id is defined + fail_msg: "Unable to restore recovery point for multiple VMs and VGs" + success_msg: "recovery point for multiple VMs and VGs is restored successfully" + +######################################################################################################## + +- name: Replicate Recovery point + ntnx_recovery_point_replicate_v2: + ext_id: "{{ recovery_point_ext_id_5 }}" + pc_ext_id: "{{ pc_uuid }}" + cluster_ext_id: "{{ cluster_availability_zone.uuid }}" + register: result + ignore_errors: true + +- name: Replicate Recovery point status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.response.cluster_ext_ids[0] == "{{ cluster.uuid }}" + - result.response.entities_affected | length >= 2 + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to replicate recovery point" + success_msg: "Recovery point replicated successfully" + 
+######################################################################################################## + +- name: Get recovery point details + ntnx_recovery_points_info_v2: + ext_id: "{{ recovery_point_ext_id_5 }}" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response.expiration_time ~ ' +%s') | int }}" + +- name: Get recovery point details status + ansible.builtin.assert: + that: + - result.changed == false + - result.response is defined + - result.response.ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response.name == "{{ recovery_point_name }}_5" + - actual_expiration_time == expected_expiration_time + - result.response.status == "COMPLETE" + - result.response.recovery_point_type == "CRASH_CONSISTENT" + - result.response.vm_recovery_points | length == 2 + - result.response.volume_group_recovery_points | length == 2 + - result.response.vm_recovery_points[0].vm_ext_id in "{{ vm_todelete }}" + - result.response.vm_recovery_points[1].vm_ext_id in "{{ vm_todelete }}" + - result.response.volume_group_recovery_points[0].volume_group_ext_id in "{{ vg_todelete }}" + - result.response.volume_group_recovery_points[1].volume_group_ext_id in "{{ vg_todelete }}" + fail_msg: "Unable to get recovery point details" + success_msg: "Recovery point details fetched successfully" + +######################################################################################################## + +- name: Get a VM recovery point details + ntnx_vm_recovery_point_info_v2: + recovery_point_ext_id: "{{ recovery_point_ext_id_5 }}" + vm_recovery_point_ext_id: "{{ result.response.vm_recovery_points[0].ext_id }}" + register: result + ignore_errors: true + +- name: Get a VM recovery point details status + ansible.builtin.assert: + that: + - 
result.changed == false + - result.ext_id is defined + - result.ext_id == "{{ vm_restore_ext_id_1 }}" + - result.response is defined + - result.recovery_point_ext_id == "{{ recovery_point_ext_id_5 }}" + - result.response.ext_id == "{{ vm_restore_ext_id_1 }}" + - result.response.vm_ext_id == "{{ vm_ext_id_1 }}" + fail_msg: "Unable to get VM recovery point details" + success_msg: "VM recovery point details fetched successfully" + +######################################################################################################## + +- name: Revert a VM recovery point + ntnx_vm_revert_v2: + ext_id: "{{ vm_ext_id_1 }}" + vm_recovery_point_ext_id: "{{ vm_restore_ext_id }}" + register: result + ignore_errors: true + +- name: Revert a VM recovery point status + ansible.builtin.assert: + that: + - result.changed == true + - result.response is defined + - result.response.ext_id is defined + - result.response.completion_details | length == 1 + - result.response.entities_affected | length == 2 + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to revert VM recovery point" + success_msg: "VM recovery point reverted successfully" + +######################################################################################################## + +- name: Get recovery point using wrong external ID + ntnx_recovery_points_info_v2: + ext_id: "0dc3fd69-dc3c-4812-a8d3-9a77a20a0981" + register: result + ignore_errors: true + +- name: Get recovery point using wrong external ID status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == true + - result.response is defined + - result.response.data.error | length > 0 + - result.status == 404 + fail_msg: "Get recovery point using wrong external ID failed" + success_msg: "Get recovery point using wrong external ID passed" + +######################################################################################################## + +- name: List all recovery points + ntnx_recovery_points_info_v2: + 
register: result + ignore_errors: true + +- name: List all recovery points status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response | length > 0 + fail_msg: "List all recovery points failed" + success_msg: "List all recovery points passed" + +######################################################################################################## + +- name: List all recovery points with filter + ntnx_recovery_points_info_v2: + filter: "name eq '{{ recovery_point_name }}_1'" + register: result + ignore_errors: true + +- name: Set actual and expected expiration time in seconds + ansible.builtin.set_fact: + actual_expiration_time: "{{ lookup('pipe', 'date -d ' ~ expiration_time_updated ~ ' +%s') | int }}" + expected_expiration_time: "{{ lookup('pipe', 'date -d ' ~ result.response[0].expiration_time ~ ' +%s') | int }}" + +- name: List all recovery points with filter status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response | length == 1 + - result.response[0].name == "{{ recovery_point_name }}_1" + - actual_expiration_time == expected_expiration_time + fail_msg: "List all recovery points with filter failed" + success_msg: "List all recovery points with filter passed" + +######################################################################################################## + +- name: List all recovery points with limit + ntnx_recovery_points_info_v2: + limit: 2 + register: result + ignore_errors: true + +- name: List all recovery points with limit status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response | length == 2 + fail_msg: "List all recovery points with limit failed" + success_msg: "List all recovery points with limit passed" + 
+######################################################################################################## + +- name: Delete all Created recovery points + ntnx_recovery_points_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ recovery_point_todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.response is defined + - item.changed == True + - item.failed == False + - item.ext_id == "{{ recovery_point_todelete[recovery_point_index] }}" + fail_msg: "Unable to delete recovery point " + success_msg: "Recovery point is deleted successfully " + loop: "{{ result.results }}" + loop_control: + index_var: recovery_point_index + +######################################################################################################## + +- name: Delete all Created Volume Groups + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ vg_todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.response is defined + - item.changed == True + - item.failed == False + - item.ext_id == "{{ vg_todelete[vgs_index] }}" + fail_msg: "Unable to delete Volume Group " + success_msg: "Volume Group is deleted successfully " + loop: "{{ result.results }}" + loop_control: + index_var: vgs_index + +######################################################################################################## + +- name: Delete all Created VMs + ntnx_vms_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ vm_todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.response is defined + - item.changed == True + - 
item.failed == False + - item.ext_id == "{{ vm_todelete[vms_index] }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " + loop: "{{ result.results }}" + loop_control: + index_var: vms_index + +- name: Reset to delete lists + ansible.builtin.set_fact: + vm_todelete: [] + vg_todelete: [] + recovery_point_todelete: [] diff --git a/tests/integration/targets/ntnx_roles/tasks/create.yml b/tests/integration/targets/ntnx_roles/tasks/create.yml index 541965519..767d48b6f 100644 --- a/tests/integration/targets/ntnx_roles/tasks/create.yml +++ b/tests/integration/targets/ntnx_roles/tasks/create.yml @@ -1,13 +1,15 @@ --- -- debug: - msg: start ntnx_roles create tests +- name: Start ntnx_roles create tests + ansible.builtin.debug: + msg: Start ntnx_roles create tests - name: Get Some permissions for test ntnx_permissions_info: length: 3 register: result -- set_fact: +- name: Setting permission variables + ansible.builtin.set_fact: test_permission_1_name: "{{ result.response.entities[0].status.name }}" test_permission_1_uuid: "{{ result.response.entities[0].metadata.uuid }}" test_permission_2_name: "{{ result.response.entities[1].status.name }}" @@ -28,25 +30,30 @@ wait: true register: result -- set_fact: +- name: Set permission variables + ansible.builtin.set_fact: p1: "{{ result.response.status.resources.permission_reference_list[0].uuid }}" p2: "{{ result.response.status.resources.permission_reference_list[1].uuid }}" - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.role_uuid is defined - result.response.status.state == 'COMPLETE' - - result.changed == True + - result.changed == true - result.response.status.name == "test-ansible-role-1" - result.response.status.description == "test-ansible-role-1-desc" - - ("{{ p1 }}" == "{{ test_permission_1_uuid }}" and "{{ p2 }}" == "{{ test_permission_2_uuid }}") or ("{{ p2 }}" == "{{ test_permission_1_uuid }}" and "{{ p1 }}" == "{{ 
test_permission_2_uuid }}") - + - > + ( + ("{{ p1 }}" == "{{ test_permission_1_uuid }}" and "{{ p2 }}" == "{{ test_permission_2_uuid }}") or + ("{{ p2 }}" == "{{ test_permission_1_uuid }}" and "{{ p1 }}" == "{{ test_permission_2_uuid }}") + ) fail_msg: "Unable to create roles with certain permissions" success_msg: "Roles with given permissions created susccessfully" -- set_fact: +- name: Set role uuid to delete later + ansible.builtin.set_fact: todelete: '{{ result["response"]["metadata"]["uuid"] }}' ################################################################################################### @@ -60,10 +67,10 @@ - uuid: "{{ test_permission_2_uuid }}" wait: true register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == "Role with given name already exists" - result.changed == False @@ -73,7 +80,7 @@ ################################################################################################### - name: Check mode test - check_mode: yes + check_mode: true ntnx_roles: state: present name: test-ansible-role-2 @@ -85,7 +92,7 @@ register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -99,14 +106,13 @@ ################################################################################################### - -- name: cleanup created entities +- name: Cleanup created entities ntnx_roles: state: absent role_uuid: "{{ todelete }}" register: result - ignore_errors: True - + ignore_errors: true -- set_fact: +- name: Reset todelete variable + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_roles/tasks/delete.yml b/tests/integration/targets/ntnx_roles/tasks/delete.yml index 3d4f00410..939cabfa1 100644 --- a/tests/integration/targets/ntnx_roles/tasks/delete.yml +++ b/tests/integration/targets/ntnx_roles/tasks/delete.yml @@ -1,13 +1,15 @@ --- -- debug: - msg: start ntnx_roles 
delete tests +- name: Start ntnx_roles delete tests + ansible.builtin.debug: + msg: Start ntnx_roles delete tests - name: Get Some permissions for test ntnx_permissions_info: length: 3 register: result -- set_fact: +- name: Setting permission variables + ansible.builtin.set_fact: test_permission_1_uuid: "{{ result.response.entities[0].metadata.uuid }}" ############################################################################################## @@ -23,7 +25,7 @@ register: test_role - name: Creation Status - assert: + ansible.builtin.assert: that: - test_role.response is defined - test_role.changed == True @@ -32,14 +34,14 @@ ################################################################################################### -- name: delete role +- name: Delete role ntnx_roles: state: absent role_uuid: "{{ test_role.role_uuid }}" register: result -- name: delete Status - assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - result.changed == True diff --git a/tests/integration/targets/ntnx_roles/tasks/main.yml b/tests/integration/targets/ntnx_roles/tasks/main.yml index a2c7a07b0..65a6e2952 100644 --- a/tests/integration/targets/ntnx_roles/tasks/main.yml +++ b/tests/integration/targets/ntnx_roles/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "update.yml" - - import_tasks: "delete.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" + - name: Import update.yml + ansible.builtin.import_tasks: "update.yml" + - name: Import delete.yml + ansible.builtin.import_tasks: "delete.yml" diff --git 
a/tests/integration/targets/ntnx_roles/tasks/update.yml b/tests/integration/targets/ntnx_roles/tasks/update.yml index 16644d37e..e16d5a3e5 100644 --- a/tests/integration/targets/ntnx_roles/tasks/update.yml +++ b/tests/integration/targets/ntnx_roles/tasks/update.yml @@ -1,13 +1,15 @@ --- -- debug: - msg: start ntnx_roles update tests +- name: Start ntnx_roles update tests + ansible.builtin.debug: + msg: Start ntnx_roles update tests - name: Get Some permissions for test ntnx_permissions_info: length: 3 register: result -- set_fact: +- name: Setting permission variables + ansible.builtin.set_fact: test_permission_1_name: "{{ result.response.entities[0].status.name }}" test_permission_1_uuid: "{{ result.response.entities[0].metadata.uuid }}" test_permission_2_name: "{{ result.response.entities[1].status.name }}" @@ -29,14 +31,13 @@ register: test_role - name: Creation Status - assert: + ansible.builtin.assert: that: - test_role.response is defined - - test_role.changed == True + - test_role.changed == true fail_msg: "Unable to create roles with certain permissions" success_msg: "Roles with given permissions created susccessfully" - ################################################################################################### - name: Update all fields @@ -51,12 +52,12 @@ register: result - name: Update status - assert: + ansible.builtin.assert: that: - result.response is defined - result.role_uuid is defined - result.response.status.state == 'COMPLETE' - - result.changed == True + - result.changed == true - result.response.status.name == "test-ansible-role-3-updated" - result.response.status.description == "test-ansible-role-3-desc-updated" - result.response.status.resources.permission_reference_list[0]["uuid"] == "{{ test_permission_3_uuid }}" @@ -78,8 +79,8 @@ wait: true register: result -- name: idempotency check status - assert: +- name: Idempotency check status + ansible.builtin.assert: that: - result.changed == False - result.failed == False @@ -91,7 +92,7 
@@ ################################################################################################### - name: Check mode test - check_mode: yes + check_mode: true ntnx_roles: state: present role_uuid: "{{test_role.role_uuid}}" @@ -102,8 +103,8 @@ wait: true register: result -- name: check mode Status - assert: +- name: Check mode Status + ansible.builtin.assert: that: - result.response is defined - result.changed == False @@ -117,9 +118,9 @@ ################################################################################################### -- name: cleanup created entities +- name: Cleanup created entities ntnx_roles: state: absent role_uuid: "{{ test_role.role_uuid }}" register: result - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/ntnx_roles_info/tasks/main.yml b/tests/integration/targets/ntnx_roles_info/tasks/main.yml index 99faaf32c..faf9ff944 100644 --- a/tests/integration/targets/ntnx_roles_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_roles_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "roles_info.yml" + - name: Import roles_info.yml + ansible.builtin.import_tasks: "roles_info.yml" diff --git a/tests/integration/targets/ntnx_roles_info/tasks/roles_info.yml b/tests/integration/targets/ntnx_roles_info/tasks/roles_info.yml index 176a97f46..322581042 100644 --- a/tests/integration/targets/ntnx_roles_info/tasks/roles_info.yml +++ b/tests/integration/targets/ntnx_roles_info/tasks/roles_info.yml @@ -1,15 +1,16 @@ -- debug: - msg: start testing ntnx_roles_info +- name: Start testing ntnx_roles_info + ansible.builtin.debug: + msg: 
Start testing ntnx_roles_info ################################################## - name: List roles ntnx_roles_info: length: 2 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.metadata.total_matches > 0 @@ -17,9 +18,9 @@ fail_msg: "Unable to list all roles" success_msg: "roles info obtained successfully" -- set_fact: +- name: Setting role variables + ansible.builtin.set_fact: test_role_name: "{{result.response.entities.1.status.name}}" -- set_fact: test_role_uuid: "{{result.response.entities.1.metadata.uuid}}" ################################################## @@ -28,10 +29,10 @@ ntnx_roles_info: role_uuid: "{{ test_role_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -48,10 +49,10 @@ filter: name: "{{ test_role_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -69,10 +70,10 @@ length: 1 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false diff --git a/tests/integration/targets/ntnx_roles_v2/aliases b/tests/integration/targets/ntnx_roles_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_roles_v2/meta/main.yml b/tests/integration/targets/ntnx_roles_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_roles_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_roles_v2/tasks/main.yml b/tests/integration/targets/ntnx_roles_v2/tasks/main.yml new file mode 
100644 index 000000000..9c2081e29 --- /dev/null +++ b/tests/integration/targets/ntnx_roles_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import roles_operations.yml + ansible.builtin.import_tasks: roles_operations.yml diff --git a/tests/integration/targets/ntnx_roles_v2/tasks/roles_operations.yml b/tests/integration/targets/ntnx_roles_v2/tasks/roles_operations.yml new file mode 100644 index 000000000..e2f6334a7 --- /dev/null +++ b/tests/integration/targets/ntnx_roles_v2/tasks/roles_operations.yml @@ -0,0 +1,303 @@ +--- +- name: Start ntnx_roles_v2 tests + ansible.builtin.debug: + msg: start ntnx_roles_v2 tests + +- name: Get create operations + ntnx_operations_info_v2: + filter: displayName startswith 'Create_' + register: create_operations + ignore_errors: true + +- name: Define variables + ansible.builtin.set_fact: + test_operation_1_uuid: "{{ create_operations.response[0].ext_id }}" + test_operation_2_uuid: "{{ create_operations.response[1].ext_id }}" + test_operation_3_uuid: "{{ create_operations.response[2].ext_id }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Generate random role name + ansible.builtin.set_fact: + raw_role_1_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" + raw_role_2_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" + +- name: Ensure Pattern Compliance + ansible.builtin.set_fact: + role_1_name: role_name_{{ raw_role_1_name | regex_replace('[^a-zA-Z0-9]', '') }} + role_2_name: role_name_{{ raw_role_2_name | regex_replace('[^a-zA-Z0-9]', '') }} + +################################################################################################### + +- name: Create roles with operations in 
check mode + ntnx_roles_v2: + state: present + display_name: "{{ role_2_name }}" + description: test-ansible-role-1-desc + operations: + - "{{ test_operation_1_uuid }}" + - "{{ test_operation_2_uuid }}" + wait: true + register: result + check_mode: true + ignore_errors: true + +- name: Create roles with operations in check mode Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.response.display_name == "{{ role_2_name }}" + - result.response.description == "test-ansible-role-1-desc" + - result.response.operations[0] == "{{ test_operation_1_uuid }}" + - result.response.operations[1] == "{{ test_operation_2_uuid }}" + + fail_msg: Create roles with operations in check mode failed + success_msg: Create roles with operations in check mode passed + +############################################################################################## + +- name: Create roles with operations + ntnx_roles_v2: + state: present + display_name: "{{ role_1_name }}" + description: test-ansible-role-1-desc + operations: + - "{{ test_operation_1_uuid }}" + - "{{ test_operation_2_uuid }}" + wait: true + register: result + ignore_errors: true + +- name: Set p1 and p2 variables + ansible.builtin.set_fact: + p1: "{{ result.response.operations[0] }}" + p2: "{{ result.response.operations[1] }}" + +- name: Create roles with operations Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == True + - result.failed == False + - result.response.display_name == "{{ role_1_name }}" + - result.response.description == "test-ansible-role-1-desc" + - result.response.operations | length == 2 + - p1 in result.response.operations + - p2 in result.response.operations + + fail_msg: Unable to create roles with certain operations + success_msg: Roles with given operations created successfully + +- name: Adding role external ID to delete list + ansible.builtin.set_fact: + todelete: '{{ 
result["response"]["ext_id"] }}' + +################################################################################################### + +- name: Check if role with existing name fails or not + ntnx_roles_v2: + state: present + display_name: "{{ role_1_name }}" + operations: + - "{{ test_operation_1_uuid }}" + - "{{ test_operation_2_uuid }}" + wait: true + register: result + ignore_errors: true + +- name: Check if role with existing name fails or not Status + ansible.builtin.assert: + that: + - result.status == 409 + - result.changed == False + - result.failed == true + fail_msg: Was able to create role with existing role name + success_msg: Roles with duplicate role name failed successfully + +################################################################################################### + +- name: Update roles in check mode + ntnx_roles_v2: + state: present + ext_id: "{{ todelete }}" + display_name: "{{ role_2_name }}" + description: test-ansible-role-2-desc + operations: + - "{{ test_operation_2_uuid }}" + wait: true + register: result + check_mode: true + ignore_errors: true + +- name: Update roles in check mode Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.ext_id is defined + - result.ext_id == "{{ todelete }}" + - result.response.display_name == "{{ role_2_name }}" + - result.response.description == "test-ansible-role-2-desc" + - result.response.operations[0] == "{{ test_operation_2_uuid }}" + - result.response.operations | length == 1 + fail_msg: Update roles in check mode failed + success_msg: Update roles in check mode passed + +################################################################################################### + +- name: Update roles + ntnx_roles_v2: + state: present + ext_id: "{{ todelete }}" + display_name: "{{ role_2_name }}" + description: test-ansible-role-3-desc-updated + operations: + - "{{ test_operation_3_uuid }}" + wait: true + register: result + ignore_errors: 
true + +- name: Update roles status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.ext_id == "{{ todelete }}" + - result.changed == True + - result.response.display_name == "{{ role_2_name }}" + - result.response.description == "test-ansible-role-3-desc-updated" + - result.response.operations[0] == "{{ test_operation_3_uuid }}" + - result.response.operations | length == 1 + + fail_msg: Unable to update role + success_msg: Roles with given operations updated successfully + +################################################################################################### + +- name: Test idempotency by updating roles with same values + ntnx_roles_v2: + state: present + ext_id: "{{ todelete }}" + display_name: "{{ role_2_name }}" + description: test-ansible-role-3-desc-updated + operations: + - "{{ test_operation_3_uuid }}" + wait: true + register: result + ignore_errors: true + +- name: Idempotency check status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.msg == "Nothing to change." 
+ + fail_msg: Idempotency check failed + success_msg: Idempotency check passed + +################################################################################################### + +- name: List all roles + ntnx_roles_info_v2: + register: result + ignore_errors: true + +- name: List all roles Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length > 0 + fail_msg: Unable to list all roles + success_msg: roles info obtained successfully + +- name: Set test role name + ansible.builtin.set_fact: + test_role_name: "{{result.response.0.display_name}}" +- name: Set test role uuid + ansible.builtin.set_fact: + test_role_uuid: "{{result.response.0.ext_id}}" + +################################################################################################### + +- name: Fetch role using external ID + ntnx_roles_info_v2: + ext_id: "{{ test_role_uuid }}" + register: result + ignore_errors: true + +- name: Fetch role using external ID Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.display_name == "{{ test_role_name }}" + - result.response.ext_id == "{{ test_role_uuid }}" + fail_msg: Fetch role using external ID failed + success_msg: Fetch role using external ID passed + +################################################################################################### + +- name: List roles using limit + ntnx_roles_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List roles using limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: Unable to list roles using limit + success_msg: roles listed successfully using limit + +################################################################################################### + +- name: List roles using filter + 
ntnx_roles_info_v2: + filter: displayName eq '{{ test_role_name }}' + register: result + ignore_errors: true + +- name: List roles using filter Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].display_name == '{{ test_role_name }}' + - result.response[0].ext_id == "{{ test_role_uuid }}" + - result.response | length == 1 + fail_msg: List roles using filter failed + success_msg: roles listed successfully using filter + +################################################################################################### + +- name: Delete roles + ntnx_roles_v2: + state: absent + ext_id: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Delete roles Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id == "{{ todelete }}" + fail_msg: Unable to delete roles + success_msg: Roles deleted successfully + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_routes_v2/aliases b/tests/integration/targets/ntnx_routes_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_routes_v2/meta/main.yml b/tests/integration/targets/ntnx_routes_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_routes_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_routes_v2/tasks/main.yml b/tests/integration/targets/ntnx_routes_v2/tasks/main.yml new file mode 100644 index 000000000..76f7079ad --- /dev/null +++ b/tests/integration/targets/ntnx_routes_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + 
validate_certs: "{{ validate_certs }}" + block: + - name: Import routes.yml + ansible.builtin.import_tasks: "routes.yml" diff --git a/tests/integration/targets/ntnx_routes_v2/tasks/routes.yml b/tests/integration/targets/ntnx_routes_v2/tasks/routes.yml new file mode 100644 index 000000000..6c65c51b7 --- /dev/null +++ b/tests/integration/targets/ntnx_routes_v2/tasks/routes.yml @@ -0,0 +1,643 @@ +--- +- name: Start testing ntnx_routes_v2 + ansible.builtin.debug: + msg: Start testing ntnx_routes_v2 + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set Subnet and VPC name + ansible.builtin.set_fact: + subnet_name: "{{ random_name }}_subnet_test" + vpc_name: "{{ random_name }}_vpc_test" + +############################################################################################ + +- name: List subnets using subnet name and define external subnet variable + ntnx_subnets_info_v2: + filter: name eq '{{ external_nat_subnets[0].name }}' + limit: 1 + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length >= 1 + - result.response[0].name == "{{ external_nat_subnets[0].name }}" + fail_msg: "List subnets using subnet name and define external subnet variable failed " + success_msg: "List subnets using subnet name and define external subnet variable passed " + +- name: Define variable + ansible.builtin.set_fact: + external_subnet_uuid: "{{ result.response[0].ext_id }}" + +############################################################################################ + +- name: Create first VPC + ntnx_vpcs_v2: + state: present + name: "{{ vpc_name }}_1" + external_subnets: + - subnet_reference: "{{ external_subnet_uuid }}" + register: result + ignore_errors: true + +- name: Create first VPC Status + ansible.builtin.assert: + that: + - result.response is 
defined + - result.ext_id is defined + - result.response.ext_id is defined + - result.response.external_subnets | length == 1 + - result.response.external_subnets[0].external_ips[0].ipv4.value is defined + - result.response.external_subnets[0].subnet_reference == "{{ external_subnet_uuid }}" + - result.response.vpc_type == "REGULAR" + - result.response.name == "{{ vpc_name }}_1" + - result.task_ext_id is defined + fail_msg: "Create first VPC failed " + success_msg: "Create first VPC passed " + +- name: Set VPC UUID + ansible.builtin.set_fact: + vpc_uuid_1: "{{ result.ext_id }}" + +########################################################################################################### + +- name: Create second VPC + ntnx_vpcs_v2: + state: present + name: "{{ vpc_name }}_2" + external_subnets: + - subnet_reference: "{{ external_subnet_uuid }}" + register: result + ignore_errors: true + +- name: Create second VPC Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.ext_id is defined + - result.response.external_subnets | length == 1 + - result.response.external_subnets[0].external_ips[0].ipv4.value is defined + - result.response.external_subnets[0].subnet_reference == "{{ external_subnet_uuid }}" + - result.response.vpc_type == "REGULAR" + - result.response.name == "{{ vpc_name }}_2" + - result.task_ext_id is defined + fail_msg: "Create second VPC failed " + success_msg: "Create second VPC passed " + +- name: Set VPC UUID + ansible.builtin.set_fact: + vpc_uuid_2: "{{ result.ext_id }}" + +########################################################################################################### + +- name: Fetch first VPC route table with filter and set route table external ID + ntnx_route_tables_info_v2: + filter: vpcReference eq '{{ vpc_uuid_1 }}' + register: route_tables_filter_1 + ignore_errors: true + +- name: Fetch first VPC route table with filter and set route table external ID status + 
ansible.builtin.assert: + that: + - route_tables_filter_1 is defined + - route_tables_filter_1.failed == false + - route_tables_filter_1.changed == false + - route_tables_filter_1.response | length == 1 + - route_tables_filter_1.response[0].vpc_reference == vpc_uuid_1 + fail_msg: "Failed to fetch first VPC route table with filter and set route table external ID" + success_msg: "Successfully fetched first VPC route table with filter and set route table external ID" + +- name: Set route table external ID + ansible.builtin.set_fact: + route_table_external_id_1: "{{ route_tables_filter_1.response[0].ext_id }}" + +########################################################################################################### + +- name: Fetch second VPC route table with filter and set route table external ID + ntnx_route_tables_info_v2: + filter: vpcReference eq '{{ vpc_uuid_2 }}' + register: route_tables_filter_2 + ignore_errors: true + +- name: Fetch second VPC route table with filter and set route table external ID status + ansible.builtin.assert: + that: + - route_tables_filter_2 is defined + - route_tables_filter_2.failed == false + - route_tables_filter_2.changed == false + - route_tables_filter_2.response | length == 1 + - route_tables_filter_2.response[0].vpc_reference == vpc_uuid_2 + fail_msg: "Failed to fetch second VPC route table with filter and set route table external ID" + success_msg: "Successfully fetched second VPC route table with filter and set route table external ID" + +- name: Set route table external ID + ansible.builtin.set_fact: + route_table_external_id_2: "{{ route_tables_filter_2.response[0].ext_id }}" + +########################################################################################################### + +- name: Create route with check mode enabled + ntnx_routes_v2: + state: present + name: "route_test" + description: "Route for testing" + vpc_reference: "54228821-52b9-4862-a623-05b28ce93a92" + route_table_ext_id: 
"60448d4b-0532-44c3-8e10-9662cb138b37" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.1" + prefix_length: 32 + nexthop: + nexthop_type: "EXTERNAL_SUBNET" + nexthop_reference: "f4b3b3b4-4b3b-4b3b-4b3b-4b3b4b3b4b3b" + metadata: + owner_reference_id: "a88a8226-83a4-4159-aa99-298eefcc5af0" + project_reference_id: "6aeec7b5-6ab6-4eb6-acf9-cf1e8b14a0b8" + check_mode: true + register: result + ignore_errors: true + +- name: Create route with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id is defined + - result.response.name == "route_test" + - result.response.description == "Route for testing" + - result.response.vpc_reference == "54228821-52b9-4862-a623-05b28ce93a92" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.1" + - result.response.destination.ipv4.prefix_length == 32 + - result.response.nexthop.nexthop_type == "EXTERNAL_SUBNET" + - result.response.nexthop.nexthop_reference == "f4b3b3b4-4b3b-4b3b-4b3b-4b3b4b3b4b3b" + - result.response.metadata.owner_reference_id == "a88a8226-83a4-4159-aa99-298eefcc5af0" + - result.response.metadata.project_reference_id == "6aeec7b5-6ab6-4eb6-acf9-cf1e8b14a0b8" + fail_msg: "Create route with check mode enabled failed " + success_msg: "Create route with check mode enabled passed " + +########################################################################################################### + +- name: Create first static route + ntnx_routes_v2: + state: present + name: "{{ random_name }}_route_1" + description: "Route for testing" + vpc_reference: "{{ vpc_uuid_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.1" + prefix_length: 32 + nexthop: + nexthop_type: "EXTERNAL_SUBNET" + nexthop_reference: "{{ external_subnet_uuid }}" + metadata: + owner_reference_id: "{{ vpc_uuid_1 
}}" + project_reference_id: "{{ project.uuid }}" + register: result + ignore_errors: true + +- name: Create first static route Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.name == "{{ random_name }}_route_1" + - result.response.description == "Route for testing" + - result.response.vpc_reference == "{{ vpc_uuid_1 }}" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.1" + - result.response.destination.ipv4.prefix_length == 32 + - result.response.nexthop.nexthop_type == "EXTERNAL_SUBNET" + - result.response.nexthop.nexthop_reference == "{{ external_subnet_uuid }}" + - result.response.metadata.owner_reference_id == "{{ vpc_uuid_1 }}" + - result.response.metadata.project_reference_id == "{{ project.uuid }}" + - result.task_ext_id is defined + fail_msg: "Create first static route failed " + success_msg: "Create first static route passed " + +- name: Set route external ID + ansible.builtin.set_fact: + static_route_external_id_1: "{{ result.response.ext_id }}" + +########################################################################################################### + +- name: Create second static route + ntnx_routes_v2: + state: present + name: "{{ random_name }}_route_2" + description: "Route for testing" + vpc_reference: "{{ vpc_uuid_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.2" + prefix_length: 32 + nexthop: + nexthop_type: "EXTERNAL_SUBNET" + nexthop_reference: "{{ external_subnet_uuid }}" + metadata: + owner_reference_id: "{{ vpc_uuid_1 }}" + project_reference_id: "{{ project.uuid }}" + register: result + ignore_errors: true + +- name: Create second static route Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - 
result.ext_id is defined + - result.response.name == "{{ random_name }}_route_2" + - result.response.description == "Route for testing" + - result.response.vpc_reference == "{{ vpc_uuid_1 }}" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.2" + - result.response.destination.ipv4.prefix_length == 32 + - result.response.nexthop.nexthop_type == "EXTERNAL_SUBNET" + - result.response.nexthop.nexthop_reference == "{{ external_subnet_uuid }}" + - result.response.metadata.owner_reference_id == "{{ vpc_uuid_1 }}" + - result.response.metadata.project_reference_id == "{{ project.uuid }}" + - result.task_ext_id is defined + fail_msg: "Create second static route failed " + success_msg: "Create second static route passed " + +- name: Set route external ID + ansible.builtin.set_fact: + static_route_external_id_2: "{{ result.response.ext_id }}" + +########################################################################################################### + +- name: Update route with check mode enabled + ntnx_routes_v2: + state: present + name: "{{ random_name }}_route_updated" + description: "Route for testing updated" + vpc_reference: "{{ vpc_uuid_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + ext_id: "{{ static_route_external_id_1 }}" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.3" + prefix_length: 32 + check_mode: true + register: result + ignore_errors: true + +- name: Update route with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ static_route_external_id_1 }}" + - result.response.name == "{{ random_name }}_route_updated" + - result.response.description == "Route for testing updated" + - result.response.vpc_reference == "{{ vpc_uuid_1 }}" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.3" + - 
result.response.destination.ipv4.prefix_length == 32 + fail_msg: "Update route with check mode enabled failed " + success_msg: "Update route with check mode enabled passed " + +########################################################################################################### + +- name: Update route + ntnx_routes_v2: + state: present + name: "{{ random_name }}_route_updated" + description: "Route for testing updated" + vpc_reference: "{{ vpc_uuid_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + ext_id: "{{ static_route_external_id_1 }}" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.3" + prefix_length: 32 + register: result + ignore_errors: true + +- name: Update route Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.ext_id == "{{ static_route_external_id_1 }}" + - result.response.name == "{{ random_name }}_route_updated" + - result.response.description == "Route for testing updated" + - result.response.vpc_reference == "{{ vpc_uuid_1 }}" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.3" + - result.response.destination.ipv4.prefix_length == 32 + - result.task_ext_id is defined + fail_msg: "Update route failed " + success_msg: "Update route passed " + +########################################################################################################### + +- name: Check idempotency by updating route with the same data + ntnx_routes_v2: + state: present + name: "{{ random_name }}_route_updated" + description: "Route for testing updated" + vpc_reference: "{{ vpc_uuid_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + ext_id: "{{ static_route_external_id_1 }}" + route_type: STATIC + destination: + ipv4: + ip: + value: "10.0.0.3" + prefix_length: 32 + nexthop: + nexthop_type: "EXTERNAL_SUBNET" + nexthop_reference: "{{ external_subnet_uuid }}" + metadata: + 
owner_reference_id: "{{ vpc_uuid_1 }}" + project_reference_id: "{{ project.uuid }}" + register: result + ignore_errors: true + +- name: Check idempotency by updating route with the same data Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.ext_id == "{{ static_route_external_id_1 }}" + - result.route_table_ext_id == "{{ route_table_external_id_1 }}" + - result.msg == "Nothing to change." + fail_msg: "Check idempotency by updating route with the same data failed " + success_msg: "Check idempotency by updating route with the same data passed " + +########################################################################################################### + +- name: Fetch route by external ID + ntnx_routes_info_v2: + ext_id: "{{ static_route_external_id_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + register: result + ignore_errors: true + +- name: Fetch route by external ID status + ansible.builtin.assert: + that: + - result.ext_id == static_route_external_id_1 + - result.route_table_ext_id == route_table_external_id_1 + - result.response is defined + - result.response.ext_id == static_route_external_id_1 + - result.route_table_ext_id == route_table_external_id_1 + - result.response.name == "{{ random_name }}_route_updated" + - result.response.description == "Route for testing updated" + - result.response.vpc_reference == "{{ vpc_uuid_1 }}" + - result.response.route_type == "STATIC" + - result.response.destination.ipv4.ip.value == "10.0.0.3" + - result.response.destination.ipv4.prefix_length == 32 + - result.response.nexthop.nexthop_type == "EXTERNAL_SUBNET" + - result.response.nexthop.nexthop_reference == "{{ external_subnet_uuid }}" + - result.response.metadata.owner_reference_id == "{{ vpc_uuid_1 }}" + - result.response.metadata.project_reference_id == "{{ project.uuid }}" + fail_msg: "Fetch route by external ID failed " + success_msg: 
"Fetch route by external ID passed " + +########################################################################################################### + +- name: List all routes + ntnx_routes_info_v2: + route_table_ext_id: "{{ route_table_external_id_1 }}" + register: result + ignore_errors: true + +- name: List all routes Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 2 + fail_msg: "List all routes failed " + success_msg: "List all routes passed " + +########################################################################################################### + +- name: List all routes with limit + ntnx_routes_info_v2: + route_table_ext_id: "{{ route_table_external_id_1 }}" + limit: 2 + register: result + ignore_errors: true + +- name: List all routes with limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 2 + fail_msg: "List all routes with limit failed " + success_msg: "List all routes with limit passed " + +########################################################################################################### + +- name: List all routes with filter + ntnx_routes_info_v2: + route_table_ext_id: "{{ route_table_external_id_1 }}" + filter: name eq '{{ random_name }}_route_updated' + register: result + ignore_errors: true + +- name: List all routes with filter Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + fail_msg: "List all routes with filter failed " + success_msg: "List all routes with filter passed " + +########################################################################################################### + +- name: List all route tables + ntnx_route_tables_info_v2: + register: route_tables + ignore_errors: true + +- name: List all route tables status + ansible.builtin.assert: + that: + - route_tables is defined + - route_tables.failed == false + - route_tables.changed == false + - 
route_tables.response | length > 1 + fail_msg: "Failed to list route tables" + success_msg: "Successfully listed route tables" + +- name: Set variables + ansible.builtin.set_fact: + vpc_reference: "{{ route_tables.response[0].vpc_reference }}" + +########################################################################################################### + +- name: Fetch route table by external_id + ntnx_route_tables_info_v2: + ext_id: "{{ route_table_external_id_1 }}" + register: route_table + ignore_errors: true + +- name: Fetch route table by external_id status + ansible.builtin.assert: + that: + - route_table is defined + - route_table.failed == false + - route_table.changed == false + - route_table.ext_id == "{{route_table_external_id_1}}" + - route_table.response.ext_id == "{{route_table_external_id_1}}" + - route_table.response.vpc_reference == "{{ vpc_uuid_1 }}" + fail_msg: "Failed to fetch route table by external_id" + success_msg: "Successfully fetched route table by external_id" + +########################################################################################################### + +- name: List all route tables with limit + ntnx_route_tables_info_v2: + limit: 2 + register: route_tables_limit + ignore_errors: true + +- name: List all route tables with limit status + ansible.builtin.assert: + that: + - route_tables_limit is defined + - route_tables_limit.failed == false + - route_tables_limit.changed == false + - route_tables_limit.response | length == 2 + fail_msg: "Failed to list route tables with limit" + success_msg: "Successfully listed route tables with limit" + +########################################################################################################### + +- name: List all route tables with filter + ntnx_route_tables_info_v2: + filter: vpcReference eq '{{ vpc_uuid_2 }}' + register: route_tables_filter + ignore_errors: true + +- name: List all route tables with filter status + ansible.builtin.assert: + that: + - 
route_tables_filter is defined + - route_tables_filter.failed == false + - route_tables_filter.changed == false + - route_tables_filter.response | length == 1 + - route_tables_filter.response[0].vpc_reference == "{{ vpc_uuid_2 }}" + - route_tables_filter.response[0].ext_id == route_table_external_id_2 + fail_msg: "Failed to list route tables with filter" + success_msg: "Successfully listed route tables with filter" + +########################################################################################################### + +- name: Delete first static route + ntnx_routes_v2: + state: absent + ext_id: "{{ static_route_external_id_1 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + register: result + ignore_errors: true + +- name: Delete first static route Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == "{{ static_route_external_id_1 }}" + - result.route_table_ext_id == "{{ route_table_external_id_1 }}" + - result.changed == true + - result.failed == false + - result.task_ext_id is defined + - result.response.entities_affected[0].ext_id == "{{ static_route_external_id_1 }}" + - result.response.ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Delete first static route failed " + success_msg: "Delete first static route passed " + +########################################################################################################### + +- name: Delete second static route + ntnx_routes_v2: + state: absent + ext_id: "{{ static_route_external_id_2 }}" + route_table_ext_id: "{{ route_table_external_id_1 }}" + register: result + ignore_errors: true + +- name: Delete second static route Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id == "{{ static_route_external_id_2 }}" + - result.route_table_ext_id == "{{ route_table_external_id_1 }}" + - result.changed == true + - result.failed == false + - result.task_ext_id is defined + - 
result.response.entities_affected[0].ext_id == "{{ static_route_external_id_2 }}" + - result.response.ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Delete second static route failed " + success_msg: "Delete second static route passed " + +########################################################################################################### + +- name: Delete first VPC + ntnx_vpcs_v2: + state: absent + ext_id: "{{ vpc_uuid_1 }}" + register: result + ignore_errors: true + +- name: Delete first VPC Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vpc_uuid_1 }}" + - result.task_ext_id is defined + fail_msg: " Unable to Delete first VPC" + success_msg: "First VPC has been deleted successfully" + +############################################################################################ + +- name: Delete second VPC + ntnx_vpcs_v2: + state: absent + ext_id: "{{ vpc_uuid_2 }}" + register: result + ignore_errors: true + +- name: Delete second VPC Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vpc_uuid_2 }}" + - result.task_ext_id is defined + fail_msg: " Unable to Delete second VPC" + success_msg: "second VPC has been deleted successfully" diff --git a/tests/integration/targets/ntnx_saml_identity_providers_v2/aliases b/tests/integration/targets/ntnx_saml_identity_providers_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_saml_identity_providers_v2/meta/main.yml b/tests/integration/targets/ntnx_saml_identity_providers_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_saml_identity_providers_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git 
a/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/all_operation.yml b/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/all_operation.yml new file mode 100644 index 000000000..ced2314dc --- /dev/null +++ b/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/all_operation.yml @@ -0,0 +1,245 @@ +--- +- name: Start ntnx_saml_identity_providers_v2 and ntnx_saml_identity_providers_info_v2 tests + ansible.builtin.debug: + msg: start ntnx_saml_identity_providers_v2 and ntnx_saml_identity_providers_info_v2 tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: ansible-ag + +- name: Set variables + ansible.builtin.set_fact: + todelete: [] + user_name: "{{ random_name }}{{ suffix_name }}user" + full_name: "{{ random_name }}{{ suffix_name }}saml" + +- name: Verify spec generation for identity providers with check_mode + ntnx_saml_identity_providers_v2: + name: "{{ full_name }}1" + username_attribute: "{{ user_name }}1" + email_attribute: email + groups_attribute: groups + groups_delim: "," + idp_metadata_url: https://samltest.id/saml/idp + idp_metadata_xml: idp_xml_value + entity_issuer: entity_issuer_test + is_signed_authn_req_enabled: true + custom_attributes: + - custom1 + - custom2 + state: present + register: result + ignore_errors: true + check_mode: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ full_name }}1" + - result.response.username_attribute == "{{ user_name }}1" + - result.response.email_attribute == "email" + - result.response.groups_attribute == "groups" + - result.response.groups_delim == "," + - result.response.idp_metadata_url == "https://samltest.id/saml/idp" + - 
result.response.idp_metadata_xml == "idp_xml_value" + - result.response.entity_issuer == "entity_issuer_test" + - result.response.is_signed_authn_req_enabled == true + - result.response.custom_attributes[0] == "custom1" + - result.response.custom_attributes[1] == "custom2" + fail_msg: " Unable to Verify spec generation for identity providers with check_mode " + success_msg: " Verify spec generation for identity providers with check_mode finished successfully " + +- name: Read content from file + ansible.builtin.set_fact: + xml_file_content: "{{ lookup('file', xml_content.dest + '/content.txt') }}" + +- name: Create saml + ntnx_saml_identity_providers_v2: + name: "{{ full_name }}1" + username_attribute: "{{ user_name }}1" + email_attribute: email + groups_attribute: groups + groups_delim: "," + idp_metadata_xml: "{{ xml_file_content }}" + state: present + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.name == "{{ full_name }}1" + - result.response.username_attribute == "{{ user_name }}1" + - result.response.email_attribute == "email" + - result.response.groups_attribute == "groups" + - result.response.groups_delim == "," + fail_msg: " Unable to create saml " + success_msg: " saml created successfully " + +- name: Adding saml external ID to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +- name: Get saml using filter name + ntnx_saml_identity_providers_info_v2: + filter: name eq '{{ full_name }}1' + register: result + ignore_errors: true + +- name: Status of saml + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response[0].name == "{{ full_name }}1" + - result.response[0].username_attribute == "{{ user_name }}1" + - result.response[0].email_attribute == "email" + - 
result.response[0].groups_attribute == "groups" + - result.response[0].groups_delim == "," + - result.response[0].ext_id == "{{ todelete[-1] }}" + - result.response | length == 1 + fail_msg: " Unable to fetch saml using filter name " + success_msg: " saml fetched using filter name successfully " + +- name: Verify spec generation for update identity providers with check_mode + ntnx_saml_identity_providers_v2: + state: present + ext_id: "{{ todelete[-1] }}" + name: "{{ full_name }}2" + username_attribute: "{{ user_name }}2" + email_attribute: new_email + groups_attribute: group_list + groups_delim: ; + is_signed_authn_req_enabled: true + register: result + ignore_errors: true + check_mode: true + +- name: Update status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ full_name }}2" + - result.response.username_attribute == "{{ user_name }}2" + - result.response.email_attribute == "new_email" + - result.response.groups_attribute == "group_list" + - result.response.groups_delim == ";" + - result.response.ext_id == "{{ todelete[-1] }}" + - result.response.is_signed_authn_req_enabled == true + fail_msg: " Unable to Verify spec generation for update identity providers with check_mode " + success_msg: " Verify spec generation for update identity providers with check_mode finished successfully" + +- name: Update saml + ntnx_saml_identity_providers_v2: + state: present + ext_id: "{{ todelete[-1] }}" + name: "{{ full_name }}2" + username_attribute: "{{ user_name }}2" + email_attribute: new_email + groups_attribute: group_list + groups_delim: ; + is_signed_authn_req_enabled: true + register: result + ignore_errors: true + +- name: Update status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.name == "{{ full_name }}2" + - result.response.username_attribute == "{{ user_name }}2" + - 
result.response.email_attribute == "new_email" + - result.response.groups_attribute == "group_list" + - result.response.groups_delim == ";" + - result.response.ext_id == "{{ todelete[-1] }}" + - result.response.is_signed_authn_req_enabled == true + fail_msg: " Unable to update saml " + success_msg: " saml updated successfully " + +- name: Check idempotency + ntnx_saml_identity_providers_v2: + state: present + ext_id: "{{ todelete[-1] }}" + name: "{{ full_name }}2" + username_attribute: "{{ user_name }}2" + is_signed_authn_req_enabled: true + register: result + ignore_errors: true + +- name: Idempotency status + ansible.builtin.assert: + that: + - result.msg == "Nothing to change." + - result.changed == False + - result.failed == False + fail_msg: " Unable to check idempotency " + success_msg: " Idempotency check successfully " + +- name: Get saml using ext_id + ntnx_saml_identity_providers_info_v2: + ext_id: "{{ todelete[-1] }}" + register: result + ignore_errors: true + +- name: Status of saml + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == "{{ full_name }}2" + - result.response.username_attribute == "{{ user_name }}2" + - result.response.email_attribute == "new_email" + - result.response.groups_attribute == "group_list" + - result.response.groups_delim == ";" + - result.response.ext_id == "{{ todelete[-1] }}" + - result.response.is_signed_authn_req_enabled == true + fail_msg: " Unable to fetch saml using ext_id " + success_msg: " saml fetched using ext_id successfully " + +- name: List all identity providers + ntnx_saml_identity_providers_info_v2: + register: result + ignore_errors: true + +- name: List status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: " Unable to list all identity providers " + success_msg: " list all identity providers 
successfully " + +- name: Delete the created SAML identity providers + ntnx_saml_identity_providers_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == True + - result.msg == "All items completed" + - result.skipped == False + fail_msg: " Unable to delete saml " + success_msg: " saml deleted successfully " diff --git a/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/main.yml b/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/main.yml new file mode 100644 index 000000000..d54134b6f --- /dev/null +++ b/tests/integration/targets/ntnx_saml_identity_providers_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_operation.yml + ansible.builtin.import_tasks: all_operation.yml diff --git a/tests/integration/targets/ntnx_saml_identity_providers_v2/vars/main.yml b/tests/integration/targets/ntnx_saml_identity_providers_v2/vars/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml index 0e9b038e3..9da4edc13 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml @@ -1,74 +1,67 @@ -- name: create app security rule with inbound and outbound list +- name: Create app security rule with inbound and outbound list ntnx_security_rules: name: test_app_rule app_rule: target_group: - categories: - apptype: Apache_Spark - apptype_filter_by_category: - AppFamily: - - Backup - apptiers: - - "{{categories.apptiers[0]}}" - - "{{categories.apptiers[1]}}" + 
categories: + apptype: Apache_Spark + apptype_filter_by_category: + AppFamily: + - Backup + apptiers: + - "{{categories.apptiers[0]}}" + - "{{categories.apptiers[1]}}" - default_internal_policy: DENY_ALL + default_internal_policy: DENY_ALL inbounds: - - - categories: - AppFamily: - - Databases - - DevOps - description: test description - protocol: - tcp: - - start_port: 22 - end_port: 80 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - icmp: - - code: 1 - type: 1 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - udp: - - start_port: 82 - end_port: 8080 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - service: - name: 6a44 - - - ip_subnet: - prefix_length: 24 - ip: 192.168.1.0 - description: test description - - - address: + - categories: + AppFamily: + - Databases + - DevOps + description: test description + protocol: + tcp: + - start_port: 22 + end_port: 80 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + icmp: + - code: 1 + type: 1 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + udp: + - start_port: 82 + end_port: 8080 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + service: + name: 6a44 + - ip_subnet: + prefix_length: 24 + ip: 192.168.1.0 + description: test description + - address: name: dest outbounds: - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - icmp: - - code: 1 - type: 1 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + icmp: + - code: 1 + type: 1 policy_mode: MONITOR allow_ipv6_traffic: true policy_hitlog: true @@ -77,78 +70,76 @@ - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status.state == 'COMPLETE' - result.response.status.name=="test_app_rule" - result.response.status.resources.app_rule.target_group.filter.params.AppTier | length == 2 - fail_msg: ' fail: unable to create app security rule with 
inbound and outbound list' - success_msg: 'pass: create app security rule with inbound and outbound list successfully' + fail_msg: " fail: unable to create app security rule with inbound and outbound list" + success_msg: "pass: create app security rule with inbound and outbound list successfully" -- name: update app security rule by adding to outbound list and remove tule from inbound list +- name: Update app security rule by adding to outbound list and remove tule from inbound list ntnx_security_rules: - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" app_rule: policy_mode: APPLY inbounds: - - - rule_id: "{{result.response.spec.resources.app_rule.inbound_allow_list.0.rule_id}}" + - rule_id: "{{result.response.spec.resources.app_rule.inbound_allow_list.0.rule_id}}" state: absent outbounds: - - - protocol: - icmp: - - code: 1 - type: 1 - categories: - AppFamily: - - Databases - - DevOps + - protocol: + icmp: + - code: 1 + type: 1 + categories: + AppFamily: + - Databases + - DevOps register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status.state == 'COMPLETE' - result.response.spec.resources.app_rule.action == "APPLY" - result.response.spec.resources.app_rule.outbound_allow_list.0.icmp_type_code_list is defined - fail_msg: ' fail: unable to update app security rule with outbound list ' - success_msg: 'pass :update app security rule with outbound list successfully' + fail_msg: " fail: unable to update app security rule with outbound list " + success_msg: "pass :update app security rule with outbound list successfully" -- name: delete app security rule +- name: Delete app security rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" register: result ignore_errors: true - 
name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete app security rule ' - success_msg: 'pass : delete app security rule successfully' -- name: create app security rule with allow all inbound and outbound list + fail_msg: " fail: unable to delete app security rule " + success_msg: "pass : delete app security rule successfully" +- name: Create app security rule with allow all inbound and outbound list ntnx_security_rules: name: test_app_rule app_rule: target_group: - categories: - apptype: Apache_Spark - apptype_filter_by_category: - AppFamily: - - Backup - apptiers: - - "{{categories.apptiers[0]}}" - - "{{categories.apptiers[1]}}" - default_internal_policy: DENY_ALL + categories: + apptype: Apache_Spark + apptype_filter_by_category: + AppFamily: + - Backup + apptiers: + - "{{categories.apptiers[0]}}" + - "{{categories.apptiers[1]}}" + default_internal_policy: DENY_ALL allow_all_outbounds: true allow_all_inbounds: true policy_mode: MONITOR @@ -158,7 +149,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -166,20 +157,20 @@ - result.response.spec.name=="test_app_rule" - result.response.status.resources.app_rule.target_group.filter.params.AppTier | length == 2 - fail_msg: ' fail: unable to create app security rule with allow all inbound and outbound list' - success_msg: 'pass: create app security rule with allow all inbound and outbound list successfully' -- name: delete app security rule + fail_msg: " fail: unable to create app security rule with allow all inbound and outbound list" + success_msg: "pass: create app security rule with allow all inbound and outbound list successfully" +- name: Delete app security rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: 
"{{ result.response.metadata.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete app security rule ' - success_msg: 'pass : delete app security rule successfully' + fail_msg: " fail: unable to delete app security rule " + success_msg: "pass : delete app security rule successfully" diff --git a/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml index 5a7243409..3cf47b9d5 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml @@ -1,15 +1,15 @@ - name: >- - create isolation security rule with first_entity_filter and + Create isolation security rule with first_entity_filter and second_entity_filter with check mode ntnx_security_rules: name: test_isolation_rule isolation_rule: isolate_category: - Environment: - - Dev + Environment: + - Dev from_category: - Environment: - - Production + Environment: + - Production subset_category: Environment: - Staging @@ -19,29 +19,29 @@ check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == false - result.response.spec.name=="test_isolation_rule" - result.security_rule_uuid is none - fail_msg: ' fail: unable to create isolation security rule with first_entity_filter and second_entity_filter with check mode ' + fail_msg: " fail: unable to create isolation security rule with first_entity_filter and second_entity_filter with check mode " success_msg: >- pass: create isolation security rule with first_entity_filter and second_entity_filter successfully with check mode - name: >- - create isolation security rule with first_entity_filter and + Create 
isolation security rule with first_entity_filter and second_entity_filter ntnx_security_rules: name: test_isolation_rule isolation_rule: isolate_category: - Environment: - - Dev + Environment: + - Dev from_category: - Environment: - - Production + Environment: + - Production subset_category: Environment: - Staging @@ -51,20 +51,20 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.spec.name=="test_isolation_rule" - result.response.status.state == 'COMPLETE' - fail_msg: ' fail: unable to create isolation security rule with first_entity_filter and second_entity_filter' + fail_msg: " fail: unable to create isolation security rule with first_entity_filter and second_entity_filter" success_msg: >- pass: create isolation security rule with first_entity_filter and second_entity_filter successfully -- name: update isoloation security rule action with check_mode +- name: Update isoloation security rule action with check_mode ntnx_security_rules: - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" isolation_rule: policy_mode: APPLY register: output @@ -72,62 +72,62 @@ check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - output.response is defined - output.failed == false - output.changed == false - output.response.spec.name=="test_isolation_rule" - output.security_rule_uuid is none - fail_msg: ' fail: unable to update isoloation security rule action with check_mode' + fail_msg: " fail: unable to update isoloation security rule action with check_mode" success_msg: >- pass: update isoloation security rule action with check_mode successfully -- name: update isoloation security rule action +- name: Update isoloation security rule action ntnx_security_rules: - security_rule_uuid: '{{ result.security_rule_uuid}}' + security_rule_uuid: "{{ result.security_rule_uuid}}" 
isolation_rule: policy_mode: APPLY register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - result.response.status.state == 'COMPLETE' - result.response.spec.resources.isolation_rule.action == "APPLY" - fail_msg: ' fail: unable to update isolation rule action ' - success_msg: 'pass : update isolation rule action successfully' -- name: update isoloation security with same values + fail_msg: " fail: unable to update isolation rule action " + success_msg: "pass : update isolation rule action successfully" +- name: Update isoloation security with same values ntnx_security_rules: - security_rule_uuid: '{{result.security_rule_uuid}}' + security_rule_uuid: "{{result.security_rule_uuid}}" isolation_rule: policy_mode: APPLY register: output ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - output.failed == false - output.changed == false - output.msg == "Nothing to change" - fail_msg: ' fail: unable to update isolation rule action ' - success_msg: 'pass : update isolation rule action successfully' -- name: delete isolation rule + fail_msg: " fail: unable to update isolation rule action " + success_msg: "pass : update isolation rule action successfully" +- name: Delete isolation rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ result.security_rule_uuid }}' + security_rule_uuid: "{{ result.security_rule_uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete isolation security rule ' - success_msg: 'pass : delete isolation security rule successfully' + fail_msg: " fail: unable to delete isolation security rule " + success_msg: "pass : delete isolation security rule successfully" diff --git 
a/tests/integration/targets/ntnx_security_rules/tasks/main.yml b/tests/integration/targets/ntnx_security_rules/tasks/main.yml index 172cfd461..3af3f59bf 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/main.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/main.yml @@ -1,12 +1,17 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "app_rule.yml" - - import_tasks: "isolation_rule.yml" - - import_tasks: "quarantine_rule.yml" - - import_tasks: "vdi.yml" + - name: Import app_rule.yml + ansible.builtin.import_tasks: "app_rule.yml" + - name: Import isolation_rule.yml + ansible.builtin.import_tasks: "isolation_rule.yml" + - name: Import quarantine_rule.yml + ansible.builtin.import_tasks: "quarantine_rule.yml" + - name: Import vdi.yml + ansible.builtin.import_tasks: "vdi.yml" diff --git a/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml index dba621b3d..352a247d3 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml @@ -1,35 +1,33 @@ - - name: update quarantine_rule by adding inbound and outbound list - ntnx_security_rules: - security_rule_uuid: "{{quarantine_rule_uuid}}" - quarantine_rule: - target_group: - default_internal_policy: DENY_ALL - inbounds: - - - categories: - AppFamily: - - Databases - - DevOps - outbounds: - - - categories: - AppFamily: - - Databases - - DevOps - policy_mode: MONITOR - allow_ipv6_traffic: true - policy_hitlog: true - register: result - ignore_errors: true +- 
name: Update quarantine_rule by adding inbound and outbound list + ntnx_security_rules: + security_rule_uuid: "{{quarantine_rule_uuid}}" + quarantine_rule: + target_group: + default_internal_policy: DENY_ALL + inbounds: + - categories: + AppFamily: + - Databases + - DevOps + outbounds: + - categories: + AppFamily: + - Databases + - DevOps + policy_mode: MONITOR + allow_ipv6_traffic: true + policy_hitlog: true + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == false - - result.response.status.state == 'COMPLETE' - - result.response.spec.resources.quarantine_rule.action == "MONITOR" - fail_msg: ' fail: unable to update quarantine_rule by adding inbound and outbound list ' - success_msg: >- - pass: update quarantine_rule by adding inbound and outbound list - successfully +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response.status.state == 'COMPLETE' + - result.response.spec.resources.quarantine_rule.action == "MONITOR" + fail_msg: " fail: unable to update quarantine_rule by adding inbound and outbound list " + success_msg: >- + pass: update quarantine_rule by adding inbound and outbound list + successfully diff --git a/tests/integration/targets/ntnx_security_rules/tasks/vdi.yml b/tests/integration/targets/ntnx_security_rules/tasks/vdi.yml index 42896d5ed..bdbb84777 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/vdi.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/vdi.yml @@ -1,51 +1,46 @@ -- name: create ad security rule with inbound and outbound list +- name: Create ad security rule with inbound and outbound list ntnx_security_rules: name: VDI Policy vdi_rule: target_group: - categories: - adgroup: "$Default" - default_internal_policy: DENY_ALL + categories: + adgroup: "$Default" + default_internal_policy: DENY_ALL allow_all_outbounds: true inbounds: - - - categories: - 
AppFamily: - - Databases - - DevOps - description: test description - protocol: - tcp: - - start_port: 22 - end_port: 80 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - icmp: - - code: 1 - type: 1 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - udp: - - start_port: 82 - end_port: 8080 - - - categories: - AppFamily: - - Databases - - DevOps - protocol: - service: - name: 6a44 - - - address: + - categories: + AppFamily: + - Databases + - DevOps + description: test description + protocol: + tcp: + - start_port: 22 + end_port: 80 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + icmp: + - code: 1 + type: 1 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + udp: + - start_port: 82 + end_port: 8080 + - categories: + AppFamily: + - Databases + - DevOps + protocol: + service: + name: 6a44 + - address: name: dest policy_mode: MONITOR allow_ipv6_traffic: true @@ -53,22 +48,21 @@ register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.spec.name=="VDI Policy" - result.response.status.state == 'COMPLETE' - fail_msg: ' fail: unable create ad security rule with inbound and outbound list' + fail_msg: " fail: unable create ad security rule with inbound and outbound list" success_msg: >- pass: create ad security rule with inbound and outbound list finished successfully -- name: update VDI security rule action +- name: Update VDI security rule action ntnx_security_rules: - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" vdi_rule: inbounds: - - - ip_subnet: + - ip_subnet: prefix_length: 8 ip: 10.0.0.0 description: test description @@ -76,27 +70,27 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.changed == true - 
result.response.status.state == 'COMPLETE' - fail_msg: ' fail: unable to update vdi_rule ' - success_msg: 'pass : update vdi_rule successfully' + fail_msg: " fail: unable to update vdi_rule " + success_msg: "pass : update vdi_rule successfully" -- name: delete vdi_rule rule +- name: Delete vdi_rule rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete vdi_rule security rule ' - success_msg: 'pass : delete vdi_rule security rule successfully' + fail_msg: " fail: unable to delete vdi_rule security rule " + success_msg: "pass : delete vdi_rule security rule successfully" diff --git a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml index fe02bd1bd..7d74cf3eb 100644 --- a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml +++ b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml @@ -1,4 +1,5 @@ -- debug: +- name: Start testing ntnx_security_rules_info + ansible.builtin.debug: msg: Start testing ntnx_security_rules_info ################################### - name: Create isolation_rule for testing @@ -16,49 +17,49 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - first_rule.response is defined - first_rule.failed == false - first_rule.response.status.state == 'COMPLETE' - first_rule.response.spec.name=="isolation_test_rule" - fail_msg: ' fail: Unable to create isolation_rule for testing ' - success_msg: 'pass: isolation_rule for testing created successfully ' + fail_msg: " fail: Unable to create isolation_rule for testing " + 
success_msg: "pass: isolation_rule for testing created successfully " ################################### -- name: getting all security rules +- name: Getting all security rules ntnx_security_rules_info: register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response.metadata.kind == "network_security_rule" - result.response.metadata.total_matches > 0 - fail_msg: ' fail: unable to get security rules ' - success_msg: 'pass: get all security rules successfully ' + fail_msg: " fail: unable to get security rules " + success_msg: "pass: get all security rules successfully " ################################### -- name: getting particlar security rule using security_rule_uuid +- name: Getting particlar security rule using security_rule_uuid ntnx_security_rules_info: - security_rule_uuid: '{{ first_rule.response.metadata.uuid }}' + security_rule_uuid: "{{ first_rule.response.metadata.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.response.status.state == 'COMPLETE' - first_rule.response.metadata.uuid == result.response.metadata.uuid - fail_msg: ' fail : unable to get particlar security rule using security_rule_uuid' - success_msg: 'pass: getting security rule using security_rule_uuid succesfuly' + fail_msg: " fail : unable to get particlar security rule using security_rule_uuid" + success_msg: "pass: getting security rule using security_rule_uuid succesfuly" ################################### -- name: getting all security rules sorted +- name: Getting all security rules sorted ntnx_security_rules_info: sort_order: ASCENDING sort_attribute: Name @@ -66,7 +67,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - 
result.changed == false @@ -74,22 +75,22 @@ - result.response.metadata.kind == "network_security_rule" - result.response.metadata.sort_order == "ASCENDING" - result.response.metadata.sort_attribute == "Name" - fail_msg: ' fail: unable to get all security rules sorted' - success_msg: 'pass: getting all security rules sorted successfully ' + fail_msg: " fail: unable to get all security rules sorted" + success_msg: "pass: getting all security rules sorted successfully " ################################### -- name: delete security rule +- name: Delete security rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ first_rule.response.metadata.uuid }}' + security_rule_uuid: "{{ first_rule.response.metadata.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete secutiry rule ' - success_msg: 'pass: security rule deleted successfully ' + fail_msg: " fail: unable to delete secutiry rule " + success_msg: "pass: security rule deleted successfully " ################################### diff --git a/tests/integration/targets/ntnx_security_rules_info/tasks/main.yml b/tests/integration/targets/ntnx_security_rules_info/tasks/main.yml index cc243432b..8291c35a5 100644 --- a/tests/integration/targets/ntnx_security_rules_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_security_rules_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "get_security_rules.yml" + - name: Import get_security_rules.yml 
+ ansible.builtin.import_tasks: "get_security_rules.yml" diff --git a/tests/integration/targets/ntnx_security_rules_v2/aliases b/tests/integration/targets/ntnx_security_rules_v2/aliases new file mode 100644 index 000000000..87e7bdaae --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_security_rules_v2/meta/main.yml b/tests/integration/targets/ntnx_security_rules_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_security_rules_v2/tasks/application_rules_and_info_tests.yml b/tests/integration/targets/ntnx_security_rules_v2/tasks/application_rules_and_info_tests.yml new file mode 100644 index 000000000..708ee33aa --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/tasks/application_rules_and_info_tests.yml @@ -0,0 +1,1163 @@ +--- +- name: Start testing application security rules + ansible.builtin.debug: + msg: Start testing application security rules + +- name: Generate random names for security rules creation + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set prefix name + ansible.builtin.set_fact: + prefix: ansible-nsr- + +- name: Set security rules name + ansible.builtin.set_fact: + rule_name_1: "{{ prefix }}{{ random_name }}1" + rule_name_2: "{{ prefix }}{{ random_name }}2" + rule_name_3: "{{ prefix }}{{ random_name }}3" + +################################################ Test Setup ################################################ + +- name: Create min VPC for security rules + ntnx_vpcs_v2: + name: ansible-vpc-{{ random_name }} + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + +- name: Verify 
VPC creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.ext_id is defined + fail_msg: Failed to create VPC + success_msg: Successfully created VPC + +- name: Set VPC id + ansible.builtin.set_fact: + vpc_id: "{{ result.ext_id }}" + +- name: Create categories for tests + ntnx_categories_v2: + key: AnsibleSecurityRuleTest + value: AnsibleSecurityRuleTest{{ random_name }}{{ item }} + description: ansible test + register: results + loop: [0, 1, 2, 3, 4] + +- name: Verify category creation status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - item.ext_id is defined + fail_msg: Failed to create category + success_msg: Successfully created category + loop: "{{ results.results }}" + +- name: Set categories to be used in security rules + ansible.builtin.set_fact: + category1: "{{ results.results[0].response.ext_id }}" + category2: "{{ results.results[1].response.ext_id }}" + category3: "{{ results.results[2].response.ext_id }}" + category4: "{{ results.results[3].response.ext_id }}" + category5: "{{ results.results[4].response.ext_id }}" + +- name: Create service group to be used in security rules + ntnx_service_groups_v2: + name: ansible-sg-{{ random_name }} + description: desc + tcp_services: + - start_port: 80 + end_port: 90 + register: result + +- name: Verify service group creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.ext_id is defined + fail_msg: Failed to create service group + success_msg: Successfully created service group + +- name: Set service group id + ansible.builtin.set_fact: + service_group_id: "{{ result.ext_id }}" + +- name: Create address group to be used in security rules + ntnx_address_groups_v2: + state: present + name: ansible-ag-{{ random_name }} + description: test-ansible-group-1-desc + ipv4_addresses: + - value: 10.1.1.0 + prefix_length: 24 + - value: 10.1.2.2 + 
prefix_length: 32 + register: result + +- name: Verify creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.changed == true + fail_msg: Unable to create address group + success_msg: Address group created successfully + +- name: Set address group id + ansible.builtin.set_fact: + address_group_ext_id: "{{ result.ext_id }}" + +################################################ Create tests ################################################ + +- name: Generate spec for basic application rule using check mode + ntnx_security_rules_v2: + name: "{{ rule_name_1 }}" + description: Ansible created rule + type: APPLICATION + policy_state: SAVE + scope: ALL_VPC + is_hitlog_enabled: true + is_ipv6_traffic_allowed: true + rules: + - description: Allow intra category group for cat1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category1 }}" + secured_group_action: ALLOW + - description: Allow to all destinations for cat 1 + type: APPLICATION + spec: + application_rule_spec: + dest_allow_spec: ALL + secured_group_category_references: + - "{{ category1 }}" + - description: Allow to all sources for cat 1 + type: APPLICATION + spec: + application_rule_spec: + src_allow_spec: ALL + secured_group_category_references: + - "{{ category1 }}" + register: result + check_mode: true + +- name: Verify spec creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response.name == "{{ rule_name_1 }}" + - result.response.description == "Ansible created rule" + - result.response.type == "APPLICATION" + - result.response.state == "SAVE" + - result.response.scope == "ALL_VPC" + - result.response.is_hitlog_enabled == true + - result.response.is_ipv6_traffic_allowed == true + - result.response.rules | length == 3 + - result.response.rules[0].description == "Allow intra category group for cat1" + - 
result.response.rules[0].type == "INTRA_GROUP" + - result.response.rules[0].spec.secured_group_category_references[0] == "{{ category1 }}" + - result.response.rules[0].spec.secured_group_action == "ALLOW" + - result.response.rules[1].description == "Allow to all destinations for cat 1" + - result.response.rules[1].type == "APPLICATION" + - result.response.rules[1].spec.dest_allow_spec == "ALL" + - result.response.rules[1].spec.secured_group_category_references[0] == "{{ category1 }}" + - result.response.rules[2].description == "Allow to all sources for cat 1" + - result.response.rules[2].type == "APPLICATION" + - result.response.rules[2].spec.src_allow_spec == "ALL" + - result.response.rules[2].spec.secured_group_category_references[0] == "{{ category1 }}" + fail_msg: Failed to create security rule + success_msg: Successfully created security rule + +- name: Create basic application security rule for certain VPC scope with intra group traffic not allowed + ntnx_security_rules_v2: + name: "{{ rule_name_1 }}" + description: Ansible created rule + type: APPLICATION + policy_state: ENFORCE + scope: VPC_LIST + vpc_references: + - "{{ vpc_id }}" + is_hitlog_enabled: true + is_ipv6_traffic_allowed: false + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category1 }}" + - "{{ category2 }}" + secured_group_action: DENY + - description: outbound1 + type: APPLICATION + spec: + application_rule_spec: + dest_allow_spec: ALL + secured_group_category_references: + - "{{ category1 }}" + - "{{ category2 }}" + is_all_protocol_allowed: true + - description: inbound2 + type: APPLICATION + spec: + application_rule_spec: + src_allow_spec: ALL + secured_group_category_references: + - "{{ category1 }}" + - "{{ category2 }}" + is_all_protocol_allowed: true + register: result + +- name: Create category list for asserts + ansible.builtin.set_fact: + category_list: ["{{ category1 }}", "{{ category2 }}"] 
+ +- name: Initialize indexes for inbound1, inbound2, and outbound1 + ansible.builtin.set_fact: + inbound1_index: -1 + inbound2_index: -1 + outbound1_index: -1 + +- name: Find index of 'inbound1' + ansible.builtin.set_fact: + inbound1_index: "{{ item_index }}" + when: item.description == "inbound1" + with_items: "{{ result.response.rules }}" + loop_control: + index_var: item_index + +- name: Find index of 'inbound2' + ansible.builtin.set_fact: + inbound2_index: "{{ item_index }}" + when: item.description == "inbound2" + with_items: "{{ result.response.rules }}" + loop_control: + index_var: item_index + +- name: Find index of 'outbound1' + ansible.builtin.set_fact: + outbound1_index: "{{ item_index }}" + when: item.description == "outbound1" + with_items: "{{ result.response.rules }}" + loop_control: + index_var: item_index + +- name: Verify creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.name == "{{ rule_name_1 }}" + - result.response.description == "Ansible created rule" + - result.response.type == "APPLICATION" + - result.response.state == "ENFORCE" + - result.response.scope == "VPC_LIST" + - result.response.vpc_references | length == 1 + - result.response.vpc_references[0] == "{{ vpc_id }}" + - result.response.is_hitlog_enabled == true + - result.response.is_ipv6_traffic_allowed == false + - result.response.rules | length == 3 + - result.response.rules[inbound1_index].description == "inbound1" + - result.response.rules[inbound1_index].type == "INTRA_GROUP" + - result.response.rules[inbound1_index].spec.secured_group_category_references[0] in category_list + - result.response.rules[inbound1_index].spec.secured_group_category_references[1] in category_list + - result.response.rules[inbound1_index].spec.secured_group_action == "DENY" + - 
result.response.rules[outbound1_index].description == "outbound1" + - result.response.rules[outbound1_index].type == "APPLICATION" + - result.response.rules[outbound1_index].spec.dest_allow_spec == "ALL" + - result.response.rules[outbound1_index].spec.secured_group_category_references[0] in category_list + - result.response.rules[outbound1_index].spec.secured_group_category_references[1] in category_list + - result.response.rules[outbound1_index].spec.is_all_protocol_allowed == true + - result.response.rules[inbound2_index].description == "inbound2" + - result.response.rules[inbound2_index].type == "APPLICATION" + - result.response.rules[inbound2_index].spec.src_allow_spec == "ALL" + - result.response.rules[inbound2_index].spec.secured_group_category_references[0] in category_list + - result.response.rules[inbound2_index].spec.secured_group_category_references[1] in category_list + - result.response.rules[inbound2_index].spec.is_all_protocol_allowed == true + fail_msg: Failed to create security rule + success_msg: Successfully created security rule + +- name: Delete created security rule + ntnx_security_rules_v2: + state: absent + ext_id: "{{ result.ext_id }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete security rule + success_msg: Successfully deleted security rule + +- name: Create security rule with multienvisolation rule + ntnx_security_rules_v2: + name: "{{ rule_name_3 }}" + description: Ansible created rule + type: ISOLATION + policy_state: ENFORCE + scope: ALL_VLAN + is_hitlog_enabled: true + is_ipv6_traffic_allowed: false + rules: + - description: multienvisolation rule + type: MULTI_ENV_ISOLATION + spec: + multi_env_isolation_rule_spec: + spec: + all_to_all_isolation_group: + isolation_groups: + - group_category_references: + - "{{ category1 }}" + - group_category_references: + - "{{ 
category2 }}" + register: result + +- name: Create default rule map + ansible.builtin.set_fact: + rule_map: {} + +- name: Since rules are unordered, create map of unique description to rules config + ansible.builtin.set_fact: + rule_map: "{{ rule_map | default({}) | combine({item.description: item}) }}" + loop: "{{ result.response.rules }}" + +- name: Verify creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.name == "{{ rule_name_3 }}" + - result.response.description == "Ansible created rule" + - result.response.type == "ISOLATION" + - result.response.state == "ENFORCE" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.is_ipv6_traffic_allowed == false + - result.response.rules | length == 1 + - result.response.rules[0].description == "multienvisolation rule" + - result.response.rules[0].type == "MULTI_ENV_ISOLATION" + - result.response.rules[0].spec.spec.isolation_groups[0].group_category_references[0] in category_list + - result.response.rules[0].spec.spec.isolation_groups[1].group_category_references[0] in category_list + fail_msg: Failed to create security rule + success_msg: Successfully created security rule + +- name: Set policy external ID + ansible.builtin.set_fact: + policy_ext_id1: "{{ result.ext_id }}" + +- name: Create policy with all vlan scope and using certain sources and destinations + ntnx_security_rules_v2: + name: "{{ rule_name_2 }}" + description: Ansible created rule + type: APPLICATION + policy_state: MONITOR + scope: ALL_VLAN + is_hitlog_enabled: true + is_ipv6_traffic_allowed: true + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + secured_group_action: DENY + - description: 
inbound2 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_category_references: + - "{{ category3 }}" + service_group_references: + - "{{ service_group_id }}" + - description: inbound3 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_address_group_references: + - "{{ address_group_ext_id }}" + tcp_services: + - start_port: 80 + end_port: 90 + - start_port: 100 + end_port: 110 + udp_services: + - start_port: 120 + end_port: 130 + - start_port: 140 + end_port: 150 + icmp_services: + - type: 3 + code: 1 + - type: 4 + code: 2 + - description: inbound4 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_subnet: + value: 10.0.0.0 + prefix_length: 24 + service_group_references: + - "{{ service_group_id }}" + - description: outbound1 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_category_references: + - "{{ category5 }}" + is_all_protocol_allowed: true + - description: outbound2 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_subnet: + value: 10.0.1.1 + prefix_length: 32 + is_all_protocol_allowed: true + - description: outbound3 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_address_group_references: + - "{{ address_group_ext_id }}" + is_all_protocol_allowed: true + register: result + +- name: Create default rule map + ansible.builtin.set_fact: + rule_map: {} + +- name: Since rules are unordered, create map of unique description to rules config + ansible.builtin.set_fact: + rule_map: "{{ rule_map | default({}) | combine({item.description: item}) }}" + loop: "{{ result.response.rules }}" + +- name: Verify creation status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.name == "{{ rule_name_2 }}" + - result.response.description == "Ansible created rule" + - result.response.type == "APPLICATION" + - result.response.state == "MONITOR" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.is_ipv6_traffic_allowed == true + - result.response.rules | length == 7 + - rule_map["inbound1"].type == "INTRA_GROUP" + - rule_map["inbound1"].description == "inbound1" + - rule_map["inbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["inbound1"].spec.secured_group_action == "DENY" + - rule_map["inbound2"].type == "APPLICATION" + - rule_map["inbound2"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["inbound2"].spec.src_category_references[0] == "{{ category3 }}" + - rule_map["inbound2"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["inbound3"].type == "APPLICATION" + - rule_map["inbound3"].spec.src_address_group_references[0] == "{{ address_group_ext_id }}" + - rule_map["inbound3"].spec.tcp_services[0].start_port == 80 + - rule_map["inbound3"].spec.tcp_services[0].end_port == 90 + - rule_map["inbound3"].spec.tcp_services[1].start_port == 100 + - rule_map["inbound3"].spec.tcp_services[1].end_port == 110 + - rule_map["inbound3"].spec.udp_services[0].start_port == 120 + - rule_map["inbound3"].spec.udp_services[0].end_port == 130 + - rule_map["inbound3"].spec.udp_services[1].start_port == 140 + - rule_map["inbound3"].spec.udp_services[1].end_port == 150 + - rule_map["inbound3"].spec.icmp_services[0].type == 3 + - rule_map["inbound3"].spec.icmp_services[0].code == 1 + - rule_map["inbound3"].spec.icmp_services[1].type == 4 + - rule_map["inbound3"].spec.icmp_services[1].code == 2 + - 
rule_map["inbound4"].spec.src_subnet.value == "10.0.0.0" + - rule_map["inbound4"].spec.src_subnet.prefix_length == 24 + - rule_map["inbound4"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["inbound4"].type == "APPLICATION" + - rule_map["inbound4"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].type == "APPLICATION" + - rule_map["outbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].spec.dest_category_references[0] == "{{ category5 }}" + - rule_map["outbound1"].spec.is_all_protocol_allowed == true + - rule_map["outbound2"].type == "APPLICATION" + - rule_map["outbound2"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound2"].spec.dest_subnet.value == "10.0.1.1" + - rule_map["outbound2"].spec.dest_subnet.prefix_length == 32 + - rule_map["outbound2"].spec.is_all_protocol_allowed == true + - rule_map["outbound3"].type == "APPLICATION" + - rule_map["outbound3"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound3"].spec.dest_address_group_references[0] == "{{ address_group_ext_id }}" + - rule_map["outbound3"].spec.is_all_protocol_allowed == true + fail_msg: Failed to create security rule + success_msg: Successfully created security rule + +- name: Set policy external ID + ansible.builtin.set_fact: + policy_ext_id2: "{{ result.ext_id }}" + +######################################### Update tests ######################################### + +- name: Generate spec to update the policy using check mode + check_mode: true + ntnx_security_rules_v2: + ext_id: "{{ policy_ext_id2 }}" + name: "{{ rule_name_2 }}-updated" + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + secured_group_action: DENY + - description: inbound2_updated + type: APPLICATION + spec: + application_rule_spec: + 
secured_group_category_references: + - "{{ category4 }}" + src_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: inbound3_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_address_group_references: + - "{{ address_group_ext_id }}" + tcp_services: + - start_port: 85 + end_port: 90 + udp_services: + - start_port: 125 + end_port: 130 + icmp_services: + - type: 3 + code: 2 + - description: inbound4_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_subnet: + value: 10.0.1.0 + prefix_length: 24 + service_group_references: + - "{{ service_group_id }}" + - description: outbound1 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: outbound2_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_subnet: + value: 10.0.1.2 + prefix_length: 32 + is_all_protocol_allowed: true + - description: outbound3 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_address_group_references: + - "{{ address_group_ext_id }}" + is_all_protocol_allowed: true + register: result + +- name: Verify update spec + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.ext_id == "{{ policy_ext_id2 }}" + - result.response.name == "{{ rule_name_2 }}-updated" + - result.response.description == "Ansible created rule" + - result.response.type == "APPLICATION" + - result.response.state == "MONITOR" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.is_ipv6_traffic_allowed == 
true + - result.response.rules | length == 7 + - result.response.rules[0].description == "inbound1" + - result.response.rules[0].type == "INTRA_GROUP" + - result.response.rules[0].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[0].spec.secured_group_action == "DENY" + - result.response.rules[1].description == "inbound2_updated" + - result.response.rules[1].type == "APPLICATION" + - result.response.rules[1].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[1].spec.src_category_references[0] == "{{ category5 }}" + - result.response.rules[1].spec.service_group_references[0] == "{{ service_group_id }}" + - result.response.rules[2].description == "inbound3_updated" + - result.response.rules[2].type == "APPLICATION" + - result.response.rules[2].spec.src_address_group_references[0] == "{{ address_group_ext_id }}" + - result.response.rules[2].spec.tcp_services[0].start_port == 85 + - result.response.rules[2].spec.tcp_services[0].end_port == 90 + - result.response.rules[2].spec.udp_services[0].start_port == 125 + - result.response.rules[2].spec.udp_services[0].end_port == 130 + - result.response.rules[2].spec.icmp_services[0].type == 3 + - result.response.rules[2].spec.icmp_services[0].code == 2 + - result.response.rules[3].description == "inbound4_updated" + - result.response.rules[3].spec.src_subnet.value == "10.0.1.0" + - result.response.rules[3].spec.src_subnet.prefix_length == 24 + - result.response.rules[3].spec.service_group_references[0] == "{{ service_group_id }}" + - result.response.rules[3].type == "APPLICATION" + - result.response.rules[3].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[4].description == "outbound1" + - result.response.rules[4].type == "APPLICATION" + - result.response.rules[4].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[4].spec.dest_category_references[0] == "{{ category5 }}" + - 
result.response.rules[4].spec.service_group_references[0] == "{{ service_group_id }}" + - result.response.rules[5].description == "outbound2_updated" + - result.response.rules[5].type == "APPLICATION" + - result.response.rules[5].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[5].spec.dest_subnet.value == "10.0.1.2" + - result.response.rules[5].spec.dest_subnet.prefix_length == 32 + - result.response.rules[5].spec.is_all_protocol_allowed == true + - result.response.rules[6].description == "outbound3" + - result.response.rules[6].type == "APPLICATION" + - result.response.rules[6].spec.secured_group_category_references[0] == "{{ category4 }}" + - result.response.rules[6].spec.dest_address_group_references[0] == "{{ address_group_ext_id }}" + - result.response.rules[6].spec.is_all_protocol_allowed == true + fail_msg: Failed to update security rule + success_msg: Successfully updated security rule + +- name: Update policy rules and config + ntnx_security_rules_v2: + ext_id: "{{ policy_ext_id2 }}" + name: "{{ rule_name_2 }}-updated" + description: Ansible created rule updated + type: APPLICATION + policy_state: ENFORCE + scope: ALL_VLAN + is_hitlog_enabled: false + is_ipv6_traffic_allowed: false + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + secured_group_action: DENY + - description: inbound2_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: inbound3_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_address_group_references: + - "{{ address_group_ext_id }}" + tcp_services: + - start_port: 85 + end_port: 90 + udp_services: + - start_port: 125 + end_port: 130 
+ icmp_services: + - type: 3 + code: 2 + - description: inbound4_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_subnet: + value: 10.0.1.0 + prefix_length: 24 + service_group_references: + - "{{ service_group_id }}" + - description: outbound1 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: outbound2_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_subnet: + value: 10.0.1.2 + prefix_length: 32 + is_all_protocol_allowed: true + - description: outbound3 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_address_group_references: + - "{{ address_group_ext_id }}" + is_all_protocol_allowed: true + register: result + +- name: Create default rule map + ansible.builtin.set_fact: + rule_map: {} + +- name: Since rules are unordered, create map of unique description to rules config + ansible.builtin.set_fact: + rule_map: "{{ rule_map | default({}) | combine({item.description: item}) }}" + loop: "{{ result.response.rules }}" + +- name: Verify updated policy status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.ext_id == result.response.ext_id + - result.response.name == "{{ rule_name_2 }}-updated" + - result.response.description == "Ansible created rule updated" + - result.response.type == "APPLICATION" + - result.response.state == "ENFORCE" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == false + - result.response.is_ipv6_traffic_allowed == false + - result.response.rules | length == 7 + - 
rule_map["inbound1"].type == "INTRA_GROUP" + - rule_map["inbound1"].description == "inbound1" + - rule_map["inbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["inbound1"].spec.secured_group_action == "DENY" + - rule_map["inbound2_updated"].type == "APPLICATION" + - rule_map["inbound2_updated"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["inbound2_updated"].spec.src_category_references[0] == "{{ category5 }}" + - rule_map["inbound2_updated"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["inbound3_updated"].type == "APPLICATION" + - rule_map["inbound3_updated"].spec.src_address_group_references[0] == "{{ address_group_ext_id }}" + - rule_map["inbound3_updated"].spec.tcp_services[0].start_port == 85 + - rule_map["inbound3_updated"].spec.tcp_services[0].end_port == 90 + - rule_map["inbound3_updated"].spec.udp_services[0].start_port == 125 + - rule_map["inbound3_updated"].spec.udp_services[0].end_port == 130 + - rule_map["inbound3_updated"].spec.icmp_services[0].type == 3 + - rule_map["inbound3_updated"].spec.icmp_services[0].code == 2 + - rule_map["inbound4_updated"].spec.src_subnet.value == "10.0.1.0" + - rule_map["inbound4_updated"].spec.src_subnet.prefix_length == 24 + - rule_map["inbound4_updated"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["inbound4_updated"].type == "APPLICATION" + - rule_map["inbound4_updated"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].type == "APPLICATION" + - rule_map["outbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].spec.dest_category_references[0] == "{{ category5 }}" + - rule_map["outbound1"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["outbound2_updated"].type == "APPLICATION" + - rule_map["outbound2_updated"].spec.secured_group_category_references[0] == "{{ category4 }}" + - 
rule_map["outbound2_updated"].spec.dest_subnet.value == "10.0.1.2" + - rule_map["outbound2_updated"].spec.dest_subnet.prefix_length == 32 + - rule_map["outbound2_updated"].spec.is_all_protocol_allowed == true + - rule_map["outbound3"].type == "APPLICATION" + - rule_map["outbound3"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound3"].spec.dest_address_group_references[0] == "{{ address_group_ext_id }}" + - rule_map["outbound3"].spec.is_all_protocol_allowed == true + fail_msg: Failed to update security rule + success_msg: Successfully updated security rule + +- name: Update policy rules and config with same value to check idempotency + ntnx_security_rules_v2: + ext_id: "{{ policy_ext_id2 }}" + name: "{{ rule_name_2 }}-updated" + description: Ansible created rule updated + type: APPLICATION + policy_state: ENFORCE + scope: ALL_VLAN + is_hitlog_enabled: false + is_ipv6_traffic_allowed: false + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + secured_group_action: DENY + - description: inbound2_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: inbound3_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_address_group_references: + - "{{ address_group_ext_id }}" + tcp_services: + - start_port: 85 + end_port: 90 + udp_services: + - start_port: 125 + end_port: 130 + icmp_services: + - type: 3 + code: 2 + - description: inbound4_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_subnet: + value: 10.0.1.0 + prefix_length: 24 + service_group_references: + - "{{ service_group_id }}" + - description: 
outbound1 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_category_references: + - "{{ category5 }}" + service_group_references: + - "{{ service_group_id }}" + - description: outbound2_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_subnet: + value: 10.0.1.2 + prefix_length: 32 + is_all_protocol_allowed: true + - description: outbound3 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_address_group_references: + - "{{ address_group_ext_id }}" + is_all_protocol_allowed: true + register: result + +- name: Verify skip status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." + - result.skipped == true + fail_msg: Module failed to skip the update + success_msg: "Pass : return as expected" + +- name: Update the policy state + ntnx_security_rules_v2: + ext_id: "{{ policy_ext_id2 }}" + policy_state: MONITOR + register: result + +- name: Verify policy state update + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.state == "MONITOR" + - result.ext_id is defined + - result.task_ext_id is defined + - result.response.rules | length == 7 + fail_msg: Failed to update policy state + success_msg: Successfully updated policy state + +- name: Update some rules and delete some rules from policy + ntnx_security_rules_v2: + ext_id: "{{ policy_ext_id2 }}" + rules: + - description: inbound1 + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + secured_group_action: ALLOW + - description: inbound4_updated + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + src_subnet: + value: 10.0.1.0 + prefix_length: 24 + 
service_group_references: + - "{{ service_group_id }}" + - description: outbound1 + type: APPLICATION + spec: + application_rule_spec: + secured_group_category_references: + - "{{ category4 }}" + dest_subnet: + value: 10.0.1.0 + prefix_length: 24 + is_all_protocol_allowed: true + register: result + +- name: Create default rule map + ansible.builtin.set_fact: + rule_map: {} + +- name: Since rules are unordered, create map of unique description to rules config + ansible.builtin.set_fact: + rule_map: "{{ rule_map | default({}) | combine({item.description: item}) }}" + loop: "{{ result.response.rules }}" + +- name: Verify update status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.rules | length == 3 + - rule_map["inbound1"].type == "INTRA_GROUP" + - rule_map["inbound1"].description == "inbound1" + - rule_map["inbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["inbound1"].spec.secured_group_action == "ALLOW" + - rule_map["inbound4_updated"].type == "APPLICATION" + - rule_map["inbound4_updated"].spec.src_subnet.value == "10.0.1.0" + - rule_map["inbound4_updated"].spec.src_subnet.prefix_length == 24 + - rule_map["inbound4_updated"].spec.service_group_references[0] == "{{ service_group_id }}" + - rule_map["inbound4_updated"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].type == "APPLICATION" + - rule_map["outbound1"].spec.secured_group_category_references[0] == "{{ category4 }}" + - rule_map["outbound1"].spec.dest_subnet.value == "10.0.1.0" + - rule_map["outbound1"].spec.dest_subnet.prefix_length == 24 + - rule_map["outbound1"].spec.is_all_protocol_allowed == true + fail_msg: Failed to update security rule + success_msg: Successfully updated security rule + +######################################### Info tests ######################################### + +- name: Get all policies + ntnx_security_rules_info_v2: + register: result + +- 
name: Verify listing status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response | length > 1 + fail_msg: Failed to list security rules + success_msg: Successfully listed security rules + +- name: Get particular policy + ntnx_security_rules_info_v2: + ext_id: "{{ policy_ext_id2 }}" + register: result + +- name: Verify get status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response.ext_id == "{{ policy_ext_id2 }}" + - result.response.name == "{{ rule_name_2 }}-updated" + fail_msg: Failed to get security rule + success_msg: Successfully got security rule + +- name: Fetch certain policy using filters + ntnx_security_rules_info_v2: + filter: name eq '{{ rule_name_2 }}-updated' + register: result + +- name: Verify filter status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response | length == 1 + - result.response[0].name == "{{ rule_name_2 }}-updated" + - result.response[0].ext_id == "{{ policy_ext_id2 }}" + fail_msg: Failed to filter security rules + success_msg: Successfully filtered security rules + +- name: Fetch policies using limit + ntnx_security_rules_info_v2: + limit: 1 + register: result + +- name: Verify limit status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.response | length == 1 + fail_msg: Failed to limit security rules + success_msg: Successfully limited security rules + +######################################### Delete tests ######################################### + +- name: Delete security rule 1 + ntnx_security_rules_v2: + state: absent + ext_id: "{{ policy_ext_id1 }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete security rule + success_msg: 
Successfully deleted security rule + +- name: Delete security rule 2 + ntnx_security_rules_v2: + state: absent + ext_id: "{{ policy_ext_id2 }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete security rule + success_msg: Successfully deleted security rule + +######################################### Delete test setup created entities ######################################### + +- name: Delete service group + ntnx_service_groups_v2: + state: absent + ext_id: "{{ service_group_id }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete service group + success_msg: Successfully deleted service group + +- name: Delete address group + ntnx_address_groups_v2: + state: absent + ext_id: "{{ address_group_ext_id }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete address group + success_msg: Successfully deleted address group + +- name: Delete vpc + ntnx_vpcs_v2: + state: absent + ext_id: "{{ vpc_id }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete vpc + success_msg: Successfully deleted vpc + +- name: Delete all categories + ntnx_categories_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: + [ + "{{ category1 }}", + "{{ category2 }}", + "{{ category3 }}", + "{{ category4 }}", + "{{ category5 }}", + ] + +- name: Verify deletion status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == 
false + fail_msg: Failed to delete category + success_msg: Successfully deleted category + loop: "{{ result.results }}" diff --git a/tests/integration/targets/ntnx_security_rules_v2/tasks/isolation_rules_tests.yml b/tests/integration/targets/ntnx_security_rules_v2/tasks/isolation_rules_tests.yml new file mode 100644 index 000000000..481ade230 --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/tasks/isolation_rules_tests.yml @@ -0,0 +1,242 @@ +--- +- name: Start testing isolation type security rules + ansible.builtin.debug: + msg: Start testing isolation type security rules + +- name: Generate random names for security rules creation + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set prefix name + ansible.builtin.set_fact: + prefix: ansible-nsr- + +- name: Set security rules name + ansible.builtin.set_fact: + rule_name_1: "{{ prefix }}{{ random_name }}1" + +################################################ Test Setup ################################################ + +- name: Create categories for tests + ntnx_categories_v2: + key: AnsibleSecurityRuleTest + value: AnsibleSecurityRuleTest{{ random_name }}{{ item }} + description: ansible test + register: results + loop: [0, 1, 2, 3] + +- name: Verify category creation status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - item.ext_id is defined + fail_msg: Failed to create category + success_msg: Successfully created category + loop: "{{ results.results }}" + +- name: Set categories to be used in security rules + ansible.builtin.set_fact: + category1: "{{ results.results[0].response.ext_id }}" + category2: "{{ results.results[1].response.ext_id }}" + category3: "{{ results.results[2].response.ext_id }}" + category4: "{{ results.results[3].response.ext_id }}" + +################################################ Create tests 
################################################ + +- name: Generate spec for isolation type security rule creation using check mode + ntnx_security_rules_v2: + name: "{{ rule_name_1 }}" + description: Ansible created rule + type: ISOLATION + policy_state: ENFORCE + scope: ALL_VLAN + is_hitlog_enabled: true + rules: + - description: Isolate group of categories + type: TWO_ENV_ISOLATION + spec: + two_env_isolation_rule_spec: + first_isolation_group: + - "{{ category1 }}" + - "{{ category2 }}" + second_isolation_group: + - "{{ category3 }}" + - "{{ category4 }}" + register: result + check_mode: true + +- name: Verify generated spec + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response.name == rule_name_1 + - result.response.description == "Ansible created rule" + - result.response.type == "ISOLATION" + - result.response.state == "ENFORCE" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.rules[0].description == "Isolate group of categories" + - result.response.rules[0].type == "TWO_ENV_ISOLATION" + - result.response.rules[0].spec.first_isolation_group[0] == category1 + - result.response.rules[0].spec.first_isolation_group[1] == category2 + - result.response.rules[0].spec.second_isolation_group[0] == category3 + - result.response.rules[0].spec.second_isolation_group[1] == category4 + fail_msg: Failed to generate spec for isolation type security rule creation + success_msg: Successfully generated spec for isolation type security rule creation + +- name: Create isolation type security rule + ntnx_security_rules_v2: + name: "{{ rule_name_1 }}" + description: Ansible created rule + type: ISOLATION + policy_state: ENFORCE + scope: ALL_VLAN + is_hitlog_enabled: true + rules: + - description: Isolate group of categories + type: TWO_ENV_ISOLATION + spec: + two_env_isolation_rule_spec: + first_isolation_group: + - "{{ category1 }}" + - 
"{{ category2 }}" + second_isolation_group: + - "{{ category3 }}" + - "{{ category4 }}" + register: result + +- name: Verify creation status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response is defined + - result.ext_id is defined + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + - result.response.name == rule_name_1 + - result.response.description == "Ansible created rule" + - result.response.type == "ISOLATION" + - result.response.state == "ENFORCE" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.rules[0].description == "Isolate group of categories" + - result.response.rules[0].type == "TWO_ENV_ISOLATION" + - result.response.rules[0].spec.first_isolation_group[0] == category1 + - result.response.rules[0].spec.first_isolation_group[1] == category2 + - result.response.rules[0].spec.second_isolation_group[0] == category3 + - result.response.rules[0].spec.second_isolation_group[1] == category4 + fail_msg: Failed to create isolation type security rule + success_msg: Successfully created isolation type security rule + +- name: Set policy ext id + ansible.builtin.set_fact: + policy_ext_id1: "{{ result.ext_id }}" + +################################################ Update tests ################################################ + +- name: Update isolation type security rule + ntnx_security_rules_v2: + state: present + ext_id: "{{ policy_ext_id1 }}" + name: "{{ rule_name_1 }}-updated" + policy_state: MONITOR + is_hitlog_enabled: false + rules: + - description: Isolate group of categories -- updated + type: TWO_ENV_ISOLATION + spec: + two_env_isolation_rule_spec: + first_isolation_group: + - "{{ category1 }}" + - "{{ category3 }}" + second_isolation_group: + - "{{ category2 }}" + - "{{ category4 }}" + register: result + +- name: Verify update status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == 
false + - result.response.name == "{{ rule_name_1 }}-updated" + - result.response.state == "MONITOR" + - result.response.is_hitlog_enabled == false + - result.response.rules[0].description == "Isolate group of categories -- updated" + - result.response.rules[0].type == "TWO_ENV_ISOLATION" + - result.response.rules[0].spec.first_isolation_group[0] == category1 + - result.response.rules[0].spec.first_isolation_group[1] == category3 + - result.response.rules[0].spec.second_isolation_group[0] == category2 + - result.response.rules[0].spec.second_isolation_group[1] == category4 + fail_msg: Failed to update isolation type security rule + success_msg: Successfully updated isolation type security rule + +- name: Update security rule with same values to check idempotency + ntnx_security_rules_v2: + state: present + ext_id: "{{ policy_ext_id1 }}" + name: "{{ rule_name_1 }}-updated" + policy_state: MONITOR + is_hitlog_enabled: false + rules: + - description: Isolate group of categories -- updated + type: TWO_ENV_ISOLATION + spec: + two_env_isolation_rule_spec: + first_isolation_group: + - "{{ category1 }}" + - "{{ category3 }}" + second_isolation_group: + - "{{ category2 }}" + - "{{ category4 }}" + register: result + +- name: Verify skip status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ - result.skipped == true + fail_msg: Module failed to skip the update + success_msg: "Pass : return as expected" + +######################################### Delete tests ######################################### + +- name: Delete security rule + ntnx_security_rules_v2: + state: absent + ext_id: "{{ policy_ext_id1 }}" + register: result + +- name: Verify deletion status + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.status == "SUCCEEDED" + fail_msg: Failed to delete security rule + success_msg: Successfully deleted security rule + +######################################### Delete test setup created entities ######################################### + +- name: Delete all categories + ntnx_categories_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: + ["{{ category1 }}", "{{ category2 }}", "{{ category3 }}", "{{ category4 }}"] + +- name: Verify deletion status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + fail_msg: Failed to delete category + success_msg: Successfully deleted category + loop: "{{ result.results }}" diff --git a/tests/integration/targets/ntnx_security_rules_v2/tasks/main.yml b/tests/integration/targets/ntnx_security_rules_v2/tasks/main.yml new file mode 100644 index 000000000..4b6691650 --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import tests for application rule CRUD and security rules info tests + ansible.builtin.import_tasks: application_rules_and_info_tests.yml + - name: Import create, update and delete tests for isolation type security rules + ansible.builtin.import_tasks: isolation_rules_tests.yml + - name: Import tests for 
quarantine type security rules + ansible.builtin.import_tasks: quarantine_rules_tests.yml diff --git a/tests/integration/targets/ntnx_security_rules_v2/tasks/quarantine_rules_tests.yml b/tests/integration/targets/ntnx_security_rules_v2/tasks/quarantine_rules_tests.yml new file mode 100644 index 000000000..0b8f70571 --- /dev/null +++ b/tests/integration/targets/ntnx_security_rules_v2/tasks/quarantine_rules_tests.yml @@ -0,0 +1,78 @@ +--- +- name: Start testing quarantine security rules + ansible.builtin.debug: + msg: Start testing quarantine security rules + +- name: Generate random names for security rules creation + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set prefix name + ansible.builtin.set_fact: + prefix: ansible-nsr- + +- name: Set security rules name + ansible.builtin.set_fact: + rule_name_1: "{{ prefix }}{{ random_name }}1" + +################################################ Create tests ################################################ + +- name: Generate spec for quarantine rule using check mode + ntnx_security_rules_v2: + name: "{{ rule_name_1 }}" + description: Ansible created rule + type: QUARANTINE + policy_state: SAVE + scope: ALL_VLAN + is_hitlog_enabled: true + rules: + - description: Deny all outbound traffic + type: QUARANTINE + spec: + application_rule_spec: + secured_group_category_references: + - category1 + dest_allow_spec: NONE + - description: Deny all inbound traffic + type: QUARANTINE + spec: + application_rule_spec: + secured_group_category_references: + - category1 + src_allow_spec: NONE + - description: Deny intragroup traffic + type: INTRA_GROUP + spec: + intra_entity_group_rule_spec: + secured_group_category_references: + - category1 + secured_group_action: DENY + register: result + check_mode: true + +- name: Verify spec for quarantine rule + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false 
+ - result.response.name == "{{ rule_name_1 }}" + - result.response.description == "Ansible created rule" + - result.response.type == "QUARANTINE" + - result.response.state == "SAVE" + - result.response.scope == "ALL_VLAN" + - result.response.is_hitlog_enabled == true + - result.response.rules | length == 3 + - result.response.rules[0].description == "Deny all outbound traffic" + - result.response.rules[0].type == "QUARANTINE" + - result.response.rules[0].spec.secured_group_category_references[0] == "category1" + - result.response.rules[0].spec.dest_allow_spec == "NONE" + - result.response.rules[1].description == "Deny all inbound traffic" + - result.response.rules[1].type == "QUARANTINE" + - result.response.rules[1].spec.secured_group_category_references[0] == "category1" + - result.response.rules[1].spec.src_allow_spec == "NONE" + - result.response.rules[2].description == "Deny intragroup traffic" + - result.response.rules[2].type == "INTRA_GROUP" + - result.response.rules[2].spec.secured_group_category_references[0] == "category1" + - result.response.rules[2].spec.secured_group_action == "DENY" + fail_msg: Failed to create security rule create spec + success_msg: Successfully created spec for creating security rule diff --git a/tests/integration/targets/ntnx_service_groups/aliases b/tests/integration/targets/ntnx_service_groups/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_service_groups/aliases +++ b/tests/integration/targets/ntnx_service_groups/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_service_groups/tasks/create.yml b/tests/integration/targets/ntnx_service_groups/tasks/create.yml index 47b8759cc..9c7c13f6c 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/create.yml @@ -1,10 +1,11 @@ --- -- debug: +- name: Start testing ntnx_service_groups creation + ansible.builtin.debug: msg: Start 
testing ntnx_service_groups creation -- name: create tcp service group +- name: Create tcp service group ntnx_service_groups: - name: tcp_srvive_group + name: tcp_service_group desc: desc service_details: tcp: @@ -15,14 +16,14 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: Getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -38,12 +39,13 @@ fail_msg: "Fail: Unable to create tcp service group " success_msg: "Pass: tcp service group created successfully" -- set_fact: +- name: Adding service group uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.response.uuid ] }}" ################################################################ -- name: create udp service group +- name: Create udp service group ntnx_service_groups: - name: udp_srvive_group + name: udp_service_group desc: desc service_details: udp: @@ -54,14 +56,14 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: Getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -77,12 +79,13 @@ fail_msg: "Fail: Unable to create udp service group " success_msg: "Pass: udp service group created successfully" -- set_fact: +- name: Adding service group uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.response.uuid ] }}" 
################################################################ -- name: create icmp with service group +- name: Create icmp with service group ntnx_service_groups: - name: icmp_srvive_group + name: icmp_service_group desc: desc service_details: icmp: @@ -93,14 +96,14 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: Getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -112,12 +115,13 @@ fail_msg: "Fail: Unable to create icmp service group " success_msg: "Pass: icmp service group created successfully" -- set_fact: +- name: Adding service group uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.response.uuid ] }}" ################################################################ -- name: create service group with tcp and udp and icmp +- name: Create service group with tcp and udp and icmp ntnx_service_groups: - name: app_srvive_group + name: app_service_group desc: desc service_details: tcp: @@ -126,18 +130,18 @@ - "10-50" - "60-90" - "99" - any_icmp: True + any_icmp: true register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: Getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -152,11 +156,12 @@ - result.response.service_group.service_list[1].udp_port_range_list[2].start_port == 99 - 
result.response.service_group.service_list[1].udp_port_range_list[2].end_port == 99 - result.response.service_group.service_list[2].protocol == "ICMP" - - result.response.service_group.service_list[2].icmp_type_code_list == [] + - result.response.service_group.service_list[2].icmp_type_code_list == [{}] fail_msg: "Fail: Unable to create tcp service group " success_msg: "Pass: tcp service group created successfully" -- set_fact: +- name: Adding service group uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.response.uuid ] }}" ################################################################ - name: Delete all created service groups @@ -165,10 +170,10 @@ service_group_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true @@ -176,5 +181,6 @@ fail_msg: "unable to delete all created service groups" success_msg: "All service groups deleted successfully" -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_service_groups/tasks/main.yml b/tests/integration/targets/ntnx_service_groups/tasks/main.yml index c9939d444..6eed15801 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/main.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "update.yml" + - name: Import info.yml + 
ansible.builtin.import_tasks: "create.yml" + - name: Import update.yml + ansible.builtin.import_tasks: "update.yml" diff --git a/tests/integration/targets/ntnx_service_groups/tasks/update.yml b/tests/integration/targets/ntnx_service_groups/tasks/update.yml index 2845caa71..316c528aa 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/update.yml @@ -1,6 +1,6 @@ --- -- name: create tcp service group +- name: Create tcp service group ntnx_service_groups: name: tcp_srvive_group desc: desc @@ -14,8 +14,8 @@ register: service_group ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - service_group.response is defined - service_group.failed == false @@ -25,10 +25,11 @@ fail_msg: "Fail: Unable to create tcp service group " success_msg: "Pass: tcp service group created successfully" -- set_fact: +- name: Adding service group uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ service_group.service_group_uuid ] }}" ################################################################ -- name: update tcp service group name and description and other protocols +- name: Update tcp service group name and description and other protocols ntnx_service_groups: service_group_uuid: "{{service_group.service_group_uuid}}" name: updated_name @@ -42,14 +43,14 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: Getting particular service_group using uuid ntnx_service_groups_info: service_group_uuid: '{{ result.service_group_uuid }}' register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -65,7 +66,7 @@ fail_msg: "Fail: Unable to update tcp service group " success_msg: "Pass: tcp service group update successfully" 
################################################################ -- name: update tcp service group with same values +- name: Update tcp service group with same values ntnx_service_groups: service_group_uuid: "{{service_group.service_group_uuid}}" name: updated_name @@ -79,8 +80,8 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -94,10 +95,10 @@ service_group_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed is defined - result.changed == true diff --git a/tests/integration/targets/ntnx_service_groups_info/aliases b/tests/integration/targets/ntnx_service_groups_info/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_service_groups_info/aliases +++ b/tests/integration/targets/ntnx_service_groups_info/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_service_groups_info/tasks/info.yml b/tests/integration/targets/ntnx_service_groups_info/tasks/info.yml index e621973ec..cab6c0140 100644 --- a/tests/integration/targets/ntnx_service_groups_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_service_groups_info/tasks/info.yml @@ -1,14 +1,15 @@ --- -- debug: - msg: "start ntnx_service_groups_info tests" +- name: Start ntnx_service_groups_info tests + ansible.builtin.debug: + msg: "Start ntnx_service_groups_info tests" -- name: test getting all service groups +- name: Test getting all service groups ntnx_service_groups_info: register: service_groups ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - service_groups.response is defined - service_groups.changed == false @@ -26,8 +27,8 @@ sort_attribute: 
"name" register: result -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -37,7 +38,7 @@ fail_msg: "Unable to list all service groups using length, offset, sort order and name sort attribute" success_msg: "service groups listed successfully using length, offset, sort order and name sort attribute" ################################################################ -- name: test getting particular service_group using filter +- name: Test getting particular service_group using filter ntnx_service_groups_info: filter: name: "{{ service_groups.response.entities[0].service_group.name }}" @@ -45,8 +46,8 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -57,14 +58,14 @@ fail_msg: "Unable to get particular service_group" success_msg: "service_group info obtained successfully" ################################################################ -- name: test getting particular service_group using uuid +- name: Test getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.response.entities[0].uuid }}' + service_group_uuid: "{{ result.response.entities[0].uuid }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false diff --git a/tests/integration/targets/ntnx_service_groups_info/tasks/main.yml b/tests/integration/targets/ntnx_service_groups_info/tasks/main.yml index 3364b30c6..89b68cd04 100644 --- a/tests/integration/targets/ntnx_service_groups_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_service_groups_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: 
group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_service_groups_v2/aliases b/tests/integration/targets/ntnx_service_groups_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_service_groups_v2/meta/main.yml b/tests/integration/targets/ntnx_service_groups_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_service_groups_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_service_groups_v2/tasks/main.yml b/tests/integration/targets/ntnx_service_groups_v2/tasks/main.yml new file mode 100644 index 000000000..a1b35babc --- /dev/null +++ b/tests/integration/targets/ntnx_service_groups_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import service_groups.yml + ansible.builtin.import_tasks: service_groups.yml diff --git a/tests/integration/targets/ntnx_service_groups_v2/tasks/service_groups.yml b/tests/integration/targets/ntnx_service_groups_v2/tasks/service_groups.yml new file mode 100644 index 000000000..21df51f4d --- /dev/null +++ b/tests/integration/targets/ntnx_service_groups_v2/tasks/service_groups.yml @@ -0,0 +1,385 @@ +--- +- name: Start testing ntnx_service_groups_v2 module + ansible.builtin.debug: + msg: Start testing ntnx_service_groups_v2 module + +- name: Generate 
random name + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set service group name + ansible.builtin.set_fact: + service_group_name: service_group_{{ random_name }} + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create tcp service group with check mode enabled + ntnx_service_groups_v2: + name: "{{ service_group_name }}_1" + description: desc + tcp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 98 + end_port: 98 + - start_port: 99 + end_port: 99 + register: result + ignore_errors: true + check_mode: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{ service_group_name }}_1" + - result.response.description == "desc" + - result.response.tcp_services[0].start_port == 10 + - result.response.tcp_services[0].end_port == 50 + - result.response.tcp_services[1].start_port == 60 + - result.response.tcp_services[1].end_port == 90 + - result.response.tcp_services[2].start_port == 98 + - result.response.tcp_services[2].end_port == 98 + - result.response.tcp_services[3].start_port == 99 + - result.response.tcp_services[3].end_port == 99 + fail_msg: "Fail: Unable to create tcp service group with check mode enabled" + success_msg: "Pass: tcp service group created successfully with check mode enabled" +##################################################################### +- name: Create tcp service group + ntnx_service_groups_v2: + name: "{{ service_group_name }}_1" + description: desc + tcp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 98 + end_port: 98 + - start_port: 99 + end_port: 99 + register: result + ignore_errors: true + +- name: Getting particular service_group using uuid + ntnx_service_groups_info_v2: + ext_id: "{{ 
result.ext_id }}" + register: result + ignore_errors: true + +- name: Fetch service group status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.ext_id == "{{ result.response.ext_id }}" + - result.failed == false + - result.response.name == "{{ service_group_name }}_1" + - result.response.description == "desc" + - result.response.tcp_services[0].start_port == 10 + - result.response.tcp_services[0].end_port == 50 + - result.response.tcp_services[1].start_port == 60 + - result.response.tcp_services[1].end_port == 90 + - result.response.tcp_services[2].start_port == 98 + - result.response.tcp_services[2].end_port == 98 + - result.response.tcp_services[3].start_port == 99 + - result.response.tcp_services[3].end_port == 99 + fail_msg: "Fail: Unable to create tcp service group " + success_msg: "Pass: tcp service group created successfully" + +- name: Add service group to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" +############################################################### +- name: Create udp service group + ntnx_service_groups_v2: + name: "{{ service_group_name }}_2" + description: desc + udp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 98 + end_port: 98 + - start_port: 99 + end_port: 99 + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.ext_id == "{{ result.response.ext_id }}" + - result.response.description == "desc" + - result.response.name == "{{ service_group_name }}_2" + - result.response.udp_services[0].start_port == 10 + - result.response.udp_services[0].end_port == 50 + - result.response.udp_services[1].start_port == 60 + - result.response.udp_services[1].end_port == 90 + - result.response.udp_services[2].start_port == 98 + - 
result.response.udp_services[2].end_port == 98 + - result.response.udp_services[3].start_port == 99 + - result.response.udp_services[3].end_port == 99 + fail_msg: "Fail: Unable to create udp service group " + success_msg: "Pass: udp service group created successfully" + +- name: Add service group to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" +############################################################### +- name: Create icmp with service group + ntnx_service_groups_v2: + name: "{{ service_group_name }}_3" + description: desc + icmp_services: + - code: 10 + - type: 1 + - type: 2 + code: 3 + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.ext_id == "{{ result.response.ext_id }}" + - result.response.description == "desc" + - result.response.name == "{{ service_group_name }}_3" + - result.response.icmp_services[0].code == 10 + - result.response.icmp_services[1].type == 1 + - result.response.icmp_services[2].code == 3 + - result.response.icmp_services[2].type == 2 + + fail_msg: "Fail: Unable to create icmp service group " + success_msg: "Pass: icmp service group created successfully" + +- name: Add service group to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" +############################################################### +- name: Create service group with tcp and udp and icmp + ntnx_service_groups_v2: + name: "{{ service_group_name }}_4" + description: desc + tcp_services: + - start_port: 1 + end_port: 65535 + udp_services: + - start_port: 10 + end_port: 50 + - start_port: 60 + end_port: 90 + - start_port: 99 + end_port: 99 + icmp_services: + - is_all_allowed: true + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - 
result.failed == false + - result.changed == true + - result.ext_id is defined + - result.ext_id == "{{ result.response.ext_id }}" + - result.response.name == "{{ service_group_name }}_4" + - result.response.icmp_services[0].is_all_allowed == true + - result.response.tcp_services[0].start_port == 1 + - result.response.tcp_services[0].end_port == 65535 + - result.response.udp_services[0].start_port == 10 + - result.response.udp_services[0].end_port == 50 + - result.response.udp_services[1].start_port == 60 + - result.response.udp_services[1].end_port == 90 + - result.response.udp_services[2].start_port == 99 + - result.response.udp_services[2].end_port == 99 + fail_msg: "Fail: Unable to create tcp service group " + success_msg: "Pass: tcp service group created successfully" + +- name: Add service group to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" +################################################################ +- name: Test getting all service groups + ntnx_service_groups_info_v2: + register: service_groups + ignore_errors: true + +- name: Fetching all service groups status + ansible.builtin.assert: + that: + - service_groups.response is defined + - service_groups.changed == false + - service_groups.failed == false + - service_groups.response | length > 0 + fail_msg: Unable to list all service groups + success_msg: service groups listed successfully +################################################################ +- name: List service_group using limit + ntnx_service_groups_info_v2: + limit: 1 + register: result + +- name: List service group with limit status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: Unable to list all service groups using limit + success_msg: service groups listed successfully using limit +############################################################### +- name: Test getting 
particular service_group using filter + ntnx_service_groups_info_v2: + filter: name eq '{{ service_group_name }}_4' + register: result + ignore_errors: true + +- name: List service group with filter status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].ext_id == '{{ todelete[3] }}' + - result.response[0].name == "{{ service_group_name }}_4" + fail_msg: Unable to get particular service_group + success_msg: service_group info obtained successfully +################################################################ +- name: Test getting particular service_group using external id + ntnx_service_groups_info_v2: + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + +- name: List service group with external id status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ service_group_name }}_1" + fail_msg: Unable to get particular service_group + success_msg: service_group info obtained successfully +################################################################ +- name: Update tcp service group name and description and other protocols with check mode enabled + ntnx_service_groups_v2: + ext_id: "{{ todelete[0] }}" + name: updated_name + description: updated_desc + tcp_services: + - start_port: 60 + end_port: 90 + icmp_services: + - type: 2 + code: 3 + register: result + ignore_errors: true + check_mode: true + +- name: Update service group status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "updated_name" + - result.response.description == "updated_desc" + - result.response.tcp_services[0].start_port == 60 + - result.response.tcp_services[0].end_port == 90 + - result.response.icmp_services[0].code == 3 + - result.response.icmp_services[0].type == 2 + fail_msg: 
"Fail: Unable to update tcp service group with check mode enabled" + success_msg: "Pass: tcp service group update successfully with check mode enabled" +################################################################ +- name: Update tcp service group name and description and other protocols + ntnx_service_groups_v2: + ext_id: "{{ todelete[0] }}" + name: updated_name + description: updated_desc + tcp_services: + - start_port: 60 + end_port: 90 + icmp_services: + - type: 2 + code: 3 + register: result + ignore_errors: true + +- name: Update service group status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.ext_id is defined + - result.ext_id == "{{ result.response.ext_id }}" + - result.response.name == "updated_name" + - result.response.description == "updated_desc" + - result.response.tcp_services[0].start_port == 60 + - result.response.tcp_services[0].end_port == 90 + - result.response.icmp_services[0].code == 3 + - result.response.icmp_services[0].type == 2 + fail_msg: "Fail: Unable to update tcp service group " + success_msg: "Pass: tcp service group update successfully" +################################################################ +- name: Update tcp service group with same values + ntnx_service_groups_v2: + ext_id: "{{ todelete[0] }}" + name: updated_name + description: updated_desc + tcp_services: + - start_port: 60 + end_port: 90 + icmp_services: + - type: 2 + code: 3 + register: result + ignore_errors: true + +- name: Update service group status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ fail_msg: Fail service group updated with same values + success_msg: "Pass : return as expected " +################################################################ +- name: Delete all created service groups + ntnx_service_groups_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Delete service group status + ansible.builtin.assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: unable to delete all created service groups + success_msg: All service groups deleted successfully + +- name: Set todelete to empty list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_static_routes/tasks/create.yml b/tests/integration/targets/ntnx_static_routes/tasks/create.yml index 16c81ed50..8e3a48f7c 100644 --- a/tests/integration/targets/ntnx_static_routes/tasks/create.yml +++ b/tests/integration/targets/ntnx_static_routes/tasks/create.yml @@ -1,4 +1,5 @@ -- debug: +- name: Start testing static routes update tests + ansible.builtin.debug: msg: Start testing static routes update tests - name: Add default static route and external nat static route to the vpc route table @@ -17,31 +18,48 @@ next_hop: external_subnet_ref: uuid: "{{ external_nat_subnet.uuid }}" - wait: True + wait: true register: result -- set_fact: +- name: Set destination variables + ansible.builtin.set_fact: d1: "{{ result.response.status.resources.static_routes_list[0].destination }}" d2: "{{ result.response.status.resources.static_routes_list[1].destination }}" - name: Update static routes list Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.changed == true - - result.response.status.resources.static_routes_list[0]["is_active"] == true - ("{{d1}}" == "10.2.2.0/24" and "{{d2}}" == "10.2.3.0/24") or ("{{d2}}" == "10.2.2.0/24" and "{{d1}}" == "10.2.3.0/24") - 
result.response.status.resources.static_routes_list[0]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - - result.response.status.resources.static_routes_list[1]["is_active"] == true - result.response.status.resources.static_routes_list[1]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - - result.response.status.resources.default_route["is_active"] == true - result.response.status.resources.default_route["destination"] == "0.0.0.0/0" - result.response.status.resources.default_route["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated successfully' + fail_msg: "Fail: Unable to update static routes of vpc" + success_msg: "Succes: static routes updated successfully" + +- name: Wait 1 minute for static routes to be become active + ansible.builtin.pause: + seconds: 60 +- name: Get all static routes + ntnx_static_routes_info: + vpc_uuid: "{{ vpc.uuid }}" + register: result + +- name: Set destination variables + ansible.builtin.set_fact: + d1: "{{ result.response.status.resources.static_routes_list[0].destination }}" + d2: "{{ result.response.status.resources.static_routes_list[1].destination }}" + +- name: Check info module response + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false ########################################################################################################### - name: Idempotency check @@ -62,8 +80,8 @@ uuid: "{{ external_nat_subnet.uuid }}" register: result -- name: check idempotency status - assert: +- name: Check idempotency status + ansible.builtin.assert: that: - result.changed == false - result.failed == false @@ -84,17 +102,38 @@ register: result - name: Update static routes list Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 
'COMPLETE' - result.changed == true - - result.response.status.resources.static_routes_list[0]["is_active"] == true - result.response.status.resources.static_routes_list[0]["destination"] == "10.2.4.0/24" - result.response.status.resources.static_routes_list[0]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" fail_msg: "Static routes overriding failed" success_msg: "Static routes overriden successfully" +- name: Wait 1 minute for static routes to be become active + ansible.builtin.pause: + seconds: 60 + +- name: Get all static routes + ntnx_static_routes_info: + vpc_uuid: "{{ vpc.uuid }}" + register: result + +- name: Set destination variable + ansible.builtin.set_fact: + d1: "{{ result.response.status.resources.static_routes_list[0].destination }}" + +- name: Check info module response + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response.status.resources.static_routes_list[0]["is_active"] == true + - result.response.status.resources.static_routes_list[0]["destination"] == "10.2.4.0/24" + - result.response.status.resources.static_routes_list[0]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" + ########################################################################################################### - name: Netgative scenario of cretaing multiple default routes @@ -113,7 +152,7 @@ ignore_errors: true - name: Update static routes list Status - assert: + ansible.builtin.assert: that: - result.changed == false - result.failed == true @@ -121,14 +160,14 @@ success_msg: "Static routes update failed successfully" ########################################################################################################### -- name: remove all routes +- name: Remove all routes ntnx_static_routes: vpc_uuid: "{{ vpc.uuid }}" remove_all_routes: true register: result - name: Remove all routes status - assert: + ansible.builtin.assert: that: - 
result.response is defined - result.response.status.state == 'COMPLETE' diff --git a/tests/integration/targets/ntnx_static_routes/tasks/main.yml b/tests/integration/targets/ntnx_static_routes/tasks/main.yml index b19cfc1ec..361bacf0a 100644 --- a/tests/integration/targets/ntnx_static_routes/tasks/main.yml +++ b/tests/integration/targets/ntnx_static_routes/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" diff --git a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml index 4b79f1a08..d439a4527 100644 --- a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml @@ -1,7 +1,8 @@ -- debug: +- name: Start testing static routes info tests + ansible.builtin.debug: msg: Start testing static routes info tests -- name: create new static routes +- name: Create new static routes ntnx_static_routes: vpc_uuid: "{{ vpc.uuid }}" static_routes: @@ -19,29 +20,33 @@ uuid: "{{ external_nat_subnet.uuid }}" register: result -- name: Update static routes list Status - assert: +- name: Create new static routes status + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.changed == true - fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated successfully' + fail_msg: "Fail: Unable to create static routes for vpc" + success_msg: "Success: Static routes created" 
########################################################################################################### -- name: get all static routes +- name: Wait 60 seconds for static routes to be created + ansible.builtin.pause: + seconds: 60 + +- name: Get all static routes ntnx_static_routes_info: vpc_uuid: "{{ vpc.uuid }}" register: result - -- set_fact: +- name: Set destination variables + ansible.builtin.set_fact: d1: "{{ result.response.status.resources.static_routes_list[0].destination }}" d2: "{{ result.response.status.resources.static_routes_list[1].destination }}" -- name: check info module response - assert: +- name: Check info module response + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -54,12 +59,12 @@ - result.response.status.resources.default_route["destination"] == "0.0.0.0/0" - result.response.status.resources.default_route["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - fail_msg: 'Fail: Unable to get static routes for vpc' - success_msg: 'Succes' + fail_msg: "Fail: Unable to get static routes for vpc" + success_msg: "Succes" ########################################################################################################### -- name: remove all routes for cleanup +- name: Remove all routes for cleanup ntnx_static_routes: vpc_uuid: "{{ vpc.uuid }}" remove_all_routes: true @@ -67,7 +72,7 @@ ignore_errors: true - name: Remove all routes status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' diff --git a/tests/integration/targets/ntnx_static_routes_info/tasks/main.yml b/tests/integration/targets/ntnx_static_routes_info/tasks/main.yml index 3364b30c6..74e773fce 100644 --- a/tests/integration/targets/ntnx_static_routes_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_static_routes_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: 
group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "info.yml" + - name: Import info.yml + ansible.builtin.import_tasks: "info.yml" diff --git a/tests/integration/targets/ntnx_storage_containers_v2/aliases b/tests/integration/targets/ntnx_storage_containers_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_storage_containers_v2/meta/main.yml b/tests/integration/targets/ntnx_storage_containers_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_storage_containers_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_storage_containers_v2/tasks/all_operation.yml b/tests/integration/targets/ntnx_storage_containers_v2/tasks/all_operation.yml new file mode 100644 index 000000000..737322c61 --- /dev/null +++ b/tests/integration/targets/ntnx_storage_containers_v2/tasks/all_operation.yml @@ -0,0 +1,398 @@ +--- +- name: Start ntnx_storage_containers_v2 tests + ansible.builtin.debug: + msg: Start ntnx_storage_containers_v2 tests + +- name: Generate random key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set prefix name + ansible.builtin.set_fact: + prefix_name: ansible-ag + +- name: Set storage container name + ansible.builtin.set_fact: + todelete: [] + storage_container_name: "{{ prefix_name }}{{ random_name }}" + +- name: Generate storage container create spec with check mode enabled + ntnx_storage_containers_v2: + name: "{{ storage_container_name }}1" + cluster_ext_id: "{{ cluster.uuid }}" + owner_ext_id: "{{ 
owner.uuid }}" + replication_factor: 2 + logical_explicit_reserved_capacity_bytes: 0 + logical_advertised_capacity_bytes: 107374182400 + nfs_whitelist_address: + - ipv4: + value: 192.168.1.1 + erasure_code: "ON" + is_inline_ec_enabled: true + has_higher_ec_fault_domain_preference: true + erasure_code_delay_secs: 0 + cache_deduplication: "ON" + on_disk_dedup: "OFF" + is_compression_enabled: true + compression_delay_secs: 0 + is_internal: false + is_software_encryption_enabled: true + affinity_host_ext_id: "0005b6b1-3d06-ce49-1fc3-ac1f6b6029c1" + check_mode: true + register: result + ignore_errors: true + +- name: Generate storage container create spec with check mode enabled status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.name == "{{ storage_container_name }}1" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.response.owner_ext_id == "{{ owner.uuid }}" + - result.response.replication_factor == 2 + - result.response.logical_explicit_reserved_capacity_bytes == 0 + - result.response.logical_advertised_capacity_bytes == 107374182400 + - result.response.nfs_whitelist_address[0].ipv4.value == "192.168.1.1" + - result.response.erasure_code == "ON" + - result.response.is_inline_ec_enabled == true + - result.response.has_higher_ec_fault_domain_preference == true + - result.response.erasure_code_delay_secs == 0 + - result.response.cache_deduplication == "ON" + - result.response.on_disk_dedup == "OFF" + - result.response.is_compression_enabled == true + - result.response.compression_delay_secs == 0 + - result.response.is_internal == false + - result.response.is_software_encryption_enabled == true + - result.response.affinity_host_ext_id == "0005b6b1-3d06-ce49-1fc3-ac1f6b6029c1" + fail_msg: Unable to generate storage container create spec with check mode enabled + success_msg: Storage container create spec generated successfully with check mode enabled + +- name: Create storage container 
with minimal spec + ntnx_storage_containers_v2: + name: "{{ storage_container_name }}1" + cluster_ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Check storage container creation with minimal spec + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.container_ext_id == result.ext_id + - result.task_ext_id is defined + - result.response.name == "{{ storage_container_name }}1" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.ext_id is defined + fail_msg: Unable to create storage container with minimal spec + success_msg: Storage container created successfully with minimal spec + +- name: Adding storage container to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +- name: Create storage container with missing cluster id + ntnx_storage_containers_v2: + name: "{{ storage_container_name }}1" + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.changed == false + - "'state is present but all of the following are missing: cluster_ext_id' in result.msg" + fail_msg: "Fail: storage container created with missing cluster id" + success_msg: "Success: Returned as expected" + +- name: Create storage container with different attributes + ntnx_storage_containers_v2: + name: "{{ storage_container_name }}2" + cluster_ext_id: "{{ cluster.uuid }}" + owner_ext_id: "{{ owner.uuid }}" + logical_explicit_reserved_capacity_bytes: 20 + logical_advertised_capacity_bytes: 1073741824000 + on_disk_dedup: "OFF" + is_compression_enabled: true + compression_delay_secs: 3600 + is_internal: false + is_software_encryption_enabled: false + is_inline_ec_enabled: false + has_higher_ec_fault_domain_preference: true + nfs_whitelist_address: + - ipv4: + value: 192.168.12.0 + register: result + ignore_errors: true + +- name: Adding storage container to delete list + 
ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +- name: Check storage container creation with different attributes + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.container_ext_id == result.ext_id + - result.task_ext_id is defined + - result.response.name == "{{ storage_container_name }}2" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.response.owner_ext_id == "{{ owner.uuid }}" + - result.response.logical_explicit_reserved_capacity_bytes == 20 + - result.response.logical_advertised_capacity_bytes == 1073741824000 + - result.response.nfs_whitelist_address[0].ipv4.value == "192.168.12.0" + - result.response.nfs_whitelist_address[0].ipv4.prefix_length == 32 + fail_msg: Unable to create storage container with different attributes + success_msg: Storage container created successfully with different attributes + +- name: Set storage container external id + ansible.builtin.set_fact: + storage_container_ext_id: "{{ result.ext_id }}" + +- name: Check for idempotency + ntnx_storage_containers_v2: + state: present + name: "{{ storage_container_name }}2" + ext_id: "{{ storage_container_ext_id }}" + cluster_ext_id: "{{ cluster.uuid }}" + owner_ext_id: "{{ owner.uuid }}" + logical_explicit_reserved_capacity_bytes: 20 + logical_advertised_capacity_bytes: 1073741824000 + register: result + ignore_errors: true + +- name: Check storage container idempotency + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.skipped == true + - result.msg == "Nothing to change." 
+ fail_msg: Unable to create storage container idempotency + success_msg: Storage container created successfully idempotency + +- name: Generate storage container update spec with check mode enabled + ntnx_storage_containers_v2: + state: present + ext_id: "{{ storage_container_ext_id }}" + name: "{{ storage_container_name }}2_updated" + cluster_ext_id: "{{ cluster.uuid }}" + owner_ext_id: "{{ owner.uuid }}" + logical_explicit_reserved_capacity_bytes: 20 + logical_advertised_capacity_bytes: 1073741824000 + nfs_whitelist_address: + - ipv4: + value: 192.168.13.0 + prefix_length: 32 + - ipv6: + value: "2001:db8::1" + prefix_length: 128 + - fqdn: + value: "example.com" + register: result + ignore_errors: true + check_mode: true + +- name: Generate storage container update spec with check mode enabled status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.name == "{{ storage_container_name }}2_updated" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.response.owner_ext_id == "{{ owner.uuid }}" + - result.response.logical_explicit_reserved_capacity_bytes == 20 + - result.response.logical_advertised_capacity_bytes == 1073741824000 + - result.response.nfs_whitelist_address[0].ipv4.value == "192.168.13.0" + - result.response.nfs_whitelist_address[0].ipv4.prefix_length == 32 + - result.response.nfs_whitelist_address[1].ipv6.value == "2001:db8::1" + - result.response.nfs_whitelist_address[1].ipv6.prefix_length == 128 + - result.response.nfs_whitelist_address[2].fqdn.value == "example.com" + - result.ext_id == "{{ storage_container_ext_id }}" + fail_msg: Unable to generate storage container update spec with check mode enabled + success_msg: Storage container update spec generated successfully with check mode enabled + +- name: Update storage container + ntnx_storage_containers_v2: + state: present + ext_id: "{{ storage_container_ext_id }}" + name: "{{ storage_container_name }}2_updated" + 
cluster_ext_id: "{{ cluster.uuid }}" + owner_ext_id: "{{ owner.uuid }}" + logical_explicit_reserved_capacity_bytes: 25 + logical_advertised_capacity_bytes: 2147483648000 + nfs_whitelist_address: + - ipv4: + value: 192.168.13.2 + register: result + ignore_errors: true + +- name: Check storage container update + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.response.name == "{{ storage_container_name }}2_updated" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.response.owner_ext_id == "{{ owner.uuid }}" + - result.response.logical_explicit_reserved_capacity_bytes == 25 + - result.response.logical_advertised_capacity_bytes == 2147483648000 + - result.response.nfs_whitelist_address[0].ipv4.value == "192.168.13.2" + - result.response.nfs_whitelist_address[0].ipv4.prefix_length == 32 + - result.ext_id == "{{ storage_container_ext_id }}" + fail_msg: "Unable to update storage container " + success_msg: "Storage container updated successfully " + +- name: Fetch storage container configurations + ntnx_storage_containers_info_v2: + ext_id: "{{ storage_container_ext_id }}" + register: result + ignore_errors: true + +- name: Check storage container configurations + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response.name == "{{ storage_container_name }}2_updated" + - result.response.cluster_ext_id == "{{ cluster.uuid }}" + - result.response.owner_ext_id == "{{ owner.uuid }}" + - result.response.logical_explicit_reserved_capacity_bytes == 25 + - result.response.logical_advertised_capacity_bytes == 2147483648000 + - result.response.nfs_whitelist_address[0].ipv4.value == "192.168.13.2" + - result.response.nfs_whitelist_address[0].ipv4.prefix_length == 32 + - result.ext_id == "{{ storage_container_ext_id }}" + fail_msg: Unable to fetch storage container configurations + success_msg: Storage container configurations fetched successfully + +- name: Get 
current time in the required format + ansible.builtin.command: date -u +"%Y-%m-%dT%H:%M:%S.%3NZ" + register: end_time + changed_when: false + +- name: Get current time minus 5 minutes in the required format + ansible.builtin.command: date -u -d "-300 seconds" +"%Y-%m-%dT%H:%M:%S.%3NZ" + register: start_time + changed_when: false + +- name: Fetch storage container stats + ntnx_storage_containers_stats_v2: + ext_id: "{{ storage_container.uuid }}" + start_time: "{{ start_time.stdout }}" + end_time: "{{ end_time.stdout }}" + register: result + ignore_errors: true + +- name: Check storage container stats + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response.controller_num_iops is defined + - result.response.controller_num_iops | length > 2 + - result.response.container_ext_id is defined + - result.response.container_ext_id == "{{ storage_container.uuid }}" + - result.ext_id == "{{ storage_container.uuid }}" + fail_msg: Unable to fetch storage container stats + success_msg: Storage container stats fetched successfully + +- name: Fetch storage container stats with interval defined + ntnx_storage_containers_stats_v2: + ext_id: "{{ storage_container.uuid }}" + start_time: "{{ start_time.stdout }}" + end_time: "{{ end_time.stdout }}" + sampling_interval: 30 + stat_type: SUM + register: result + ignore_errors: true + +- name: Check storage container stats with interval defined + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response is defined + - result.response.controller_num_iops is defined + - result.response.controller_num_iops | length > 2 + - result.response.container_ext_id is defined + - result.response.container_ext_id == "{{ storage_container.uuid }}" + - result.ext_id == "{{ storage_container.uuid }}" + fail_msg: Unable to fetch storage container stats with interval defined + success_msg: Storage container stats fetched 
successfully with interval defined + +- name: Fetch all storage containers + ntnx_storage_containers_info_v2: + register: result + ignore_errors: true + +- name: Check all storage containers + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response | length > 0 + fail_msg: Unable to fetch all storage containers + success_msg: All storage containers fetched successfully + +- name: Fetch a particular storage container using filter + ntnx_storage_containers_info_v2: + filter: name eq '{{ storage_container_name }}2_updated' + register: result + ignore_errors: true + +- name: Check all storage containers with filter + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response | length == 1 + - result.response[0].name == "{{ storage_container_name }}2_updated" + fail_msg: Unable to Fetch a particular storage container using filter + success_msg: A particular storage container fetched successfully using filter + +- name: Fetch all storage containers with limit + ntnx_storage_containers_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Check all storage containers with limit + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.response | length == 1 + fail_msg: Unable to fetch all storage containers with limit + success_msg: All storage containers fetched successfully with limit + +- name: Delete all Created storage containers + ntnx_storage_containers_v2: + state: absent + ext_id: "{{ item }}" + ignore_small_files: true + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.response is defined + - item.ext_id == "{{ todelete[storage_containers_index] }}" + fail_msg: "Unable to delete storage_container " + success_msg: "storage_container is 
deleted successfully " + loop: "{{ result.results }}" + loop_control: + index_var: storage_containers_index + +- name: Reset to delete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_storage_containers_v2/tasks/main.yml b/tests/integration/targets/ntnx_storage_containers_v2/tasks/main.yml new file mode 100644 index 000000000..b4eefeeea --- /dev/null +++ b/tests/integration/targets/ntnx_storage_containers_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import storage container all operation tests + ansible.builtin.import_tasks: "all_operation.yml" diff --git a/tests/integration/targets/ntnx_user_groups/tasks/create.yml b/tests/integration/targets/ntnx_user_groups/tasks/create.yml index 31fb156e3..91b37f378 100644 --- a/tests/integration/targets/ntnx_user_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_user_groups/tasks/create.yml @@ -1,8 +1,9 @@ --- -- debug: - msg: start ntnx_user_groups and info tests +- name: Start ntnx_user_groups and info tests + ansible.builtin.debug: + msg: Start ntnx_user_groups and info tests -- name: create user group +- name: Create user group ntnx_user_groups: distinguished_name: "{{distinguished_name}}" project: @@ -13,8 +14,8 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -26,27 +27,28 @@ fail_msg: "Unable to create user group " success_msg: "user group created successfully" -- set_fact: +- name: Set user group uuid to delete later + ansible.builtin.set_fact: user_group_to_delete: "{{result.user_group_uuid}}" -#############################INFO Tests##################### +############################# INFO 
Tests ##################### - name: List all user groups ntnx_user_groups_info: register: user_groups - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - user_groups.response is defined - user_groups.response.metadata.total_matches > 0 fail_msg: "Unable to list all user groups" success_msg: "User groups info obtained successfully" -- set_fact: +- name: Setting user group name and uuid + ansible.builtin.set_fact: test_user_group_name: "{{user_groups.response.entities.0.status.resources.display_name}}" -- set_fact: test_user_group_uuid: "{{user_groups.response.entities.0.metadata.uuid}}" ################################################## @@ -55,10 +57,10 @@ ntnx_user_groups_info: usergroup_uuid: "{{ test_user_group_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -75,10 +77,10 @@ filter: name: "{{ test_user_group_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -96,10 +98,10 @@ length: 2 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -107,7 +109,6 @@ - result.response.metadata.total_matches > 0 - result.response.metadata.length == 2 - fail_msg: "Unable to list user groups using length and offset" success_msg: "user groups listed successfully using length and offset" ################################################## @@ -117,10 +118,10 @@ # sort_attribute: "group_name" # kind: user_group # register: result -# ignore_errors: True +# ignore_errors: true # - name: Listing Status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == false @@ 
-128,15 +129,15 @@ # fail_msg: "Unable to list user groups using ascending name sorting" # success_msg: "user groups listed successfully using ascending name sorting" -- name: delete user group +- name: Delete user group ntnx_user_groups: state: absent user_group_uuid: "{{user_group_to_delete}}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == false @@ -144,8 +145,6 @@ - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" fail_msg: "Unable to delete user group " success_msg: "user group deletd successfully" - - # - name: create user group with idp # ntnx_user_groups: # idp: @@ -155,7 +154,7 @@ # ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.failed == false @@ -174,7 +173,7 @@ # ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.failed == false diff --git a/tests/integration/targets/ntnx_user_groups/tasks/main.yml b/tests/integration/targets/ntnx_user_groups/tasks/main.yml index b19cfc1ec..361bacf0a 100644 --- a/tests/integration/targets/ntnx_user_groups/tasks/main.yml +++ b/tests/integration/targets/ntnx_user_groups/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" diff --git a/tests/integration/targets/ntnx_user_groups_v2/aliases 
b/tests/integration/targets/ntnx_user_groups_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_user_groups_v2/meta/main.yml b/tests/integration/targets/ntnx_user_groups_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_user_groups_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_user_groups_v2/tasks/main.yml b/tests/integration/targets/ntnx_user_groups_v2/tasks/main.yml new file mode 100644 index 000000000..12351bbec --- /dev/null +++ b/tests/integration/targets/ntnx_user_groups_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import user_groups.yml + ansible.builtin.import_tasks: user_groups.yml diff --git a/tests/integration/targets/ntnx_user_groups_v2/tasks/user_groups.yml b/tests/integration/targets/ntnx_user_groups_v2/tasks/user_groups.yml new file mode 100644 index 000000000..2fa930d46 --- /dev/null +++ b/tests/integration/targets/ntnx_user_groups_v2/tasks/user_groups.yml @@ -0,0 +1,340 @@ +--- +- name: Start ntnx_user_groups_v2 tests + ansible.builtin.debug: + msg: start ntnx_user_groups_v2 tests + +- name: Set user groups variable. usergroups is a list of user groups to be deleted if already exist. 
+ ansible.builtin.set_fact: + usergroups_query: "" + usergroups: + - "{{ distinguished_name }}" + - "{{ new_saml_user_group }}#{{ identity_provider_uuid }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Here we create a query string to fetch given user groups to delete if already exist + ansible.builtin.set_fact: + usergroups_query: "{{ usergroups_query }}{{ ' or ' if usergroups_query else '' }}distinguishedName eq '{{ item }}'" + with_items: "{{ usergroups }}" + +- name: Create LDAP user group with check mode enabled + ntnx_user_groups_v2: + distinguished_name: "{{ distinguished_name }}" + idp_id: "{{ directory_service_uuid }}" + group_type: LDAP + register: result + ignore_errors: true + check_mode: true + +- name: Create LDAP user group with check mode enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.idp_id == "{{ directory_service_uuid }}" + - result.response.group_type == "LDAP" + - result.response.distinguished_name == "{{ distinguished_name }}" + fail_msg: Unable to create LDAP user group with check mode enabled + success_msg: LDAP user group created successfully with check mode enabled + +################################################################################# + +- name: Fetch user groups to delete if already exist + ntnx_user_groups_info_v2: + filter: "{{ usergroups_query }}" + register: result + ignore_errors: true + +- name: Fetch user groups to delete if already exist Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + fail_msg: Fetch user groups to delete if already exist failed + success_msg: Fetch user groups to delete if already exist passed + +- name: Adding user group external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: Delete user groups if 
already exist + ntnx_user_groups_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + when: todelete | length > 0 + +- name: Delete user groups if already exist status + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + fail_msg: Delete user groups if already exist failed + success_msg: Delete user groups if already exist passed + when: todelete | length > 0 + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] + +################################################################################# + +- name: Create LDAP user group + ntnx_user_groups_v2: + group_type: LDAP + distinguished_name: "{{ distinguished_name }}" + idp_id: "{{ directory_service_uuid }}" + + register: result + ignore_errors: true + +- name: Create LDAP user group status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.distinguished_name == "{{ distinguished_name }}" + - result.response.group_type == "LDAP" + - result.response.idp_id == "{{ directory_service_uuid }}" + - result.response.ext_id == result.ext_id + fail_msg: Unable to create LDAP user group + success_msg: LDAP user group created successfully + +- name: Adding user group external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +################################################################################# + +- name: Create LDAP user group that already exists + ntnx_user_groups_v2: + group_type: LDAP + distinguished_name: "{{ distinguished_name }}" + idp_id: "{{ directory_service_uuid }}" + + register: result + ignore_errors: true + +- name: Create LDAP user group that already exists status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + - result.status == 409 + - 
result.response.data.error is defined + - result.response.data.error | length > 0 + fail_msg: Create LDAP user group that already exists failed + success_msg: Create LDAP user group that already exists passed + +################################################################################# + +- name: Create SAML user group with check mode enabled + ntnx_user_groups_v2: + group_type: SAML + idp_id: "{{ identity_provider_uuid }}" + name: "{{ new_saml_user_group }}" + register: result + ignore_errors: true + check_mode: true + +- name: Create SAML user group with check mode enabled status + + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.idp_id == "{{ identity_provider_uuid }}" + - result.response.group_type == "SAML" + - result.response.name == "{{ new_saml_user_group }}" + fail_msg: Unable to create SAML user group with check mode enabled + success_msg: SAML user group created successfully with check mode enabled + +############################################################################### + +- name: Create SAML user group + ntnx_user_groups_v2: + group_type: SAML + idp_id: "{{ identity_provider_uuid }}" + name: "{{ new_saml_user_group }}" + register: result + ignore_errors: true + +- name: Create SAML user group status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.group_type == "SAML" + - result.response.idp_id == "{{ identity_provider_uuid }}" + - result.response.ext_id == result.ext_id + - result.response.name == "{{ new_saml_user_group }}" + fail_msg: Unable to create SAML user group + success_msg: SAML user group created successfully + +- name: Adding user group external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +############################################################################### 
+ +- name: List all user groups + ntnx_user_groups_info_v2: + register: user_groups + ignore_errors: true + +- name: List all user groups Status + ansible.builtin.assert: + that: + - user_groups.response is defined + - user_groups.response | length > 0 + fail_msg: Unable to list all user groups + success_msg: User groups info obtained successfully + +- name: Set test user group name and uuid + ansible.builtin.set_fact: + test_user_group_name: "{{user_groups.response.0.name}}" + test_user_group_uuid: "{{user_groups.response.0.ext_id}}" + +############################################################################### + +- name: List user groups using user_group uuid criteria + ntnx_user_groups_info_v2: + ext_id: "{{ test_user_group_uuid }}" + register: result + ignore_errors: true + +- name: List user groups using user_group uuid criteria Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ test_user_group_name }}" + fail_msg: Unable to list user group using uuid + success_msg: user group info obtained successfully + +############################################################################### + +- name: List user groups using filter criteria + ntnx_user_groups_info_v2: + filter: name eq '{{ test_user_group_name }}' + register: result + ignore_errors: true + +- name: List user groups using filter criteria Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].name == "{{ test_user_group_name }}" + - result.response | length == 1 + fail_msg: Unable to list user groups using filter + success_msg: user group info obtained successfully + +############################################################################### +# these tests are commented because the filter is not working as expected: https://jira.nutanix.com/browse/ENG-673830 +# - name: Fetch only LDAP user 
groups +# ntnx_user_groups_info_v2: +# filter: "groupType eq Schema.Enums.GroupType'LDAP'" +# register: result +# ignore_errors: True + +# - ansible.builtin.set_fact: +# group_type: "{{ result.response | map(attribute='group_type') | list | unique }}" + +# - name: Fetch only LDAP user groups Status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == false +# - result.failed == false +# - result.response | length > 0 # response is null +# - group_type | length == 1 +# - group_type[0] == "LDAP" +# fail_msg: "Unable to fetch only LDAP user groups" +# success_msg: "LDAP user groups fetched successfully" + +# ############################################################################### + +# - name: Fetch only SAML user groups +# ntnx_user_groups_info_v2: +# filter: "groupType eq Schema.Enums.GroupType'SAML'" +# register: result +# ignore_errors: True + +# - ansible.builtin.set_fact: +# group_type: "{{ result.response | map(attribute='group_type') | list | unique }}" + +# - name: Fetch only SAML user groups Status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == false +# - result.failed == false +# - result.response | length > 0 # response is null +# - group_type | length == 1 +# - group_type[0] == "SAML" +# fail_msg: "Unable to fetch only SAML user groups" +# success_msg: "SAML user groups fetched successfully" + +############################################################################### + +- name: List user groups using limit + ntnx_user_groups_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List user groups using limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + + fail_msg: Unable to list user groups using limit + success_msg: user groups listed successfully using limit + 
+############################################################################### + +- name: Delete created user groups + ntnx_user_groups_v2: + state: absent + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Delete created user groups status + vars: + msg: "User group with ext_id: {{ item.ext_id }} deleted successfully" + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + - result.results | length == todelete | length + - item.failed == false + - item.ext_id in todelete + - item.msg == "{{ msg }}" + fail_msg: Delete created user groups failed + success_msg: Delete created user groups passed + loop: "{{ result.results }}" + when: todelete | length > 0 and result | length > 0 + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_users/tasks/create.yml b/tests/integration/targets/ntnx_users/tasks/create.yml index b6bdf0c4b..332760804 100644 --- a/tests/integration/targets/ntnx_users/tasks/create.yml +++ b/tests/integration/targets/ntnx_users/tasks/create.yml @@ -1,19 +1,20 @@ --- -- debug: - msg: "start ntnx_users tests" +- name: Start ntnx_users tests + ansible.builtin.debug: + msg: "Start ntnx_users tests" ################################################## -- name: create local user with check mode +- name: Create local user with check mode ntnx_users: principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" project: uuid: "{{project.uuid}}" register: result ignore_errors: true check_mode: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -23,17 +24,16 @@ fail_msg: "fail: user created whil check mode on" success_msg: "pass: returned as expected" - ################################################# -- 
name: create local user +- name: Create local user ntnx_users: principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -45,8 +45,9 @@ fail_msg: "fail" success_msg: "pass" -- set_fact: - todelete: "{{ todelete + [ result.user_uuid ] }}" +- name: Adding user uuid to todelete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.user_uuid ] }}" ################################################# - name: Delete created users ntnx_users: @@ -55,20 +56,21 @@ loop: "{{ todelete }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == true - result.msg == "All items completed" fail_msg: "Fail: unable to delete all users" success_msg: "Pass: all users deleted successfully" -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] ################################################# -- name: create local user with project and categories +- name: Create local user with project and categories ntnx_users: principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" project: uuid: "{{project.uuid}}" categories: @@ -79,8 +81,8 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.changed == true @@ -92,13 +94,14 @@ fail_msg: "fail" success_msg: "pass" -- set_fact: - todelete: "{{ todelete + [ result.user_uuid ] }}" +- name: Adding user uuid to todelete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.user_uuid ] }}" 
################################################# -- name: create local user not in the directory_service +- name: Create local user not in the directory_service ntnx_users: principal_name: wrong_name - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" project: uuid: "{{project.uuid}}" categories: @@ -109,8 +112,8 @@ register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.response is defined - result.failed == true @@ -126,7 +129,7 @@ # ignore_errors: true # - name: check listing status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == true @@ -137,7 +140,7 @@ # fail_msg: "Fail: unable to create idp user" # success_msg: "Pass: idp user created successfully" -# - set_fact: +# - ansible.builtin.set_fact: # todelete: "{{ todelete + [ result.user_uuid ] }}" ################################################# - name: Delete created users @@ -147,22 +150,23 @@ loop: "{{ todelete }}" register: result ignore_errors: true -- name: check listing status - assert: +- name: Check listing status + ansible.builtin.assert: that: - result.changed == true - result.msg == "All items completed" fail_msg: "Fail: unable to delete all users" success_msg: "Pass: all users deleted successfully" -- set_fact: +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] ############# DELETE TEST ############################## -- name: create local user +- name: Create local user ntnx_users: principal_name: "{{principal_name}}" - directory_service_uuid: "{{directory_service_uuid}}" + directory_service_uuid: "{{directory_service_uuid}}" project: uuid: "{{project.uuid}}" register: result @@ -175,8 +179,8 @@ register: result ignore_errors: true -- name: check delete status - assert: +- name: Check delete status + ansible.builtin.assert: that: - result.changed == true - result.response is 
defined diff --git a/tests/integration/targets/ntnx_users/tasks/main.yml b/tests/integration/targets/ntnx_users/tasks/main.yml index b19cfc1ec..361bacf0a 100644 --- a/tests/integration/targets/ntnx_users/tasks/main.yml +++ b/tests/integration/targets/ntnx_users/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" diff --git a/tests/integration/targets/ntnx_users_info/tasks/main.yml b/tests/integration/targets/ntnx_users_info/tasks/main.yml index 0773f67ed..7495f59c3 100644 --- a/tests/integration/targets/ntnx_users_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_users_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "users_info.yml" + - name: Import users_info.yml + ansible.builtin.import_tasks: "users_info.yml" diff --git a/tests/integration/targets/ntnx_users_info/tasks/users_info.yml b/tests/integration/targets/ntnx_users_info/tasks/users_info.yml index 88425c12a..5720b990e 100644 --- a/tests/integration/targets/ntnx_users_info/tasks/users_info.yml +++ b/tests/integration/targets/ntnx_users_info/tasks/users_info.yml @@ -1,23 +1,24 @@ -- debug: - msg: start testing ntnx_users_info +- name: Start testing 
ntnx_users_info + ansible.builtin.debug: + msg: Start testing ntnx_users_info ################################################## - name: List all users ntnx_users_info: register: users - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - users.response is defined - users.response.metadata.total_matches > 0 fail_msg: "Unable to list all users" success_msg: "User info obtained successfully" -- set_fact: +- name: Setting user name and uuid + ansible.builtin.set_fact: test_user_name: "{{users.response.entities.2.status.name}}" -- set_fact: test_user_uuid: "{{users.response.entities.2.metadata.uuid}}" ################################################## @@ -26,10 +27,10 @@ ntnx_users_info: user_uuid: "{{ test_user_uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -46,10 +47,10 @@ filter: username: "{{ test_user_name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -67,10 +68,10 @@ length: 2 offset: 1 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false @@ -88,10 +89,10 @@ # sort_attribute: "name" # kind: user # register: result -# ignore_errors: True +# ignore_errors: true # - name: Listing Status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.changed == false diff --git a/tests/integration/targets/ntnx_users_v2/aliases b/tests/integration/targets/ntnx_users_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_users_v2/meta/main.yml b/tests/integration/targets/ntnx_users_v2/meta/main.yml new file mode 100644 index 
000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_users_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_users_v2/tasks/main.yml b/tests/integration/targets/ntnx_users_v2/tasks/main.yml new file mode 100644 index 000000000..25b8025f6 --- /dev/null +++ b/tests/integration/targets/ntnx_users_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import users_operations.yml + ansible.builtin.import_tasks: users_operations.yml diff --git a/tests/integration/targets/ntnx_users_v2/tasks/users_operations.yml b/tests/integration/targets/ntnx_users_v2/tasks/users_operations.yml new file mode 100644 index 000000000..341e1c93e --- /dev/null +++ b/tests/integration/targets/ntnx_users_v2/tasks/users_operations.yml @@ -0,0 +1,630 @@ +--- +- name: Start ntnx_users_v2 tests + ansible.builtin.debug: + msg: start ntnx_users_v2 tests + +- name: Generate random strings + ansible.builtin.set_fact: + random_string: "{{ query('community.general.random_string', numbers=false, special=false, length=12) }}" + +- name: Ensure Pattern Compliance + ansible.builtin.set_fact: + random: user_test_{{ random_string | regex_replace('[^a-zA-Z0-9]', '') }} + users_query: "" + +- name: Set users variable + ansible.builtin.set_fact: + users: + - "{{ random }}_username_1" + - "{{ new_user }}" + - "{{ new_saml_user }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Set users_query variable + ansible.builtin.set_fact: + users_query: "{{ users_query }}{{ ' or ' if users_query else '' }}username eq '{{ item }}'" + with_items: "{{ users }}" + +- name: Create local user with check mode + ntnx_users_v2: + user_type: LOCAL + username: "{{ random }}_username_1" + first_name: 
"{{ random }}_firstname_1" + last_name: "{{ random }}_lastname_1" + display_name: "{{ random }}_displayname_1" + password: test.Password.123 + middle_initial: "{{ random }}_middleinitial_1" + email_id: "{{ random }}_1@email.com" + locale: en-US + region: ansible-region + status: ACTIVE + is_force_reset_password_enabled: true + register: result + ignore_errors: true + check_mode: true + +- name: Create local user with check mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.user_type == "LOCAL" + - result.response.username == "{{ random }}_username_1" + - result.response.first_name == "{{ random }}_firstname_1" + - result.response.last_name == "{{ random }}_lastname_1" + - result.response.display_name == "{{ random }}_displayname_1" + - result.response.middle_initial == "{{ random }}_middleinitial_1" + - result.response.email_id == "{{ random }}_1@email.com" + - result.response.locale == "en-US" + - result.response.status == "ACTIVE" + - result.response.is_force_reset_password_enabled == True + fail_msg: create local user with check mode failed + success_msg: create local user with check mode passed + +################################################ +# Commented as the delete API is not available using v1 instead +- name: Fetch users to delete if already exist + ntnx_users_info_v2: + filter: "{{ users_query }}" + register: result + ignore_errors: true + +- name: Fetch users to delete if already exist Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + fail_msg: Fetch users to delete if already exist failed + success_msg: Fetch users to delete if already exist passed + +- name: Adding user external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ result.response | map(attribute='ext_id') | list }}" + +# - name: Delete users if already exist +# ntnx_users_v2: +# state: absent +# 
ext_id: "{{ item }}" +# register: result +# ignore_errors: true +# loop: "{{ todelete }}" +# when: todelete | length > 0 + +# - name: Delete users if already exist status +# ansible.builtin.assert: +# that: +# - result.changed == true +# - result.msg == "All items completed" +# fail_msg: Delete users if already exist failed +# success_msg: Delete users if already exist passed +# when: todelete | length > 0 + +- name: Delete users if already exist + ntnx_users: + state: absent + user_uuid: "{{ item }}" + loop: "{{ todelete }}" + register: result + ignore_errors: true + when: todelete | length > 0 + +- name: Delete users if already exist status + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + fail_msg: Delete users if already exist failed + success_msg: Delete users if already exist passed + when: todelete | length > 0 + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] + +################################################ + +- name: Create active local user + ntnx_users_v2: + user_type: LOCAL + username: "{{ random }}_username_1" + first_name: "{{ random }}_firstname_1" + last_name: "{{ random }}_lastname_1" + display_name: "{{ random }}_displayname_1" + password: test.Password.123 + middle_initial: "{{ random }}_middleinitial_1" + email_id: "{{ random }}_1@email.com" + locale: en-US + status: ACTIVE + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Create active local user status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.ext_id == result.ext_id + - result.response.user_type == "LOCAL" + - result.response.username | lower == "{{ random }}_username_1" | lower + - result.response.first_name == "{{ random }}_firstname_1" + - result.response.last_name == "{{ random }}_lastname_1" + - result.response.display_name == "{{ random 
}}_displayname_1" + - result.response.middle_initial == "{{ random }}_middleinitial_1" + - result.response.email_id == "{{ random }}_1@email.com" + - result.response.locale == "en-US" + - result.response.status == "ACTIVE" + - result.response.is_force_reset_password_enabled == True + fail_msg: create active local user failed + success_msg: create active local user passed + +- name: Adding user external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +################################################# + +- name: Create inactive local user + ntnx_users_v2: + user_type: LOCAL + username: "{{ random }}_username_2" + first_name: "{{ random }}_firstname_2" + last_name: "{{ random }}_lastname_2" + display_name: "{{ random }}_displayname_2" + password: test.Password.123 + middle_initial: "{{ random }}_middleinitial_2" + email_id: "{{ random }}_2@email.com" + locale: en-US + status: INACTIVE + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Create inactive local user status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.user_type == "LOCAL" + - result.response.username | lower == "{{ random }}_username_2" | lower + - result.response.first_name == "{{ random }}_firstname_2" + - result.response.last_name == "{{ random }}_lastname_2" + - result.response.display_name == "{{ random }}_displayname_2" + - result.response.middle_initial == "{{ random }}_middleinitial_2" + - result.response.email_id == "{{ random }}_2@email.com" + - result.response.locale == "en-US" + - result.response.status == "INACTIVE" + - result.response.is_force_reset_password_enabled == True + fail_msg: create inactive local user failed + success_msg: create inactive local user passed + +- name: Adding user external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] 
}}" + +################################################# + +- name: Create local user that already exists + ntnx_users_v2: + user_type: LOCAL + username: "{{ random }}_username_1" + first_name: "{{ random }}_firstname_1" + last_name: "{{ random }}_lastname_1" + display_name: "{{ random }}_displayname_1" + password: test.Password.123 + middle_initial: "{{ random }}_middleinitial_1" + email_id: "{{ random }}_1@email.com" + locale: en-US + region: ansible-region + status: ACTIVE + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Create local user that already exists status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + - result.status == 409 + - result.response.data.error is defined + - result.response.data.error | length > 0 + fail_msg: create local user that already exists failed + success_msg: create local user that already exists passed + +################################################# + +- name: Create SAML user + ntnx_users_v2: + user_type: SAML + username: "{{ users[2] }}" + idp_id: "{{ identity_provider_uuid }}" + register: result + ignore_errors: true + +- name: Create SAML user status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.user_type == "SAML" + - result.response.username == "{{ users[2] }}" + - result.response.idp_id == "{{ identity_provider_uuid }}" + - result.ext_id == result.response.ext_id + fail_msg: Create SAML user failed + success_msg: Create SAML user passed + +- name: Adding user external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +################################################# + +- name: Create LDAP user with minimum config + ntnx_users_v2: + user_type: LDAP + username: "{{ new_user }}" + idp_id: "{{ directory_service_uuid }}" + register: result + 
ignore_errors: true + +- name: Create LDAP user with minimum config status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.user_type == "LDAP" + - result.response.idp_id == "{{ directory_service_uuid }}" + - result.ext_id == result.response.ext_id + - result.response.username == "{{ new_user }}" + fail_msg: Create LDAP user with minimum config failed + success_msg: Create LDAP user with minimum config passed + +- name: Adding user external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [result.ext_id] }}" + +################################################# + +- name: Update local user with check mode enabled + ntnx_users_v2: + ext_id: "{{ todelete[0] }}" + first_name: "{{ random }}_firstname_1_updated" + last_name: "{{ random }}_lastname_1_updated" + password: test.Password.123Updated + display_name: "{{ random }}_displayname_1_updated" + middle_initial: "{{ random }}_middleinitial_1_updated" + email_id: "{{ random }}_1_updated@email.com" + is_force_reset_password_enabled: true + register: result + ignore_errors: true + check_mode: true + +- name: Update local user with check mode enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.response.first_name == "{{ random }}_firstname_1_updated" + - result.response.last_name == "{{ random }}_lastname_1_updated" + - result.response.display_name == "{{ random }}_displayname_1_updated" + - result.response.middle_initial == "{{ random }}_middleinitial_1_updated" + - result.response.email_id == "{{ random }}_1_updated@email.com" + - result.response.is_force_reset_password_enabled == true + fail_msg: Update local user with check mode enabled failed + success_msg: Update local user with check mode enabled passed + 
+################################################# + +- name: Update local user + ntnx_users_v2: + ext_id: "{{ todelete[0] }}" + first_name: "{{ random }}_firstname_1_updated" + last_name: "{{ random }}_lastname_1_updated" + password: test.Password.123Updated + display_name: "{{ random }}_displayname_1_updated" + middle_initial: "{{ random }}_middleinitial_1_updated" + email_id: "{{ random }}_1_updated@email.com" + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Update local user status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.response.first_name == "{{ random }}_firstname_1_updated" + - result.response.last_name == "{{ random }}_lastname_1_updated" + - result.response.display_name == "{{ random }}_displayname_1_updated" + - result.response.middle_initial == "{{ random }}_middleinitial_1_updated" + - result.response.email_id == "{{ random }}_1_updated@email.com" + - result.response.is_force_reset_password_enabled == true + fail_msg: Update local user failed + success_msg: Update local user passed + +################################################# + +- name: Verify if module is idempotent if same config is provided + ntnx_users_v2: + ext_id: "{{ todelete[0] }}" + first_name: "{{ random }}_firstname_1_updated" + last_name: "{{ random }}_lastname_1_updated" + display_name: "{{ random }}_displayname_1_updated" + middle_initial: "{{ random }}_middleinitial_1_updated" + email_id: "{{ random }}_1_updated@email.com" + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Verify if module is idempotent if same config is provided status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ - result.skipped == true + fail_msg: Verify if module is idempotent if same config is provided failed + success_msg: Verify if module is idempotent if same config is provided passed + +################################################# + +- name: Verify if idempotency checks are skipped if password is provided + ntnx_users_v2: + ext_id: "{{ todelete[0] }}" + first_name: "{{ random }}_firstname_1_updated" + last_name: "{{ random }}_lastname_1_updated" + password: test.Password.123Updated + display_name: "{{ random }}_displayname_1_updated" + middle_initial: "{{ random }}_middleinitial_1_updated" + email_id: "{{ random }}_1_updated@email.com" + is_force_reset_password_enabled: true + register: result + ignore_errors: true + +- name: Verify if idempotency checks are skipped if password is provided status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.response.first_name == "{{ random }}_firstname_1_updated" + - result.response.last_name == "{{ random }}_lastname_1_updated" + - result.response.display_name == "{{ random }}_displayname_1_updated" + - result.response.middle_initial == "{{ random }}_middleinitial_1_updated" + - result.response.email_id == "{{ random }}_1_updated@email.com" + - result.response.is_force_reset_password_enabled == true + fail_msg: Verify if idempotency checks are skipped if password is provided failed + success_msg: Verify if idempotency checks are skipped if password is provided passed + +################################################# +# these test were commented since they are using different APIs +# - name: Deactivate local user +# ntnx_users_v2: +# ext_id: "{{todelete[0]}}" +# status: INACTIVE +# register: result +# ignore_errors: true + +# - name: Deactivate local user status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - 
result.ext_id == "{{todelete[0]}}" +# fail_msg: "Deactivate local user failed" +# success_msg: "Deactivate local user passed" + +# ################################################# + +# - name: Fetch user info using user ext_id criteria and check status +# ntnx_users_info_v2: +# ext_id: "{{todelete[0]}}" +# register: result +# ignore_errors: True + +# - name: Fetch user info using user ext_id criteria and check status result +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == false +# - result.failed == false +# - result.response.ext_id == "{{todelete[0]}}" +# - result.response.status == "INACTIVE" +# fail_msg: "Fetch user info using user ext_id criteria and check status failed" +# success_msg: "Fetch user info using user ext_id criteria and check status passed" + +# #################################################### + +# - name: Activate local user +# ntnx_users_v2: +# ext_id: "{{todelete[0]}}" +# status: ACTIVE +# register: result +# ignore_errors: true + +# - name: Activate local user status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - result.ext_id == "{{todelete[0]}}" +# - result.response.status == "ACTIVE" +# fail_msg: "Activate local user failed" +# success_msg: "Activate local user passed" + +#################################################### + +- name: List all users + ntnx_users_info_v2: + register: users + ignore_errors: true + +- name: List all users Status + ansible.builtin.assert: + that: + - users.response is defined + - users.response | length > 0 + - users.failed == false + - users.changed == false + fail_msg: Unable to list all users + success_msg: User info obtained successfully + +- name: Set test user name and uuid + ansible.builtin.set_fact: + test_user_name: "{{ users.response.0.username }}" + test_user_uuid: "{{ users.response.0.ext_id }}" + +################################################## + +- name: Fetch only local
users + ntnx_users_info_v2: + filter: userType eq Schema.Enums.UserType'LOCAL' + register: result + ignore_errors: true + +- name: Get all users type and remove duplicates to check if only LOCAL users are fetched + ansible.builtin.set_fact: + user_type: "{{ result.response | map(attribute='user_type') | list | unique }}" + +- name: Fetch only local users Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + - user_type | length == 1 + - user_type[0] == "LOCAL" + fail_msg: Fetch only local users failed + success_msg: Fetch only local users passed + +#################################################### + +- name: Fetch user info using user ext_id criteria + ntnx_users_info_v2: + ext_id: "{{ test_user_uuid }}" + register: result + ignore_errors: true + +- name: Fetch user info using user ext_id criteria Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.username == "{{ test_user_name }}" + - result.response.ext_id == "{{ test_user_uuid }}" + fail_msg: Unable to fetch user using user ext_id criteria + success_msg: user info obtained successfully + +################################################## + +- name: List users with specific username + ntnx_users_info_v2: + filter: username eq '{{ test_user_name }}' + register: result + ignore_errors: true + +- name: List users with specific username Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response[0].username == "{{ test_user_name }}" + - result.response[0].ext_id == "{{ test_user_uuid }}" + - result.response | length == 1 + + fail_msg: Unable to List users with specific username + success_msg: users listed successfully with specific username + +################################################## + +- name: List users using limit 
+ ntnx_users_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: List users using limit Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length == 1 + + fail_msg: Unable to list users using limit + success_msg: users listed successfully using limit +################################################## +# Commented as the delete API is not available using v1 instead +# - name: Delete created users +# ntnx_users_v2: +# state: absent +# ext_id: "{{ item }}" +# register: result +# ignore_errors: true +# loop: "{{ todelete }}" + +# - name: Delete created users status +# vars: +# msg: "User with ext_id: {{ item.ext_id }} deleted successfully" +# ansible.builtin.assert: +# that: +# - result.changed == true +# - result.msg == "All items completed" +# - result.results | length == todelete | length +# - item.failed == false +# - item.ext_id in todelete +# - item.msg == "{{ msg }}" +# fail_msg: Delete created users failed +# success_msg: Delete created users passed +# loop: "{{ result.results }}" +# when: todelete | length > 0 and result | length > 0 + +- name: Delete created users + ntnx_users: + state: absent + user_uuid: "{{ item }}" + loop: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Delete created users status + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + - result.results | length == todelete | length + - item.failed == false + fail_msg: Delete created users failed + success_msg: Delete created users passed + loop: "{{ result.results }}" + when: todelete | length > 0 and result | length > 0 + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_vms_categories_v2/aliases b/tests/integration/targets/ntnx_vms_categories_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/integration/targets/ntnx_vms_categories_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_categories_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_categories_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_categories_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_categories_v2/tasks/main.yml new file mode 100644 index 000000000..ed367ac14 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_categories_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import vms_categories.yml + ansible.builtin.import_tasks: "vms_categories.yml" diff --git a/tests/integration/targets/ntnx_vms_categories_v2/tasks/vms_categories.yml b/tests/integration/targets/ntnx_vms_categories_v2/tasks/vms_categories.yml new file mode 100644 index 000000000..28f521f11 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_categories_v2/tasks/vms_categories.yml @@ -0,0 +1,306 @@ +--- +- name: Start testing ntnx_vms_categories_v2 module + ansible.builtin.debug: + msg: "Start testing ntnx_vms_categories_v2 module" + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Create VM to test categories + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ 
cluster.uuid }}" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set VM ext_id + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + +################################################################################## + +- name: List all categories and define variables + ntnx_categories_info_v2: + register: result + ignore_errors: true + +- name: Status of all categories + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: "Unable to list categories " + success_msg: "Categories listed successfully" + +- name: Set category ext_id + ansible.builtin.set_fact: + category_ext_id: '{{ result["response"][0]["ext_id"] }}' + category_ext_id1: '{{ result["response"][1]["ext_id"] }}' + category_ext_id2: '{{ result["response"][2]["ext_id"] }}' + category_ext_id3: '{{ result["response"][3]["ext_id"] }}' + +################################################################################## + +- name: Associate category to VM - check mode is enabled + ntnx_vms_categories_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id }}" + - ext_id: "{{ category_ext_id1 }}" + register: result + ignore_errors: true + check_mode: true + +- name: Set all categories + ansible.builtin.set_fact: + all_categories: "{{ result.response.categories | map(attribute='ext_id') | list }}" + +- name: Associate category to VM - check mode is enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - "'{{ category_ext_id }}' in {{ all_categories }}" + - "'{{ category_ext_id1 }}' in {{ all_categories }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: " Associate category to VM - check mode is enabled failed " + success_msg: " Associate category to VM - check mode is enabled passed " + 
+############################################################################### + +- name: Associate category to VM + ntnx_vms_categories_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Associate category to VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response[0] == "{{ category_ext_id }}" + - result.response | length == 1 + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Associate category to VM failed " + success_msg: "Associate category to VM passed " + +############################################################################### + +- name: Associate categories to VM + ntnx_vms_categories_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id1 }}" + - ext_id: "{{ category_ext_id2 }}" + register: result + ignore_errors: true + +- name: Associate categories to VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response[0] == "{{ category_ext_id }}" + - result.response[1] == "{{ category_ext_id1 }}" + - result.response[2] == "{{ category_ext_id2 }}" + - result.response | length == 3 + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Associate categories to VM failed " + success_msg: "Associate categories to VM passed " + +############################################################################### + +- name: Associate category to VM that is already associated to that VM + ntnx_vms_categories_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Associate category to VM that is already associated to that VM status + ansible.builtin.assert: + that: + - result.changed 
== false + - result.failed == false + - result.skipped == true + - result.msg == "Nothing to change." + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Associate category to VM that is already associated to that VM failed " + success_msg: "Associate category to VM that is already associated to that VM passed " + +############################################################################### + +- name: Get categories from VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Set all categories + ansible.builtin.set_fact: + all_categories: "{{ result.response.categories | map(attribute='ext_id') | list }}" + +- name: Get categories from VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.categories | length > 0 + - all_categories | length == 3 + - "'{{ category_ext_id }}' in {{ all_categories }}" + - "'{{ category_ext_id1 }}' in {{ all_categories }}" + - "'{{ category_ext_id2 }}' in {{ all_categories }}" + fail_msg: "Get categories from VM failed " + success_msg: "Get categories from VM passed " + +################################################################################## + +- name: Disassociate category from VM - check mode is enabled + ntnx_vms_categories_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id }}" + - ext_id: "{{ category_ext_id1 }}" + register: result + ignore_errors: true + check_mode: true + +- name: Set all categories + ansible.builtin.set_fact: + all_categories: "{{ result.response.categories | map(attribute='ext_id') | list }}" + +- name: Disassociate category from VM - check mode is enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - "'{{ category_ext_id }}' in {{ all_categories }}" + - "'{{ category_ext_id1 }}' in {{ all_categories }}" + - result.vm_ext_id == "{{ 
vm_uuid }}" + fail_msg: " Disassociate category from VM - check mode is enabled failed " + success_msg: " Disassociate category from VM - check mode is enabled passed " + +############################################################################### + +- name: Disassociate category from VM that is not associated to that VM + ntnx_vms_categories_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id3 }}" + register: result + ignore_errors: true + +- name: Disassociate category from VM that is not associated to that VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.msg == "Nothing to change." + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Disassociate category from VM that is not associated to that VM failed " + success_msg: "Disassociate category from VM that is not associated to that VM passed " + +############################################################################### + +- name: Disassociate category from VM + ntnx_vms_categories_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + categories: + - ext_id: "{{ category_ext_id }}" + register: result + ignore_errors: true + +- name: Disassociate category from VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response[0] == "{{ category_ext_id1 }}" + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Disassociate category from VM failed " + success_msg: "Disassociate category from VM passed " + +############################################################################### + +- name: Get categories from VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Set all categories + ansible.builtin.set_fact: + all_categories: "{{ result.response.categories | map(attribute='ext_id') | 
list }}" + +- name: Get categories from VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.categories | length > 0 + - all_categories | length == 2 + - "'{{ category_ext_id1 }}' in {{ all_categories }}" + - "'{{ category_ext_id2 }}' in {{ all_categories }}" + fail_msg: "Get categories from VM failed " + success_msg: "Get categories from VM passed " + +############################################################################### + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_vms_cd_rom_v2/aliases b/tests/integration/targets/ntnx_vms_cd_rom_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_cd_rom_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_cd_rom_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_cd_rom_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/cd_rom_operations.yml b/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/cd_rom_operations.yml new file mode 100644 index 000000000..027ac0fb9 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/cd_rom_operations.yml @@ -0,0 +1,441 @@ +--- +- name: Start testing ntnx_vms_cd_rom_v2 + ansible.builtin.debug: + msg: Start testing ntnx_vms_cd_rom_v2 + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- 
name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create VM to test CD ROM + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set VM attributes + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + +################################################################################# + +- name: Create CD ROM with minimum requirements + ntnx_vms_cd_rom_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: CD-ROM Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.bus_type == "IDE" + - result.response.disk_address.index == 0 + - result.response.backing_info == none + - result.response.ext_id == result.ext_id + fail_msg: "Create CD ROM with minimum requirements failed " + success_msg: "Create CD ROM with minimum requirements passed " + +################################################################################# + +- name: Create CD ROM with all attributes - check mode is enabled + ntnx_vms_cd_rom_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + disk_address: + bus_type: "IDE" + index: 1 + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + check_mode: true + register: result + ignore_errors: true + +- name: CD-ROM Creation Status + 
ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disk_address.bus_type == "IDE" + - result.response.disk_address.index == 1 + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: " Create CD ROM with all attributes check mode failed " + success_msg: "Create CD ROM with all attributes check mode passed " + +################################################################################# + +- name: Create CD ROM with all attributes + ntnx_vms_cd_rom_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + disk_address: + bus_type: "IDE" + index: 1 + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + register: result + ignore_errors: true + +- name: CD-ROM Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.bus_type == "IDE" + - result.response.disk_address.index == 1 + - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.ext_id == result.ext_id + fail_msg: " Create CD ROM with all attributes failed " + success_msg: "Create CD ROM with all attributes passed " + +################################################################################# + +- name: Get CD ROM IDs from VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get CD ROM IDs from VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cd_roms is defined + - result.response.cd_roms | length == 2 + - 
result.response.cd_roms[0].disk_address.bus_type == "IDE" + - result.response.cd_roms[0].disk_address.index == 0 + - result.response.cd_roms[0].backing_info == none + - result.response.cd_roms[1].disk_address.bus_type == "IDE" + - result.response.cd_roms[1].disk_address.index == 1 + - result.response.cd_roms[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + fail_msg: "Unable to get CD ROMs IDs" + success_msg: "CD ROMs IDs are fetched successfully " + +- name: Adding CD ROM IDs to todelete list + ansible.builtin.set_fact: + todelete: "{{ result.response.cd_roms | map(attribute='ext_id') | list }}" + +################################################################################ + +- name: Inject ISO in CD ROM of a VM - check mode is enabled + ntnx_vms_cd_rom_iso_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ iso_image.image_ext_ids[0] }}" + register: result + check_mode: true + ignore_errors: true + +- name: Inject ISO in CD ROM of a VM with check mode enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.data_source.reference.image_ext_id == "{{ iso_image.image_ext_ids[0] }}" + fail_msg: "Inject ISO in CD ROM of a VM with check mode enabled failed " + success_msg: "Inject ISO in CD ROM of a VM with check mode enabled passed " + +################################################################################# + +- name: Inject ISO in CD ROM of a VM + ntnx_vms_cd_rom_iso_v2: + state: present + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ iso_image.image_ext_ids[0] }}" + register: result + ignore_errors: true + +- name: 
Inject ISO in CD ROM of a VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == todelete[0] + - result.response.backing_info.data_source.reference.image_ext_id == "{{ iso_image.image_ext_ids[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Inject ISO in CD ROM of a VM failed " + success_msg: "Inject ISO in CD ROM of a VM passed " + +################################################################################# + +- name: Get CD ROM details + ntnx_vms_cd_rom_info_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + +- name: Get CD ROM details status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == todelete[0] + - result.response.disk_address.bus_type == "IDE" + - result.response.backing_info.data_source.reference.image_ext_id == "{{ iso_image.image_ext_ids[0] }}" + fail_msg: "Unable to get CD ROM details " + success_msg: "CD ROM details are fetched successfully " + +################################################################################ + +- name: Eject ISO from CD ROM of a VM - check mode is enabled + ntnx_vms_cd_rom_iso_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + check_mode: true + +- name: Eject ISO from CD ROM of a VM with check mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == todelete[0] + - todelete[0] in result.response + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Eject ISO from CD ROM of a VM with check mode failed " + success_msg: "Eject ISO from CD ROM of a VM with check mode passed " + +################################################################################# + +- name: Eject ISO from CD ROM of a VM + 
ntnx_vms_cd_rom_iso_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + register: result + ignore_errors: true + +- name: Eject ISO from CD ROM of a VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == todelete[0] + - result.vm_ext_id == "{{ vm_uuid }}" + - result.task_ext_id is defined + - result.response.ext_id == todelete[0] + - result.response.backing_info == none + fail_msg: "Eject ISO from CD ROM of a VM failed " + success_msg: "Eject ISO from CD ROM of a VM passed " + +################################################################################# + +- name: Get CD ROM details + ntnx_vms_cd_rom_info_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + register: result + ignore_errors: true + +- name: Get CD ROM details status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == todelete[0] + - result.response.disk_address.bus_type == "IDE" + - result.response.backing_info == none + fail_msg: "Unable to get CD ROM details " + success_msg: "CD ROM details are fetched successfully " + +################################################################################ + +- name: Get all CD ROMs details + ntnx_vms_cd_rom_info_v2: + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Set CD ROM list + ansible.builtin.set_fact: + cd_rom_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: Get all CD ROMs details status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - cd_rom_list | length == 2 + - cd_rom_list[0] == todelete[0] + - cd_rom_list[1] == todelete[1] + fail_msg: "Get all CD ROMs details failed " + 
success_msg: "Get all CD ROMs details passed " + +################################################################################ + +- name: Get all CD ROMs details with limit 1 + ntnx_vms_cd_rom_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 1 + register: result + ignore_errors: true + +- name: Set CD ROM list + ansible.builtin.set_fact: + cd_rom_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: Get all CD ROMs details with limit 1 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - cd_rom_list | length == 1 + - cd_rom_list[0] == todelete[0] + fail_msg: "Get all CD ROMs details with limit 1 failed " + success_msg: "Get all CD ROMs details with limit 1 passed " + +################################################################################ + +- name: Get all CD ROMs details with limit 2 + ntnx_vms_cd_rom_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 2 + register: result + ignore_errors: true + +- name: Set CD ROM list + ansible.builtin.set_fact: + cd_rom_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: Get all CD ROMs details with limit 2 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - cd_rom_list | length == 2 + - cd_rom_list[0] == todelete[0] + - cd_rom_list[1] == todelete[1] + fail_msg: "Get all CD ROMs details with limit 2 failed " + success_msg: "Get all CD ROMs details with limit 2 passed " + +############################################################################### + +- name: Delete All CD ROMs + ntnx_vms_cd_rom_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ item }}" + loop: "{{ todelete }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.ext_id 
in todelete + - result.results | length == todelete | length + fail_msg: "Unable to delete CD ROM " + success_msg: "CD ROM is deleted successfully " + loop: "{{ result.results }}" + +################################################################################# + +- name: Verify CD ROMs are deleted + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Verify CD ROMs are deleted status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.cd_roms == none + fail_msg: "Verify CD ROMs are deleted failed" + success_msg: "Verify CD ROMs are deleted passed " + +################################################################################# + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + - result.vm_uuid == "{{ vm_uuid }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/main.yml new file mode 100644 index 000000000..494704802 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_cd_rom_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import cd_rom_operations.yml + ansible.builtin.import_tasks: "cd_rom_operations.yml" diff --git a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml index cf168c02f..ca6226a7f 100644 --- 
a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml +++ b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml @@ -1,134 +1,139 @@ -- debug: +- name: Start testing VM clone + ansible.builtin.debug: msg: Start testing VM clone - name: Create Cloud-init Script file - copy: + ansible.builtin.copy: + mode: "0644" dest: "init_cloud.yml" content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: myNutanixVM + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: myNutanixVM - name: VM with minimum requiremnts to clone ntnx_vms: - state: present - name: integration_test_clone_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_clone_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: vm ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts to clone ' - success_msg: 'Succes: VM with minimum requiremnts created successfully ' + fail_msg: "Fail: Unable to create VM with minimum requiremnts to clone " + success_msg: "Succes: VM with minimum requiremnts created successfully " ############################## -- name: clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off +- name: Clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - vcpus: 2 - cores_per_vcpu: 2 - memory_gb: 2 - name: cloned vm - timezone: GMT - force_power_off: true + src_vm_uuid: "{{ vm.vm_uuid }}" + vcpus: 2 + cores_per_vcpu: 2 + memory_gb: 2 + name: cloned vm + timezone: GMT + force_power_off: true register: result ignore_errors: 
true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off' - success_msg: 'Succes: VM cloned successfully and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off ' + fail_msg: "Fail: Unable to clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off" + success_msg: "Succes: VM cloned successfully and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off " -- set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' +- name: Adding VM uuid to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.vm_uuid ] }}" ############################## -- name: clone vm and add network +- name: Clone vm and add network ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ static.uuid }}" + src_vm_uuid: "{{ vm.vm_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ static.uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm while it is off ' - success_msg: 'Succes: VM cloned successfully ' + fail_msg: "Fail: Unable to clone vm while it is off " + success_msg: "Succes: VM cloned successfully " -- set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' +- name: Adding VM uuid to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.vm_uuid ] }}" ########################################### -- name: clone vm with check mode +- name: Clone vm with check mode ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - networks: - - is_connected: false - subnet: - name: "{{ network.dhcp.name }}" + src_vm_uuid: "{{ vm.vm_uuid 
}}" + networks: + - is_connected: false + subnet: + name: "{{ network.dhcp.name }}" register: result ignore_errors: true - check_mode: yes + check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.task_uuid != "" - success_msg: ' Success: returned response as expected ' - fail_msg: ' Fail: clone vm with check_mode ' + success_msg: " Success: returned response as expected " + fail_msg: " Fail: clone vm with check_mode " ########################################### -- name: clone vm with script +- name: Clone vm with script ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - guest_customization: - type: "cloud_init" - script_path: "./init_cloud.yml" - is_overridable: True + src_vm_uuid: "{{ vm.vm_uuid }}" + guest_customization: + type: "cloud_init" + script_path: "./init_cloud.yml" + is_overridable: true register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm vm with script' - success_msg: 'Succes: VM cloned with script successfully ' + fail_msg: "Fail: Unable to clone vm vm with script" + success_msg: "Succes: VM cloned with script successfully " -- set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' +- name: Adding VM uuid to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.vm_uuid ] }}" ########################################### - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - loop: '{{ todelete }}' + state: absent + vm_uuid: "{{ item }}" + loop: "{{ todelete }}" - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ vm.vm_uuid }}' + state: absent + vm_uuid: "{{ vm.vm_uuid }}" diff --git a/tests/integration/targets/ntnx_vms_clone/tasks/main.yml b/tests/integration/targets/ntnx_vms_clone/tasks/main.yml index 
b19cfc1ec..361bacf0a 100644 --- a/tests/integration/targets/ntnx_vms_clone/tasks/main.yml +++ b/tests/integration/targets/ntnx_vms_clone/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" diff --git a/tests/integration/targets/ntnx_vms_clone_v2/aliases b/tests/integration/targets/ntnx_vms_clone_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_clone_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_clone_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_clone_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_clone_v2/tasks/clone.yml b/tests/integration/targets/ntnx_vms_clone_v2/tasks/clone.yml new file mode 100644 index 000000000..47c744940 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_clone_v2/tasks/clone.yml @@ -0,0 +1,228 @@ +--- +- name: Start ntnx_vms_clone_v2 tests + ansible.builtin.debug: + msg: Start ntnx_vms_clone_v2 tests + +- name: Set random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create VM to use it for cloning + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + register: result 
+ ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set VM attributes + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + vm_name: '{{ result["response"]["name"] }}' + vm_num_threads_per_core: '{{ result["response"]["num_threads_per_core"] }}' + vm_num_sockets: '{{ result["response"]["num_sockets"] }}' + vm_num_cores_per_socket: '{{ result["response"]["num_cores_per_socket"] }}' + vm_power_state: '{{ result["response"]["power_state"] }}' + vm_machine_type: '{{ result["response"]["machine_type"] }}' + todelete: '{{ todelete + [ result["ext_id"] ] }}' + +################################################################################## + +- name: Clone VM - check mode is enabled + ntnx_vms_clone_v2: + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_clone" + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + check_mode: true + register: result + ignore_errors: true + +- name: Clone VM - check mode is enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == '{{ vm_name }}_clone' + - result.response.num_sockets == 2 + - result.response.num_cores_per_socket == 2 + - result.response.num_threads_per_core == 2 + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Clone VM - check mode is enabled failed" + success_msg: "Clone VM - check mode is enabled passed" + +################################################################################# + +- name: Clone VM with same attributes values + ntnx_vms_clone_v2: + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_clone1" + register: result + ignore_errors: true + +- name: Adding VM to delete list + ansible.builtin.set_fact: + 
todelete: '{{ todelete + [ result["response"]["ext_id"] ] }}' + +- name: Clone VM with same attributes values status + ansible.builtin.assert: + that: + - result is defined + - result.response.name == '{{ vm_name }}_clone1' + - result.response.num_sockets == {{ vm_num_sockets }} + - result.response.num_cores_per_socket == {{ vm_num_cores_per_socket }} + - result.response.num_threads_per_core == {{ vm_num_threads_per_core }} + - result.response.power_state == "{{ vm_power_state }}" + - result.response.machine_type == "{{ vm_machine_type }}" + fail_msg: "Unable to clone VM" + success_msg: "VM cloned successfully" + +################################################################################# + +- name: Clone VM with different attributes values + ntnx_vms_clone_v2: + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_clone2" + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + register: result + ignore_errors: true + +- name: Adding VM to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["ext_id"] ] }}' + +- name: Clone VM with different attributes values status + ansible.builtin.assert: + that: + - result is defined + - result.response.name == '{{ vm_name }}_clone2' + - result.response.num_sockets == 2 + - result.response.num_cores_per_socket == 2 + - result.response.num_threads_per_core == 2 + - result.response.power_state == "{{ vm_power_state }}" + - result.response.machine_type == "{{ vm_machine_type }}" + fail_msg: "Unable to clone VM" + success_msg: "VM cloned successfully" + +################################################################################## + +- name: Clone VM with guest customization + ntnx_vms_clone_v2: + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_clone3" + guest_customization: + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result + 
ignore_errors: true + +- name: Adding VM to delete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["ext_id"] ] }}' + +- name: Clone VM with guest customization status + ansible.builtin.assert: + that: + - result is defined + - result.response.name == '{{ vm_name }}_clone3' + - result.response.power_state == "{{ vm_power_state }}" + - result.response.machine_type == "{{ vm_machine_type }}" + - result.response.cd_roms[0].iso_type == "GUEST_CUSTOMIZATION" + fail_msg: "Unable to clone VM" + success_msg: "VM cloned successfully" + +################################################################################## + +- name: Clone VM - check mode is enabled + ntnx_vms_clone_v2: + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_clone2" + num_sockets: 4 + num_cores_per_socket: 4 + num_threads_per_core: 4 + register: result + ignore_errors: true + check_mode: true + +- name: Clone VM with check mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == '{{ vm_name }}_clone2' + - result.response.num_sockets == 4 + - result.response.num_cores_per_socket == 4 + - result.response.num_threads_per_core == 4 + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Clone VM - check mode is enabled failed" + success_msg: "Clone VM - check mode is enabled passed" + +################################################################################## + +- name: Clone VM that does not exist + ntnx_vms_clone_v2: + ext_id: "12345678-1234-1234-1234-123456789123" + name: "{{ vm_name }}_clone4" + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + register: result + ignore_errors: true + +- name: Cloning VM that does not exist status + ansible.builtin.assert: + that: + - result is defined + - result.changed == false + - result.failed == true + - result.status == 404 + fail_msg: "Clone VM that does not exist failed" + success_msg: "Clone VM that does 
not exist passed" + +################################################################################## + +- name: Delete all VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " + loop: "{{ result.results }}" diff --git a/tests/integration/targets/ntnx_vms_clone_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_clone_v2/tasks/main.yml new file mode 100644 index 000000000..02e8179a9 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_clone_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import clone.yml + ansible.builtin.import_tasks: "clone.yml" diff --git a/tests/integration/targets/ntnx_vms_disks_v2/aliases b/tests/integration/targets/ntnx_vms_disks_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_disks_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_disks_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_disks_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_disks_v2/tasks/disks_operations.yml b/tests/integration/targets/ntnx_vms_disks_v2/tasks/disks_operations.yml new file mode 100644 index 000000000..532b09812 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_disks_v2/tasks/disks_operations.yml @@ -0,0 +1,740 @@ +--- +- name: Start testing ntnx_vms_disks_v2 + ansible.builtin.debug: + msg: Start testing ntnx_vms_disks_v2 + +- name: 
Generate random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM and VG Names + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + vg_name: "{{ random_name }}_vg_test" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create VM with minimum requirements + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + storage_config: + is_flash_mode_enabled: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + fail_msg: "Unable to Create VM with minimum requirements " + success_msg: "VM with minimum requirements created successfully " + +- name: Set VM ext_id + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + +################################################################################# + +- name: Create a disk for a VM - check mode is enabled + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_config: + is_flash_mode_enabled: true + storage_container: + ext_id: "{{ storage_container.uuid }}" + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: "SCSI" + index: 1 + state: present + register: result + ignore_errors: true + check_mode: true + +- name: Create a disk for a VM - check mode is enabled status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - 
result.response.backing_info.storage_config.is_flash_mode_enabled == true + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.index == 1 + fail_msg: "Create a disk for a VM - check mode is enabled failed" + success_msg: "Create a disk for a VM - check mode is enabled passed" + +################################################################################# + +- name: Create Disk with storage container + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: "SCSI" + index: 1 + state: present + register: result + ignore_errors: true + +- name: Create Disk with storage container status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.backing_info.disk_size_bytes == 26843545600 + - result.response.disk_address.index == 1 + - result.response.disk_address.bus_type == "SCSI" + fail_msg: "Create Disk with storage container failed" + success_msg: "Create Disk with storage container passed" + +################################################################################ + +- name: Create Disk with image reference + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: "SCSI" + index: 2 + state: present + register: result + ignore_errors: true + +- name: Create Disk with image reference status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - 
result.failed == false + - result.ext_id is defined + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disk_address.index == 2 + - result.response.disk_address.bus_type == "SCSI" + fail_msg: "Create Disk with image reference failed" + success_msg: "Create Disk with image reference passed" + +############################################################################### + +- name: Get disks ids from VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Adding disks ids to todelete + ansible.builtin.set_fact: + todelete: "{{ result.response.disks | map(attribute='ext_id') | list }}" + +- name: Get disks ids from VM status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.disks is defined + - todelete | length == 2 + - result.response.disks | length == 2 + - result.response.disks[0].disk_address.index == 1 + - result.response.disks[1].disk_address.index == 2 + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].backing_info.storage_config.is_flash_mode_enabled == true + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[1].backing_info.storage_config.is_flash_mode_enabled == true + fail_msg: "Unable to get disks ids from VM " + success_msg: "disks ids are fetched from VM successfully " + 
+################################################################################ + +- name: Create Disk with disk reference + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + vm_disk_reference: + disk_ext_id: "{{ todelete[0] }}" + vm_reference: + ext_id: "{{ vm_uuid }}" + disk_address: + bus_type: "SCSI" + index: 2 + disk_address: + bus_type: "SCSI" + index: 3 + state: present + register: result + ignore_errors: true + +# this fails since data source is null in response +# - name: Create Disk with disk reference status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - result.ext_id is defined +# - result.task_ext_id is defined +# - result.vm_ext_id == "{{ vm_uuid }}" +# - result.response.backing_info.data_source.vm_disk_reference.disk_ext_id == "{{ todelete[0] }}" +# - result.response.backing_info.data_source.vm_disk_reference.vm_reference.ext_id == "{{ vm_uuid }}" +# - result.response.backing_info.data_source.vm_disk_reference.disk_address.index == 2 +# - result.response.backing_info.data_source.vm_disk_reference.disk_address.bus_type == "SCSI" +# - result.response.disk_address.index == 3 +# - result.response.disk_address.bus_type == "SCSI" +# fail_msg: "Create Disk with disk reference failed" +# success_msg: "Create Disk with disk reference passed" + +################################################################################ + +- name: Create Disk with minimum spec + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + state: present + register: result + ignore_errors: true + +- name: Create Disk without disk index and bus type status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is 
defined + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.index == 0 + - result.response.disk_address.bus_type == "SCSI" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.backing_info.disk_size_bytes == 26843545600 + fail_msg: "Create Disk without disk index and bus type failed" + success_msg: "Create Disk without disk index and bus type passed" + +################################################################################# + +- name: Create Volume group with min spec and no Auth + ntnx_volume_groups_v2: + state: "present" + name: "{{ vg_name }}" + description: "Volume group with min spec and no Auth" + cluster_reference: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Create Volume group with min spec and no Auth status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ vg_name }}" + fail_msg: "Create Volume group with min spec and no Auth failed" + success_msg: "Create Volume group with min spec and no Auth passed" + +- name: Set VG ext_id + ansible.builtin.set_fact: + vg1_uuid: '{{ result["response"]["ext_id"] }}' + +################################################################################ + +- name: Create disk from volume group disk + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 6 + description: "Create disk from volume group disk description" + disk_data_source_reference: + entity_type: "STORAGE_CONTAINER" + ext_id: "{{ storage_container.uuid }}" + disk_size_bytes: 26843545600 + register: result + ignore_errors: true + +- name: Set VG ref + ansible.builtin.set_fact: + vg_ref: '{{ result["response"]["ext_id"] }}' + +- name: Create disk from volume group disk status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + 
- result.failed == false + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.disk_size_bytes == 26843545600 + - result.response.storage_container_id == "{{ storage_container.uuid }}" + - result.response.index == 6 + fail_msg: "Create disk from volume group disk failed" + success_msg: "Create disk from volume group disk passed" + +################################################################################ + +- name: Create Disk with VG reference + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + backing_info: + adsf_volume_group: + volume_group_ext_id: "{{ vg1_uuid }}" + disk_address: + bus_type: "SCSI" + index: 7 + state: present + register: result + ignore_errors: true + +- name: Create Disk with VG reference status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.task_ext_id is defined + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.disk_address.index == 7 + - result.response.disk_address.bus_type == "SCSI" + fail_msg: "Create Disk with VG reference failed" + success_msg: "Create Disk with VG reference passed" + +################################################################################ + +- name: Get disks ids from VM and verify disks info + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Adding disks ids to todelete + ansible.builtin.set_fact: + todelete: "{{ result.response.disks | map(attribute='ext_id') | list }}" + +- name: Get disks ids from VM and verify disks info status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.disks is defined + - todelete | length == 5 + - result.response.disks | length == 5 + - result.response.disks[0].disk_address.index == 1 + - result.response.disks[1].disk_address.index == 2 + - 
result.response.disks[2].disk_address.index == 3 + - result.response.disks[3].disk_address.index == 0 + - result.response.disks[4].disk_address.index == 7 + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[2].disk_address.bus_type == "SCSI" + - result.response.disks[3].disk_address.bus_type == "SCSI" + - result.response.disks[4].disk_address.bus_type == "SCSI" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].backing_info.storage_config.is_flash_mode_enabled == true + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[1].backing_info.storage_config.is_flash_mode_enabled == true + - result.response.disks[2].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[2].backing_info.storage_config.is_flash_mode_enabled == true + - result.response.disks[3].backing_info.disk_size_bytes == 26843545600 + - result.response.disks[3].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[4].backing_info.volume_group_ext_id == "{{ vg1_uuid }}" + fail_msg: "Get disks ids from VM and verify disks info failed " + success_msg: "Get disks ids from VM and verify disks info passed " + +################################################################################# + +- name: Update Disk - check mode is enabled + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[1] }}" + backing_info: + vm_disk: + disk_size_bytes: 29527900160 + storage_container: + ext_id: "{{ storage_container.uuid }}" + state: present + register: result + ignore_errors: true + check_mode: true + +- name: Update Disk - check mode is enabled status + 
ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.backing_info.storage_config.is_flash_mode_enabled == true + - result.response.backing_info.disk_ext_id == "{{ todelete[1] }}" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.index == 2 + fail_msg: "Update Disk - check mode is enabled failed" + success_msg: "Update Disk - check mode is enabled passed" + +################################################################################# + +# Update disk image is not working, the disk is not getting updated +# - name: Update disk image +# ntnx_vms_disks_v2: +# vm_ext_id: "{{ vm_uuid }}" +# ext_id: "{{ todelete[1] }}" +# backing_info: +# vm_disk: +# storage_container: +# ext_id: "{{ storage_container.uuid }}" +# data_source: +# reference: +# image_reference: +# image_ext_id: "{{ disk_image.image_ext_ids[1] }}" +# state: present +# register: result +# ignore_errors: true + +# - name: Update disk image status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[1] }}" +# - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" +# - result.response.backing_info.storage_config.is_flash_mode_enabled == true +# - result.response.backing_info.disk_ext_id == "{{ todelete[1] }}" +# - result.vm_ext_id == "{{ vm_uuid }}" +# fail_msg: "Update disk image failed" +# success_msg: "Update disk image passed" + +################################################################################# + +- name: Update disk data source and disk_size_bytes together + ntnx_vms_disks_v2: + vm_ext_id: "{{ 
vm_uuid }}" + ext_id: "{{ todelete[1] }}" + backing_info: + vm_disk: + disk_size_bytes: 268435456000 + storage_container: + ext_id: "{{ storage_container.uuid }}" + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + state: present + register: result + ignore_errors: true + +- name: Update disk data source and disk_size_bytes together status + ansible.builtin.assert: + that: + - result.response == None + - result.changed == false + - result.failed == false + - result.error == "data source and disk_size_bytes cannot be sent together" + fail_msg: "Update disk data source and disk_size_bytes together failed" + success_msg: "Update disk data source and disk_size_bytes together passed" + +################################################################################# + +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +- name: Pausing for 10 seconds until VM is powered on + ansible.builtin.pause: + seconds: 10 + +- name: Get VM power state + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM power state status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.power_state == 'ON' + fail_msg: "VM is not powered on " + success_msg: "VM is powered on successfully " + +################################################################################ + +- name: Update disk size when VM is powered on + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[1] }}" + backing_info: + vm_disk: + disk_size_bytes: 29527900160 + 
state: present + register: result + ignore_errors: true + +- name: Update disk size when VM is powered on status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.backing_info.storage_config.is_flash_mode_enabled == true + - result.response.backing_info.disk_ext_id == "{{ todelete[1] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.disk_address.index == 2 + - result.response.backing_info.disk_size_bytes == 29527900160 + fail_msg: "Update disk size when VM is powered on failed" + success_msg: "Update disk size when VM is powered on passed" + +################################################################################ + +- name: Check for idempotency by updating disk size with same value + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[1] }}" + backing_info: + vm_disk: + disk_size_bytes: 29527900160 + state: present + register: result + ignore_errors: true + +- name: Check for idempotency by updating disk size with same value status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ fail_msg: "Check for idempotency by updating disk size with same value failed" + success_msg: "Check for idempotency by updating disk size with same value passed" + +################################################################################# + +- name: Get disk info + ntnx_vms_disks_info_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[1] }}" + register: result + ignore_errors: true + +- name: Get disk info status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.backing_info.storage_config.is_flash_mode_enabled == true + - result.response.backing_info.disk_ext_id == "{{ todelete[1] }}" + - result.response.disk_address.bus_type == "SCSI" + - result.response.disk_address.index == 2 + fail_msg: "Get disk info failed" + success_msg: "Get disk info passed" + +################################################################################# + +- name: Get disk info that does not exist + ntnx_vms_disks_info_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "12345678-1234-1234-1234-123456789123" + register: result + ignore_errors: true + +- name: Get disk info that does not exist status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == true + - result.status == 404 + fail_msg: "Get disk info that does not exist failed" + success_msg: "Get disk info that does not exist passed" + +################################################################################# + +- name: List all disks from VM + ntnx_vms_disks_info_v2: + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: List all disks from VM status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == false + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - item.ext_id in todelete + - result.response | length == todelete | length + fail_msg: "List all disks from VM failed" + success_msg: "List all disks from VM passed" + loop: "{{ result.response }}" + +################################################################################# + +- name: List all disks from VM with limit 1 + ntnx_vms_disks_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 1 + register: result + ignore_errors: true + +- name: List all disks from VM with limit 1 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response | length == 1 + fail_msg: "List all disks from VM with limit 1 failed" + success_msg: "List all disks from VM with limit 1 passed" + loop: "{{ result.response }}" + +################################################################################# + +- name: List all disks from VM with limit 2 + ntnx_vms_disks_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 2 + register: result + ignore_errors: true + +- name: List all disks from VM with limit 2 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response | length == 2 + fail_msg: "List all disks from VM with limit 2 failed" + success_msg: "List all disks from VM with limit 2 passed" + loop: "{{ result.response }}" + +################################################################################# + +- name: Delete all disks from VM + ntnx_vms_disks_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ item }}" + state: absent + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.response is defined + - item.changed == true + - 
item.failed == false + - item.response.status == 'SUCCEEDED' + - item.item in todelete + - result.results | length == todelete | length + fail_msg: "Unable to delete disks from VM " + success_msg: "Disks from VM are deleted successfully " + loop: "{{ result.results }}" + +################################################################################ + +- name: Delete Volume group + ntnx_volume_groups_v2: + state: "absent" + ext_id: "{{ vg1_uuid }}" + register: result + ignore_errors: true + +- name: Delete Volume group status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + - result.ext_id == "{{ vg1_uuid }}" + fail_msg: "Delete Volume group failed" + success_msg: "Volume group deleted successfully " + +################################################################################# + +- name: Delete the VM + ntnx_vms_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + - result.ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_vms_disks_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_disks_v2/tasks/main.yml new file mode 100644 index 000000000..a95be469f --- /dev/null +++ b/tests/integration/targets/ntnx_vms_disks_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import disks_operations.yml + ansible.builtin.import_tasks: "disks_operations.yml" diff --git 
a/tests/integration/targets/ntnx_vms_ngt_v2/aliases b/tests/integration/targets/ntnx_vms_ngt_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_ngt_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_ngt_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_ngt_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_ngt_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_ngt_v2/tasks/main.yml new file mode 100644 index 000000000..8f73295a0 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_ngt_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module_defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import vms_ngt.yml + ansible.builtin.import_tasks: vms_ngt.yml diff --git a/tests/integration/targets/ntnx_vms_ngt_v2/tasks/vms_ngt.yml b/tests/integration/targets/ntnx_vms_ngt_v2/tasks/vms_ngt.yml new file mode 100644 index 000000000..e5ab10434 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_ngt_v2/tasks/vms_ngt.yml @@ -0,0 +1,821 @@ +--- +- name: Start testing ntnx_vms_ngt_v2 + ansible.builtin.debug: + msg: Start testing ntnx_vms_ngt_v2 + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set VM name + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Create VM + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ ngt_config.image_uuid }}" + disk_address: + bus_type: SCSI + index: 0 + 
cd_roms: + - disk_address: + bus_type: IDE + index: 0 + nics: + - network_info: + subnet: + ext_id: "{{ network.dhcp.uuid }}" + ipv4_config: + should_assign_ip: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.cd_roms[0].disk_address.bus_type == "IDE" + - result.response.cd_roms[0].disk_address.index == 0 + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 0 + - result.response.disks[0].backing_info.data_source.reference.image_ext_id == "{{ ngt_config.image_uuid }}" + - result.response.nics[0].network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set vm_uuid + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + +############################################################################## + +- name: Power on VM + ntnx_vms_power_actions_v2: + state: power_on + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Power on Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to Power on VM " + success_msg: "VM is powered on successfully " + +################################################################################# + +- name: Get VM info and check if IP address is assigned + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + retries: 60 + delay: 5 + until: result.response.nics[0].network_info.ipv4_info.learned_ip_addresses[0].value is defined + +- name: Get VM info and check if IP address is assigned Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed 
== false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.nics[0].network_info.ipv4_info.learned_ip_addresses[0].value is defined + fail_msg: "Unable to get VM info and check if IP address is assigned " + success_msg: "VM info is retrieved successfully and IP address is assigned " + +################################################################################# + +- name: Install NGT in VM with check mode enabled + ntnx_vms_ngt_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + credential: + username: "{{ ngt_config.username }}" + password: "{{ ngt_config.password }}" + reboot_preference: + schedule_type: LATER + schedule: + start_time: "2026-08-01T00:00:00Z" + register: result + ignore_errors: true + check_mode: true + +- name: Install NGT in VM with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities[1] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 2 + - result.response.credential.username == "{{ ngt_config.username }}" + - result.response.reboot_preference.schedule_type == "LATER" + - result.response.reboot_preference.schedule.start_time == "2026-08-01T00:00:00Z" + fail_msg: "Unable to install NGT with check mode enabled " + success_msg: "NGT is installed successfully with check mode enabled " + +################################################################################## + +- name: Install NGT in VM with reboot preference set to IMMEDIATE + ntnx_vms_ngt_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - VSS_SNAPSHOT + credential: + username: "{{ ngt_config.username }}" + password: "{{ ngt_config.password }}" + reboot_preference: + schedule_type: IMMEDIATE + register: result + ignore_errors: true + +- 
name: Install NGT in VM with reboot preference set to IMMEDIATE Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.capabilities[0] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 1 + - result.task_ext_id is defined + fail_msg: "Unable to install NGT with reboot preference set to IMMEDIATE " + success_msg: "NGT is installed successfully with reboot preference set to IMMEDIATE " + +################################################################################## + +- name: Sleep for 1 minute until NGT is installed completely + ansible.builtin.pause: + minutes: 1 + +- name: Get NGT config from VM after NGT is installed + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get NGT config from VM after NGT is installed Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.guest_tools.is_installed == true + - result.response.guest_tools.is_iso_inserted == false + - result.response.guest_tools.is_reachable == true + - result.response.guest_tools.is_enabled == true + - result.response.guest_tools.capabilities[0] == "VSS_SNAPSHOT" + - result.response.guest_tools.capabilities | length == 1 + - result.response.guest_tools.guest_os_version is defined + - result.response.guest_tools.version is defined + fail_msg: "Unable to get NGT config from VM " + success_msg: "NGT config from VM is retrieved successfully " + +################################################################################## + +- name: Fetch NGT config after installing NGT in VM with reboot preference set to IMMEDIATE + ntnx_vms_ngt_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch NGT config after installing NGT in VM with reboot preference set to 
IMMEDIATE Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.is_installed == true + - result.response.is_iso_inserted == false + - result.response.is_reachable == true + - result.response.is_enabled == true + - result.response.capabilities[0] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 1 + fail_msg: "Unable to fetch NGT config after installing NGT in VM with reboot preference set to IMMEDIATE " + success_msg: "NGT config is fetched successfully after installing NGT in VM with reboot preference set to IMMEDIATE " + +################################################################################## + +- name: Install NGT in VM while it is already installed + ntnx_vms_ngt_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - VSS_SNAPSHOT + credential: + username: "{{ ngt_config.username }}" + password: "{{ ngt_config.password }}" + reboot_preference: + schedule_type: IMMEDIATE + register: result + ignore_errors: true + +- name: Install NGT in VM while it is already installed Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.ext_id == "{{ vm_uuid }}" + - result.msg == "NGT is already installed in given vm." 
+ fail_msg: "Install NGT in VM while it is already installed failed " + success_msg: "Install NGT in VM while it is already installed passed " + +################################################################################## + +- name: Sleep for 2 minutes before uninstalling NGT + ansible.builtin.pause: + minutes: 2 + +- name: Uninstall NGT in VM + ntnx_vms_ngt_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Uninstall NGT in VM Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.task_ext_id is defined + - result.ext_id == "{{ vm_uuid }}" + - result.response.is_installed == none + fail_msg: "Unable to uninstall NGT in VM " + success_msg: "NGT is uninstalled successfully in VM " + +################################################################################## + +- name: Sleep for 1 minute until NGT is uninstalled completely + ansible.builtin.pause: + minutes: 1 + +- name: Get VM info after NGT is uninstalled + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get VM info after NGT is uninstalled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.guest_tools == none + fail_msg: "Unable to get VM info after NGT is uninstalled " + success_msg: "VM info is retrieved successfully after NGT is uninstalled " + +################################################################################# + +- name: Fetch NGT config after NGT is uninstalled + ntnx_vms_ngt_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch NGT config after NGT is uninstalled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - 
result.response.is_installed == none + fail_msg: "Unable to fetch NGT config after NGT is uninstalled " + success_msg: "NGT config is fetched successfully after NGT is uninstalled " + +################################################################################# + +- name: Install NGT in VM with reboot preference set to SKIP + ntnx_vms_ngt_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + credential: + username: "{{ ngt_config.username }}" + password: "{{ ngt_config.password }}" + reboot_preference: + schedule_type: SKIP + register: result + ignore_errors: true + +- name: Install NGT in VM with reboot preference set to SKIP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id is defined + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities[1] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 2 + - result.task_ext_id is defined + fail_msg: "Unable to install NGT with reboot preference set to SKIP " + success_msg: "NGT is installed successfully with reboot preference set to SKIP " + +################################################################################## + +- name: Sleep for 1 minute until NGT is installed + ansible.builtin.pause: + minutes: 1 + +- name: Get NGT config from VM after NGT is installed + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get NGT config from VM after NGT is installed Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.guest_tools.is_installed == true + - result.response.guest_tools.is_iso_inserted == false + - result.response.guest_tools.is_reachable == true + - result.response.guest_tools.is_enabled == true + - 
result.response.guest_tools.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.guest_tools.capabilities[1] == "VSS_SNAPSHOT" + - result.response.guest_tools.capabilities | length == 2 + - result.response.guest_tools.guest_os_version is defined + - result.response.guest_tools.version is defined + fail_msg: "Unable to get NGT config from VM " + success_msg: "NGT config from VM is retrieved successfully " + +################################################################################ + +- name: Fetch NGT config after installing NGT in VM with reboot preference set to SKIP + ntnx_vms_ngt_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch NGT config after installing NGT in VM with reboot preference set to SKIP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.is_installed == true + - result.response.is_iso_inserted == false + - result.response.is_reachable == true + - result.response.is_enabled == true + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities[1] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 2 + fail_msg: "Unable to fetch NGT config after installing NGT in VM with reboot preference set to SKIP " + success_msg: "NGT config is fetched successfully after installing NGT in VM with reboot preference set to SKIP " + +################################################################################## + +- name: Update NGT in VM with check mode enabled + ntnx_vms_ngt_update_v2: + state: present + ext_id: "{{ vm_uuid }}" + is_enabled: false + capabilities: + - VSS_SNAPSHOT + register: result + ignore_errors: true + check_mode: true + +- name: Update NGT in VM with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - 
result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 1 + - result.response.is_enabled == false + - result.response.is_installed == true + - result.response.is_iso_inserted == false + - result.response.is_reachable == true + - result.response.guest_os_version is defined + - result.response.version is defined + fail_msg: "Unable to update NGT with check mode enabled " + success_msg: "NGT is updated successfully with check mode enabled " + +################################################################################# + +- name: Update NGT in VM by disabling it and setting capabilities to VSS_SNAPSHOT + ntnx_vms_ngt_update_v2: + state: present + ext_id: "{{ vm_uuid }}" + is_enabled: false + capabilities: + - VSS_SNAPSHOT + register: result + ignore_errors: true + +- name: Update NGT in VM by disabling it and setting capabilities to VSS_SNAPSHOT Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 1 + - result.response.is_enabled == false + fail_msg: "Unable to update NGT " + success_msg: "NGT is updated successfully " + +################################################################################ + +- name: Get NGT config from VM after NGT is updated + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get NGT config from VM after NGT is updated Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.guest_tools.capabilities[0] == "VSS_SNAPSHOT" + - result.response.guest_tools.capabilities | length == 1 + - result.response.guest_tools.is_enabled == false + - result.response.guest_tools.is_installed 
== true + - result.response.guest_tools.is_iso_inserted == false + - result.response.guest_tools.is_reachable == false + - result.response.guest_tools.guest_os_version is defined + - result.response.guest_tools.version is defined + fail_msg: "Unable to get NGT config from VM " + success_msg: "NGT config from VM is retrieved successfully " + +################################################################################ + +- name: Test idempotency by updating NGT in VM with same attributes + ntnx_vms_ngt_update_v2: + state: present + ext_id: "{{ vm_uuid }}" + is_enabled: false + capabilities: + - VSS_SNAPSHOT + register: result + ignore_errors: true + +- name: Test idempotency by updating NGT in VM with same attributes Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.ext_id == "{{ vm_uuid }}" + - result.msg == "Nothing to change." + fail_msg: "Test idempotency by updating NGT in VM with same attributes failed " + success_msg: "Test idempotency by updating NGT in VM with same attributes passed " + +################################################################################ + +- name: Update NGT in VM by enabling it + ntnx_vms_ngt_update_v2: + state: present + ext_id: "{{ vm_uuid }}" + is_enabled: true + register: result + ignore_errors: true + +- name: Update NGT in VM by enabling it Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.is_enabled == true + fail_msg: "Unable to update NGT " + success_msg: "NGT is updated successfully " + +################################################################################# + +- name: Get NGT config from VM after NGT is updated + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get NGT config from VM after NGT is updated Status 
+ ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.guest_tools.capabilities[0] == "VSS_SNAPSHOT" + - result.response.guest_tools.capabilities | length == 1 + - result.response.guest_tools.is_enabled == true + - result.response.guest_tools.is_installed == true + - result.response.guest_tools.is_iso_inserted == false + - result.response.guest_tools.is_reachable == true + - result.response.guest_tools.guest_os_version is defined + - result.response.guest_tools.version is defined + fail_msg: "Unable to get NGT config from VM " + success_msg: "NGT config from VM is retrieved successfully " + +################################################################################# + +- name: Upgrade NGT in VM with check mode enabled + ntnx_vms_ngt_upgrade_v2: + state: present + ext_id: "{{ vm_uuid }}" + reboot_preference: + schedule_type: LATER + schedule: + start_time: "2026-08-01T00:00:00Z" + register: result + ignore_errors: true + check_mode: true + +- name: Upgrade NGT in VM with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.reboot_preference.schedule_type == "LATER" + - result.response.reboot_preference.schedule.start_time == "2026-08-01T00:00:00Z" + fail_msg: "Unable to upgrade NGT with check mode enabled " + success_msg: "NGT is upgraded successfully with check mode enabled " + +############################################################################### + +# This test case is commented since NGT upgrade is failing from v4 api: +# https://jira.nutanix.com/browse/ENG-665842 + +# - name: Upgrade NGT in VM +# ntnx_vms_ngt_upgrade_v2: +# state: present +# ext_id: "{{ vm_uuid }}" +# reboot_preference: +# schedule_type: "SKIP" +# register: result +# ignore_errors: true + +# - name: 
Upgrade NGT in VM Status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - result.ext_id == "{{ vm_uuid }}" +# - result.response.reboot_preference.schedule_type == "SKIP" +# fail_msg: "Unable to upgrade NGT " +# success_msg: "NGT is upgraded successfully " + +################################################################################ + +- name: Insert ISO into CD ROM in VM with is_config_only set to true and check mode enabled + ntnx_vms_ngt_insert_iso_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + is_config_only: true + register: result + ignore_errors: true + check_mode: true + +- name: Insert ISO into CD ROM in VM with is_config_only set to true and check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities[1] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 2 + - result.response.is_config_only == true + fail_msg: "Insert ISO into CD ROM in VM with is_config_only set to true and check mode enabled failed " + success_msg: "Insert ISO into CD ROM in VM with is_config_only set to true and check mode enabled passed " + +################################################################################# + +- name: Insert ISO into CD ROM in VM with is_config_only set to false and check mode enabled + ntnx_vms_ngt_insert_iso_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - SELF_SERVICE_RESTORE + - VSS_SNAPSHOT + is_config_only: false + register: result + ignore_errors: true + check_mode: true + +- name: Insert ISO into CD ROM in VM with is_config_only set to false and check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == 
false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities[1] == "VSS_SNAPSHOT" + - result.response.capabilities | length == 2 + - result.response.is_config_only == false + fail_msg: "Insert ISO into CD ROM in VM with is_config_only set to false and check mode enabled failed " + success_msg: "Insert ISO into CD ROM in VM with is_config_only set to false and check mode enabled passed " + +################################################################################# + +- name: Insert ISO into CD ROM in VM + ntnx_vms_ngt_insert_iso_v2: + state: present + ext_id: "{{ vm_uuid }}" + capabilities: + - SELF_SERVICE_RESTORE + is_config_only: true + register: result + ignore_errors: true + +- name: Insert ISO into CD ROM in VM Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities | length == 1 + - result.response.is_enabled == true + - result.response.is_iso_inserted == true + - result.response.is_reachable == true + - result.response.guest_os_version is defined + - result.response.version is defined + - result.task_ext_id is defined + fail_msg: "Unable to insert ISO into CD ROM in VM " + success_msg: "ISO is inserted into CD ROM in VM successfully " + +################################################################################# + +- name: Get NGT config from VM after ISO is inserted + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Get NGT config from VM after ISO is inserted Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ext_id == "{{ vm_uuid }}" + - result.response.cd_roms[0].iso_type == "GUEST_TOOLS" + fail_msg: 
"Unable to get NGT config from VM " + success_msg: "NGT config from VM is retrieved successfully " + +################################################################################## + +- name: Fetch NGT config + ntnx_vms_ngt_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch NGT config Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.capabilities[0] == "SELF_SERVICE_RESTORE" + - result.response.capabilities | length == 1 + - result.response.is_enabled == true + - result.response.is_reachable == true + - result.response.guest_os_version is defined + - result.response.version is defined + fail_msg: "Unable to fetch NGT config " + success_msg: "NGT config is fetched successfully " + +################################################################################## + +- name: Sleep for 2 minutes before uninstalling NGT + ansible.builtin.pause: + minutes: 2 + +- name: Uninstall NGT in VM + ntnx_vms_ngt_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Uninstall NGT in VM Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.task_ext_id is defined + - result.ext_id == "{{ vm_uuid }}" + - result.response.is_installed == none + fail_msg: "Unable to uninstall NGT in VM " + success_msg: "NGT is uninstalled successfully in VM " + +################################################################################## + +- name: Fetch NGT config after NGT is uninstalled + ntnx_vms_ngt_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch NGT config after NGT is uninstalled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - 
result.response.is_installed == none + fail_msg: "Unable to fetch NGT config after NGT is uninstalled " + success_msg: "NGT config is fetched successfully after NGT is uninstalled " + +################################################################################# + +- name: Uninstall NGT in VM to test idempotency + ntnx_vms_ngt_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Uninstall NGT in VM to test idempotency Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.ext_id == "{{ vm_uuid }}" + - result.msg == "NGT is already not installed in the given vm" + fail_msg: "Uninstall NGT in VM to test idempotency failed " + success_msg: "Uninstall NGT in VM to test idempotency passed " + +################################################################################## + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_vms_nics_v2/aliases b/tests/integration/targets/ntnx_vms_nics_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_nics_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_nics_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_nics_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_nics_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_nics_v2/tasks/main.yml new file mode 100644 index 000000000..b0622aa44 --- /dev/null +++ 
b/tests/integration/targets/ntnx_vms_nics_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import nics_operations.yml + ansible.builtin.import_tasks: "nics_operations.yml" diff --git a/tests/integration/targets/ntnx_vms_nics_v2/tasks/nics_operations.yml b/tests/integration/targets/ntnx_vms_nics_v2/tasks/nics_operations.yml new file mode 100644 index 000000000..0487eb788 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_nics_v2/tasks/nics_operations.yml @@ -0,0 +1,847 @@ +--- +- name: Start testing ntnx_vms_nics_v2 + ansible.builtin.debug: + msg: Start testing ntnx_vms_nics_v2 + +- name: Generate random name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM and nics names + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Create VM with minimum requirements + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + storage_config: + is_flash_mode_enabled: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + fail_msg: "Unable to Create VM with minimum requirements " + success_msg: "VM with minimum requirements created successfully " + +- name: Set vm_uuid + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +################################################################################ + +- name: Create nic - check mode is enabled + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: 
present + backing_info: + is_connected: true + network_info: + nic_type: "NORMAL_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + ipv4_config: + should_assign_ip: true + check_mode: true + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.network_info.nic_type == "NORMAL_NIC" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.network_info.ipv4_config.should_assign_ip == true + - result.response.backing_info.is_connected == true + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to Create nic when check mode is enabled " + success_msg: "nic is created successfully when check mode is enabled " + +################################################################################ + +- name: Create nic with static IP and secondary ip list - check mode is enabled + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "DIRECT_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + should_allow_unknown_macs: false + ipv4_config: + should_assign_ip: true + ip_address: + value: "{{ network.managed.IPs[0] }}" + secondary_ip_address_list: + - value: "{{ network.managed.IPs[1] }}" + register: result + ignore_errors: true + check_mode: true + +- name: Create nic with static IP and secondary ip list - check mode is enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.network_info.nic_type == "DIRECT_NIC" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.network_info.ipv4_config.ip_address.value == "{{ 
network.managed.IPs[0] }}" + - result.response.network_info.ipv4_config.secondary_ip_address_list[0].value == "{{ network.managed.IPs[1] }}" + - result.response.backing_info.is_connected == true + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Create nic with static IP and secondary ip list - check mode is enabled failed " + success_msg: "Create nic with static IP and secondary ip list - check mode is enabled passed " + +################################################################################ + +- name: Create nic with allowing unknown macs - check mode is enabled + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "DIRECT_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + should_allow_unknown_macs: true + ipv4_config: + should_assign_ip: true + register: result + ignore_errors: true + check_mode: true + +- name: Create nic with allowing unknown macs - check mode is enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.network_info.nic_type == "DIRECT_NIC" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.backing_info.is_connected == true + - result.response.network_info.should_allow_unknown_macs == true + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Create nic with allowing unknown macs - check mode is enabled failed " + success_msg: "Create nic with allowing unknown macs - check mode is enabled passed " + +################################################################################## +- name: Create nic with minimal spec + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + network_info: + subnet: + ext_id: "{{ network.dhcp.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + 
ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.ext_id is defined + - result.response.network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + fail_msg: "Unable to Create nic with minimal spec " + success_msg: "nic with minimal spec created successfully " + +################################################################################## + +- name: Create nic with network function nic type + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "NETWORK_FUNCTION_NIC" + network_function_nic_type: "INGRESS" + vlan_mode: "ACCESS" + should_allow_unknown_macs: false + ipv4_config: + should_assign_ip: false + register: result + ignore_errors: true + +- name: Create nic with network function nic type Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.ext_id is defined + - result.response.network_info.nic_type == "NETWORK_FUNCTION_NIC" + - result.response.network_info.network_function_nic_type == "INGRESS" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.network_info.should_allow_unknown_macs == false + - result.response.backing_info.is_connected == true + fail_msg: "Create nic with network function nic type failed " + success_msg: "Create nic with network function nic type passed " + +################################################################################ + +- name: Create Direct type NIC with dynamic IP + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "DIRECT_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + should_allow_unknown_macs: false + ipv4_config: + should_assign_ip: true + 
register: result + ignore_errors: true + +- name: Create Direct type NIC with dynamic IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.ext_id is defined + - result.response.network_info.nic_type == "DIRECT_NIC" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.network_info.should_allow_unknown_macs == false + - result.response.backing_info.is_connected == true + - result.response.network_info.ipv4_config.ip_address.value is defined + fail_msg: "Create Direct type NIC with dynamic IP failed " + success_msg: "Create Direct type NIC with dynamic IP passed " + +################################################################################ + +- name: Create Normal type NIC with static IP + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "NORMAL_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + should_allow_unknown_macs: false + ipv4_config: + should_assign_ip: true + ip_address: + value: "{{ network.managed.IPs[0] }}" + register: result + ignore_errors: true + +- name: Create Normal type NIC with static IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.vm_ext_id == "{{ vm_uuid }}" + - result.ext_id is defined + - result.response.network_info.nic_type == "NORMAL_NIC" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.vlan_mode == "ACCESS" + - result.response.network_info.should_allow_unknown_macs == false + - result.response.backing_info.is_connected == true + fail_msg: "Create Normal type NIC with static IP failed " + success_msg: "Create Normal type NIC with static IP passed " + 
+################################################################################ + +- name: Waiting for 1 minute before fetching nics info so that all IPs are attached + ansible.builtin.pause: + seconds: 60 + +- name: Get nics ids from VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Adding nics ids to todelete + ansible.builtin.set_fact: + todelete: "{{ result.response.nics | map(attribute='ext_id') | list }}" + +- name: Get nics ids status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.nics is defined + - todelete | length == 4 + - result.response.nics[0].ext_id is defined + - result.response.nics[0].network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + - result.response.nics[0].network_info.nic_type == "NORMAL_NIC" + - result.response.nics[0].network_info.vlan_mode == "ACCESS" + - result.response.nics[0].backing_info.is_connected == true + - result.response.nics[1].backing_info.is_connected == true + - result.response.nics[1].network_info.nic_type == "NETWORK_FUNCTION_NIC" + - result.response.nics[1].network_info.ipv4_config == none + - result.response.nics[1].network_info.network_function_nic_type == "INGRESS" + - result.response.nics[1].network_info.vlan_mode == "ACCESS" + - result.response.nics[1].network_info.should_allow_unknown_macs == false + - result.response.nics[2].backing_info.is_connected == true + - result.response.nics[2].network_info.nic_type == "DIRECT_NIC" + - result.response.nics[2].network_info.ipv4_config.ip_address.value is defined + - result.response.nics[2].network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.nics[2].network_info.vlan_mode == "ACCESS" + - result.response.nics[2].network_info.should_allow_unknown_macs == false + - result.response.nics[3].backing_info.is_connected == true + - result.response.nics[3].network_info.nic_type == "NORMAL_NIC" + - 
result.response.nics[3].network_info.ipv4_config.ip_address.value == "{{ network.managed.IPs[0] }}" + - result.response.nics[3].network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.nics[3].network_info.vlan_mode == "ACCESS" + - result.response.nics[3].network_info.should_allow_unknown_macs == false + fail_msg: "Unable to get nics ids from VM " + success_msg: "nics ids from VM fetched successfully " + +################################################################################ + +- name: Update nic - check mode is enabled + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[1] }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "DIRECT_NIC" + vlan_mode: "TRUNK" + trunked_vlans: + - 0 + - 1 + should_allow_unknown_macs: false + register: result + check_mode: true + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.backing_info.is_connected == true + - result.response.backing_info.model == "VIRTIO" + - result.response.network_info.vlan_mode == "TRUNK" + - result.response.network_info.trunked_vlans == [0,1] + - result.response.network_info.nic_type == "DIRECT_NIC" + - result.response.network_info.network_function_nic_type == "INGRESS" + - result.response.network_info.should_allow_unknown_macs == false + fail_msg: "Unable to update nic when check mode is enabled " + success_msg: "nic updated successfully when check mode is enabled " + +############################################################################### + +- name: Check for idempotency by updating the nic with same values + ntnx_vms_nics_v2: + ext_id: "{{ todelete[3] }}" + vm_ext_id: "{{ vm_uuid }}" + state: present + backing_info: + model: "VIRTIO" + is_connected: true + network_info: + nic_type: "NORMAL_NIC" + subnet: + ext_id: "{{ static.uuid }}" + vlan_mode: "ACCESS" + 
should_allow_unknown_macs: false + ipv4_config: + ip_address: + value: "{{ network.managed.IPs[0] }}" + register: result + ignore_errors: true + check_mode: true + +- name: Check for idempotency Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." + - result.skipped == true + - result.ext_id == "{{ todelete[3] }}" + fail_msg: "Check for idempotency by updating the nic with same values failed " + success_msg: "Check for idempotency by updating the nic with same values passed " + +############################################################################### + +- name: Update nic + ntnx_vms_nics_v2: + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ todelete[0] }}" + state: present + backing_info: + is_connected: false + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to update nic " + success_msg: "nic updated successfully " + +############################################################################### + +- name: Migrate nic to another subnet - check mode is enabled + ntnx_vms_nics_migrate_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + migrate_type: "ASSIGN_IP" + subnet: + ext_id: "{{ static.uuid }}" + ip_address: + value: "{{ network.managed.IPs[1] }}" + register: result + ignore_errors: true + check_mode: true + +- name: Migration Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.migrate_type == "ASSIGN_IP" + - result.response.subnet.ext_id == '{{ static.uuid }}' + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to migrate nic to another subnet when check mode is enabled " + success_msg: "nic migrated to another subnet successfully when check mode is enabled " + 
+############################################################################### + +- name: Migrate nic to another subnet - migrate type = ASSIGN_IP + ntnx_vms_nics_migrate_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + migrate_type: "ASSIGN_IP" + subnet: + ext_id: "{{ static.uuid }}" + ip_address: + value: "{{ network.managed.IPs[1] }}" + register: result + ignore_errors: true + +- name: Migrate nic to another subnet - migrate type = ASSIGN_IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.network_info.subnet.ext_id == '{{ static.uuid }}' + - result.response.network_info.ipv4_config.ip_address.value == "{{ network.managed.IPs[1] }}" + - result.response.network_info.nic_type == "NORMAL_NIC" + - result.response.backing_info.is_connected == false + fail_msg: "Unable to migrate nic to another subnet " + success_msg: "nic migrated to another subnet successfully " + +################################################################################ + +- name: Retrieve nic info after migrating nic to another subnet - migrate type = ASSIGN_IP + ntnx_vms_nics_info_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Retrieve nic info after migrating nic to another subnet - migrate type = ASSIGN_IP status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.ipv4_config.ip_address.value == "{{ network.managed.IPs[1] }}" + fail_msg: "Unable to get nic info " + success_msg: "nic info fetched successfully " + +############################################################################### + +- name: Migrate nic to another subnet - migrate type = RELEASE_IP + 
ntnx_vms_nics_migrate_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + migrate_type: "RELEASE_IP" + subnet: + ext_id: "{{ static.uuid }}" + register: result + ignore_errors: true + +- name: Migrate nic to another subnet - migrate type = RELEASE_IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.network_info.subnet.ext_id == '{{ static.uuid }}' + - result.response.network_info.ipv4_config.ip_address.value != "{{ network.managed.IPs[1] }}" + - result.response.backing_info.is_connected == false + fail_msg: "Unable to migrate nic to another subnet " + success_msg: "nic migrated to another subnet successfully " + +############################################################################### + +- name: Retrieve nic info after migrating nic to another subnet - migrate type = RELEASE_IP + ntnx_vms_nics_info_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Retrieve nic info after migrating nic to another subnet - migrate type = RELEASE_IP status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.ipv4_config.ip_address.value != "{{ network.managed.IPs[1] }}" + fail_msg: "Unable to get nic info " + success_msg: "nic info fetched successfully " + +############################################################################### + +- name: Assign IP address to nic - check mode is enabled + ntnx_vms_nics_ip_v2: + state: present + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + ip_address: + value: "{{ network.managed.IPs[0] }}" + register: result + check_mode: true + ignore_errors: true + +- name: Assign IP Status + ansible.builtin.assert: + 
that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.ip_address.value == "{{ network.managed.IPs[0] }}" + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to assign IP address to nic when check mode is enabled " + success_msg: "IP address assigned to nic successfully when check mode is enabled " + +############################################################################### + +- name: Assign IP address to nic + ntnx_vms_nics_ip_v2: + state: present + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + ip_address: + value: "{{ network.managed.IPs[1] }}" + register: result + ignore_errors: true + +- name: Assign IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.network_info.ipv4_config.ip_address.value == "{{ network.managed.IPs[1] }}" + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to assign IP address to nic " + success_msg: "IP address assigned to nic successfully " + +############################################################################### + +- name: Retrieve nic info after assigning IP to nic + ntnx_vms_nics_info_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Retrieve nic info after assigning IP to nic status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.is_connected == false + - result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.ipv4_config.ip_address.value == "{{ network.managed.IPs[1] }}" + fail_msg: "Unable to get nic info " + success_msg: "nic info fetched successfully " + 
+############################################################################### + +- name: Release IP address from nic - check mode is enabled + ntnx_vms_nics_ip_v2: + state: absent + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + ip_address: + value: "{{ network.managed.IPs[1] }}" + register: result + check_mode: true + ignore_errors: true + +- name: Release IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to release IP address from nic when check mode is enabled " + success_msg: "IP address released from nic successfully when check mode is enabled " + +############################################################################### + +- name: Release IP address from nic + ntnx_vms_nics_ip_v2: + state: absent + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + ip_address: + value: "{{ network.managed.IPs[0] }}" + register: result + ignore_errors: true + +- name: Release IP Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to release IP address from nic " + success_msg: "IP address released from nic successfully " + +############################################################################### + +- name: Retrieve nic info after releasing IP from nic + ntnx_vms_nics_info_v2: + ext_id: "{{ todelete[0] }}" + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Retrieve nic info after releasing IP from nic status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.ext_id == "{{ todelete[0] }}" + - result.vm_ext_id == "{{ vm_uuid }}" + - result.response.backing_info.is_connected == false + - 
result.response.network_info.subnet.ext_id == "{{ static.uuid }}" + - result.response.network_info.ipv4_config == none + fail_msg: "Unable to get nic info " + success_msg: "nic info fetched successfully " + +############################################################################### + +- name: List all nics for VM + ntnx_vms_nics_info_v2: + vm_ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: List all nics for VM status + ansible.builtin.set_fact: + nics_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: Get nics list status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - item in nics_list + fail_msg: "Unable to list all nics for VM " + success_msg: "All nics for VM listed successfully " + loop: "{{ todelete }}" + +################################################################################ + +# - name: List all nics for VM with adding filter +# ntnx_vms_nics_info_v2: +# vm_ext_id: "{{ vm_uuid }}" +# filter: "extId eq '{{ todelete[0] }}'" +# register: result +# ignore_errors: true + +# - name: Set nics list +# ansible.builtin.set_fact: +# nics_list: "{{ result.response | map(attribute='ext_id') | list }}" + +# - name: Get nics list status +# ansible.builtin.assert: +# that: +# - result.response is defined +# - result.changed == false +# - result.failed == false +# - todelete[0] in nics_list +# - nics_list | length == 1 +# fail_msg: "Unable to list all nics for VM with adding filter " +# success_msg: "All nics for VM listed successfully with adding filter " + +################################################################################ + +- name: List all nics for VM with limit 1 + ntnx_vms_nics_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 1 + register: result + ignore_errors: true + +- name: Set nics list + ansible.builtin.set_fact: + nics_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: List all nics 
for VM with limit 1 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - todelete[0] in nics_list + - nics_list | length == 1 + fail_msg: "List all nics for VM with limit 1 failed " + success_msg: "List all nics for VM with limit 1 passed " + +############################################################################### + +- name: List all nics for VM with limit 2 + ntnx_vms_nics_info_v2: + vm_ext_id: "{{ vm_uuid }}" + limit: 2 + register: result + ignore_errors: true + +- name: Set nics list + ansible.builtin.set_fact: + nics_list: "{{ result.response | map(attribute='ext_id') | list }}" + +- name: List all nics for VM with limit 2 status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - todelete[0] in nics_list + - todelete[1] in nics_list + - nics_list | length == 2 + fail_msg: "List all nics for VM with limit 2 failed " + success_msg: "List all nics for VM with limit 2 passed " + +############################################################################### + +- name: Delete nics + ntnx_vms_nics_v2: + state: absent + vm_ext_id: "{{ vm_uuid }}" + ext_id: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete nics " + success_msg: "nics deleted successfully " + loop: "{{ result.results }}" + +################################################################################# + +- name: Verify that all nics are deleted + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Verify that all nics are deleted status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.nics == none + 
fail_msg: "Unable to verify that all nics are deleted " + success_msg: "All nics are deleted successfully " + +################################################################################ + +- name: Delete the VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + - result.vm_uuid == "{{ vm_uuid }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " diff --git a/tests/integration/targets/ntnx_vms_serial_port_v2/aliases b/tests/integration/targets/ntnx_vms_serial_port_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_serial_port_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_serial_port_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_serial_port_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/main.yml new file mode 100644 index 000000000..52a634090 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import serial_ports.yml + ansible.builtin.import_tasks: "serial_ports.yml" diff --git a/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/serial_ports.yml b/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/serial_ports.yml new file mode 100644 index 000000000..e644dbf7d --- /dev/null +++ 
b/tests/integration/targets/ntnx_vms_serial_port_v2/tasks/serial_ports.yml @@ -0,0 +1,170 @@ +--- +- name: Create, Update, Fetch and Delete Serial Ports + ansible.builtin.debug: + msg: "Create, Update, Fetch and Delete Serial Ports" +- name: Initialize todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: VM with minimum requirements + ntnx_vms: + state: present + name: MinReqVM + cluster: + name: "{{ cluster.name }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " + +- name: Set VM ext_id + ansible.builtin.set_fact: + vm_ext_id: '{{ result["response"]["metadata"]["uuid"] }}' + when: result.response.status.state == 'COMPLETE' + +- name: Create Serial Port + ntnx_vms_serial_port_v2: + vm_ext_id: "{{ vm_ext_id }}" + state: present + index: 0 + is_connected: true + register: result + ignore_errors: true + +- name: Serial Port Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.ext_id is defined + - result.vm_ext_id == '{{ vm_ext_id }}' + - result.response.is_connected == true + - result.response.index == 0 + - result.ext_id is defined + fail_msg: " Unable to create Serial Port " + success_msg: " Serial Port created successfully " + +- name: Fetch information about all serial ports of a vm + ntnx_vms_serial_port_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Serial Port Info Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].is_connected == true + - result.response[0].index == 0 + - result.vm_ext_id == '{{ vm_ext_id }}' + fail_msg: " Unable to fetch information about all 
serial ports of a vm " + success_msg: " Information about all serial ports of a vm fetched successfully " +- name: Add ext_id to todelete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.response.0.ext_id ] }}" + +- name: Update Serial Port connection status + ntnx_vms_serial_port_v2: + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{ result.response.0.ext_id }}" + state: present + is_connected: false + register: result + ignore_errors: true + +- name: Serial Port Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.ext_id is defined + - result.vm_ext_id == '{{ vm_ext_id }}' + - result.response.is_connected == false + fail_msg: " Unable to Update Serial Port connection status " + success_msg: " Serial Port connection status updated successfully " + +- name: Fetch information about a specific serial port + ntnx_vms_serial_port_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{result.response.ext_id}}" + register: result + ignore_errors: true + +- name: Serial Port Info Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.is_connected == false + - result.response.index == 0 + - result.vm_ext_id == '{{ vm_ext_id }}' + fail_msg: " Unable to fetch information about a specific serial port " + success_msg: " Information about a specific serial port fetched successfully " + +- name: Update Serial Port index + ntnx_vms_serial_port_v2: + vm_ext_id: "{{ vm_ext_id }}" + ext_id: "{{ result.response.ext_id }}" + state: present + index: 2 + register: result + ignore_errors: true + +- name: Serial Port Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.ext_id is defined + - result.vm_ext_id == '{{ vm_ext_id }}' + - result.response.is_connected == false + - result.response.index == 2 + - result.task_ext_id is defined + 
fail_msg: " Unable to create Serial Port " + success_msg: " Serial Port created successfully " + +- name: Delete Serial Port + ntnx_vms_serial_port_v2: + state: absent + ext_id: "{{item}}" + vm_ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: false + loop: "{{ todelete }}" + +- name: Serial Port Deletion Status + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + fail_msg: " Unable to delete Serial Port " + success_msg: " Serial Port deleted successfully " + +- name: Fetch information about all serial ports of a vm + ntnx_vms_serial_port_info_v2: + vm_ext_id: "{{ vm_ext_id }}" + register: result + ignore_errors: true + +- name: Serial Port Info Status + ansible.builtin.assert: + that: + - result.response == None + - result.vm_ext_id == '{{ vm_ext_id }}' + fail_msg: " Unable to fetch information about all serial ports of a vm " + success_msg: " Information about all serial ports of a vm fetched successfully " + +- name: Delete Created VM + ntnx_vms: + state: absent + vm_uuid: "{{ vm_ext_id }}" + register: result diff --git a/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/aliases b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/meta/main.yml new file mode 100644 index 000000000..e0985ec29 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/main.yml new file mode 100644 index 000000000..0dde4c287 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/main.yml @@ -0,0 +1,11 
@@ +--- +- name: Set module_defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import stage.yml + ansible.builtin.import_tasks: stage.yml diff --git a/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/stage.yml b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/stage.yml new file mode 100644 index 000000000..97e9402af --- /dev/null +++ b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/tasks/stage.yml @@ -0,0 +1,170 @@ +--- +- name: Start ntnx_vms_stage_guest_customization_v2 tests + ansible.builtin.debug: + msg: Start ntnx_vms_stage_guest_customization_v2 tests + +- name: Create vm with different disks, using ubuntu image , with cdrom + ntnx_vms_v2: + name: test + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: SCSI + index: 2 + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 3 + guest_customization: + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IHRydWUKc3NoX3B3YXV0aDogICB0cnVl + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "test" + - result.response.description == "ansible test" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - 
result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + fail_msg: " Unable to create vm with different disks, using ubuntu image , with cdrom" + success_msg: "Vm with different disks, using ubuntu image , with cdrom created successfully " + +- name: Set variables + ansible.builtin.set_fact: + vm_uuid: "{{ result.response.ext_id }}" + +###################################################################################################################### + +- name: Generate spec with all attributes for linux vm + ntnx_vms_stage_guest_customization_v2: + ext_id: "{{ vm_uuid }}" + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result + check_mode: true + +- name: Generate spec with all attributes for linux vm status + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.config.datasource_type == "CONFIG_DRIVE_V2" + - result.response.config.cloud_init_script.value == "I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ==" + fail_msg: Unable to Generate spec with all attributes for linux vm in check mode + success_msg: Generate spec with all attributes for linux vm finished successfully in check mode + +###################################################################################################################### + +- name: Generate spec with all attributes for sysprep + ntnx_vms_stage_guest_customization_v2: + ext_id: "{{ vm_uuid }}" + config: + sysprep: + install_type: FRESH + sysprep_script: + unattendxml: + value: test + register: result 
+ check_mode: true + +- name: Check the response + ansible.builtin.assert: + that: + - result.changed == false + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.config.install_type == "FRESH" + - result.response.config.sysprep_script.value == "test" + fail_msg: Unable to Generate spec with all attributes for sysprep in check mode + success_msg: Generate spec with all attributes for sysprep finished successfully in check mode + +###################################################################################################################### + +- name: Update guest script + ntnx_vms_stage_guest_customization_v2: + ext_id: "{{ vm_uuid }}" + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result + +- name: Check the response + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: Unable to Update guest script + success_msg: Update guest script finished successfully + +###################################################################################################################### + +- name: Power on a VM + ntnx_vms_power_actions_v2: + ext_id: "{{ vm_uuid }}" + state: power_on + wait: true + register: result + +- name: Check the response + ansible.builtin.assert: + that: + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: Unable to Power on a VM + success_msg: Power on a VM finished successfully + +###################################################################################################################### + +- name: Delete all Created VMs + ntnx_vms_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + +- name: Check the response + ansible.builtin.assert: + 
that: + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vm_uuid }}" + fail_msg: Unable to Delete all Created VMs + success_msg: Delete all Created VMs finished successfully diff --git a/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/vars/main.yml b/tests/integration/targets/ntnx_vms_stage_guest_customization_v2/vars/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_templates_v2/aliases b/tests/integration/targets/ntnx_vms_templates_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_templates_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_templates_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_templates_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_templates_v2/tasks/all_templates_operation.yml b/tests/integration/targets/ntnx_vms_templates_v2/tasks/all_templates_operation.yml new file mode 100644 index 000000000..c0dcefc5d --- /dev/null +++ b/tests/integration/targets/ntnx_vms_templates_v2/tasks/all_templates_operation.yml @@ -0,0 +1,693 @@ +--- +- name: > + Start ntnx_templates_deploy_v2,ntnx_templates_guest_os_v2,ntnx_templates_v2,ntnx_templates_info_v2, + ntnx_templates_version_v2,ntnx_templates_versions_info_v2 tests + ansible.builtin.debug: + msg: > + start ntnx_templates_deploy_v2,ntnx_templates_guest_os_v2,ntnx_templates_v2,ntnx_templates_info_v2, + ntnx_templates_version_v2,ntnx_templates_versions_info_v2 tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set suffix_name + ansible.builtin.set_fact: + suffix_name: ansible-ag + +- name: Define variables + ansible.builtin.set_fact: + category_name: OSType + 
vm1_name: "{{ random_name }}{{ suffix_name }}vm1" + vm2_name: "{{ random_name }}{{ suffix_name }}vm2" + vm3_name: "{{ random_name }}{{ suffix_name }}vm3" + to_delete_vms: [] + template_name: "{{ random_name }}{{ suffix_name }}template" + template2_name: "{{ random_name }}{{ suffix_name }}template2" + template3_name: "{{ random_name }}{{ suffix_name }}template3" + version_1_name: "{{ random_name }}{{ suffix_name }}version1" + version_2_name: "{{ random_name }}{{ suffix_name }}version2" + version_3_name: "{{ random_name }}{{ suffix_name }}version3" + value2: "{{ random_name }}{{ suffix_name }}value2" +######################################################################## +- name: VM with minimum requirements + ntnx_vms: + state: present + name: "{{ vm1_name }}" + cluster: + name: "{{ cluster.name }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " + +- name: Set variables + ansible.builtin.set_fact: + to_delete_vms: "{{ to_delete_vms + [result.vm_uuid] }}" + vm_uuid: "{{ result.vm_uuid }}" +######################################################################## +- name: Create new template from a vm with check mode + ntnx_templates_v2: + template_name: "{{ template_name }}" + template_description: ansible test + template_version_spec: + version_source: + template_vm_reference: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + check_mode: true + +- name: Verify spec + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.template_name == "{{ template_name }}" + - result.response.template_description == "ansible test" + - result.response.template_version_spec.version_source.ext_id == "{{ vm_uuid }}" + 
fail_msg: " Template created successfully in check mode " + success_msg: Returned as expected in check mode +######################################################################## +- name: Create new template from a vm + ntnx_templates_v2: + template_name: "{{ template_name }}" + template_description: ansible test + template_version_spec: + version_source: + template_vm_reference: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Set variables + ansible.builtin.set_fact: + template1_ext_id: "{{ result.ext_id }}" + version1_ext_id: "{{ result.response.template_version_spec.ext_id }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id is defined + - result.response.template_name == "{{ template_name }}" + - result.response.template_description == "ansible test" + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + fail_msg: " Unable to create new template " + success_msg: " New template created successfully " +######################################################################## +- name: List all templates + ntnx_templates_info_v2: + register: result + ignore_errors: true + +- name: Status of all templates + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 0 + fail_msg: "Unable to list templates " + success_msg: templates listed successfully +######################################################################## +- name: Fetch template info using ext id + ntnx_templates_info_v2: + ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Status of template + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.ext_id == "{{ template1_ext_id }}" + - result.response.template_name == "{{ 
template_name }}" + - result.response.template_description == "ansible test" + fail_msg: Unable to fetch template using ext id + success_msg: template fetched using ext id successfully +######################################################################## +- name: Update template description & name & override_vm_config & guest_customization user_data with check mode + ntnx_templates_v2: + ext_id: "{{ template1_ext_id }}" + template_version_spec: + version_name: "{{ version_2_name }}" + version_description: ansible_template_version_description New + version_source: + template_version_reference: + version_id: "{{ version1_ext_id }}" + override_vm_config: + num_sockets: 1 + num_cores_per_socket: 1 + num_threads_per_core: 1 + name: "{{ vm2_name }}" + guest_customization: + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + metadata: test + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result + ignore_errors: true + check_mode: true + +- name: Update status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.response.template_name == "{{ template_name }}" + - result.response.template_description == "ansible test" + - result.response.template_version_spec.version_name == "{{ version_2_name }}" + - result.response.template_version_spec.version_description == "ansible_template_version_description New" + - result.response.template_version_spec.version_source.override_vm_config.num_cores_per_socket == 1 + - result.response.template_version_spec.version_source.override_vm_config.num_sockets == 1 + - result.response.template_version_spec.version_source.override_vm_config.num_threads_per_core == 1 + - result.response.template_version_spec.version_source.override_vm_config.name == "{{ vm2_name }}" + - > + result.response.template_version_spec.version_source.override_vm_config. 
+ guest_customization.config.datasource_type == "CONFIG_DRIVE_V2" + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization.config.cloud_init_script.value + == "I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ==" + - result.response.template_version_spec.version_source.override_vm_config.guest_customization.config.metadata == "test" + fail_msg: Finished update template description & name in check mode + success_msg: Returned as expected in check mode + +######################################################################## +- name: Update template description & name & override_vm_config with guest_customization key_value_pairs with check mode + ntnx_templates_v2: + ext_id: "{{ template1_ext_id }}" + template_version_spec: + version_name: "{{ version_2_name }}" + version_description: ansible_template_version_description New + version_source: + template_version_reference: + version_id: "{{ version1_ext_id }}" + override_vm_config: + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + name: "{{ vm2_name }}" + guest_customization: + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + metadata: test + cloud_init_script: + custom_key_values: + key_value_pairs: + - name: test_name + value: test_value + - name: test2_name + value: test2_value + register: result + ignore_errors: true + check_mode: true + +- name: Update status + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.response.template_name == "{{ template_name }}" + - result.response.template_description == "ansible test" + - result.response.template_version_spec.version_name == "{{ version_2_name }}" + - result.response.template_version_spec.version_description == "ansible_template_version_description New" + - result.response.template_version_spec.version_source.override_vm_config.num_cores_per_socket == 2 + - 
result.response.template_version_spec.version_source.override_vm_config.num_sockets == 2 + - result.response.template_version_spec.version_source.override_vm_config.num_threads_per_core == 2 + - result.response.template_version_spec.version_source.override_vm_config.name == "{{ vm2_name }}" + - > + result.response.template_version_spec.version_source.override_vm_config. + guest_customization.config.datasource_type == "CONFIG_DRIVE_V2" + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. + config.cloud_init_script.key_value_pairs is defined + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. + config.cloud_init_script.key_value_pairs | length == 2 + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. + config.cloud_init_script.key_value_pairs[0].name == "test_name" + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. + config.cloud_init_script.key_value_pairs[0].value == "test_value" + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. + config.cloud_init_script.key_value_pairs[1].name == "test2_name" + - > + result.response.template_version_spec.version_source.override_vm_config.guest_customization. 
+ config.cloud_init_script.key_value_pairs[1].value == "test2_value" + - result.response.template_version_spec.version_source.override_vm_config.guest_customization.config.metadata == "test" + fail_msg: Finished update template description & name in check mode + success_msg: Returned as expected in check mode +######################################################################## +- name: Update template description & name + ntnx_templates_v2: + ext_id: "{{ template1_ext_id }}" + template_version_spec: + version_name: "{{ version_2_name }}" + version_description: ansible_template_version_description New + version_source: + template_version_reference: + version_id: "{{ version1_ext_id }}" + override_vm_config: + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + name: "{{ vm2_name }}" + register: result + ignore_errors: true + +- name: Set variables + ansible.builtin.set_fact: + version2_ext_id: "{{ result.response.template_version_spec.ext_id }}" + +- name: Update status + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.template_name == "{{ template_name }}" + - result.response.template_description == "ansible test" + - result.response.template_version_spec.version_name == "{{ version_2_name }}" + - result.response.template_version_spec.version_description == "ansible_template_version_description New" + - result.response.template_version_spec.vm_spec.num_cores_per_socket == 2 + - result.response.template_version_spec.vm_spec.num_sockets == 2 + - result.response.template_version_spec.vm_spec.num_threads_per_core == 2 + - result.response.template_version_spec.vm_spec.name == "{{ vm2_name }}" + fail_msg: Unable to update template description & name + success_msg: template description & name updated successfully +######################################################################## +- name: Retrieve the Template Version details for the given Template Version identifier. 
+ ntnx_templates_versions_info_v2: + ext_id: "{{ version2_ext_id }}" + template_ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Status of template version + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.ext_id == "{{ version2_ext_id }}" + - result.response.version_name == "{{ version_2_name }}" + - result.response.version_description == "ansible_template_version_description New" + - result.response.vm_spec.num_cores_per_socket == 2 + - result.response.vm_spec.num_sockets == 2 + - result.response.vm_spec.num_threads_per_core == 2 + - result.response.vm_spec.name == "{{ vm2_name }}" + - result.response.is_active_version == true + fail_msg: Unable to fetch template version using ext id + success_msg: template version fetched using ext id successfully +######################################################################## +- name: Set the Active Version + ntnx_templates_version_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + register: result + ignore_errors: true + +- name: Status of template version + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.template_ext_id == "{{ template1_ext_id }}" + - result.response.template_version_spec.ext_id == "{{ version1_ext_id }}" + - result.response.template_description == "ansible test" + - result.response.template_version_spec.version_name == "Initial Version" + - result.response.template_version_spec.is_active_version == true + fail_msg: Unable to set the Active Version + success_msg: Active Version set successfully +######################################################################## +- name: Delete Template Version + ntnx_templates_version_v2: + state: absent + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version2_ext_id }}" + register: result + ignore_errors: true + +- name: Retrieve the 
Template Version details for the given Template Version identifier. + ntnx_templates_versions_info_v2: + ext_id: "{{ version2_ext_id }}" + template_ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Check version Deletion + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == True + - result.error == "NOT FOUND" + fail_msg: Unable to delete template version + success_msg: template version deleted successfully +######################################################################## +- name: Deploy VM + ntnx_templates_deploy_v2: + ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + cluster_reference: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Status of VM deployment + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + fail_msg: Unable to deploy VM + success_msg: VM deployed successfully +######################################################################## +- name: Generate spec for deploying vm with override config with check mode + ntnx_templates_deploy_v2: + ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + cluster_reference: "{{ cluster.uuid }}" + override_vms_config: + - name: "{{ vm3_name }}" + num_sockets: 2 + num_cores_per_socket: 2 + num_threads_per_core: 2 + memory_size_bytes: 4294967296 + register: result + ignore_errors: true + check_mode: true + +- name: Status of VM deployment + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.ext_id == "{{ template1_ext_id }}" + - result.response.number_of_vms == 1 + - result.response.override_vm_config_map['0'].memory_size_bytes == 4294967296 + - result.response.override_vm_config_map['0'].num_cores_per_socket == 2 + - result.response.override_vm_config_map['0'].num_sockets == 2 + - 
result.response.override_vm_config_map['0'].num_threads_per_core == 2 + - result.response.override_vm_config_map['0'].name == "{{ vm3_name }}" + - result.response.version_id == "{{ version1_ext_id }}" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.msg == "Template ({{ template1_ext_id }}) with given version ({{ version1_ext_id }}) will be deployed." + + fail_msg: VM deployed with override config successfully with check mode + success_msg: Returned as expected with check mode +######################################################################## +- name: Deploy vm and override config + ntnx_templates_deploy_v2: + ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + cluster_reference: "{{ cluster.uuid }}" + override_vms_config: + - name: "{{ vm3_name }}" + num_sockets: 4 + num_cores_per_socket: 4 + num_threads_per_core: 2 + memory_size_bytes: 4294967296 + register: result + ignore_errors: true + +- name: Status of VM deployment + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + fail_msg: Unable to deploy VM with override config + success_msg: VM deployed with override config successfully +- name: Get deployed vms using name filter criteria + ntnx_vms_info: + filter: + vm_name: "{{ vm3_name }}" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.entities | length == 1 + - result.response.entities[0].spec.name == "{{ vm3_name }}" + - result.response.entities[0].spec.resources.num_vcpus_per_socket == 4 + - result.response.entities[0].spec.resources.num_sockets == 4 + - result.response.entities[0].spec.resources.num_threads_per_core == 2 + - result.response.entities[0].spec.cluster_reference.uuid == "{{ cluster.uuid }}" + - result.response.entities[0].status.state == "COMPLETE" + fail_msg: " Unable to find deployed vms using name filter 
criteria" + success_msg: " VMs listed successfully " +- name: Add vm to delete list + ansible.builtin.set_fact: + to_delete_vms: "{{ to_delete_vms + [result.response.entities[0].metadata.uuid] }}" +######################################################################## +- name: Initiate_guest_os_update with check mode + ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + state: start + register: result + ignore_errors: true + check_mode: true + +- name: Status of guest os update + ansible.builtin.assert: + that: + - result.changed == False + - result.failed == False + - result.response.version_id == "{{ version1_ext_id }}" + - result.template_ext_id == "{{ template1_ext_id }}" + - result.msg == "Guest OS update will be initiated" + fail_msg: Guest os update initiated successfully with check mode + success_msg: Returned as expected with check mode +######################################################################## +- name: Initiate_guest_os_update + ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + state: start + register: result + ignore_errors: true + +- name: Status of guest os update + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + fail_msg: Unable to initiate guest os update + success_msg: Guest os update initiated successfully +######################################################################## +- name: Cancel guest_os_update + ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + state: cancel + register: result + ignore_errors: true + +- name: Status of guest os update cancel + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + fail_msg: Unable to cancel guest os update + success_msg: Guest os update cancel 
successfully +######################################################################## +- name: Initiate_guest_os_update + ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + state: start + register: result + ignore_errors: true + +- name: Status of guest os update + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + fail_msg: Unable to initiate guest os update + success_msg: Guest os update initiated successfully +######################################################################## +- name: Finish guest_os_update + ntnx_templates_guest_os_v2: + template_ext_id: "{{ template1_ext_id }}" + version_id: "{{ version1_ext_id }}" + state: finish + version_name: "{{ version_3_name }}" + version_description: finish guest os update + register: result + ignore_errors: true + +- name: Status of guest os update finish + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.response.status == "SUCCEEDED" + - result.response.entities_affected[0].ext_id == "{{ template1_ext_id }}" + fail_msg: Unable to finish guest os update + success_msg: Guest os update finish successfully + +- name: Set variables + ansible.builtin.set_fact: + version3_ext_id: "{{ result.response.entities_affected.1.ext_id }}" + +- name: Retrieve the Template Version details for the given Template Version identifier. 
+ ntnx_templates_versions_info_v2: + ext_id: "{{ version3_ext_id }}" + template_ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Status of template version + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.ext_id == "{{ version3_ext_id }}" + - result.response.version_name == "{{ version_3_name }}" + - result.response.version_description == "finish guest os update" + - result.response.is_active_version == true + fail_msg: Unable to fetch template version using ext id + success_msg: template version fetched using ext id successfully +######################################################################## +- name: Create new template from a vm + ntnx_templates_v2: + template_name: "{{ template2_name }}" + template_description: ansible test + template_version_spec: + version_source: + template_vm_reference: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Set variables + ansible.builtin.set_fact: + template2_ext_id: "{{ result.ext_id }}" + version3_ext_id: "{{ result.response.template_version_spec.ext_id }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.ext_id is defined + - result.response.template_name == "{{ template2_name }}" + - result.response.template_description == "ansible test" + - result.ext_id == result.response.ext_id + - result.task_ext_id is defined + fail_msg: " Unable to create new template " + success_msg: " New template created successfully " +######################################################################## +- name: List all templates + ntnx_templates_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Status of all templates + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - 
result.response | length == 1 + fail_msg: "Unable to list templates " + success_msg: templates listed successfully +######################################################################## +- name: Get all templates with non existent name + ntnx_templates_info_v2: + filter: templateName eq '{{ template3_name }}' + register: result + ignore_errors: true + +- name: Status of all templates + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response == [] + fail_msg: Template listed with non existent name + success_msg: Returned as expected +######################################################################## +- name: Delete Template + ntnx_templates_v2: + state: absent + ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == True + - result.failed == False + - result.ext_id == "{{ template1_ext_id }}" + fail_msg: Unable to delete template + success_msg: Template deleted successfully + +- name: Fetch template info using ext id + ntnx_templates_info_v2: + ext_id: "{{ template1_ext_id }}" + register: result + ignore_errors: true + +- name: Status of template + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == True + - result.error == "NOT FOUND" + fail_msg: Templated not deleted + success_msg: Returned as expected +######################################################################## +- name: Delete created vms + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + ignore_errors: true + loop: "{{ to_delete_vms }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == True + - result.msg == "All items completed" + fail_msg: Unable to delete vm + success_msg: VM deleted successfully +######################################################################## +- name: Delete Created 
Templates + ntnx_templates_v2: + state: absent + ext_id: "{{ template2_ext_id }}" + register: result + ignore_errors: true diff --git a/tests/integration/targets/ntnx_vms_templates_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_templates_v2/tasks/main.yml new file mode 100644 index 000000000..4fe64953f --- /dev/null +++ b/tests/integration/targets/ntnx_vms_templates_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module_defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_templates_operation.yml + ansible.builtin.import_tasks: all_templates_operation.yml diff --git a/tests/integration/targets/ntnx_vms_v2/aliases b/tests/integration/targets/ntnx_vms_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vms_v2/meta/main.yml b/tests/integration/targets/ntnx_vms_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vms_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vms_v2/tasks/create_delete_info_vm.yml b/tests/integration/targets/ntnx_vms_v2/tasks/create_delete_info_vm.yml new file mode 100644 index 000000000..f26150ddf --- /dev/null +++ b/tests/integration/targets/ntnx_vms_v2/tasks/create_delete_info_vm.yml @@ -0,0 +1,636 @@ +--- +- name: Start ntnx_vms_v2 tests + ansible.builtin.debug: + msg: start ntnx_vms_v2 tests + +- name: Generate random category key & value + ansible.builtin.set_fact: + random_name: "{{ query('community.general.random_string', numbers=false, special=false, length=12)[0] }}" + +- name: Set VM name suffix + ansible.builtin.set_fact: + suffix_name: ansible-ag + +- name: Set VM name and todelete list + ansible.builtin.set_fact: + todelete: [] + vm_name: "{{ random_name }}{{ suffix_name }}vm" + +- 
name: Create VM with full requirements with check_mode + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + num_sockets: 1 + num_cores_per_socket: 1 + num_threads_per_core: 1 + num_numa_nodes: 1 + memory_size_bytes: 4294967296 + is_vcpu_hard_pinning_enabled: false + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_vga_console_enabled: false + machine_type: PC + hardware_clock_timezone: UTC + enabled_cpu_features: + - HARDWARE_VIRTUALIZATION + is_branding_enabled: false + is_agent_vm: false + apc_config: + is_apc_enabled: true + cpu_model: + name: test + ext_id: "00000000-0000-0000-0000-000000000000" + vtpm_config: + is_vtpm_enabled: true + version: "2.0" + gpus: + - mode: VIRTUAL + device_id: 1 + name: NVIDIA T4 + vendor: NVIDIA + pci_address: + segment: "0000" + func: 0 + device: 0 + bus: 1 + availability_zone: + ext_id: "00000000-0000-0000-0000-000000000000" + cd_roms: + - disk_address: + bus_type: IDE + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: SCSI + index: 2 + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 3 + boot_config: + uefi_boot: + nvram_device: + backing_storage_info: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + serial_ports: + - is_connected: true + index: 0 + - is_connected: true + index: 3 + register: result + ignore_errors: true + check_mode: true + +- name: Creation Status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == False + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.num_sockets == 1 + - result.response.num_cores_per_socket == 1 + - result.response.num_threads_per_core == 1 + - result.response.memory_size_bytes == 4294967296 + - result.response.is_vcpu_hard_pinning_enabled == false + - result.response.is_cpu_passthrough_enabled == false + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled == false + - result.response.is_vga_console_enabled == false + - result.response.machine_type == "PC" + - result.response.hardware_clock_timezone == "UTC" + - result.response.num_numa_nodes == 1 + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + - result.response.is_branding_enabled == false + - result.response.is_agent_vm == false + - result.response.apc_config.is_apc_enabled == true + - result.response.vtpm_config.is_vtpm_enabled == true + - result.response.apc_config.cpu_model.name == "test" + - result.response.apc_config.cpu_model.ext_id == "00000000-0000-0000-0000-000000000000" + - result.response.vtpm_config.version == "2.0" + - result.response.cd_roms[0].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.cd_roms[0].disk_address.bus_type == "IDE" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + - result.response.gpus[0].mode == "VIRTUAL" + - 
result.response.gpus[0].device_id == 1 + - result.response.gpus[0].name == "NVIDIA T4" + - result.response.gpus[0].vendor == "NVIDIA" + - result.response.gpus[0].pci_address.segment == 0 + - result.response.gpus[0].pci_address.func == 0 + - result.response.gpus[0].pci_address.device == 0 + - result.response.gpus[0].pci_address.bus == 1 + - result.response.availability_zone.ext_id == "00000000-0000-0000-0000-000000000000" + - result.response.serial_ports[0].is_connected == true + - result.response.serial_ports[0].index == 0 + - result.response.serial_ports[1].is_connected == true + - result.response.serial_ports[1].index == 3 + - result.response.boot_config.nvram_device.backing_storage_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.boot_config.nvram_device.backing_storage_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.boot_config.nvram_device.backing_storage_info.disk_size_bytes == 26843545600 + fail_msg: " Unable to create VM with full requirements " + success_msg: Returned as expected +######################################################################## +- name: Create VM with minimum requirements + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +- name: Get vm using filter name + ntnx_vms_info_v2: + filter: name eq '{{ vm_name }}' + 
register: result + ignore_errors: true + +- name: Status of vm + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response[0].name == "{{ vm_name }}" + - result.response[0].description == "ansible test" + - result.response[0].cluster.ext_id == "{{ cluster.uuid }}" + - result.response[0].ext_id == "{{ todelete.0 }}" + fail_msg: " Unable to fetch vm using filter name " + success_msg: " vm fetched using filter name successfully " +######################################################################## +- name: Create VM with full requirements + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + num_sockets: 1 + num_cores_per_socket: 1 + num_threads_per_core: 1 + num_numa_nodes: 1 + memory_size_bytes: 4294967296 + is_vcpu_hard_pinning_enabled: false + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_vga_console_enabled: false + machine_type: PC + hardware_clock_timezone: UTC + enabled_cpu_features: + - HARDWARE_VIRTUALIZATION + is_branding_enabled: false + is_agent_vm: false + apc_config: + is_apc_enabled: false + vtpm_config: + is_vtpm_enabled: false + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.num_sockets == 1 + - result.response.num_cores_per_socket == 1 + - result.response.num_threads_per_core == 1 + - result.response.memory_size_bytes == 4294967296 + - result.response.is_vcpu_hard_pinning_enabled == false + - result.response.is_cpu_passthrough_enabled == false + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled 
== false + - result.response.is_vga_console_enabled == false + - result.response.machine_type == "PC" + - result.response.hardware_clock_timezone == "UTC" + - result.response.num_numa_nodes == 1 + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + - result.response.is_branding_enabled == false + - result.response.is_agent_vm == false + - result.response.apc_config.is_apc_enabled == false + - result.response.vtpm_config.is_vtpm_enabled == false + fail_msg: " Unable to create VM with full requirements " + success_msg: " VM with full requirements created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' +######################################################################## +- name: Create vm with different disks, using ubuntu image , with cdrom + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + cd_roms: + - disk_address: + bus_type: IDE + backing_info: + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: SCSI + index: 2 + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 3 + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].disk_address.bus_type == 
"SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + - result.response.cd_roms[0].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.cd_roms[0].disk_address.bus_type == "IDE" + fail_msg: " Unable to create vm with different disks, using ubuntu image , with cdrom" + success_msg: "Vm with different disks, using ubuntu image , with cdrom created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +- name: Fetch vm using ext_id + ntnx_vms_info_v2: + ext_id: "{{ result.ext_id }}" + register: result + ignore_errors: true + +- name: Status of vm + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.ext_id == "{{ result.ext_id }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + - result.response.cd_roms[0].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.cd_roms[0].disk_address.bus_type == "IDE" + fail_msg: " Unable to fetch vm using ext_id " + success_msg: " vm fetched using ext_id successfully " +######################################################################## +- name: 
List all categories + ntnx_categories_info_v2: + limit: 3 + register: result + ignore_errors: true + +- name: Set category ext_id + ansible.builtin.set_fact: + category_ext_id_1: "{{ result.response[0].ext_id }}" + category_ext_id_2: "{{ result.response[1].ext_id }}" + +- name: Create vm with multiple serial port, and categories + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + categories: + - ext_id: "{{ result.response[0].ext_id }}" + - ext_id: "{{ result.response[1].ext_id }}" + serial_ports: + - is_connected: true + index: 0 + - is_connected: true + index: 3 + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.serial_ports[0].is_connected == true + - result.response.serial_ports[0].index == 0 + - result.response.serial_ports[1].is_connected == true + - result.response.serial_ports[1].index == 3 + - result.response.categories[0].ext_id == "{{ category_ext_id_1 }}" + - result.response.categories[1].ext_id == "{{ category_ext_id_2 }}" + fail_msg: " Unable to Create vm with multiple serial port, and categories " + success_msg: " Vm with multiple serial port, and categories created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' +######################################################################## +- name: Create vm with legacy_boot + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + disks: + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: SCSI + index: 2 + - 
backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 3 + boot_config: + legacy_boot: + boot_device: + boot_device_disk: + disk_address: + bus_type: SCSI + index: 3 + boot_order: + - DISK + - CDROM + - NETWORK + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + - result.response.boot_config.boot_device.disk_address.bus_type == "SCSI" + - result.response.boot_config.boot_device.disk_address.index == 3 + - result.response.boot_config.boot_order[0] == "DISK" + - result.response.boot_config.boot_order[1] == "CDROM" + - result.response.boot_config.boot_order[2] == "NETWORK" + fail_msg: " Unable to create VM with legacy_boot " + success_msg: " VM with legacy_boot created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' +######################################################################## +- name: Create vm with nics,cloud_init script + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + nics: + - network_info: + subnet: + ext_id: "{{ network.dhcp.uuid }}" + disks: + - 
backing_info: + vm_disk: + disk_size_bytes: 26843545600 + storage_container: + ext_id: "{{ storage_container.uuid }}" + disk_address: + bus_type: SCSI + index: 2 + - backing_info: + vm_disk: + disk_size_bytes: 26843545600 + data_source: + reference: + image_reference: + image_ext_id: "{{ disk_image.image_ext_ids[0] }}" + disk_address: + bus_type: SCSI + index: 3 + guest_customization: + config: + cloudinit: + datasource_type: CONFIG_DRIVE_V2 + cloud_init_script: + user_data: + value: I2Nsb3VkLWNvbmZpZwpkaXNhYmxlX3Jvb3Q6IGZhbHNlCnNzaF9wd2F1dGg6ICAgdHJ1ZQ== + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "ansible test" + - result.response.disks[0].backing_info.storage_container.ext_id == "{{ storage_container.uuid }}" + - result.response.disks[0].disk_address.bus_type == "SCSI" + - result.response.disks[0].disk_address.index == 2 + - result.response.disks[1].backing_info.data_source.reference.image_ext_id == "{{ disk_image.image_ext_ids[0] }}" + - result.response.disks[1].disk_address.bus_type == "SCSI" + - result.response.disks[1].disk_address.index == 3 + - result.response.nics[0].network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + - result.response.nics[0].backing_info.is_connected == true + - result.response.nics[0].network_info.nic_type == "NORMAL_NIC" + - result.response.nics[0].network_info.vlan_mode == "ACCESS" + - result.response.nics[0].network_info.subnet.ext_id == "{{ network.dhcp.uuid }}" + - result.response.cd_roms[0] is defined + - result.response.cd_roms[0].iso_type == "GUEST_CUSTOMIZATION" + fail_msg: " Unable to create VM with cloud_init script " + success_msg: " VM with cloud_init script created successfully " + +- name: Adding VM to todelete list + 
ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' +######################################################################## + +- name: Read content from file + ansible.builtin.set_fact: + xml_file_content: "{{ lookup('file', unattendxml.dest + '/unattendxml.txt') }}" + +- name: Create vm with windows unattend xml file + ntnx_vms_v2: + name: "{{ vm_name }}" + description: ansible test + cluster: + ext_id: "{{ cluster.uuid }}" + guest_customization: + config: + sysprep: + install_type: "FRESH" + sysprep_script: + unattendxml: + value: "{{ xml_file_content }}" + register: result + ignore_errors: true + +- name: Creation status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.response.ext_id is defined + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.cd_roms[0] is defined + - result.response.cd_roms[0].iso_type == "GUEST_CUSTOMIZATION" + fail_msg: " Unable to create VM with windows unattend xml file " + success_msg: " VM with windows unattend xml file created successfully " + +- name: Adding VM to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [result["ext_id"]] }}' + +######################################################################## +- name: List all VMs + ntnx_vms_info_v2: + register: result + ignore_errors: true + +- name: Status of all VMs + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 1 + fail_msg: "Unable to list VMs " + success_msg: VMs listed successfully +######################################################################## +- name: List all VMs with limit 3 + ntnx_vms_info_v2: + limit: 3 + register: result + ignore_errors: true + +- name: Status of all VMs + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == False 
+ - result.failed == False + - result.response | length == 3 + fail_msg: "Unable to list VMs " + success_msg: VMs listed successfully +######################################################################## +- name: Delete all Created VMs + ntnx_vms_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + - item.response is defined + - item.changed == True + - item.failed == False + - item.ext_id == "{{ todelete[vms_index] }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted successfully " + loop: "{{ result.results }}" + loop_control: + index_var: vms_index + +- name: Reset to delete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_vms_v2/tasks/main.yml b/tests/integration/targets/ntnx_vms_v2/tasks/main.yml new file mode 100644 index 000000000..8ee3cfad8 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_v2/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import create_delete_info_vm.yml + ansible.builtin.import_tasks: create_delete_info_vm.yml + - name: Import update_vm.yml + ansible.builtin.import_tasks: "update_vm.yml" diff --git a/tests/integration/targets/ntnx_vms_v2/tasks/update_vm.yml b/tests/integration/targets/ntnx_vms_v2/tasks/update_vm.yml new file mode 100644 index 000000000..ac56d15d3 --- /dev/null +++ b/tests/integration/targets/ntnx_vms_v2/tasks/update_vm.yml @@ -0,0 +1,292 @@ +--- +- name: Start ntnx_vms_v2 tests + ansible.builtin.debug: + msg: Start ntnx_vms_v2 tests + +- name: Generate random name + ansible.builtin.set_fact: + random_name: 
"{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VM and nics names + ansible.builtin.set_fact: + vm_name: "{{ random_name }}_vm_test" + +- name: Create VM + ntnx_vms_v2: + state: present + name: "{{ vm_name }}" + cluster: + ext_id: "{{ cluster.uuid }}" + description: "Test VM" + num_sockets: 1 + num_threads_per_core: 1 + num_cores_per_socket: 1 + num_numa_nodes: 1 + memory_size_bytes: 2147483648 + machine_type: "PC" + is_vcpu_hard_pinning_enabled: false + is_cpu_passthrough_enabled: true + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_branding_enabled: false + is_vga_console_enabled: true + is_agent_vm: false + enabled_cpu_features: + - "HARDWARE_VIRTUALIZATION" + + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.cluster.ext_id == "{{ cluster.uuid }}" + - result.response.name == "{{ vm_name }}" + - result.response.description == "Test VM" + - result.response.num_sockets == 1 + - result.response.num_threads_per_core == 1 + - result.response.num_cores_per_socket == 1 + - result.response.num_numa_nodes == 1 + - result.response.memory_size_bytes == 2147483648 + - result.response.machine_type == "PC" + - result.response.is_vcpu_hard_pinning_enabled == false + - result.response.is_cpu_passthrough_enabled == true + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled == false + - result.response.is_branding_enabled == false + - result.response.is_vga_console_enabled == true + - result.response.is_agent_vm == false + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + fail_msg: "Unable to Create VM " + success_msg: "VM is created successfully " + +- name: Set vm_uuid + ansible.builtin.set_fact: + vm_uuid: '{{ result["ext_id"] }}' + 
+################################################################################ + +- name: Fetch VM details after Creating VM + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch VM details after Creating VM Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ vm_name }}" + - result.response.description == "Test VM" + - result.response.num_sockets == 1 + - result.response.num_threads_per_core == 1 + - result.response.num_cores_per_socket == 1 + - result.response.num_numa_nodes == 1 + - result.response.memory_size_bytes == 2147483648 + - result.response.machine_type == "PC" + - result.response.is_vcpu_hard_pinning_enabled == false + - result.response.is_cpu_passthrough_enabled == true + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled == false + - result.response.is_branding_enabled == false + - result.response.is_vga_console_enabled == true + - result.response.is_agent_vm == false + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + fail_msg: "Fetch VM details after Creating VM failed " + success_msg: "Fetch VM details after Creating VM passed " + +################################################################################ + +- name: Update VM with all attributes with check mode enabled + ntnx_vms_v2: + state: present + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_updated" + description: "Test VM updated" + num_sockets: 4 + num_threads_per_core: 4 + num_cores_per_socket: 4 + num_numa_nodes: 4 + memory_size_bytes: 8589934592 + machine_type: "Q35" + is_vcpu_hard_pinning_enabled: true + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: true + is_gpu_console_enabled: true + is_branding_enabled: true + is_vga_console_enabled: true + is_agent_vm: true + enabled_cpu_features: HARDWARE_VIRTUALIZATION + register: result + 
ignore_errors: true + check_mode: true + +- name: Update VM with all attributes with check mode enabled Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ vm_name }}_updated" + - result.response.description == "Test VM updated" + - result.response.num_sockets == 4 + - result.response.num_threads_per_core == 4 + - result.response.num_cores_per_socket == 4 + - result.response.num_numa_nodes == 4 + - result.response.memory_size_bytes == 8589934592 + - result.response.machine_type == "Q35" + - result.response.is_vcpu_hard_pinning_enabled == true + - result.response.is_cpu_passthrough_enabled == false + - result.response.is_memory_overcommit_enabled == true + - result.response.is_gpu_console_enabled == true + - result.response.is_branding_enabled == true + - result.response.is_vga_console_enabled == true + - result.response.is_agent_vm == true + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + fail_msg: "Update VM with all attributes with check mode enabled failed " + success_msg: "Update VM with all attributes with check mode enabled passed " + +################################################################################ + +- name: Update VM with all attributes + ntnx_vms_v2: + state: present + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_updated" + description: "Test VM updated" + num_sockets: 2 + num_threads_per_core: 2 + num_cores_per_socket: 2 + num_numa_nodes: 2 + memory_size_bytes: 4294967296 + machine_type: "Q35" + is_vcpu_hard_pinning_enabled: true + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_branding_enabled: true + is_vga_console_enabled: false + is_agent_vm: true + enabled_cpu_features: HARDWARE_VIRTUALIZATION + register: result + ignore_errors: true + +- name: Update VM with all attributes Status + ansible.builtin.assert: + that: + - result.response is 
defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ vm_name }}_updated" + - result.response.description == "Test VM updated" + - result.response.num_sockets == 2 + - result.response.num_threads_per_core == 2 + - result.response.num_cores_per_socket == 2 + - result.response.num_numa_nodes == 2 + - result.response.memory_size_bytes == 4294967296 + - result.response.machine_type == "Q35" + - result.response.is_vcpu_hard_pinning_enabled == true + - result.response.is_cpu_passthrough_enabled == false + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled == false + - result.response.is_branding_enabled == true + - result.response.is_vga_console_enabled == false + - result.response.is_agent_vm == true + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + fail_msg: "Update VM with all attributes failed " + success_msg: "Update VM with all attributes passed " + +################################################################################ + +- name: Update VM with all attributes to test idempotency + ntnx_vms_v2: + state: present + ext_id: "{{ vm_uuid }}" + name: "{{ vm_name }}_updated" + description: "Test VM updated" + num_sockets: 2 + num_threads_per_core: 2 + num_cores_per_socket: 2 + num_numa_nodes: 2 + memory_size_bytes: 4294967296 + machine_type: "Q35" + is_vcpu_hard_pinning_enabled: true + is_cpu_passthrough_enabled: false + is_memory_overcommit_enabled: false + is_gpu_console_enabled: false + is_branding_enabled: true + is_vga_console_enabled: false + is_agent_vm: true + enabled_cpu_features: HARDWARE_VIRTUALIZATION + register: result + ignore_errors: true + +- name: Update VM with all attributes to test idempotency Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.skipped == true + - result.ext_id == "{{ vm_uuid }}" + - result.msg == "Nothing to change." 
+ fail_msg: "Update VM with all attributes to test idempotency failed " + success_msg: "Update VM with all attributes to test idempotency passed " + +################################################################################ + +- name: Fetch VM details after Updating VM attributes + ntnx_vms_info_v2: + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Fetch VM details after Updating VM attributes Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{ vm_name }}_updated" + - result.response.description == "Test VM updated" + - result.response.num_sockets == 2 + - result.response.num_threads_per_core == 2 + - result.response.num_cores_per_socket == 2 + - result.response.num_numa_nodes == 2 + - result.response.memory_size_bytes == 4294967296 + - result.response.machine_type == "Q35" + - result.response.is_vcpu_hard_pinning_enabled == true + - result.response.is_cpu_passthrough_enabled == false + - result.response.is_memory_overcommit_enabled == false + - result.response.is_gpu_console_enabled == false + - result.response.is_branding_enabled == true + - result.response.is_vga_console_enabled == false + - result.response.is_agent_vm == true + - result.response.enabled_cpu_features[0] == "HARDWARE_VIRTUALIZATION" + fail_msg: "Fetch VM details after Updating VM attributes failed " + success_msg: "Fetch VM details after Updating VM attributes passed " + +################################################################################ + +- name: Delete the VM + ntnx_vms_v2: + state: absent + ext_id: "{{ vm_uuid }}" + register: result + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.status == 'SUCCEEDED' + - result.ext_id == "{{ vm_uuid }}" + fail_msg: "Unable to delete VM " + success_msg: "VM is deleted 
successfully " diff --git a/tests/integration/targets/ntnx_volume_groups_disks_v2/aliases b/tests/integration/targets/ntnx_volume_groups_disks_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_volume_groups_disks_v2/meta/main.yml b/tests/integration/targets/ntnx_volume_groups_disks_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_disks_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/disks.yml b/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/disks.yml new file mode 100644 index 000000000..430552978 --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/disks.yml @@ -0,0 +1,360 @@ +- name: "Start Volume groups disks tests" + ansible.builtin.debug: + msg: "Start Volume groups disks tests" + +- name: Generate random names + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set VG name suffix + ansible.builtin.set_fact: + suffix_name: "ansible-vgs" + +- name: Set VG names + ansible.builtin.set_fact: + vg1_name: "{{suffix_name}}-{{random_name}}1" + vg2_name: "{{suffix_name}}-{{random_name}}2" + +- name: Create Volume group for tests + ntnx_volume_groups_v2: + name: "{{vg1_name}}" + description: "Volume group for disk tests" + should_load_balance_vm_attachments: true + sharing_status: "SHARED" + target_prefix: "vg1" + cluster_reference: "{{ cluster.uuid }}" + usage_type: "USER" + storage_features: + flash_mode: + is_enabled: true + register: result + ignore_errors: true + +- name: Verify vg create + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id is defined + + fail_msg: "Unable to create VG for tests" + success_msg: "VG created successfully 
for tests" + +- name: Set VG1 UUID + ansible.builtin.set_fact: + vg1_uuid: "{{ result.ext_id }}" + +######################################### Create disk tests ######################################### + +- name: Create disk with check mode + check_mode: true + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 0 + disk_size_bytes: 21474836480 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "STORAGE_CONTAINER" + ext_id: "{{ storage_container.uuid }}" + register: result + ignore_errors: true + +- name: Verify disk spec + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.index == 0 + - result.response.disk_size_bytes == 21474836480 + - result.response.description == "ansible-created-disk-updated" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.disk_data_source_reference.entity_type == "STORAGE_CONTAINER" + - result.response.disk_data_source_reference.ext_id == "{{storage_container.uuid}}" + + fail_msg: "Unable to create spec for disk create" + success_msg: "Spec generated successfully for disk create" + +- name: Create disk with min spec + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + disk_size_bytes: 21474836480 + disk_data_source_reference: + entity_type: "STORAGE_CONTAINER" + ext_id: "{{ storage_container.uuid }}" + register: result + ignore_errors: true + +- name: Verify disk create + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.task_ext_id is defined + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.disk_size_bytes == 21474836480 
+ - result.response.ext_id == result.ext_id + - result.response.storage_container_id == "{{storage_container.uuid}}" + + fail_msg: "Unable to create disk" + success_msg: "Disk created successfully" + +- name: Create disk with all attributes + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 1 + disk_size_bytes: 21474836480 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "STORAGE_CONTAINER" + ext_id: "{{ storage_container.uuid }}" + register: result + ignore_errors: true + +- name: Verify disk create + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.index == 1 + - result.response.disk_size_bytes == 21474836480 + - result.response.description == "ansible-created-disk-updated" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.ext_id == result.ext_id + - result.response.storage_container_id == "{{storage_container.uuid}}" + + fail_msg: "Unable to create disk" + success_msg: "Disk created successfully" + +- name: Set disk1 UUID + ansible.builtin.set_fact: + disk1_uuid: "{{ result.ext_id }}" + +- name: Create disk with vdisk ref in check mode + check_mode: true + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 1 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "VM_DISK" + ext_id: "{{disk1_uuid}}" + register: result + ignore_errors: true + +- name: Verify spec of disk + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.index == 1 + - result.response.description == 
"ansible-created-disk-updated" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.disk_data_source_reference.entity_type == "VM_DISK" + - result.response.disk_data_source_reference.ext_id == "{{disk1_uuid}}" + + fail_msg: "Unable to create spec for disk create" + success_msg: "Spec generated successfully for disk create" + +- name: Create disk from recovery point in check mode + check_mode: true + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 1 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "DISK_RECOVERY_POINT" + ext_id: "{{disk1_uuid}}" + register: result + ignore_errors: true + +- name: Verify spec of disk + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.index == 1 + - result.response.description == "ansible-created-disk-updated" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.disk_data_source_reference.entity_type == "DISK_RECOVERY_POINT" + - result.response.disk_data_source_reference.ext_id == "{{disk1_uuid}}" + + fail_msg: "Unable to create spec for disk create" + success_msg: "Spec generated successfully for disk create" + +- name: Create disk from previously created vdisk + ntnx_volume_groups_disks_v2: + state: "present" + volume_group_ext_id: "{{ vg1_uuid }}" + index: 2 + description: "ansible-created-disk-updated" + disk_storage_features: + flash_mode: + is_enabled: true + disk_data_source_reference: + entity_type: "VOLUME_DISK" + ext_id: "{{disk1_uuid}}" + register: result + ignore_errors: true + +- name: Set disk2 UUID + ansible.builtin.set_fact: + disk2_uuid: "{{ result.ext_id }}" + +- name: Verify disk create + ansible.builtin.assert: + that: + - result.error == None + - 
result.response is defined + - result.changed == true + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.response.index == 2 + - result.response.description == "ansible-created-disk-updated" + - result.response.disk_storage_features.flash_mode.is_enabled == true + - result.response.ext_id == result.ext_id + + fail_msg: "Unable to create disk" + success_msg: "Disk created successfully" + +- name: Fetch all disks from VG + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + register: result + +- name: Verify disk fetch + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response | length == 3 + - result.volume_group_ext_id == "{{vg1_uuid}}" + + fail_msg: "Unable to fetch disks" + success_msg: "Disks fetched successfully" + +- name: Fetch disks from VG using certain limit + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + limit: 1 + volume_group_ext_id: "{{ vg1_uuid }}" + register: result + +- name: Verify disk fetch + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response | length == 1 + - result.volume_group_ext_id == "{{vg1_uuid}}" + + fail_msg: "Unable to fetch disks" + success_msg: "Disks fetched successfully" + +- name: Fetch certain VG disk + nutanix.ncp.ntnx_volume_groups_disks_info_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ disk1_uuid }}" + register: result + +- name: Verify disk fetch + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.ext_id == "{{disk1_uuid}}" + - result.volume_group_ext_id == "{{vg1_uuid}}" + + fail_msg: "Unable to fetch disk" + success_msg: "Disk fetched successfully" + +######################################### Delete disk tests ######################################### + +- name: Delete disk + nutanix.ncp.ntnx_volume_groups_disks_v2: + 
state: absent + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ disk1_uuid }}" + register: result + +- name: Verify disk delete + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.response.status == "SUCCEEDED" + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.ext_id == "{{disk1_uuid}}" + - result.task_ext_id is defined + + fail_msg: "Unable to delete disk" + success_msg: "Disk deleted successfully" + +- name: Delete disk + nutanix.ncp.ntnx_volume_groups_disks_v2: + state: absent + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ disk2_uuid }}" + register: result + +- name: Verify disk delete + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.response.status == "SUCCEEDED" + - result.volume_group_ext_id == "{{vg1_uuid}}" + - result.ext_id == "{{disk2_uuid}}" + - result.task_ext_id is defined + + fail_msg: "Unable to delete disk" + success_msg: "Disk deleted successfully" + +######################################### Cleanup ######################################### +- name: Delete Volume group + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ vg1_uuid }}" + register: result + ignore_errors: true + +- name: Verify delete of VGs + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vg1_uuid }}" + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VG" + success_msg: "VG deleted successfully" diff --git a/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/main.yml b/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/main.yml new file mode 100644 index 000000000..d4a397f5b --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_disks_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ 
ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import disks.yml + ansible.builtin.import_tasks: "disks.yml" diff --git a/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/aliases b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/meta/main.yml b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/iscsi_client_connections.yml b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/iscsi_client_connections.yml new file mode 100644 index 000000000..c8abd40cb --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/iscsi_client_connections.yml @@ -0,0 +1,250 @@ +- name: "Start Volume groups disks tests" + ansible.builtin.debug: + msg: "Start Volume groups disks tests" + +- name: Generate random names + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: "ansible-vgs" + +- name: Set VG name + ansible.builtin.set_fact: + vg1_name: "{{suffix_name}}-{{random_name}}1" + +############################################ Test Setup ############################################ + +- name: List all available ISCSI connections already attached to the VGs + ntnx_volume_groups_iscsi_clients_info_v2: + register: result + +- name: Verify iscsi client list + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed 
== false + - result.response | length >= 0 + + fail_msg: "Unable to list iscsi clients" + success_msg: "ISCSI clients listed successfully" + +- name: Set iscsi client name and uuid + ansible.builtin.set_fact: + iscsi_client_name: "{{ result.response[0].iscsi_initiator_name }}" + iscsi_client_uuid: "{{ result.response[0].ext_id }}" + +- name: Create Volume group for tests + ntnx_volume_groups_v2: + name: "{{vg1_name}}" + description: "Volume group for iscsi client tests tests" + target_prefix: "vg1" + cluster_reference: "{{ cluster.uuid }}" + usage_type: "USER" + register: result + +- name: Verify vg create + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id is defined + + fail_msg: "Unable to create VG for tests" + success_msg: "VG created successfully for tests" + +- name: Set VG uuid + ansible.builtin.set_fact: + vg1_uuid: "{{ result.ext_id }}" + +############################################ Verify check modes ############################################ + +- name: Generate check mode response for Iscsi attachments using IQN + check_mode: true + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + iscsi_initiator_name: "{{ iscsi_client_name }}" + num_virtual_targets: 32 + enabled_authentications: CHAP + client_secret: "1234455" + attachment_site: "PRIMARY" + register: result + +- name: Verify spec + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.iscsi_initiator_name == iscsi_client_name + - result.response.attachment_site == "PRIMARY" + - result.response.enabled_authentications == "CHAP" + - result.response.num_virtual_targets == 32 + - result.volume_group_ext_id == vg1_uuid + fail_msg: "Unable to create spec for attach iscsi client using IQN" + success_msg: "ISCSI client attach spec generated successfully using IQN" + +- name: Generate check mode response for Iscsi 
attachments using Ipv4 + check_mode: true + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + num_virtual_targets: 32 + enabled_authentications: CHAP + client_secret: "Nutanix.1234455" + attachment_site: "PRIMARY" + iscsi_initiator_network_id: + ipv4: + value: "0.0.0.0" + register: result + +- name: Verify spec + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.iscsi_initiator_network_id.ipv4.value == "0.0.0.0" + fail_msg: "Unable to create spec for attach iscsi client using IPv4" + success_msg: "ISCSI client attach spec generated successfully using IPv4" + +- name: Generate check mode response for Iscsi attachments using fqdn + check_mode: true + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + num_virtual_targets: 32 + enabled_authentications: CHAP + client_secret: "Nutanix.1234455" + attachment_site: "PRIMARY" + iscsi_initiator_network_id: + fqdn: + value: "test.com" + register: result + +- name: Verify spec + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.iscsi_initiator_network_id.fqdn.value == "test.com" + - result.volume_group_ext_id == vg1_uuid + fail_msg: "Unable to create spec for attach iscsi client using FQDN" + success_msg: "ISCSI client attach spec generated successfully using FQDN" + +############################################ Attachment/Detachment tests ############################################ + +- name: Attach iscsi client to VG using initiator name + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + iscsi_initiator_name: "{{ iscsi_client_name }}" + num_virtual_targets: 32 + register: result + +- name: Verify status + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id == iscsi_client_uuid + - 
result.volume_group_ext_id == vg1_uuid + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to attach iscsi client using initiator name" + success_msg: "ISCSI client attached successfully using initiator name" + +- name: Fetch specific client + ntnx_volume_groups_iscsi_clients_info_v2: + ext_id: "{{ iscsi_client_uuid }}" + register: result + +- name: Verify response + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.iscsi_initiator_name == iscsi_client_name + - result.ext_id == iscsi_client_uuid + - result.ext_id == result.response.ext_id + fail_msg: "Unable to fetch specific iscsi client" + success_msg: "Specific ISCSI client fetched successfully" + +- name: Detach client from VG + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ iscsi_client_uuid }}" + state: absent + register: result + +- name: Verify status + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.volume_group_ext_id == vg1_uuid + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to detach iscsi client using initiator name" + success_msg: "ISCSI client detached successfully using initiator name" + +- name: Attach iscsi client to VG using iscsi client id + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ iscsi_client_uuid }}" + register: result + +- name: Verify status + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id == iscsi_client_uuid + - result.volume_group_ext_id == vg1_uuid + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to attach iscsi client using ext_id" + success_msg: "ISCSI client attached successfully using ext_id" + +- name: 
Detach client from VG + ntnx_volume_groups_iscsi_clients_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ iscsi_client_uuid }}" + state: absent + register: result + +- name: Verify status + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.volume_group_ext_id == vg1_uuid + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to detach iscsi client using initiator name" + success_msg: "ISCSI client detached successfully using initiator name" + +############################################ Cleanup ############################################ + +- name: Delete Volume group + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ vg1_uuid }}" + register: result + +- name: Verify delete of VGs + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vg1_uuid }}" + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VG" + success_msg: "VG deleted successfully" diff --git a/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/main.yml b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/main.yml new file mode 100644 index 000000000..bdede0553 --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_iscsi_clients_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import iscsi_client_connections.yml + ansible.builtin.import_tasks: "iscsi_client_connections.yml" diff --git a/tests/integration/targets/ntnx_volume_groups_v2/aliases b/tests/integration/targets/ntnx_volume_groups_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/integration/targets/ntnx_volume_groups_v2/meta/main.yml b/tests/integration/targets/ntnx_volume_groups_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_volume_groups_v2/tasks/main.yml b/tests/integration/targets/ntnx_volume_groups_v2/tasks/main.yml new file mode 100644 index 000000000..f89fd58f2 --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import volume_groups_crud.yml + ansible.builtin.import_tasks: "volume_groups_crud.yml" diff --git a/tests/integration/targets/ntnx_volume_groups_v2/tasks/volume_groups_crud.yml b/tests/integration/targets/ntnx_volume_groups_v2/tasks/volume_groups_crud.yml new file mode 100644 index 000000000..f8cc86a57 --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_v2/tasks/volume_groups_crud.yml @@ -0,0 +1,221 @@ +- name: "Start Volume groups tests" + ansible.builtin.debug: + msg: "Start Volume groups tests" + +- name: Generate random names + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: "ansible-vgs" + +- name: Set VG names + ansible.builtin.set_fact: + vg1_name: "{{suffix_name}}-{{random_name}}1" + vg2_name: "{{suffix_name}}-{{random_name}}2" + +- name: Create Volume group with check mode + check_mode: true + ntnx_volume_groups_v2: + name: "{{vg1_name}}" + description: "Volume group 1" + should_load_balance_vm_attachments: true + sharing_status: "SHARED" + target_prefix: "vg1" + 
cluster_reference: "{{ cluster.uuid }}" + usage_type: "USER" + is_hidden: true + storage_features: + flash_mode: + is_enabled: true + iscsi_features: + target_secret: "Secret1234567" + enabled_authentications: "CHAP" + register: result + ignore_errors: true + +- name: Verify spec of VG + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == false + - result.response.cluster_reference == "{{cluster.uuid}}" + - result.response.name == "{{vg1_name}}" + - result.response.description == "Volume group 1" + - result.response.should_load_balance_vm_attachments == true + - result.response.sharing_status == "SHARED" + - result.response.target_prefix == "vg1" + - result.response.usage_type == "USER" + - result.response.is_hidden == true + - result.response.storage_features.flash_mode.is_enabled == true + - result.response.iscsi_features.enabled_authentications == "CHAP" + + fail_msg: "Unable to create spec for VG create" + success_msg: "Spec generated successfully for VG create" + +- name: Create Volume group with min spec and no Auth + ntnx_volume_groups_v2: + name: "{{vg1_name}}" + description: "Volume group 1" + cluster_reference: "{{ cluster.uuid }}" + register: result + ignore_errors: true + +- name: Verify creation of VG + ansible.builtin.assert: + that: + - result.error == None + - result.ext_id is defined + - result.task_ext_id is defined + - result.response is defined + - result.changed == true + - result.ext_id == result.response.ext_id + - result.response.cluster_reference == "{{cluster.uuid}}" + - result.response.name == "{{vg1_name}}" + - result.response.description == "Volume group 1" + - result.response.should_load_balance_vm_attachments == false + - result.response.sharing_status == None + fail_msg: "Unable to create VG" + success_msg: "VG created successfully" + +- name: Set VG UUID + ansible.builtin.set_fact: + vg1_uuid: "{{ result.ext_id }}" + +- name: Create Volume group with all config and enabled 
chap auth + ntnx_volume_groups_v2: + name: "{{vg2_name}}" + description: "Volume group 2" + should_load_balance_vm_attachments: true + sharing_status: "SHARED" + target_prefix: "vg1" + cluster_reference: "{{ cluster.uuid }}" + usage_type: "USER" + storage_features: + flash_mode: + is_enabled: true + iscsi_features: + target_secret: "Secret1234567" + enabled_authentications: "CHAP" + register: result + ignore_errors: true + +- name: Verify creation of VG + ansible.builtin.assert: + that: + - result.error == None + - result.ext_id is defined + - result.task_ext_id is defined + - result.response is defined + - result.changed == true + - result.response.cluster_reference == "{{cluster.uuid}}" + - result.response.name == "{{vg2_name}}" + - result.response.description == "Volume group 2" + - result.response.should_load_balance_vm_attachments == true + - result.response.sharing_status == "SHARED" + - "'vg1' in result.response.target_name" + - result.response.usage_type == "USER" + - result.response.storage_features.flash_mode.is_enabled == true + - result.response.iscsi_features.target_secret == None + - result.response.iscsi_features.enabled_authentications == "CHAP" + fail_msg: "Unable to create VG" + success_msg: "VG created successfully" + +- name: Set VG UUID + ansible.builtin.set_fact: + vg2_uuid: "{{ result.ext_id }}" + +- name: Fetch above created VG + ntnx_volume_groups_info_v2: + ext_id: "{{ vg1_uuid }}" + register: result + ignore_errors: true + +- name: Fetch above created VG + ansible.builtin.assert: + that: + - result.error == None + - result.changed == False + - result.response is defined + - result.ext_id == "{{vg1_uuid}}" + - result.response.ext_id == "{{vg1_uuid}}" + - result.response.cluster_reference == "{{cluster.uuid}}" + - result.response.name == "{{vg1_name}}" + fail_msg: "Unable to fetch VG" + success_msg: "VG fetched successfully" + +- name: Fetch all VGs created above using filters + ntnx_volume_groups_info_v2: + filter: "startswith(name, 
'{{suffix_name}}-{{random_name}}')" + register: result + ignore_errors: true + +- name: Set expected VGs + ansible.builtin.set_fact: + expected_vgs: ["{{vg1_uuid}}", "{{vg2_uuid}}"] + +- name: Fetch above created VG + ansible.builtin.assert: + that: + - result.error == None + - result.changed == False + - result.response is defined + - result.response | length == 2 + - result.response[0].ext_id in expected_vgs + - result.response[1].ext_id in expected_vgs + fail_msg: "Unable to fetch VGs using filters" + success_msg: "VGs fetched successfully" + +- name: Apply limits on VG response from list + ntnx_volume_groups_info_v2: + limit: 1 + register: result + ignore_errors: true + +- name: Fetch above created VG + ansible.builtin.assert: + that: + - result.error == None + - result.changed == False + - result.response is defined + - result.response | length == 1 + fail_msg: "Unable to fetch VGs using limits" + success_msg: "VGs fetched successfully" + +- name: Delete Volume groups + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ vg1_uuid }}" + register: result + ignore_errors: true + +- name: Verify delete of VGs + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{vg1_uuid}}" + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VG" + success_msg: "VG deleted successfully" + +- name: Delete Volume groups + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ vg2_uuid }}" + register: result + ignore_errors: true + +- name: Verify delete of VGs + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{vg2_uuid}}" + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VG" + success_msg: "VG deleted successfully" diff --git a/tests/integration/targets/ntnx_volume_groups_vms_v2/aliases b/tests/integration/targets/ntnx_volume_groups_vms_v2/aliases new file mode 
100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_volume_groups_vms_v2/meta/main.yml b/tests/integration/targets/ntnx_volume_groups_vms_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_vms_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/ahv_vm_vg_connections.yml b/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/ahv_vm_vg_connections.yml new file mode 100644 index 000000000..05d35e3bc --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/ahv_vm_vg_connections.yml @@ -0,0 +1,278 @@ +- name: "Start Volume groups disks tests" + ansible.builtin.debug: + msg: "Start Volume groups disks tests" + +- name: Generate random names + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + ansible.builtin.set_fact: + suffix_name: "ansible-vgs" + +- name: Set VG names + ansible.builtin.set_fact: + vg1_name: "{{suffix_name}}-{{random_name}}1" + +############################################ Test Setup ############################################ +- name: Create Volume group for tests + ntnx_volume_groups_v2: + name: "{{vg1_name}}" + description: "Volume group for disk tests" + should_load_balance_vm_attachments: true + sharing_status: "SHARED" + target_prefix: "vg1" + cluster_reference: "{{ cluster.uuid }}" + usage_type: "USER" + storage_features: + flash_mode: + is_enabled: true + register: result + +- name: Verify vg create + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id is defined + + fail_msg: "Unable to create VG for tests" + success_msg: "VG created successfully for tests" + +- name: Set VG UUID + ansible.builtin.set_fact: + vg1_uuid: "{{ result.ext_id }}" + +- 
name: Create VM1 for VG + ntnx_vms_v2: + name: "ansible-created-for-vg" + num_sockets: 2 + num_cores_per_socket: 2 + memory_size_bytes: 4294967296 + cluster: + ext_id: "{{ cluster.uuid }}" + nics: + - network_info: + nic_type: "NORMAL_NIC" + vlan_mode: "ACCESS" + subnet: + ext_id: "{{network.dhcp.uuid}}" + ipv4_config: + should_assign_ip: true + register: result + +- name: Set VM1 UUID + ansible.builtin.set_fact: + vm1_uuid: "{{ result.ext_id }}" + +- name: Create VM2 for VG + ntnx_vms_v2: + name: "ansible-created-for-vg" + num_sockets: 2 + num_cores_per_socket: 2 + memory_size_bytes: 4294967296 + cluster: + ext_id: "{{ cluster.uuid }}" + nics: + - network_info: + nic_type: "NORMAL_NIC" + vlan_mode: "ACCESS" + subnet: + ext_id: "{{network.dhcp.uuid}}" + ipv4_config: + should_assign_ip: true + register: result + +- name: Verify vm creation + ansible.builtin.assert: + that: + - result.error == None + - result.response is defined + - result.changed == true + - result.ext_id is defined + + fail_msg: "Unable to create VM for tests" + success_msg: "VM created successfully for tests" + +- name: Set VM2 UUID + ansible.builtin.set_fact: + vm2_uuid: "{{ result.ext_id }}" + +############################################ Attach VM tests ############################################ +- name: Create spec for attaching VM to VG + check_mode: true + ntnx_volume_groups_vms_v2: + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ vm1_uuid }}" + index: 0 + register: result + +- name: Verify spec creation + ansible.builtin.assert: + that: + - result.error == None + - result.changed == false + - result.ext_id == "{{ vm1_uuid }}" + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.index == 0 + fail_msg: "Unable to create spec for attaching VM to VG" + success_msg: "Spec created successfully for VM to VG" + +- name: Attach VM1 to VG + ntnx_volume_groups_vms_v2: + state: present + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ vm1_uuid }}" + index: 1 + register: 
result + +- name: Verify attachment of VM from VG side + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vm1_uuid }}" + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to attach VM to VG" + success_msg: "VM attached to VG successfully" + +- name: Fetch VM1 info to verify attachment + ntnx_vms_info_v2: + ext_id: "{{ vm1_uuid }}" + register: result + +- name: Verify VG attachment from VM status + ansible.builtin.assert: + that: + - result.error == None + - result.changed == false + - result.response.disks[0].backing_info.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.disks[0].disk_address.index == 1 + + fail_msg: "Unable to verify VG attachment from VM side" + success_msg: "VG attachment verified from VM side" + +- name: Attach VM2 to VG + ntnx_volume_groups_vms_v2: + state: present + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ vm2_uuid }}" + register: result + +- name: Verify attachment of VM from VG side + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vm2_uuid }}" + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to attach VM to VG" + success_msg: "VM attached to VG successfully" + +- name: Fetch VM2 info to verify attachment + ntnx_vms_info_v2: + ext_id: "{{ vm2_uuid }}" + register: result + +- name: Verify VG attachment from VM status + ansible.builtin.assert: + that: + - result.error == None + - result.changed == false + - result.response.disks[0].backing_info.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.disks[0].disk_address.index == 0 + + fail_msg: "Unable to verify VG attachment from VM side" + success_msg: "VG attachment verified from VM side" + +############################################ Detach VM tests ############################################ + +- name: Detach VM1 from 
VG + ntnx_volume_groups_vms_v2: + state: absent + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ vm1_uuid }}" + register: result + +- name: Verify detachment of VM from VG side + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vm1_uuid }}" + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to detach VM from VG" + success_msg: "VM detached from VG successfully" + +- name: Detach VM2 from VG + ntnx_volume_groups_vms_v2: + state: absent + volume_group_ext_id: "{{ vg1_uuid }}" + ext_id: "{{ vm2_uuid }}" + register: result + +- name: Verify detachment of VM2 from VG side + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vm2_uuid }}" + - result.volume_group_ext_id == "{{ vg1_uuid }}" + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to detach VM2 from VG" + success_msg: "VM2 detached from VG successfully" + +############################################ Cleanup tests ############################################ + +- name: Delete VM1 + ntnx_vms_v2: + state: absent + ext_id: "{{ vm1_uuid }}" + register: result + +- name: Verify delete of VM1 + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VM1" + success_msg: "VM1 deleted successfully" + +- name: Delete VM2 + ntnx_vms_v2: + state: absent + ext_id: "{{ vm2_uuid }}" + register: result + +- name: Verify delete of VM2 + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VM2" + success_msg: "VM2 deleted successfully" + +- name: Delete Volume group + ntnx_volume_groups_v2: + state: absent + ext_id: "{{ vg1_uuid }}" + register: result + +- name: Verify 
delete of VGs + ansible.builtin.assert: + that: + - result.error == None + - result.changed == true + - result.ext_id == "{{ vg1_uuid }}" + - result.task_ext_id is defined + - result.response.status == "SUCCEEDED" + fail_msg: "Unable to delete VG" + success_msg: "VG deleted successfully" diff --git a/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/main.yml b/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/main.yml new file mode 100644 index 000000000..629556770 --- /dev/null +++ b/tests/integration/targets/ntnx_volume_groups_vms_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import ahv_vm_vg_connections.yml + ansible.builtin.import_tasks: "ahv_vm_vg_connections.yml" diff --git a/tests/integration/targets/ntnx_vpcs_v2/aliases b/tests/integration/targets/ntnx_vpcs_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_vpcs_v2/meta/main.yml b/tests/integration/targets/ntnx_vpcs_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/ntnx_vpcs_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/ntnx_vpcs_v2/tasks/all_operations.yml b/tests/integration/targets/ntnx_vpcs_v2/tasks/all_operations.yml new file mode 100644 index 000000000..9f1e36068 --- /dev/null +++ b/tests/integration/targets/ntnx_vpcs_v2/tasks/all_operations.yml @@ -0,0 +1,351 @@ +--- +- name: Start testing ntnx_vpcs_v2, ntnx_vpcs_info_v2 + ansible.builtin.debug: + msg: Start testing ntnx_vpcs_v2, ntnx_vpcs_info_v2 + +- name: Generate random vpc name + ansible.builtin.set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set suffix name + 
ansible.builtin.set_fact: + suffix_name: "ansible-ag-vpc" + +- name: Generate random vpc name + ansible.builtin.set_fact: + vpc_name: "{{ random_name }}{{ suffix_name }}" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +########################################################### +- name: Create min VPC with check mode + ntnx_vpcs_v2: + state: present + wait: true + name: "{{ vpc_name }}1" + register: result + check_mode: true + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.name == "{{vpc_name}}1" + fail_msg: " Unable to Create min VPC with check mode" + success_msg: "Create min VPC with check mode finished as expected" +########################################################### +- name: Create min VPC with external_nat_subnet uuid + ntnx_vpcs_v2: + state: present + wait: true + name: "{{vpc_name}}2" + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.external_subnets.0.subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.name == "{{vpc_name}}2" + fail_msg: " Unable to create Min vpc with external subnet uuid " + success_msg: " Min vpc with external subnet uuid created successfully " + +- name: Adding VPC external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +########################################################### +- name: Update vpc name + ntnx_vpcs_v2: + state: present + ext_id: "{{ result.ext_id }}" + name: "{{vpc_name}}2_updated" + register: result + ignore_errors: true + +- name: Update status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.name == "{{vpc_name}}2_updated" + fail_msg: " 
Unable to update vpc name " + success_msg: " VPC name updated successfully " +########################################################### +- name: Delete all created vpcs + ntnx_vpcs_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] +########################################################### +- name: Create VPC with routable_ips + ntnx_vpcs_v2: + state: present + name: "{{vpc_name}}3" + description: "VPC with routable_ips" + externally_routable_prefixes: + - ipv4: + ip: + value: "{{ routable_ips.network_ip }}" + prefix_length: "{{ routable_ips.network_prefix }}" + prefix_length: "{{ routable_ips.network_prefix }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.name == "{{vpc_name}}3" + - result.response.description == "VPC with routable_ips" + - result.response.externally_routable_prefixes.0.ipv4.prefix_length == {{ routable_ips.network_prefix }} + - result.response.externally_routable_prefixes.0.ipv4.ip.value == "{{ routable_ips.network_ip }}" + fail_msg: " Unable to create vpc with routable_ips " + success_msg: " VPC with routable ips created successfully " + +- name: Adding VPC external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +########################################################## +- name: Update vpc with external_nat_subnet + ntnx_vpcs_v2: + state: present + ext_id: "{{ result.ext_id }}" + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + ignore_errors: true + +- name: Update status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.external_subnets.0.subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.name == "{{vpc_name}}3" + - 
result.response.description == "VPC with routable_ips" + - result.response.externally_routable_prefixes.0.ipv4.prefix_length == {{ routable_ips.network_prefix }} + - result.response.externally_routable_prefixes.0.ipv4.ip.value == "{{ routable_ips.network_ip }}" + fail_msg: " Unable to update vpc with external_nat_subnet " + success_msg: " VPC updated with external_nat_subnet successfully " +########################################################## +- name: Check idempotency + ntnx_vpcs_v2: + state: present + ext_id: "{{ result.ext_id }}" + name: "{{vpc_name}}3" + description: "VPC with routable_ips" + externally_routable_prefixes: + - ipv4: + ip: + value: "{{ routable_ips.network_ip }}" + prefix_length: "{{ routable_ips.network_prefix }}" + register: result + ignore_errors: true + +- name: Idempotency Status + ansible.builtin.assert: + that: + - result.failed == false + - result.changed == false + - result.msg == "Nothing to change." + fail_msg: " Unable to check idempotency " + success_msg: " Idempotency check finished successfully " +########################################################### +- name: Create VPC with routable_ips and external subnet + ntnx_vpcs_v2: + state: present + name: "{{vpc_name}}4" + externally_routable_prefixes: + - ipv4: + ip: + value: "{{ routable_ips.network_ip_2 }}" + prefix_length: "{{ routable_ips.network_prefix_2 }}" + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.ext_id == result.ext_id + - result.response.name == "{{vpc_name}}4" + - result.response.externally_routable_prefixes.0.ipv4.prefix_length == {{ routable_ips.network_prefix_2 }} + - result.response.externally_routable_prefixes.0.ipv4.ip.value == "{{ routable_ips.network_ip_2 }}" + - result.response.external_subnets.0.subnet_reference == "{{ external_nat_subnet.uuid 
}}" + fail_msg: " Unable to create vpc with routable_ips and external subnet " + success_msg: " VPC with routable ips and external subnet created successfully " + +- name: Adding VPC external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +########################################################## +- name: Create VPC with dns_servers + ntnx_vpcs_v2: + state: present + name: "{{vpc_name}}5" + common_dhcp_options: + domain_name_servers: + - ipv4: + value: "{{ dns_servers.0 }}" + prefix_length: 32 + - ipv4: + value: "{{ dns_servers.1 }}" + prefix_length: 32 + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.name == "{{vpc_name}}5" + - result.response.common_dhcp_options.domain_name_servers.0.ipv4.value == "{{ dns_servers.0 }}" + - result.response.common_dhcp_options.domain_name_servers.1.ipv4.value == "{{ dns_servers.1 }}" + fail_msg: "Unable to create VPC with dns_servers " + success_msg: "Create VPC with dns_servers finished successfully" + +- name: Adding VPC external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +########################################################## +- name: List VPCs + ntnx_vpcs_info_v2: + register: vpcs + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - vpcs.response is defined + - vpcs.response | length > 3 + fail_msg: " Unable to list vpcs " + success_msg: " VPCs listed successfully " +########################################################## +- name: List VPC using name filter criteria + ntnx_vpcs_info_v2: + filter: "name eq '{{ vpcs.response.0.name }}'" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.0.ext_id == vpcs.response.0.ext_id + fail_msg: " Unable to list vpcs " + 
success_msg: " VPCs listed successfully " +######################################################## +- name: List VPC using limit + ntnx_vpcs_info_v2: + limit: 1 + check_mode: true + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + fail_msg: " Unable to list vpcs " + success_msg: " VPCs listed successfully " +########################################################## +- name: List VPC using ext_id + ntnx_vpcs_info_v2: + ext_id: "{{ vpcs.response.0.ext_id }}" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.name == vpcs.response.0.name + fail_msg: " Unable to list vpcs " + success_msg: " VPCs listed successfully " +########################################################## +- name: Delete all created vpcs + ntnx_vpcs_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] +########################################################### +- name: Create VPC with all specifications + ntnx_vpcs_v2: + state: present + name: "{{vpc_name}}6" + common_dhcp_options: + domain_name_servers: + - ipv4: + value: "{{ dns_servers.0 }}" + prefix_length: 32 + - ipv4: + value: "{{ dns_servers.1 }}" + prefix_length: 32 + externally_routable_prefixes: + - ipv4: + ip: + value: "{{ routable_ips.network_ip}}" + prefix_length: "{{ routable_ips.network_prefix }}" + external_subnets: + - subnet_reference: "{{ external_nat_subnet.uuid }}" + register: result + ignore_errors: true + +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.name == "{{vpc_name}}6" + - result.response.externally_routable_prefixes.0.ipv4.prefix_length == {{ routable_ips.network_prefix }} + - 
result.response.externally_routable_prefixes.0.ipv4.ip.value == "{{ routable_ips.network_ip }}" + - result.response.external_subnets.0.subnet_reference == "{{ external_nat_subnet.uuid }}" + - result.response.common_dhcp_options.domain_name_servers.0.ipv4.value == "{{ dns_servers.0 }}" + - result.response.common_dhcp_options.domain_name_servers.1.ipv4.value == "{{ dns_servers.1 }}" + fail_msg: " Unable to create vpc all specifications " + success_msg: " VPC with all specifications created successfully " + +- name: Adding VPC external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +########################################################### +- name: Delete all created vpcs + ntnx_vpcs_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - result.changed == true + - result.msg == "All items completed" + fail_msg: " Unable to Delete VPC" + success_msg: "Vpc has been deleted successfully" diff --git a/tests/integration/targets/ntnx_vpcs_v2/tasks/main.yml b/tests/integration/targets/ntnx_vpcs_v2/tasks/main.yml new file mode 100644 index 000000000..722b1337b --- /dev/null +++ b/tests/integration/targets/ntnx_vpcs_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import all_operations.yml + ansible.builtin.import_tasks: "all_operations.yml" diff --git a/tests/integration/targets/ntnx_vpcs_v2/vars/main.yml b/tests/integration/targets/ntnx_vpcs_v2/vars/main.yml new file mode 100644 index 000000000..43a7612b9 --- /dev/null +++ b/tests/integration/targets/ntnx_vpcs_v2/vars/main.yml @@ -0,0 +1,6 @@ +dns_servers: ["8.8.8.8", "8.8.4.4"] +routable_ips: + network_ip: 192.168.2.0 + network_prefix: 
24 + network_ip_2: 192.168.8.0 + network_prefix_2: 24 diff --git a/tests/integration/targets/nutanix_floating_ips/tasks/create_floating_ips.yml b/tests/integration/targets/nutanix_floating_ips/tasks/create_floating_ips.yml index 96ce8c30c..6471472d0 100644 --- a/tests/integration/targets/nutanix_floating_ips/tasks/create_floating_ips.yml +++ b/tests/integration/targets/nutanix_floating_ips/tasks/create_floating_ips.yml @@ -2,20 +2,21 @@ - name: Create floating ip with external subnet uuid ntnx_floating_ips: state: present - wait: True + wait: true external_subnet: uuid: "{{ external_nat_subnet.uuid }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with external subnet uuid " success_msg: " Floating ip with external subnet uuid created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ############################################################## - name: Create floating ip with vpc name and external subnet uuid @@ -24,20 +25,21 @@ external_subnet: uuid: "{{external_nat_subnet.uuid}}" vpc: - name: "{{ vpc.name }}" + name: "{{ vpc.name }}" private_ip: "{{ private_ip }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with vpc " success_msg: " Floating ip with vpc name and external subnet uuid created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ########################################################## - name: Delete all created floating ips @@ -47,7 +49,8 @@ register: result loop: "{{ 
todelete }}" -- set_fact: +- name: Removing all floating ips from delete list + ansible.builtin.set_fact: todelete: [] ########################################################### - name: Create floating ip with external subnet name @@ -57,17 +60,18 @@ external_subnet: name: "{{ external_nat_subnet.name }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with external subnet name " success_msg: " Floating ip with external subnet name created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ############################################################ - name: Create floating ip with vpc uuid and external subnet name @@ -76,20 +80,21 @@ external_subnet: name: "{{ external_nat_subnet.name }}" vpc: - uuid: "{{ vpc.uuid }}" + uuid: "{{ vpc.uuid }}" private_ip: "{{ private_ip }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with vpc uuid and external subnet name " success_msg: " Floating ip with vpc uuid and external subnet name created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ############################################################ - name: Delete all created floating ips @@ -98,9 +103,10 @@ fip_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Removing all floating ips from delete list + ansible.builtin.set_fact: todelete: [] ######################################################### - name: Create floating ip 
with external subnet and vm name @@ -110,18 +116,19 @@ name: "{{ external_nat_subnet.name }}" vm: name: "{{ vm.name }}" - ignore_errors: True + ignore_errors: true register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with vm" success_msg: " Floating ip with external subnet and vm created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ######################################################### - name: Delete all created floating ips @@ -130,9 +137,10 @@ fip_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Removing all floating ips from delete list + ansible.builtin.set_fact: todelete: [] ######################################################### - name: Create floating ip with external subnet and vm uuid @@ -142,18 +150,19 @@ name: "{{ external_nat_subnet.name }}" vm: uuid: "{{ vm.uuid }}" - ignore_errors: True + ignore_errors: true register: result - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create floating ip with vm" success_msg: " Floating ip with external subnet and vm created successfully " -- set_fact: +- name: Adding floating ip to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.fip_uuid ] }}" ########################################################## - name: Delete all created floating ips @@ -162,4 +171,4 @@ fip_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/nutanix_floating_ips/tasks/main.yml b/tests/integration/targets/nutanix_floating_ips/tasks/main.yml index f98895ee0..68b56a406 100644 --- 
a/tests/integration/targets/nutanix_floating_ips/tasks/main.yml +++ b/tests/integration/targets/nutanix_floating_ips/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_floating_ips.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import create_floating_ips.yml + ansible.builtin.import_tasks: "create_floating_ips.yml" + - name: Import delete_floating_ips.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/nutanix_floating_ips/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_floating_ips/tasks/negative_scenarios.yml index b9cdbd377..cd8925aad 100644 --- a/tests/integration/targets/nutanix_floating_ips/tasks/negative_scenarios.yml +++ b/tests/integration/targets/nutanix_floating_ips/tasks/negative_scenarios.yml @@ -1,19 +1,20 @@ -- debug: - msg: "Started Negative Creation Cases" +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - name: Create floating ip with unknown external subnet name ntnx_floating_ips: state: present external_subnet: name: "external_subnet" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - - result.failed==True + - result.failed==true - result.msg=="Failed generating floating_ip spec" - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ################################################################# - name: Create floating ip with unknown external subnet uuid ntnx_floating_ips: @@ -22,13 +23,13 @@ 
external_subnet: uuid: 54e295c3-16d3-49de-96ba-b921948b3a register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.status_code==422 - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ############################################################### - name: Create floating ip with unknown vpc ntnx_floating_ips: @@ -36,17 +37,17 @@ external_subnet: name: "{{ external_nat_subnet.name}}" vpc: - name: vpc + name: vpc private_ip: "{{ private_ip }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - - result.failed==True + - result.failed==true - result.msg=="Failed generating floating_ip spec" - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ############################################################### - name: Create floating ip with unknown vpc uuid ntnx_floating_ips: @@ -54,26 +55,26 @@ external_subnet: name: "{{ external_nat_subnet.name }}" vpc: - uuid: 471181f8-eb2d-4303-9a61-6b25a1b3 + uuid: 471181f8-eb2d-4303-9a61-6b25a1b3 private_ip: "{{ private_ip }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.status_code==422 - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ############################################################## - name: Delete floating ip with unknown uuid ntnx_floating_ips: state: absent fip_uuid: 5 register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.status_code==400 - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " diff --git 
a/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml b/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml index 43570995d..28c4e9383 100644 --- a/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml +++ b/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml @@ -4,10 +4,10 @@ floating_ip: "10.0.1.2" kind: floating_ip register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list floating_ips " @@ -19,10 +19,10 @@ offset: 0 check_mode: true register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list floating_ips " @@ -33,10 +33,10 @@ sort_order: "ASCENDING" sort_attribute: "floating_ip" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list floating_ips " diff --git a/tests/integration/targets/nutanix_floating_ips_info/tasks/main.yml b/tests/integration/targets/nutanix_floating_ips_info/tasks/main.yml index 0c600deed..6bf5a8b13 100644 --- a/tests/integration/targets/nutanix_floating_ips_info/tasks/main.yml +++ b/tests/integration/targets/nutanix_floating_ips_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "list_floating_ips.yml" + - name: Import list_floating_ips.yml + ansible.builtin.import_tasks: 
"list_floating_ips.yml" diff --git a/tests/integration/targets/nutanix_pbrs/tasks/create_pbrs.yml b/tests/integration/targets/nutanix_pbrs/tasks/create_pbrs.yml index 0a2f19293..bc037b6f5 100644 --- a/tests/integration/targets/nutanix_pbrs/tasks/create_pbrs.yml +++ b/tests/integration/targets/nutanix_pbrs/tasks/create_pbrs.yml @@ -1,6 +1,6 @@ - name: Create PBR with vpc name, source network, external destination, with reroute action and any type and code icmp ntnx_pbrs: - validate_certs: False + validate_certs: false state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" @@ -13,24 +13,25 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: - allow: True + allow: true protocol: icmp: type: 25 register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc name with source network and destination external with reroute action and udp port rangelist" success_msg: " PBR with vpc name with source network and destination external with reroute action and udp port rangelist created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ################################################################################################################################ - name: Create PBR with vpc name, any source, any destination, any protocol and deny action @@ -40,25 +41,26 @@ vpc: name: "{{ vpc.name }}" source: - any: True + any: true destination: - any: True + any: true action: - deny: True + deny: true protocol: - any: True + any: true register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' 
fail_msg: " Unable to create PBR with vpc name, any source, any destination, any protocol and deny action " success_msg: " PBR with vpc name, any source, any destination, any protocol and deny action created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ############################################################################################### - name: Create PBR with vpc uuid, any source, external destination and allow action with protocol number @@ -68,35 +70,36 @@ vpc: uuid: "{{ vpc.uuid }}" source: - any: True + any: true destination: - external: True + external: true action: - allow: True + allow: true protocol: number: "{{ protocol.number }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc uuid, any source, external destination and allow action with protocol number" success_msg: " Create PBR with vpc uuid, any source, external destination and allow action with protocol number created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ####################################################################################################### -- name: create PBR with vpc name with source external and destination network with reroute action and tcp port rangelist +- name: Create PBR with vpc name with source external and destination network with reroute action and tcp port rangelist ntnx_pbrs: state: present priority: "{{ priority.2 }}" vpc: name: "{{ vpc.name }}" source: - external: True + external: true destination: network: ip: "{{ network.ip }}" @@ -108,49 +111,51 @@ src: "{{ tcp.port }}" dst: "{{ tcp.port_rangelist }}" register: result - ignore_errors: True + ignore_errors: true - name: 
Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc name with source external and destination network with reroute action and tcp port rangelist " success_msg: " PBR with vpc name with source external and destination network with reroute action and tcp port rangelist created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ########################################################################################################## -- name: create PBR with vpc name with source external and destination network with reroute action and any tcp port rangelist +- name: Create PBR with vpc name with source external and destination network with reroute action and any tcp port rangelist ntnx_pbrs: state: present priority: "{{ priority.3 }}" vpc: name: "{{ vpc.name }}" source: - external: True + external: true destination: network: ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" action: - allow: True + allow: true protocol: tcp: src: "*" dst: "*" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc name with source external and destination network with reroute action and any tcp ports rangelist " success_msg: " PBR with vpc name with source external and destination network with reroute action and any tcp ports rangelist created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ########################################################################################################### - name: Create PBR with vpc name, custom source network, external destination, reroute action and udp port 
rangelist @@ -164,7 +169,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - any: True + any: true action: reroute: "{{ reroute_ip }}" protocol: @@ -172,17 +177,18 @@ src: "{{ udp.port_rangelist }}" dst: "{{ udp.port }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc name with source network and destination external with reroute action and udp port rangelist" success_msg: " PBR with vpc name with source network and destination external with reroute action and udp port rangelist created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ############################################################################################################## - name: Create PBR with vpc name, custom source network, external destination, reroute action and any udp ports @@ -197,7 +203,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - any: True + any: true action: reroute: "{{ reroute_ip }}" protocol: @@ -205,17 +211,18 @@ src: "*" dst: "*" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create PBR with vpc name with source network and destination external with reroute action and and udp port " success_msg: " PBR with vpc name with source network and destination external with reroute action and any udp ports created successfully " -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ################################################################################################################## - name: Create PBR 
with vpc name, source network, external destination, with reroute action and icmp @@ -230,7 +237,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -238,9 +245,10 @@ code: "{{ icmp.code }}" type: "{{ icmp.type }}" register: result - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Adding PBR to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.pbr_uuid ] }}" ################################################################################################################## - name: Delete all created pbrs @@ -249,4 +257,4 @@ pbr_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/nutanix_pbrs/tasks/main.yml b/tests/integration/targets/nutanix_pbrs/tasks/main.yml index a7c9b0026..87cb815fc 100644 --- a/tests/integration/targets/nutanix_pbrs/tasks/main.yml +++ b/tests/integration/targets/nutanix_pbrs/tasks/main.yml @@ -1,10 +1,13 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_pbrs.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import create_pbrs.yml + ansible.builtin.import_tasks: "create_pbrs.yml" + - name: Import delete_pbrs.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/nutanix_pbrs/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_pbrs/tasks/negative_scenarios.yml index 67c4312c3..b4e97dca2 100644 --- a/tests/integration/targets/nutanix_pbrs/tasks/negative_scenarios.yml +++ 
b/tests/integration/targets/nutanix_pbrs/tasks/negative_scenarios.yml @@ -1,5 +1,6 @@ -- debug: - msg: "Started Negative Creation Cases" +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - name: Create PBR with unknown vpc name ntnx_pbrs: @@ -12,7 +13,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -20,13 +21,13 @@ code: "{{ icmp.code }}" type: "{{ icmp.type }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed is defined - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ################################################################## - name: Create PBR with priority less than 10 ntnx_pbrs: @@ -39,7 +40,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -47,13 +48,13 @@ code: "{{ icmp.code }}" type: "{{ icmp.type }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed is defined - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ################################################################### - name: Create PBR with invalid network ip ntnx_pbrs: @@ -66,7 +67,7 @@ ip: 192.168.0.5 prefix: 24 destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -74,13 +75,13 @@ code: "{{ icmp.code }}" type: "{{ icmp.type }}" register: result - ignore_errors: True + ignore_errors: true - name: Check failure status - assert: + ansible.builtin.assert: that: - result.failed == true - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected 
" ################################################################# - name: Create PBR with unknown vpc uuid @@ -94,7 +95,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -102,13 +103,13 @@ code: "{{ icmp.code }}" type: "{{ icmp.type }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed is defined - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ################################################################# - name: Create PBR with invalid type and code values for icmp ntnx_pbrs: @@ -121,7 +122,7 @@ ip: "{{ network.ip }}" prefix: "{{ network.prefix }}" destination: - external: True + external: true action: reroute: "{{reroute_ip}}" protocol: @@ -129,22 +130,22 @@ type: 10 code: 10 register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed is defined - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " ##################################################################### - name: Delete pbrs with unknown uuid ntnx_pbrs: state: absent pbr_uuid: 5 register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.failed is defined - success_msg: ' Success: returned error as expected ' + success_msg: " Success: returned error as expected " diff --git a/tests/integration/targets/nutanix_pbrs_info/tasks/list_pbrs.yml b/tests/integration/targets/nutanix_pbrs_info/tasks/list_pbrs.yml index e3948dc4c..1b96d3b97 100644 --- a/tests/integration/targets/nutanix_pbrs_info/tasks/list_pbrs.yml +++ b/tests/integration/targets/nutanix_pbrs_info/tasks/list_pbrs.yml @@ -4,10 +4,10 @@ priority: "10" check_mode: true register: 
result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list pbrs " @@ -18,10 +18,10 @@ length: 1 offset: 0 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list pbrs " @@ -32,10 +32,10 @@ sort_order: "ASCENDING" sort_attribute: "priority" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list pbrs " diff --git a/tests/integration/targets/nutanix_pbrs_info/tasks/main.yml b/tests/integration/targets/nutanix_pbrs_info/tasks/main.yml index d89e9da7d..08357ac64 100644 --- a/tests/integration/targets/nutanix_pbrs_info/tasks/main.yml +++ b/tests/integration/targets/nutanix_pbrs_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "list_pbrs.yml" + - name: Import list_pbrs.yml + ansible.builtin.import_tasks: "list_pbrs.yml" diff --git a/tests/integration/targets/nutanix_subnets/tasks/create_subnet.yml b/tests/integration/targets/nutanix_subnets/tasks/create_subnet.yml index d5547b424..5932b5fd0 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/create_subnet.yml +++ b/tests/integration/targets/nutanix_subnets/tasks/create_subnet.yml @@ -13,14 +13,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: 
" Unable to create VLAN subnet without IPAM " success_msg: " VLAN subnet without IPAM created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: VLAN subnet with IPAM @@ -42,14 +43,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create VLAN subnet with IPAM " success_msg: " VLAN subnet with IPAM created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: VLAN subnet with IPAM and IP pools @@ -73,14 +75,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create VLAN subnet with IPAM and IP pools" success_msg: " VLAN subnet with IPAM and IP pools created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" - name: Delete all Created Subnets @@ -89,9 +92,10 @@ subnet_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all subnets from delete list + ansible.builtin.set_fact: todelete: [] ################################################################# - name: VLAN subnet with IPAM and IP pools and cluster uuid @@ -115,14 +119,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create VLAN subnet with IPAM and IP pools and 
cluster uuid " success_msg: " VLAN subnet with IPAM and IP pools and cluster uuid created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" - name: Delete all Created Subnets @@ -131,9 +136,10 @@ subnet_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all subnets from delete list + ansible.builtin.set_fact: todelete: [] ################################################################# - name: VLAN subnet with IPAM IP pools and DHCP @@ -164,14 +170,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create VLAN subnet with IPAM, IP pools and DHCP " success_msg: " VLAN subnet with IPAM, IP pools and DHCP created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: VLAN subnet with IPAM multiple IP pools and DHCP @@ -204,14 +211,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create VLAN subnet with IPAM, multiple IP pools and DHCP " success_msg: " VLAN subnet with IPAM, multiple IP pools and DHCP created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: External subnet without NAT @@ -236,14 +244,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 
'COMPLETE' fail_msg: " Unable to create External subnet without NAT " success_msg: " External subnet without NAT created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: External subnet with NAT @@ -252,7 +261,7 @@ name: " External subnet with NAT " external_subnet: vlan_id: "{{ external_nat_subnet.vlan_id }}" - enable_nat: True + enable_nat: true cluster: name: "{{ cluster.name }}" ipam: @@ -268,14 +277,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create External subnet with NAT " success_msg: " External subnet with NAT created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: Overlay Subnet with minimum requirements @@ -293,14 +303,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create Overlay Subnet with minimum requirements " success_msg: " Overlay Subnet with minimum requirements created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: Delete all Created Subnets @@ -309,9 +320,10 @@ subnet_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all subnets from delete list + ansible.builtin.set_fact: todelete: [] 
################################################################# - name: Overlay Subnet with IP_pools and DHCP @@ -338,13 +350,14 @@ register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create Overlay Subnet with IP_pools and DHCP " success_msg: " Overlay Subnet with IP_pools and DHCP created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" ################################################################# - name: Delete all Created Subnets @@ -353,9 +366,10 @@ subnet_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all subnets from delete list + ansible.builtin.set_fact: todelete: [] ################################################################# - name: Overlay Subnet with IP_pools and DHCP with vpc uuid @@ -381,13 +395,14 @@ register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create Overlay Subnet with IP_pools and DHCP with vpc uuid " success_msg: " Overlay Subnet with IP_pools and DHCP with vpc uuid created successfully " -- set_fact: +- name: Adding subnet uuid to delete subnets list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.subnet_uuid ] }}" - name: Delete all Created Subnets @@ -396,4 +411,4 @@ subnet_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/nutanix_subnets/tasks/delete_subnet.yml b/tests/integration/targets/nutanix_subnets/tasks/delete_subnet.yml index 2f1a9e7cb..d90486241 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/delete_subnet.yml +++ 
b/tests/integration/targets/nutanix_subnets/tasks/delete_subnet.yml @@ -23,7 +23,7 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' diff --git a/tests/integration/targets/nutanix_subnets/tasks/main.yml b/tests/integration/targets/nutanix_subnets/tasks/main.yml index 74ba8ce03..477e7efdf 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/main.yml +++ b/tests/integration/targets/nutanix_subnets/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_subnet.yml" - - import_tasks: "delete_subnet.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import create_subnet.yml + ansible.builtin.import_tasks: "create_subnet.yml" + - name: Import delete_subnet.yml + ansible.builtin.import_tasks: "delete_subnet.yml" + - name: Import negative_scenarios.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml index ecb702975..aef70e5fb 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml +++ b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml @@ -1,84 +1,86 @@ - - debug: - msg: "Started Negative Creation Cases" +--- +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - - name: Unknow virtual switch name - ntnx_subnets: - state: present - name: VLAN subnet without IPAM - vlan_subnet: - vlan_id: "{{ 
vlan_subnets_ids.0 }}" - virtual_switch: - name: "virtual_switch" - cluster: - uuid: "{{ cluster.uuid }}" - register: result - ignore_errors: True +- name: Unknown virtual switch name + ntnx_subnets: + state: present + name: VLAN subnet without IPAM + vlan_subnet: + vlan_id: "{{ vlan_subnets_ids.0 }}" + virtual_switch: + name: "virtual_switch" + cluster: + uuid: "{{ cluster.uuid }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - - result.msg=="Failed generating subnet spec" - success_msg: ' Success: returned error as expected ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + - result.msg=="Failed generating subnet spec" + success_msg: " Success: returned error as expected " ############################################################### - - name: Unknow virtual switch uuid - ntnx_subnets: - state: present - name: VLAN subnet with IPAM - vlan_subnet: - vlan_id: "{{ vlan_subnets_ids.1 }}" - virtual_switch: - uuid: 91639374-c0b9-48c3-bfc1-f9c89343b3e - cluster: - name: "{{ cluster.name }}" - ipam: - network_ip: "{{ ip_address_management.network_ip }}" - network_prefix: "{{ ip_address_management.network_prefix }}" - gateway_ip: "{{ ip_address_management.gateway_ip_address }}" - register: result - ignore_errors: true +- name: Unknown virtual switch uuid + ntnx_subnets: + state: present + name: VLAN subnet with IPAM + vlan_subnet: + vlan_id: "{{ vlan_subnets_ids.1 }}" + virtual_switch: + uuid: 91639374-c0b9-48c3-bfc1-f9c89343b3e + cluster: + name: "{{ cluster.name }}" + ipam: + network_ip: "{{ ip_address_management.network_ip }}" + network_prefix: "{{ ip_address_management.network_prefix }}" + gateway_ip: "{{ ip_address_management.gateway_ip_address }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: 
- result.failed==true + success_msg: " Success: returned error as expected " ############################################################### - - name: Unknown Cluster - ntnx_subnets: - state: present - name: VLAN subnet with IPAM and IP pools - vlan_subnet: - vlan_id: "{{vlan_subnets_ids.2}}" - virtual_switch: - name: "{{ virtual_switch.name }}" - cluster: - name: auto_cluster_prod_1a642ea0a5c - ipam: - network_ip: "{{ ip_address_management.network_ip }}" - network_prefix: "{{ ip_address_management.network_prefix }}" - gateway_ip: "{{ ip_address_management.gateway_ip_address }}" - ip_pools: - - start_ip: "{{ ip_address_pools.start_address }}" - end_ip: "{{ ip_address_pools.end_address }}" - register: result - ignore_errors: true +- name: Unknown Cluster + ntnx_subnets: + state: present + name: VLAN subnet with IPAM and IP pools + vlan_subnet: + vlan_id: "{{vlan_subnets_ids.2}}" + virtual_switch: + name: "{{ virtual_switch.name }}" + cluster: + name: auto_cluster_prod_1a642ea0a5c + ipam: + network_ip: "{{ ip_address_management.network_ip }}" + network_prefix: "{{ ip_address_management.network_prefix }}" + gateway_ip: "{{ ip_address_management.gateway_ip_address }}" + ip_pools: + - start_ip: "{{ ip_address_pools.start_address }}" + end_ip: "{{ ip_address_pools.end_address }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + success_msg: " Success: returned error as expected " ############################################################### - - name: Delete subnet with unknown uuid - ntnx_subnets: - state: absent - subnet_uuid: 5 - register: resultt - ignore_errors: true +- name: Delete subnet with unknown uuid + ntnx_subnets: + state: absent + subnet_uuid: 5 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - 
success_msg: ' Success: returned error as expected ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + success_msg: " Success: returned error as expected " diff --git a/tests/integration/targets/nutanix_subnets_info/tasks/list_subnets.yml b/tests/integration/targets/nutanix_subnets_info/tasks/list_subnets.yml index 8ab34fbef..269704084 100644 --- a/tests/integration/targets/nutanix_subnets_info/tasks/list_subnets.yml +++ b/tests/integration/targets/nutanix_subnets_info/tasks/list_subnets.yml @@ -3,10 +3,10 @@ filter: subnet_type: "VLAN" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list subnets " @@ -18,10 +18,10 @@ offset: 2 check_mode: true register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list subnets " @@ -33,10 +33,10 @@ sort_attribute: "vlan_id" kind: subnet register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list subnets " @@ -47,10 +47,10 @@ custom_filter: vswitch_name: br0 register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.entities[0].status.resources.vswitch_name == "br0" diff --git a/tests/integration/targets/nutanix_subnets_info/tasks/main.yml b/tests/integration/targets/nutanix_subnets_info/tasks/main.yml index d676a3281..c29ae7a13 100644 --- a/tests/integration/targets/nutanix_subnets_info/tasks/main.yml +++ b/tests/integration/targets/nutanix_subnets_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - 
nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "list_subnets.yml" + - name: Import list_subnets.yml + ansible.builtin.import_tasks: "list_subnets.yml" diff --git a/tests/integration/targets/nutanix_subnets_v2/aliases b/tests/integration/targets/nutanix_subnets_v2/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/nutanix_subnets_v2/meta/main.yml b/tests/integration/targets/nutanix_subnets_v2/meta/main.yml new file mode 100644 index 000000000..e4f447d3a --- /dev/null +++ b/tests/integration/targets/nutanix_subnets_v2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_env diff --git a/tests/integration/targets/nutanix_subnets_v2/tasks/main.yml b/tests/integration/targets/nutanix_subnets_v2/tasks/main.yml new file mode 100644 index 000000000..bfd5ba5b1 --- /dev/null +++ b/tests/integration/targets/nutanix_subnets_v2/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Set module defaults + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + block: + - name: Import subnet_operations.yml + ansible.builtin.import_tasks: "subnet_operations.yml" diff --git a/tests/integration/targets/nutanix_subnets_v2/tasks/subnet_operations.yml b/tests/integration/targets/nutanix_subnets_v2/tasks/subnet_operations.yml new file mode 100644 index 000000000..c9f27f8c1 --- /dev/null +++ b/tests/integration/targets/nutanix_subnets_v2/tasks/subnet_operations.yml @@ -0,0 +1,948 @@ +--- +- name: Start nutanix_subnets_v2 tests + ansible.builtin.debug: + msg: "Start nutanix_subnets_v2 tests" + +- name: Generate random name + ansible.builtin.set_fact: + random_name: 
"{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- name: Set Subnet and VPC name + ansible.builtin.set_fact: + subnet_name: "{{ random_name }}_subnet_test" + vpc_name: "{{ random_name }}_vpc_test" + +- name: Set todelete list + ansible.builtin.set_fact: + todelete: [] + +- name: Create VLAN subnet with check mode enabled + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_1" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + virtual_switch_reference: "{{ virtual_switch.uuid }}" + network_id: "{{ vlan_subnets_ids.0 }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + dhcp_options: + domain_name_servers: + - ipv4: + value: "{{ dhcp_settings.dns_servers.0 }}" + - ipv4: + value: "{{ dhcp_settings.dns_servers.1 }}" + search_domains: "{{ dhcp_settings.domain_search }}" + domain_name: "{{ dhcp_settings.domain_name }}" + tftp_server_name: + - "{{ dhcp_settings.tftp_server_name }}" + boot_file_name: "{{ dhcp_settings.boot_file }}" + is_external: true + is_nat_enabled: true + network_function_chain_reference: "00061663-9fa0-28ca-185b-ac1f6b6f97e1" + is_advanced_networking: true + metadata: + project_reference_id: "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + owner_reference_id: "00061663-9fa0-28ca-185b-ac1f6b6f97e3" + category_ids: + - "00061663-9fa0-28ca-185b-ac1f6b6f97e4" + register: result + ignore_errors: true + check_mode: true + +- name: Create VLAN subnet with check mode status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == false + - result.response.subnet_type == "VLAN" + - result.response.name 
== "{{ subnet_name }}_1" + - result.response.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{ vlan_subnets_ids.0 }} + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.response.dhcp_options.boot_file_name == "{{ dhcp_settings.boot_file }}" + - result.response.dhcp_options.domain_name == "{{ dhcp_settings.domain_name }}" + - result.response.dhcp_options.search_domains == {{ dhcp_settings.domain_search }} + - result.response.dhcp_options.domain_name_servers.0.ipv4.value == "{{ dhcp_settings.dns_servers.0 }}" + - result.response.dhcp_options.domain_name_servers.1.ipv4.value == "{{ dhcp_settings.dns_servers.1 }}" + - result.response.is_external == true + - result.response.is_nat_enabled == true + - result.response.metadata.project_reference_id == "00061663-9fa0-28ca-185b-ac1f6b6f97e2" + - result.response.metadata.owner_reference_id == "00061663-9fa0-28ca-185b-ac1f6b6f97e3" + - result.response.metadata.category_ids.0 == "00061663-9fa0-28ca-185b-ac1f6b6f97e4" + - result.response.network_function_chain_reference == "00061663-9fa0-28ca-185b-ac1f6b6f97e1" + - result.response.is_advanced_networking == true + fail_msg: " Unable to create VLAN subnet with check mode enabled " + success_msg: " VLAN subnet with check mode enabled created successfully " + +################################################## +- name: VLAN subnet without IPAM + ntnx_subnets_v2: + state: present + wait: true + name: 
"{{ subnet_name }}_1" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + virtual_switch_reference: "{{ virtual_switch.uuid }}" + network_id: "{{ vlan_subnets_ids.0 }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_1" + - result.response.subnet_type == "VLAN" + - result.response.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{ vlan_subnets_ids.0 }} + fail_msg: " Unable to create VLAN subnet without IPAM " + success_msg: " VLAN subnet without IPAM created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_1'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - result.subnet_type == "VLAN" + - result.name == "{{ subnet_name }}_1" + - result.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.cluster_reference == "{{ cluster.uuid }}" + - result.network_id == {{ vlan_subnets_ids.0 }} + fail_msg: " Unable to verify created subnet " + success_msg: " VLAN subnet without IPAM created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################ +- name: VLAN subnet with IPAM and IP pools + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_2" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + virtual_switch_reference: "{{ virtual_switch.uuid }}" + network_id: "{{ vlan_subnets_ids.2 }}" + ip_config: + - ipv4: + ip_subnet: + 
ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + dhcp_server_address: + value: "{{ ip_address_management.dhcp_server_address }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_2" + - result.response.subnet_type == "VLAN" + - result.response.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{ vlan_subnets_ids.2 }} + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.response.ip_config.0.ipv4.dhcp_server_address.value == "{{ ip_address_management.dhcp_server_address }}" + fail_msg: " Unable to create VLAN subnet with IPAM " + success_msg: " VLAN subnet with IPAM created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_2'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - 
result.subnet_type == "VLAN" + - result.name == "{{ subnet_name }}_2" + - result.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.cluster_reference == "{{ cluster.uuid }}" + - result.network_id == {{ vlan_subnets_ids.2 }} + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.ip_config.0.ipv4.dhcp_server_address.value == "{{ ip_address_management.dhcp_server_address }}" + fail_msg: " Unable to verify created subnet " + success_msg: " VLAN subnet with IPAM and IP pools created successfully " + +- name: Set external ID + ansible.builtin.set_fact: + ext_id: "{{ result.ext_id }}" + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ ext_id ] }}" + +################################################################ + +- name: Update VLAN subnet with check mode enabled + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_2_updated_check_mode" + ext_id: "{{ ext_id }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + - start_ip: + value: "{{ ip_address_pools.start_address_2 }}" + end_ip: + value: "{{ ip_address_pools.end_address_2 }}" + register: result + ignore_errors: true + check_mode: true + +- name: Update Status + ansible.builtin.assert: + 
that: + - result.response is defined + - result.response.ext_id is defined + - result.response.ext_id == ext_id + - result.changed == false + - result.response.name == "{{ subnet_name }}_2_updated_check_mode" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ ip_address_pools.start_address_2 }}" + - result.response.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ ip_address_pools.end_address_2 }}" + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + fail_msg: " Unable to update VLAN subnet name and IP pools " + success_msg: " VLAN subnet name and IP pools updated successfully " + +################################################################ + +- name: Update VLAN subnet name and IP pools + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_2_updated" + ext_id: "{{ ext_id }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + dhcp_server_address: + value: "{{ ip_address_management.dhcp_server_address }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + - start_ip: + value: "{{ ip_address_pools.start_address_2 }}" + end_ip: + value: "{{ ip_address_pools.end_address_2 }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.ext_id is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_2_updated" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ 
ip_address_pools.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ ip_address_pools.start_address_2 }}" + - result.response.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ ip_address_pools.end_address_2 }}" + - result.response.ip_config.0.ipv4.dhcp_server_address.value == "{{ ip_address_management.dhcp_server_address }}" + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + fail_msg: " Unable to update VLAN subnet name and IP pools " + success_msg: " VLAN subnet name and IP pools updated successfully " + +################################################################ +- name: Check idempotency by updating Subnet with same values + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_2_updated" + ext_id: "{{ result.response.ext_id }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + dhcp_server_address: + value: "{{ ip_address_management.dhcp_server_address }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + - start_ip: + value: "{{ ip_address_pools.start_address_2 }}" + end_ip: + value: "{{ ip_address_pools.end_address_2 }}" + register: result + ignore_errors: true + +- name: Update Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.skipped == true + - result.msg == "Nothing to change." 
+ fail_msg: " Unable to update VLAN subnet name and IP pools " + success_msg: " VLAN subnet name and IP pools updated successfully " + +################################################################ + +- name: Delete all Created Subnets + ntnx_subnets_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete Subnet " + success_msg: "Subnet is deleted successfully " + loop: "{{ result.results }}" + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] + +################################################################ +- name: VLAN subnet with IPAM and DHCP options enabled + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_3" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + virtual_switch_reference: "{{ virtual_switch.uuid }}" + network_id: "{{ vlan_subnets_ids.4 }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + dhcp_server_address: + value: "{{ ip_address_management.dhcp_server_address }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + dhcp_options: + domain_name_servers: + - ipv4: + value: "{{ dhcp_settings.dns_servers.0 }}" + - ipv4: + value: "{{ dhcp_settings.dns_servers.1 }}" + search_domains: "{{ dhcp_settings.domain_search }}" + domain_name: "{{ dhcp_settings.domain_name }}" + tftp_server_name: + - "{{ dhcp_settings.tftp_server_name }}" + boot_file_name: "{{ dhcp_settings.boot_file }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - 
result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_3" + - result.response.subnet_type == "VLAN" + - result.response.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{ vlan_subnets_ids.4 }} + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.response.ip_config.0.ipv4.dhcp_server_address.value == "{{ ip_address_management.dhcp_server_address }}" + - result.response.dhcp_options.boot_file_name == "{{ dhcp_settings.boot_file }}" + - result.response.dhcp_options.domain_name == "{{ dhcp_settings.domain_name }}" + - result.response.dhcp_options.search_domains == {{ dhcp_settings.domain_search }} + - result.response.dhcp_options.domain_name_servers.0.ipv4.value == "{{ dhcp_settings.dns_servers.0 }}" + - result.response.dhcp_options.domain_name_servers.1.ipv4.value == "{{ dhcp_settings.dns_servers.1 }}" + fail_msg: " Unable to create VLAN subnet without IPAM and DHCP options" + success_msg: " VLAN subnet with IPAM and DHCP options created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_3'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - result.subnet_type == "VLAN" + - 
result.name == "{{ subnet_name }}_3" + - result.virtual_switch_reference == "{{ virtual_switch.uuid }}" + - result.cluster_reference == "{{ cluster.uuid }}" + - result.network_id == {{ vlan_subnets_ids.4 }} + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.ip_config.0.ipv4.dhcp_server_address.value == "{{ ip_address_management.dhcp_server_address }}" + - result.dhcp_options.boot_file_name == "{{ dhcp_settings.boot_file }}" + - result.dhcp_options.domain_name == "{{ dhcp_settings.domain_name }}" + - result.dhcp_options.search_domains == {{ dhcp_settings.domain_search }} + - result.dhcp_options.domain_name_servers.0.ipv4.value == "{{ dhcp_settings.dns_servers.0 }}" + - result.dhcp_options.domain_name_servers.1.ipv4.value == "{{ dhcp_settings.dns_servers.1 }}" + fail_msg: " Unable to verify creation of VLAN subnet with IPAM, IP pools and DHCP " + success_msg: " VLAN subnet with IPAM, IP pools and DHCP created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################ +- name: External subnet without NAT + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_4" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + network_id: "{{external_nonat_subnet.vlan_id}}" + is_external: true + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ external_nonat_subnet.network_ip }}" + prefix_length: "{{ external_nonat_subnet.network_prefix }}" + default_gateway_ip: 
+ value: "{{ external_nonat_subnet.gateway_ip_address }}" + pool_list: + - start_ip: + value: "{{ external_nonat_subnet.dhcp.start_address }}" + end_ip: + value: "{{ external_nonat_subnet.dhcp.end_address }}" + - start_ip: + value: "{{ external_nonat_subnet.static.start_address }}" + end_ip: + value: "{{ external_nonat_subnet.static.end_address }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_4" + - result.response.subnet_type == "VLAN" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{external_nonat_subnet.vlan_id}} + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ external_nonat_subnet.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ external_nonat_subnet.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ external_nonat_subnet.network_ip }}" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ external_nonat_subnet.dhcp.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ external_nonat_subnet.dhcp.end_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ external_nonat_subnet.static.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ external_nonat_subnet.static.end_address }}" + fail_msg: " Unable to create External subnet without NAT " + success_msg: " External subnet without NAT created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_4'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Verify creation status Status + ansible.builtin.assert: 
+ that: + - result is defined + - result.subnet_type == "VLAN" + - result.name == "{{ subnet_name }}_4" + - result.cluster_reference == "{{ cluster.uuid }}" + - result.network_id == {{external_nonat_subnet.vlan_id}} + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ external_nonat_subnet.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ external_nonat_subnet.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ external_nonat_subnet.network_ip }}" + - result.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ external_nonat_subnet.dhcp.start_address }}" + - result.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ external_nonat_subnet.dhcp.end_address }}" + - result.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ external_nonat_subnet.static.start_address }}" + - result.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ external_nonat_subnet.static.end_address }}" + fail_msg: " Unable to verify External subnet without NAT " + success_msg: " External subnet without NAT created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" + +################################################################# +- name: External subnet with NAT + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_5" + subnet_type: VLAN + cluster_reference: "{{ cluster.uuid }}" + network_id: "{{ external_nat_subnet.vlan_id }}" + is_external: true + is_nat_enabled: true + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ external_nat_subnet.network_ip }}" + prefix_length: "{{ external_nat_subnet.network_prefix }}" + default_gateway_ip: + value: "{{ external_nat_subnet.gateway_ip_address }}" + pool_list: + - start_ip: + value: "{{ external_nat_subnet.dhcp.start_address }}" + end_ip: + value: "{{ external_nat_subnet.dhcp.end_address }}" + - start_ip: + value: "{{ external_nat_subnet.static.start_address }}" + end_ip: + value: "{{ 
external_nat_subnet.static.end_address }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_5" + - result.response.subnet_type == "VLAN" + - result.response.cluster_reference == "{{ cluster.uuid }}" + - result.response.network_id == {{ external_nat_subnet.vlan_id }} + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ external_nat_subnet.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ external_nat_subnet.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ external_nat_subnet.network_ip }}" + - result.response.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ external_nat_subnet.dhcp.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ external_nat_subnet.dhcp.end_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ external_nat_subnet.static.start_address }}" + - result.response.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ external_nat_subnet.static.end_address }}" + fail_msg: " Unable to create External subnet without NAT " + success_msg: " External subnet without NAT created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_5'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - result.ext_id is defined + - result.subnet_type == "VLAN" + - result.is_external == true + - result.name == "{{ subnet_name }}_5" + - result.cluster_reference == "{{ cluster.uuid }}" + - result.network_id == {{ external_nat_subnet.vlan_id }} + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ 
external_nat_subnet.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ external_nat_subnet.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ external_nat_subnet.network_ip }}" + - result.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ external_nat_subnet.dhcp.start_address }}" + - result.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ external_nat_subnet.dhcp.end_address }}" + - result.ip_config.0.ipv4.pool_list.1.start_ip.value == "{{ external_nat_subnet.static.start_address }}" + - result.ip_config.0.ipv4.pool_list.1.end_ip.value == "{{ external_nat_subnet.static.end_address }}" + - result.is_external == true + - result.is_nat_enabled == true + fail_msg: " Unable to verify creation of External subnet without NAT " + success_msg: " External subnet without NAT created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" +################################################################ + +- name: Create min VPC + ntnx_vpcs_v2: + state: present + name: "{{ vpc_name }}" + register: result + ignore_errors: true + +- name: Create min VPC Status + ansible.builtin.assert: + that: + - result.response is defined + - result.ext_id is defined + - result.response.ext_id is defined + - result.response.vpc_type == "REGULAR" + - result.response.name == "{{ vpc_name }}" + - result.task_ext_id is defined + fail_msg: "Create min VPC failed " + success_msg: "Create min VPC passed " + +- name: Set VPC external ID + ansible.builtin.set_fact: + vpc_uuid: "{{ result.ext_id }}" + +################################################################ + +- name: Overlay Subnet with minimum requirements + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_6" + subnet_type: OVERLAY + vpc_reference: "{{ vpc_uuid }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ 
ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_6" + - result.response.subnet_type == "OVERLAY" + - result.response.vpc_reference == "{{ vpc_uuid }}" + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.response.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + fail_msg: " Unable to create Overlay Subnet with minimum requirements " + success_msg: " Overlay Subnet with minimum requirements created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_6'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - result.subnet_type == "OVERLAY" + - result.vpc_reference == "{{ vpc_uuid }}" + - result.name == "{{ subnet_name }}_6" + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + fail_msg: " Unable to verify creation of Overlay Subnet with minimum requirements " + success_msg: " Overlay Subnet with minimum requirements created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] 
}}" +################################################################# + +- name: List subnets using limit + ntnx_subnets_info_v2: + limit: 1 + check_mode: true + register: subnets + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - subnets.response is defined + - subnets.response | length == 1 + fail_msg: " Unable to list subnets using limit " + success_msg: " Subnets listed successfully using limit" +######################################################### +- name: List subnets using subnet_type filter criteria + ntnx_subnets_info_v2: + filter: "name eq '{{subnets.response.0.name}}'" + limit: 1 + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length >= 1 + - result.response.0.name == subnets.response.0.name + fail_msg: " Unable to list subnets using subnet_type filter criteria " + success_msg: " Subnets listed successfully using subnet_type filter criteria " +########################################################## +- name: Get subnet using ext_id + ntnx_subnets_info_v2: + ext_id: "{{subnets.response.0.ext_id}}" + register: result + ignore_errors: true + +- name: Fetch Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.ext_id == '{{subnets.response.0.ext_id}}' + fail_msg: " Unable to Get subnet using ext_id " + success_msg: " Subnet fetched successfully using ext_id" +######################################################### +- name: List subnets using VPC reference + ntnx_subnets_info_v2: + filter: "vpcReference eq '{{ vpc_uuid }}'" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length == 1 + - result.response[0].vpc_reference == "{{vpc_uuid}}" + fail_msg: " Unable to list subnets using vpc_reference " + success_msg: " Subnets listed successfully using vpc_reference" 
+######################################################### +- name: List subnets using cluster reference + ntnx_subnets_info_v2: + filter: "clusterReference eq '{{ cluster.uuid }}'" + register: result + ignore_errors: true + +- name: Listing Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response | length > 0 + - result.response.0.cluster_reference == "{{ cluster.uuid }}" + fail_msg: " Unable to list subnets using cluster reference " + success_msg: " Subnets listed successfully using cluster reference" +######################################################### + +- name: Delete all Created Subnets + ntnx_subnets_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete Subnet " + success_msg: "Subnet is deleted successfully " + loop: "{{ result.results }}" + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] +################################################################# +- name: Overlay Subnet with IP_pools and DHCP + ntnx_subnets_v2: + state: present + name: "{{ subnet_name }}_7" + subnet_type: OVERLAY + vpc_reference: "{{ vpc_uuid }}" + ip_config: + - ipv4: + ip_subnet: + ip: + value: "{{ ip_address_management.network_ip }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + default_gateway_ip: + value: "{{ ip_address_management.gateway_ip_address }}" + prefix_length: "{{ ip_address_management.network_prefix }}" + pool_list: + - start_ip: + value: "{{ ip_address_pools.start_address }}" + end_ip: + value: "{{ ip_address_pools.end_address }}" + dhcp_options: + domain_name_servers: + - ipv4: + value: "{{ dhcp_settings.dns_servers.0 }}" + - ipv4: + value: "{{ dhcp_settings.dns_servers.1 }}" + search_domains: "{{ dhcp_settings.domain_search }}" + domain_name: "{{ 
dhcp_settings.domain_name }}" + tftp_server_name: + - "{{ dhcp_settings.tftp_server_name }}" + boot_file_name: "{{ dhcp_settings.boot_file }}" + register: result + ignore_errors: true + +- name: Check creating subnet status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.response.name == "{{ subnet_name }}_7" + - result.response.subnet_type == "OVERLAY" + - result.response.vpc_reference == "{{ vpc_uuid }}" + - result.response.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.response.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + fail_msg: " Unable to create Overlay Subnet with IP_pools and DHCP " + success_msg: " Overlay Subnet with IP_pools and DHCP created successfully " + +- name: Fetch subnet after creation for verification + ntnx_subnets_info_v2: + filter: "name eq '{{ subnet_name }}_7'" + register: result + ignore_errors: true + +- name: Set result variable for verification + ansible.builtin.set_fact: + result: "{{ result.response[0] }}" + +- name: Creation Status + ansible.builtin.assert: + that: + - result is defined + - result.subnet_type == "OVERLAY" + - result.vpc_reference == "{{ vpc_uuid }}" + - result.name == "{{ subnet_name }}_7" + - result.ip_config.0.ipv4.default_gateway_ip.value == "{{ ip_address_management.gateway_ip_address }}" + - result.ip_config.0.ipv4.ip_subnet.prefix_length == {{ ip_address_management.network_prefix }} + - result.ip_config.0.ipv4.ip_subnet.ip.value == "{{ ip_address_management.network_ip }}" + - result.ip_config.0.ipv4.pool_list.0.start_ip.value == "{{ ip_address_pools.start_address }}" + - result.ip_config.0.ipv4.pool_list.0.end_ip.value == "{{ ip_address_pools.end_address }}" + - result.dhcp_options.boot_file_name == "{{ dhcp_settings.boot_file }}" + - result.dhcp_options.domain_name == "{{ dhcp_settings.domain_name }}" + - result.dhcp_options.search_domains == {{ 
dhcp_settings.domain_search }} + - result.dhcp_options.domain_name_servers.0.ipv4.value == "{{ dhcp_settings.dns_servers.0 }}" + - result.dhcp_options.domain_name_servers.1.ipv4.value == "{{ dhcp_settings.dns_servers.1 }}" + fail_msg: " Unable to verify creation of Overlay Subnet with IP_pools and DHCP " + success_msg: " Overlay Subnet with IP_pools and DHCP created and verified successfully " + +- name: Adding subnet external ID to delete list + ansible.builtin.set_fact: + todelete: "{{ todelete + [ result.ext_id ] }}" + +################################################################# + +- name: Delete all Created Subnets + ntnx_subnets_v2: + state: absent + ext_id: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: true + +- name: Deletion Status + ansible.builtin.assert: + that: + - item.changed == true + - item.failed == false + - item.response.status == 'SUCCEEDED' + fail_msg: "Unable to delete Subnet " + success_msg: "Subnet is deleted successfully " + loop: "{{ result.results }}" + +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] +################################################################# + +- name: Delete vpc + ntnx_vpcs_v2: + state: absent + ext_id: "{{ vpc_uuid }}" + register: result + ignore_errors: true + +- name: Delete Status + ansible.builtin.assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.ext_id == "{{ vpc_uuid }}" + - result.task_ext_id is defined + fail_msg: " Unable to Delete VPC" + success_msg: "Vpc has been deleted successfully" +################################################################# diff --git a/tests/integration/targets/nutanix_subnets_v2/vars/main.yml b/tests/integration/targets/nutanix_subnets_v2/vars/main.yml new file mode 100644 index 000000000..e8a68ce7d --- /dev/null +++ b/tests/integration/targets/nutanix_subnets_v2/vars/main.yml @@ -0,0 +1,43 @@ +vlan_subnets_ids: [221, 222, 223, 224, 226, 227] 
+ip_address_management: + network_ip: 192.168.0.0 + network_prefix: 24 + gateway_ip_address: 192.168.0.254 + dhcp_server_address: 192.168.0.253 + +ip_address_pools: + start_address: 192.168.0.20 + end_address: 192.168.0.30 + start_address_2: 192.168.0.40 + end_address_2: 192.168.0.50 + +dhcp_settings: + dns_servers: [8.8.8.8, 8.8.4.4] + domain_search: ["calm.nutanix.com", "eng.nutanix.com"] + domain_name: nutanix.com + tftp_server_name: 10.5.0.10 + boot_file: pxelinux.0 +dhcp_server_address: 192.168.0.35 +external_nat_subnet: + vlan_id: 205 + gateway_ip_address: 10.44.3.193 + network_prefix: 27 + network_ip: 10.44.3.192 + dhcp: + start_address: 10.44.3.198 + end_address: 10.44.3.207 + static: + start_address: 10.44.3.208 + end_address: 10.44.3.217 + +external_nonat_subnet: + vlan_id: 206 + gateway_ip_address: 10.44.3.225 + network_prefix: 27 + network_ip: 10.44.3.224 + dhcp: + start_address: 10.44.3.230 + end_address: 10.44.3.239 + static: + start_address: 10.44.3.240 + end_address: 10.44.3.249 diff --git a/tests/integration/targets/nutanix_vms/tasks/create.yml b/tests/integration/targets/nutanix_vms/tasks/create.yml index 9da6dfc96..7c21ebe07 100644 --- a/tests/integration/targets/nutanix_vms/tasks/create.yml +++ b/tests/integration/targets/nutanix_vms/tasks/create.yml @@ -1,601 +1,622 @@ - - name: Create Cloud-init Script file - copy: - dest: "cloud_init.yml" - content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: myNutanixVM +- name: Create Cloud-init Script file + ansible.builtin.copy: + mode: "0644" + dest: "cloud_init.yml" + content: | + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: myNutanixVM ########################################################################## - - name: VM with none values - ntnx_vms: - state: present - name: none - timezone: GMT - project: - uuid: "{{ project.uuid }}" - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - 
disks: - - type: DISK - size_gb: 5 - bus: SCSI - vcpus: - cores_per_vcpu: - memory_gb: - register: result - ignore_errors: true +- name: VM with none values + ntnx_vms: + state: present + name: none + timezone: GMT + project: + uuid: "{{ project.uuid }}" + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + size_gb: 5 + bus: SCSI + vcpus: + cores_per_vcpu: + memory_gb: + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to Create VM with none values ' - success_msg: 'VM with none values created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to Create VM with none values " + success_msg: "VM with none values created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' # ################################################################################## - - name: VM with owner name - ntnx_vms: - state: present - name: none - timezone: GMT - project: - uuid: "{{ project.uuid }}" - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - owner: - name: "{{ vm_owner.name }}" - disks: - - type: DISK - size_gb: 5 - bus: SCSI - register: result - ignore_errors: true +- name: VM with owner name + ntnx_vms: + state: present + name: none + timezone: GMT + project: + uuid: "{{ project.uuid }}" + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + owner: + name: "{{ vm_owner.name }}" + disks: + - type: DISK + size_gb: 5 + bus: SCSI + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - 
result.response is defined - - result.response.status.state == 'COMPLETE' - - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" - - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" - - result.response.metadata.owner_reference.kind == "user" - fail_msg: 'Unable to Create VM with owner' - success_msg: 'VM with owner created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" + - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" + - result.response.metadata.owner_reference.kind == "user" + fail_msg: "Unable to Create VM with owner" + success_msg: "VM with owner created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ################################################################################## - - name: VM with ubuntu image and different specifications - ntnx_vms: - state: present - project: - name: "{{ project.name }}" - name: "VM with Ubuntu image" - desc: "VM with cluster, network, category, disk with Ubuntu image, guest customization " - categories: - AppType: - - Default - Environment: - - Dev - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ ubuntu }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result +- name: VM with ubuntu image and different specifications + ntnx_vms: + state: present + project: + name: "{{ project.name }}" + name: "VM with Ubuntu image" + desc: "VM 
with cluster, network, category, disk with Ubuntu image, guest customization " + categories: + AppType: + - Default + Environment: + - Dev + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 30 + bus: "SATA" + clone_image: + name: "{{ ubuntu }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - - result.response.metadata.categories_mapping["AppType"] == ["Default"] - - result.response.metadata.categories_mapping["Environment"] == ["Dev"] - fail_msg: 'Unable to Create VM with Ubuntu image and different specifications ' - success_msg: 'VM with Ubuntu image and different specifications created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + - result.response.metadata.categories_mapping["AppType"] == ["Default"] + - result.response.metadata.categories_mapping["Environment"] == ["Dev"] + fail_msg: "Unable to Create VM with Ubuntu image and different specifications " + success_msg: "VM with Ubuntu image and different specifications created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################### - - name: VM with CentOS-7-cloud-init image with disk image size - ntnx_vms: - state: present - name: VM with CentOS-7-cloud-init image - memory_gb: 1 - timezone: 
"UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with CentOS-7-cloud-init image' - success_msg: 'VM with CentOS-7-cloud-init image created successfully ' +- name: VM with CentOS-7-cloud-init image with disk image size + ntnx_vms: + state: present + name: VM with CentOS-7-cloud-init image + memory_gb: 1 + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with CentOS-7-cloud-init image" + success_msg: "VM with CentOS-7-cloud-init image created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################# - - name: VM with CentOS-7-cloud-init image without disk image size - ntnx_vms: - state: present - memory_gb: 1 - name: VM with CentOS-7-cloud-init image without image size - timezone: "UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - clone_image: - name: "{{ centos }}" - bus: "SCSI" - 
guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with CentOS-7-cloud-init image' - success_msg: 'VM with CentOS-7-cloud-init image created successfully ' +- name: VM with CentOS-7-cloud-init image without disk image size + ntnx_vms: + state: present + memory_gb: 1 + name: VM with CentOS-7-cloud-init image without image size + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + clone_image: + name: "{{ centos }}" + bus: "SCSI" + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: true + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with CentOS-7-cloud-init image" + success_msg: "VM with CentOS-7-cloud-init image created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] ################################################################################# - - name: VM with Cluster, Network, Universal time zone, one Disk - ntnx_vms: - state: present - 
name: "VM with Cluster Network and Disk" - memory_gb: 1 - timezone: "Universal" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: False - subnet: - uuid: "{{ network.dhcp.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - bus: "PCI" - register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with Cluster , Network, Universal time zone, one Disk' - success_msg: 'VM with Cluster , Network, Universal time zone, one Disk created successfully ' +- name: VM with Cluster, Network, Universal time zone, one Disk + ntnx_vms: + state: present + name: "VM with Cluster Network and Disk" + memory_gb: 1 + timezone: "Universal" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: false + subnet: + uuid: "{{ network.dhcp.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + bus: "PCI" + register: result +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with Cluster , Network, Universal time zone, one Disk" + success_msg: "VM with Cluster , Network, Universal time zone, one Disk created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################## - - name: VM with Cluster, different Disks, Memory size - ntnx_vms: - state: present - name: "VM with different disks" - timezone: "UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - bus: "SATA" - - type: "DISK" - size_gb: 30 - bus: "SCSI" - memory_gb: 2 - register: result 
- ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with Cluster, different Disks, Memory size' - success_msg: 'VM with Cluster, different Disks, Memory size created successfully ' +- name: VM with Cluster, different Disks, Memory size + ntnx_vms: + state: present + name: "VM with different disks" + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + bus: "SATA" + - type: "DISK" + size_gb: 30 + bus: "SCSI" + memory_gb: 2 + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with Cluster, different Disks, Memory size" + success_msg: "VM with Cluster, different Disks, Memory size created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ##################################################################################### - - name: VM with Cluster, different CDROMs - ntnx_vms: - state: present - memory_gb: 1 - wait: true - name: "VM with multiple CDROMs" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "CDROM" - bus: "SATA" - empty_cdrom: True - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - cores_per_vcpu: 1 - register: result - ignore_errors: True +- name: VM with Cluster, different CDROMs + ntnx_vms: + state: present + memory_gb: 1 + wait: true + name: "VM with multiple CDROMs" + cluster: + name: "{{ cluster.name }}" + disks: + - type: "CDROM" + bus: "SATA" + empty_cdrom: true + - type: "CDROM" + bus: "IDE" + empty_cdrom: true + 
cores_per_vcpu: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to Create VM with Cluster, different CDROMs ' - success_msg: 'VM with Cluster, different CDROMs created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to Create VM with Cluster, different CDROMs " + success_msg: "VM with Cluster, different CDROMs created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] #################################################################################### - - name: VM with all specification - ntnx_vms: - state: present - wait: True - name: "All specification" - timezone: "GMT" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 2 - bus: "SCSI" - - type: "DISK" - size_gb: 10 - bus: "PCI" - - type: "DISK" - size_gb: 2 - bus: "SATA" - - type: "DISK" - size_gb: 10 - bus: "SCSI" - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - boot_config: - boot_type: "UEFI" - boot_order: - - "DISK" - - "CDROM" - - "NETWORK" - vcpus: 1 - cores_per_vcpu: 2 - memory_gb: 1 - register: result - ignore_errors: True +- name: VM with all specification + ntnx_vms: + state: present + wait: true + name: "All specification" + timezone: "GMT" + cluster: + uuid: "{{ 
cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 2 + bus: "SCSI" + - type: "DISK" + size_gb: 10 + bus: "PCI" + - type: "DISK" + size_gb: 2 + bus: "SATA" + - type: "DISK" + size_gb: 10 + bus: "SCSI" + - type: "CDROM" + bus: "IDE" + empty_cdrom: true + boot_config: + boot_type: "UEFI" + boot_order: + - "DISK" + - "CDROM" + - "NETWORK" + vcpus: 1 + cores_per_vcpu: 2 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with all specification ' - success_msg: ' VM with all specification created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with all specification " + success_msg: " VM with all specification created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################################## - - name: VM with managed subnet - ntnx_vms: - state: present - name: VM with managed subnet - memory_gb: 1 - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network.dhcp.uuid }}" - register: result - ignore_errors: true +- name: VM with managed subnet + ntnx_vms: + state: present + name: VM with managed subnet + memory_gb: 1 + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network.dhcp.uuid }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - 
result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with managed subnet ' - success_msg: ' VM with with managed subnet created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with managed subnet " + success_msg: " VM with managed subnet created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################################### - - name: VM with minimum requiremnts - ntnx_vms: - state: present - name: MinReqVM - cluster: - name: "{{ cluster.name }}" - register: result - ignore_errors: true +- name: VM with minimum requirements + ntnx_vms: + state: present + name: MinReqVM + cluster: + name: "{{ cluster.name }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: 
result.response.status.state == 'COMPLETE' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] ################################################################################################## - - name: VM with unmanaged vlan - ntnx_vms: - desc: "VM with unmanaged vlan" - state: present - name: VM with unmanaged vlan - timezone: UTC - cluster: - uuid: "{{ cluster.uuid }}" - networks: - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network.static.ip }}" - boot_config: - boot_type: LEGACY - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: true +- name: VM with unmanaged vlan + ntnx_vms: + desc: "VM with unmanaged vlan" + state: present + name: VM with unmanaged vlan + timezone: UTC + cluster: + uuid: "{{ cluster.uuid }}" + networks: + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ network.static.ip }}" + boot_config: + boot_type: LEGACY + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with unmanaged vlan ' - success_msg: ' VM with unmanaged vlan created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with unmanaged vlan " + success_msg: " VM with unmanaged vlan created successfully " - - set_fact: - todelete: '{{ todelete + [ 
result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VM - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VM + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] ###################################################################################### - - name: VM with managed and unmanaged network - ntnx_vms: - state: present - name: VM_NIC - timezone: UTC - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: true - subnet: - name: "{{ network.dhcp.name }}" - cluster: - name: "{{ cluster.name }}" - - is_connected: true - subnet: - uuid: "{{ static.uuid }}" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: DISK - size_gb: 1 - bus: SCSI - - type: DISK - size_gb: 3 - bus: PCI - - type: CDROM - bus: SATA - empty_cdrom: True - - type: CDROM - bus: IDE - empty_cdrom: True - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: true +- name: VM with managed and unmanaged network + ntnx_vms: + state: present + name: VM_NIC + timezone: UTC + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "{{ network.dhcp.name }}" + cluster: + name: "{{ cluster.name }}" + - is_connected: true + subnet: + uuid: "{{ static.uuid }}" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: DISK + size_gb: 1 + bus: SCSI + - type: DISK + size_gb: 3 + bus: PCI + - type: CDROM + bus: SATA + empty_cdrom: true + - type: CDROM + bus: IDE + empty_cdrom: true 
+ boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with managed and unmanaged network ' - success_msg: ' VM with managed and unmanaged network created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with managed and unmanaged network " + success_msg: " VM with managed and unmanaged network created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################### - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type - ntnx_vms: - state: present - name: VM with UEFI boot type - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - name: "{{ storage_container.name }}" - - type: DISK - size_gb: 2 - bus: PCI - storage_container: - name: "{{ storage_container.name }}" - - type: DISK - size_gb: 3 - bus: SATA - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result +- name: VM with diffrent disk types and diffrent sizes with UEFI boot type + ntnx_vms: + state: present + name: VM with UEFI boot type + 
timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + name: "{{ storage_container.name }}" + - type: DISK + size_gb: 2 + bus: PCI + storage_container: + name: "{{ storage_container.name }}" + - type: DISK + size_gb: 3 + bus: SATA + boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result ################################################################################ - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with diffrent disk types and diffrent sizes with UEFI boot type ' - success_msg: ' VM with diffrent disk types and diffrent sizes with UEFI boot type created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with diffrent disk types and diffrent sizes with UEFI boot type " + success_msg: " VM with diffrent disk types and diffrent sizes with UEFI boot type created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VM - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' +- name: Delete all Created VM + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" - - set_fact: - todelete: [] +- name: Reset todelete list + ansible.builtin.set_fact: + 
todelete: [] #################################################################################### - - name: VM with storage container - ntnx_vms: - state: present - name: VM with UEFI boot type - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container.uuid }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result +- name: VM with storage container + ntnx_vms: + state: present + name: VM with UEFI boot type + timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + uuid: "{{ storage_container.uuid }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM withstorage container ' - success_msg: ' VM with storage container created successfully ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM withstorage container " + success_msg: " VM with storage container created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' #################################################################################### - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: 
"{{ todelete }}" diff --git a/tests/integration/targets/nutanix_vms/tasks/delete.yml b/tests/integration/targets/nutanix_vms/tasks/delete.yml index b1cf3046d..835259925 100644 --- a/tests/integration/targets/nutanix_vms/tasks/delete.yml +++ b/tests/integration/targets/nutanix_vms/tasks/delete.yml @@ -1,20 +1,20 @@ --- - name: VM with minimum requiremnts ntnx_vms: - state: present - name: MinReqVM - cluster: - name: "{{ cluster.name }}" + state: present + name: MinReqVM + cluster: + name: "{{ cluster.name }}" register: result ignore_errors: true - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + ansible.builtin.assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requiremnts " + success_msg: " VM with minimum requiremnts created successfully " - name: Delete VM ntnx_vms: @@ -22,8 +22,8 @@ state: absent register: result -- name: assert when status not complete - assert: +- name: Assert when status not complete + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' diff --git a/tests/integration/targets/nutanix_vms/tasks/main.yml b/tests/integration/targets/nutanix_vms/tasks/main.yml index 1a9593038..54eee0d10 100644 --- a/tests/integration/targets/nutanix_vms/tasks/main.yml +++ b/tests/integration/targets/nutanix_vms/tasks/main.yml @@ -1,14 +1,21 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - 
import_tasks: "create.yml" - - import_tasks: "negtaive_scenarios.yml" - - import_tasks: "delete.yml" - - import_tasks: "vm_operations.yml" - - import_tasks: "vm_update.yml" - - import_tasks: "negtaive_vm_update.yml" + - name: Import create.yml + ansible.builtin.import_tasks: "create.yml" + - name: Import negtaive_scenarios.yml + ansible.builtin.import_tasks: "negtaive_scenarios.yml" + - name: Import delete.yml + ansible.builtin.import_tasks: "delete.yml" + - name: Import vm_operations.yml + ansible.builtin.import_tasks: "vm_operations.yml" + - name: Import vm_update.yml + ansible.builtin.import_tasks: "vm_update.yml" + - name: Import negtaive_vm_update.yml + ansible.builtin.import_tasks: "negtaive_vm_update.yml" diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml index f003d4459..640d111f0 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml @@ -1,309 +1,308 @@ - - debug: - msg: "Started Negative Creation Cases" +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - - name: Unknown project name - ntnx_vms: - state: present - name: Unknown project name - timezone: "UTC" - project: - name: project - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown project name + ntnx_vms: + state: present + name: Unknown project name + timezone: "UTC" + project: + name: project + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.msg == "Failed generating VM Spec" - - result.failed == True - - result.failed is defined - - 
result.error == "Project project not found." - success_msg: ' Success: returned error as expected ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.msg == "Failed generating VM Spec" + - result.failed == true + - result.failed is defined + - result.error == "Project project not found." + success_msg: " Success: returned error as expected " ############################################################# - - name: Check if error is produced when disk size is not given for storage container - check_mode: yes - ntnx_vms: - state: present - name: VM with storage container - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: DISK - bus: SCSI - storage_container: - name: "{{ storage_container.name }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: True +- name: Check if error is produced when disk size is not given for storage container + check_mode: true + ntnx_vms: + state: present + name: VM with storage container + timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + bus: SCSI + storage_container: + name: "{{ storage_container.name }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.msg == "Unsupported operation: Unable to create disk, 'size_gb' is required for using storage container." - - result.failed == True - - result.failed is defined - success_msg: ' Success: returned error as expected ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.msg == "Unsupported operation: Unable to create disk, 'size_gb' is required for using storage container." 
+ - result.failed == true + - result.failed is defined + success_msg: " Success: returned error as expected " ################################################################################## - - name: Unknown Cluster - ntnx_vms: - state: present - name: Unknown Cluster - timezone: "UTC" - cluster: - uuid: "auto_cluster_1aa888141361" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown Cluster + ntnx_vms: + state: present + name: Unknown Cluster + timezone: "UTC" + cluster: + uuid: "auto_cluster_1aa888141361" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.response.state == 'ERROR' - - result.status_code == 422 - - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail Vm created successfully with unknown cluster ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + - result.response.state == 'ERROR' + - result.status_code == 422 + - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" + success_msg: " Success: returned error as expected " + fail_msg: " Fail Vm created successfully with unknown cluster " ################################################################################ - - name: Unknown Cluster name - ntnx_vms: - state: present - name: Unknown Cluster - timezone: "UTC" - cluster: - name: "auto_cluster" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown Cluster name + ntnx_vms: + state: present + name: Unknown Cluster + timezone: "UTC" + cluster: + name: "auto_cluster" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + 
name: "{{ centos }}" + bus: "SCSI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.msg == "Failed generating VM Spec" - - result.failed == True - - result.response is defined - - result.error == "Cluster auto_cluster not found." - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail Vm created successfully with unknown cluster ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.msg == "Failed generating VM Spec" + - result.failed == true + - result.response is defined + - result.error == "Cluster auto_cluster not found." + success_msg: " Success: returned error as expected " + fail_msg: " Fail Vm created successfully with unknown cluster " ################################################################################### - - name: Unknown Network name - ntnx_vms: - state: present - name: Unknown Network - desc: "Unknown network" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "vlan.8000" - register: result - ignore_errors: True +- name: Unknown Network name + ntnx_vms: + state: present + name: Unknown Network + desc: "Unknown network" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "vlan.8000" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.msg == "Failed generating VM Spec" - - result.error == "Subnet vlan.8000 not found." - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown network name ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + - result.msg == "Failed generating VM Spec" + - result.error == "Subnet vlan.8000 not found." 
+ success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown network name " ################################################################################### - - name: Unknown Network uuid - ntnx_vms: - state: present - name: Unknown Network - desc: "Unknown network" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - uuid: "8000" - register: result - ignore_errors: True +- name: Unknown Network uuid + ntnx_vms: + state: present + name: Unknown Network + desc: "Unknown network" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + uuid: "8000" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" - - result.response.state == 'ERROR' - - result.status_code == 422 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown network name ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" + - result.response.state == 'ERROR' + - result.status_code == 422 + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown network name " ################################################################################### - - name: Unknow Image name - ntnx_vms: - state: present - name: unknown image_vm - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "centos-7-cloudinit" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknow Image name + ntnx_vms: + state: present + name: unknown image_vm + timezone: "UTC" + cluster: + 
name: "{{ cluster.name }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "centos-7-cloudinit" + bus: "SCSI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.response.state == 'ERROR' - - result.status_code == 422 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with not existed image ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + - result.response.state == 'ERROR' + - result.status_code == 422 + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with not existed image " ######################################################################################## - - name: Wrong disk size value - ntnx_vms: - state: present - name: "Wrong disk size value" - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 10g - bus: "PCI" - register: result - ignore_errors: True +- name: Wrong disk size value + ntnx_vms: + state: present + name: "Wrong disk size value" + timezone: "UTC" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 10g + bus: "PCI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with invalid argument for size_gb ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with invalid argument for size_gb " 
############################################################################################# - - name: Image size less than actual - ntnx_vms: - state: present - name: "image size less than actual" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 2 #must be 20 - bus: "SATA" - clone_image: - name: "{{ centos }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True +- name: Image size less than actual + ntnx_vms: + state: present + name: "image size less than actual" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 2 # must be 20 + bus: "SATA" + clone_image: + name: "{{ centos }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "cloud_init.yml" + is_overridable: true + register: result + ignore_errors: true - - - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail: VM created successfully with image size is less than actual ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail: VM created successfully with image size is less than actual " ################################################################################# - - name: Unknow storage container name - ntnx_vms: - state: present - name: unknown storage container - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - size_gb: 10 - 
storage_container: - name: "storage" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknow storage container name + ntnx_vms: + state: present + name: unknown storage container + timezone: "UTC" + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + size_gb: 10 + storage_container: + name: "storage" + bus: "SCSI" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown storage container name ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown storage container name " ################################################################################# - - name: Delete vm with unknown uuid - ntnx_vms: - state: absent - vm_uuid: 5 - register: result - ignore_errors: True +- name: Delete vm with unknown uuid + ntnx_vms: + state: absent + vm_uuid: 5 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting VM with unknown uuid ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.response is defined + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting VM with unknown uuid " ################################################################################# - - name: Delete vm with missing uuid - ntnx_vms: - state: absent - register: result - ignore_errors: True +- name: Delete vm with missing uuid + ntnx_vms: + state: absent + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed == True - 
success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting VM with missing uuid ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting VM with missing uuid " diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml index a17b52529..bb5b112ed 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml @@ -1,4 +1,4 @@ -- name: create VM with minimum requiremnts to update +- name: Create VM with minimum requiremnts to update ntnx_vms: state: present name: update vm @@ -20,7 +20,7 @@ uuid: "{{ storage_container.uuid }}" - type: "CDROM" bus: "IDE" - empty_cdrom: True + empty_cdrom: true - type: DISK size_gb: 3 bus: PCI @@ -37,16 +37,16 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - vm.vm_uuid - vm.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts " + success_msg: " VM with minimum requiremnts created successfully " -- name: update vm without change any value +- name: Update vm without change any value ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" vcpus: 4 @@ -55,20 +55,20 @@ register: result ignore_errors: true - - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == false - - result.changed == false - - result.msg == 'Nothing to change' - fail_msg: 'Fail : VM updated successfully with same current values ' - success_msg: ' Success: returned error as expected ' + - result.failed == false + - result.changed == false + - result.msg == 'Nothing to change' + fail_msg: "Fail : VM 
updated successfully with same current values " + success_msg: " Success: returned error as expected " ############################################################### -- debug: +- name: Start negative update scenarios tests for memory vcpus cores_per_vcpu + ansible.builtin.debug: msg: Start negative update scenarios tests for memory vcpus cores_per_vcpu -- name: decrease values for vcpus without force_power_off and vm is on +- name: Decrease values for vcpus without force_power_off and vm is on ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" vcpus: 3 @@ -76,15 +76,15 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for vcpus while while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the value for vcpus while while vm is on " + success_msg: " Success: returned error as expected " -- name: decrease values for memory_gb without force_power_off and vm is on +- name: Decrease values for memory_gb without force_power_off and vm is on ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" memory_gb: 3 @@ -92,15 +92,15 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for memory_gb while while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the 
value for memory_gb while while vm is on " + success_msg: " Success: returned error as expected " -- name: decrease values for cores_per_vcpu without force_power_off and vm is on +- name: Decrease values for cores_per_vcpu without force_power_off and vm is on ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" cores_per_vcpu: 3 @@ -108,15 +108,16 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for cores_per_vcpu while while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the value for cores_per_vcpu while while vm is on " + success_msg: " Success: returned error as expected " ############################################################### -- debug: +- name: Start negative update scenarios tests for disks + ansible.builtin.debug: msg: Start negative update scenarios tests for disks ############ negative test : Decrase size @@ -131,12 +132,11 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' 
- fail_msg: ' Fail: decreasing the size of the disk that contains the image with SCSI bus type ' - success_msg: ' Success: returned error as expected ' - + fail_msg: " Fail: decreasing the size of the disk that contains the image with SCSI bus type " + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the SCSI disk with storage container ntnx_vms: @@ -149,13 +149,13 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the SCSI disk with storage container ' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the SCSI disk with storage container " + success_msg: " Success: returned error as expected " -- name: Update VM by decreasing the size of the empty ide cdrom #error +- name: Update VM by decreasing the size of the empty ide cdrom # error ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" disks: @@ -166,13 +166,13 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == 'Unsupported operation: Cannot resize empty cdrom.' - result.changed == false - result.failed == true - fail_msg: ' Fail: change the size of the empty CDROM' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: change the size of the empty CDROM" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the pci disk ntnx_vms: @@ -185,11 +185,11 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' 
- fail_msg: ' Fail: decreasing the size of the pci disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the pci disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the sata disk ntnx_vms: @@ -202,11 +202,11 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the sata disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the sata disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the SCSI disk ntnx_vms: @@ -219,11 +219,11 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the SCSI disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the SCSI disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the IDE disk ntnx_vms: @@ -236,11 +236,11 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' 
- fail_msg: ' Fail: decreasing the size of the IDE disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the IDE disk" + success_msg: " Success: returned error as expected " ################ - name: Update VM by change ths bus type of ide disk ntnx_vms: @@ -253,12 +253,12 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == ' parameters are mutually exclusive: uuid|bus found in disks ' - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail: Update VM by change ths bus type of ide disk sucessfuly ' + - result.failed == true + success_msg: " Success: returned error as expected " + fail_msg: " Fail: Update VM by change ths bus type of ide disk sucessfuly " ############ - name: Update VM by adding IDE disk while vm is on ntnx_vms: @@ -271,13 +271,13 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by add ide disk while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by add ide disk while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by adding SATA disk while vm is on ntnx_vms: @@ -290,13 +290,13 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by add SATA disk while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed 
== true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by add SATA disk while vm is on " + success_msg: " Success: returned error as expected " ############# - name: Update VM by removing IDE disks while vm is on ntnx_vms: @@ -307,13 +307,13 @@ register: result ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing IDE disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing IDE disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing IDE disks while vm is on ntnx_vms: @@ -325,13 +325,13 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing IDE disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing IDE disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing PCI disks while vm is on ntnx_vms: @@ -343,13 +343,13 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - 
result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing PCI disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing PCI disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing SATA disks while vm is on ntnx_vms: @@ -361,13 +361,13 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing SATA disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == true + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing SATA disks while vm is on " + success_msg: " Success: returned error as expected " ########################################################### - name: Delete created vm's ntnx_vms: @@ -376,11 +376,12 @@ ignore_errors: true register: result -- assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' - result.vm_uuid - result.task_uuid - fail_msg: 'Fail: Unable to delete created vm ' - success_msg: 'Success: Vm deleted sucessfully' + fail_msg: "Fail: Unable to delete created vm " + success_msg: "Success: Vm deleted sucessfully" diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml index d64f9f755..522c1154a 100644 --- 
a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml @@ -1,103 +1,105 @@ -- debug: +- name: Start testing VM with different opperations + ansible.builtin.debug: msg: Start testing VM with different opperations -- set_fact: - todelete: [] +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] - name: VM with minimum requiremnts ntnx_vms: - state: present - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_opperations_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: vm ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts " + success_msg: " VM with minimum requiremnts created successfully " ############################################ - name: VM with minimum requiremnts with check mode ntnx_vms: - state: present - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_opperations_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: result ignore_errors: true - check_mode: yes + check_mode: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.changed == false - result.failed == false - result.task_uuid != "" - success_msg: ' Success: 
returned as expected ' - fail_msg: ' Fail ' + success_msg: " Success: returned as expected " + fail_msg: " Fail " ########################################### -- name: hard power off the vm +- name: Hard power off the vm ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - state: hard_poweroff + vm_uuid: "{{ vm.vm_uuid }}" + state: hard_poweroff register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'OFF' - fail_msg: ' Unable to hard power off the vm ' - success_msg: ' VM powerd off successfully ' + fail_msg: " Unable to hard power off the vm " + success_msg: " VM powerd off successfully " # ########################################### -- name: power on the vm +- name: Power on the vm ntnx_vms: - state: power_on - vm_uuid: "{{ vm.vm_uuid }}" + state: power_on + vm_uuid: "{{ vm.vm_uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'ON' - fail_msg: ' Unable to power on vm ' - success_msg: ' VM powerd on successfully ' + fail_msg: " Unable to power on vm " + success_msg: " VM powerd on successfully " ########################################## -- name: power on the vm while it's on +- name: Power on the vm while it's on ntnx_vms: - state: power_on - vm_uuid: "{{ vm.vm_uuid }}" + state: power_on + vm_uuid: "{{ vm.vm_uuid }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.msg == "Nothing to change" - success_msg: ' Success: returned msg as expected ' - fail_msg: ' Fail ' + success_msg: " Success: returned msg as expected " + fail_msg: " Fail " ########################################## # - name: soft shut down the vm # ntnx_vms: @@ -108,7 +110,7 @@ # ignore_errors: true 
# - name: Creation Status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.response.status.state == 'COMPLETE' @@ -133,7 +135,7 @@ # ignore_errors: true # - name: Creation Status -# assert: +# ansible.builtin.assert: # that: # - result.response is defined # - result.response.status.state == 'COMPLETE' @@ -142,65 +144,68 @@ # fail_msg: ' Unable to create VM with minimum requiremnts and soft_shutdown ' # success_msg: ' VM with minimum requiremnts created successfully and soft_shutdown ' -# - set_fact: +# - ansible.builtin.set_fact: # todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - name: Create VM with minimum requiremnts with hard_poweroff opperation ntnx_vms: - state: hard_poweroff - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" + state: hard_poweroff + name: integration_test_opperations_vm + cluster: + name: "{{ cluster.name }}" register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'OFF' - result.response.status.resources.power_state_mechanism.mechanism == 'HARD' - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts with hard_poweroff opperation " + success_msg: " VM with minimum requiremnts and hard_poweroff state created successfully " -- set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - name: Create VM with minimum requiremnts with hard_poweroff opperation without wait ntnx_vms: - state: hard_poweroff - name: 
integration_test_opperations_vm_111 - cluster: - name: "{{ cluster.name }}" - wait: false + state: hard_poweroff + name: integration_test_opperations_vm_111 + cluster: + name: "{{ cluster.name }}" + wait: false register: result ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' or result.response.status.state == 'PENDING' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts with hard_poweroff opperation " + success_msg: " VM with minimum requiremnts and hard_poweroff state created successfully " -- set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' when: result.response.status.state == 'COMPLETE' - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - loop: '{{ todelete }}' + state: absent + vm_uuid: "{{ item }}" + loop: "{{ todelete }}" - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ vm.vm_uuid }}' + state: absent + vm_uuid: "{{ vm.vm_uuid }}" -- set_fact: - todelete: [] +- name: Reset todelete list + ansible.builtin.set_fact: + todelete: [] diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml index fa0ccd60e..0e0160071 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml @@ -1,14 +1,14 @@ # ########################### UPDATE_VM ################################ -- name: create VM with minimum requiremnts to update +- name: Create VM with minimum requiremnts 
to update ntnx_vms: state: present name: update vm cluster: name: "{{ cluster.name }}" categories: - Environment: - - Production + Environment: + - Production vcpus: 5 cores_per_vcpu: 5 memory_gb: 5 @@ -17,16 +17,16 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts " + success_msg: " VM with minimum requiremnts created successfully " #################################################################### -- name: update vm by set owner by uuid +- name: Update vm by set owner by uuid ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" owner: @@ -35,7 +35,7 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid @@ -44,14 +44,15 @@ - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" - result.response.metadata.owner_reference.kind == "user" - fail_msg: ' Unable to update vm by setting owner ' - success_msg: ' VM updated successfully by setting owner ' + fail_msg: " Unable to update vm by setting owner " + success_msg: " VM updated successfully by setting owner " #################################################################### -- debug: +- name: Start update tests for memory vcpus cores_per_vcpu + ansible.builtin.debug: msg: Start update tests for memory vcpus cores_per_vcpu -- name: decrease values for memory, vcpus and corespervcpu with force_power_off +- name: Decrease values for memory, vcpus and corespervcpu with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" vcpus: 2 @@ -63,16 +64,16 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: 
that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off ' - success_msg: ' VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off " + success_msg: " VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off " -- name: increase values for memory, vcpus and corespervcpu +- name: Increase values for memory, vcpus and corespervcpu ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" vcpus: 4 @@ -81,16 +82,16 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increase values for memory, vcpus ' - success_msg: ' VM updated successfully by increase values for memory, vcpus ' + fail_msg: " Unable to update vm by increase values for memory, vcpus " + success_msg: " VM updated successfully by increase values for memory, vcpus " -- name: increase values for corespervcpu with force_power_off +- name: Increase values for corespervcpu with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" cores_per_vcpu: 4 @@ -99,32 +100,33 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increase values for corespervcpu with force_power_off' - success_msg: ' VM updated successfully by increase values for corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by increase values for corespervcpu with force_power_off" + success_msg: " VM updated 
successfully by increase values for corespervcpu with force_power_off " #################################################################### -- debug: +- name: Start update tests for vm categoies + ansible.builtin.debug: msg: Start update tests for vm categories -- name: update categories +- name: Update categories ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" categories: - Environment: - - Dev - AppType: - - Default + Environment: + - Dev + AppType: + - Default register: result ignore_errors: true - name: Assert categories Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid @@ -132,10 +134,10 @@ - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping["Environment"] == ["Dev"] - result.response.metadata.categories_mapping["AppType"] == ["Default"] - fail_msg: ' Unable to update categories attached to vm' - success_msg: ' VM categories updated successfully ' + fail_msg: " Unable to update categories attached to vm" + success_msg: " VM categories updated successfully " -- name: remove all categoies attached to vm +- name: Remove all categoies attached to vm ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" remove_categories: true @@ -143,18 +145,19 @@ ignore_errors: true - name: Assert categories Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping == {} - fail_msg: ' Unable to remove all categories attached to vm' - success_msg: ' All VM categories removed successfully ' + fail_msg: " Unable to remove all categories attached to vm" + success_msg: " All VM categories removed successfully " ################################################################### -- debug: +- name: Start update tests for disks + ansible.builtin.debug: msg: Start update tests for disks ##### CRUD opperation for SCSI disks - name: Update VM by adding SCSI disks @@ -178,14 
+181,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SCSI disks ' - success_msg: ' VM updated successfully by adding SCSI disks ' + fail_msg: " Unable to update vm by adding SCSI disks " + success_msg: " VM updated successfully by adding SCSI disks " - name: Update VM by increasing the size of the SCSI disks ntnx_vms: @@ -204,14 +207,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SCSI disks ' - success_msg: ' VM updated successfully by increasing the size of the SCSI disks ' + fail_msg: " Unable to update vm by increasing the size of the SCSI disks " + success_msg: " VM updated successfully by increasing the size of the SCSI disks " - name: Update VM by removing SCSI disks ntnx_vms: @@ -227,14 +230,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing SCSI disks ' - success_msg: ' VM updated successfully by removing SCSI disks ' + fail_msg: " Unable to update vm by removing SCSI disks " + success_msg: " VM updated successfully by removing SCSI disks " ####### ##### CRUD opperation for PCI disks - name: Update VM by adding PCI disks @@ -248,14 +251,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding PCI disks ' - success_msg: ' VM updated successfully by adding PCI disks ' + fail_msg: 
" Unable to update vm by adding PCI disks " + success_msg: " VM updated successfully by adding PCI disks " - name: Update VM by increasing the size of the PCI disks ntnx_vms: @@ -268,14 +271,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the PCI disks ' - success_msg: ' VM updated successfully by increasing the size of the PCI disks ' + fail_msg: " Unable to update vm by increasing the size of the PCI disks " + success_msg: " VM updated successfully by increasing the size of the PCI disks " - name: Update VM by removing PCI disks with force_power_off ntnx_vms: @@ -288,14 +291,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing PCI disks with force_power_off ' - success_msg: ' VM updated successfully by removing PCI disks with force_power_off ' + fail_msg: " Unable to update vm by removing PCI disks with force_power_off " + success_msg: " VM updated successfully by removing PCI disks with force_power_off " ##### CRUD opperation for IDE disks - name: Update VM by adding IDE disks with force_power_off ntnx_vms: @@ -306,20 +309,20 @@ size_gb: 1 - type: "CDROM" bus: "IDE" - empty_cdrom: True + empty_cdrom: true force_power_off: true register: result ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding IDE disks with force_power_off ' - success_msg: ' VM updated successfully by adding IDE disks with force_power_off ' + fail_msg: " Unable to update vm by adding IDE disks with force_power_off 
" + success_msg: " VM updated successfully by adding IDE disks with force_power_off " - name: Update VM by increasing the size of the IDE disks with force_power_off ntnx_vms: @@ -333,14 +336,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the IDE disks with force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the IDE disks with force_power_off ' + fail_msg: " Unable to update vm by increasing the size of the IDE disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the IDE disks with force_power_off " - name: Update VM by removing IDE disks with force_power_off ntnx_vms: @@ -355,14 +358,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing IDE disks with force_power_off' - success_msg: ' VM updated successfully by removing IDE disks with force_power_off' + fail_msg: " Unable to update vm by removing IDE disks with force_power_off" + success_msg: " VM updated successfully by removing IDE disks with force_power_off" ####### ##### CRUD opperation for SATA disks - name: Update VM by adding SATA disks with force_power_off @@ -377,14 +380,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SATA disks with force_power_off' - success_msg: ' VM updated successfully by adding SATA disks with force_power_off' + fail_msg: " Unable to update vm by adding SATA disks with force_power_off" + success_msg: " VM updated 
successfully by adding SATA disks with force_power_off" - name: Update VM by increasing the size of the SATA disks with force_power_off ntnx_vms: @@ -398,14 +401,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SATA disks with force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the SATA disks with force_power_off' + fail_msg: " Unable to update vm by increasing the size of the SATA disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the SATA disks with force_power_off" - name: Update VM by removing SATA disks with force_power_off ntnx_vms: @@ -418,17 +421,18 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing SATA disks with force_power_off' - success_msg: ' VM updated successfully by removing SATA disks with force_power_off ' + fail_msg: " Unable to update vm by removing SATA disks with force_power_off" + success_msg: " VM updated successfully by removing SATA disks with force_power_off " # #################################################################### -- debug: +- name: Start update tests for network + ansible.builtin.debug: msg: Start update tests for network - name: Update VM by adding subnets @@ -446,14 +450,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding subnets ' - success_msg: ' VM updated successfully by adding subnets' + fail_msg: " Unable to update vm by adding subnets " + 
success_msg: " VM updated successfully by adding subnets" - name: Update VM by editing a subnet is_connected ntnx_vms: @@ -468,14 +472,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by editing a subnet ' - success_msg: ' VM updated successfully by editing a subnet ' + fail_msg: " Unable to update vm by editing a subnet " + success_msg: " VM updated successfully by editing a subnet " - name: Update VM by change the private ip for subnet ntnx_vms: @@ -490,14 +494,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing private_ip for subnet ' - success_msg: ' VM updated successfully by editing private_ip for subnet' + fail_msg: " Unable to update vm by editing private_ip for subnet " + success_msg: " VM updated successfully by editing private_ip for subnet" - name: Update VM by change vlan subnet ntnx_vms: @@ -516,14 +520,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing a subnet vlan ' - success_msg: ' VM updated successfully by editing a subnet vlan ' + fail_msg: " Unable to update vm by editing a subnet vlan " + success_msg: " VM updated successfully by editing a subnet vlan " - name: Update VM by deleting a subnet ntnx_vms: @@ -537,14 +541,14 @@ ignore_errors: true - name: Update Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by deleting a subnet ' - 
success_msg: ' VM updated successfully by deleting a subnet ' + fail_msg: " Unable to update vm by deleting a subnet " + success_msg: " VM updated successfully by deleting a subnet " # #################################################################### - name: Update VM by deleting it @@ -554,11 +558,12 @@ register: result ignore_errors: true -- assert: +- name: Delete Status + ansible.builtin.assert: that: - result.response is defined - result.response.status == 'SUCCEEDED' - result.vm_uuid - result.task_uuid - fail_msg: 'Fail: Unable to delete created vm ' - success_msg: 'Success: Vm deleted sucessfully' + fail_msg: "Fail: Unable to delete created vm " + success_msg: "Success: Vm deleted sucessfully" diff --git a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml index 93b21f003..47a7e973a 100644 --- a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml +++ b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml @@ -1,4 +1,5 @@ -- set_fact: +- name: Initialize todelete list + ansible.builtin.set_fact: todelete: [] - name: Creat anohter VM with same name @@ -11,14 +12,15 @@ ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - output.response is defined - output.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requiremnts " + success_msg: " VM with minimum requiremnts created successfully " -- set_fact: +- name: Adding VM uuid to todelete list + ansible.builtin.set_fact: todelete: '{{ todelete + [ output["response"]["metadata"]["uuid"] ] }}' when: output.response.status.state == 'COMPLETE' ################################################## @@ -26,10 +28,10 @@ ntnx_vms_info: filter_string: vm_name=={{vm.name}};power_state==off register: result - ignore_errors: True + 
ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.entities[0].metadata.uuid == '{{output["response"]["metadata"]["uuid"]}}' @@ -44,11 +46,10 @@ vm_name: "{{ vm.name }}" power_state: "on" register: result - ignore_errors: True - + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list vms " @@ -60,10 +61,10 @@ offset: 1 register: result check_mode: true - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list vms " @@ -75,10 +76,10 @@ sort_attribute: "vm_name" kind: vm register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list vms " @@ -87,8 +88,9 @@ - name: Delete all Created VMs ntnx_vms: state: absent - vm_uuid: '{{ item }}' + vm_uuid: "{{ item }}" register: result - loop: '{{ todelete }}' -- set_fact: + loop: "{{ todelete }}" +- name: Reset todelete list + ansible.builtin.set_fact: todelete: [] diff --git a/tests/integration/targets/nutanix_vms_info/tasks/main.yml b/tests/integration/targets/nutanix_vms_info/tasks/main.yml index 0e35d7fb5..e4194d5ad 100644 --- a/tests/integration/targets/nutanix_vms_info/tasks/main.yml +++ b/tests/integration/targets/nutanix_vms_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Set module defaults + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "list_vms.yml" + - name: Import list_vms.yml + ansible.builtin.import_tasks: 
"list_vms.yml" diff --git a/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml b/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml index 3cc3113d4..41f5c4543 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml +++ b/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml @@ -1,13 +1,13 @@ - name: Create min VPC with subnet name ntnx_vpcs: state: present - wait: True + wait: true name: MinVPC external_subnets: - subnet_name: "{{ external_nat_subnet.name }}" register: result check_mode: true - ignore_errors: True + ignore_errors: true ########################################################## - name: Create min VPC with external_nat_subnet uuid ntnx_vpcs: @@ -18,17 +18,18 @@ - subnet_uuid: "{{ external_nat_subnet.uuid }}" check_mode: false register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create Min vpc with external subnet uuid " success_msg: " Min vpc with external subnet uuid created successfully " -- set_fact: +- name: Adding vpc uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## - name: Delete all created vpcs @@ -37,9 +38,10 @@ vpc_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all vpcs from todelete list + ansible.builtin.set_fact: todelete: [] ########################################################## - name: Create VPC with routable_ips @@ -48,19 +50,20 @@ name: vpc_with_routable_ips routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + 
ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create vpc with routable_ips " success_msg: " VPC with routable ips created successfully " -- set_fact: +- name: Adding vpc uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## - name: Create VPC with routable_ips and external subnet @@ -71,19 +74,20 @@ - subnet_name: "{{ external_nat_subnet.name }}" routable_ips: - network_ip: "{{ routable_ips.network_ip_2 }}" - network_prefix: "{{ routable_ips.network_prefix_2 }}" + network_prefix: "{{ routable_ips.network_prefix_2 }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create vpc with routable_ips and external subnet " success_msg: " VPC with routable ips and external subnet created successfully " -- set_fact: +- name: Adding vpc uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## - name: Create VPC with dns_servers @@ -93,10 +97,10 @@ dns_servers: "{{ dns_servers }}" wait: false register: result - ignore_errors: True - + ignore_errors: true -- set_fact: +- name: Adding vpc uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## - name: Delete all created vpcs @@ -105,9 +109,10 @@ vpc_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true -- set_fact: +- name: Remove all vpcs from todelete list + ansible.builtin.set_fact: todelete: [] ########################################################## - name: Create VPC with all specfactions @@ -119,19 +124,20 @@ dns_servers: "{{ 
dns_servers }}" routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create vpc all specfactions " success_msg: " VPC with all specfactions created successfully " -- set_fact: +- name: Adding vpc uuid to delete list + ansible.builtin.set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## - name: Delete all created vpcs @@ -140,4 +146,4 @@ vpc_uuid: "{{ item }}" register: result loop: "{{ todelete }}" - ignore_errors: True + ignore_errors: true diff --git a/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml b/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml index 7d0339fa6..16b8b4925 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml +++ b/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml @@ -8,19 +8,18 @@ dns_servers: "{{ dns_servers }}" routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result - ignore_errors: True + ignore_errors: true - name: Creation Status - assert: + ansible.builtin.assert: that: - result.response is defined - result.response.status.state == 'COMPLETE' fail_msg: " Unable to create vpc all specfactions " success_msg: " VPC with all specfactions created successfully " - - name: Delete vpc ntnx_vpcs: state: absent diff --git a/tests/integration/targets/nutanix_vpcs/tasks/main.yml b/tests/integration/targets/nutanix_vpcs/tasks/main.yml index d113348a4..5c6dd8f76 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/main.yml +++ 
b/tests/integration/targets/nutanix_vpcs/tasks/main.yml @@ -1,11 +1,15 @@ --- -- module_defaults: - group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" +- name: Initializing variables + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_vpcs.yml" - - import_tasks: "delete_vpc.yml" - - import_tasks: "negative_scenarios.yml" + - name: Import create_vpcs.yml + ansible.builtin.import_tasks: "create_vpcs.yml" + - name: Import delete_vpc.yml + ansible.builtin.import_tasks: "delete_vpc.yml" + - name: Import negative_scenarios.yml + ansible.builtin.import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/nutanix_vpcs/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_vpcs/tasks/negative_scenarios.yml index 9f40bbf94..35138c16d 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/negative_scenarios.yml +++ b/tests/integration/targets/nutanix_vpcs/tasks/negative_scenarios.yml @@ -1,80 +1,82 @@ - - debug: - msg: "Started Negative Creation Cases" +--- +- name: Started Negative Creation Cases + ansible.builtin.debug: + msg: "Started Negative Creation Cases" - - name: Unknown subnet name - ntnx_vpcs: - state: present - name: MinVPC - external_subnets: - - subnet_name: "ET_90" - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed==True - - result.msg=="Failed generating vpc spec" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VPC created successfully with unknown subnet name ' +- name: Unknown subnet name + ntnx_vpcs: + state: present + name: MinVPC + external_subnets: + - subnet_name: "ET_90" + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + 
that: + - result.failed==true + - result.msg=="Failed generating vpc spec" + success_msg: " Success: returned error as expected " + fail_msg: " Fail VPC created successfully with unknown subnet name " ####################################################### - - name: Unknown subnet uuid - ntnx_vpcs: - state: present - name: MinVPC - external_subnets: - - subnet_uuid: 57959f75-6b21-431c-b76b-516447d52622 - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed==True - - result.status_code=="500" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VPC created successfully with unknown subnet name ' +- name: Unknown subnet uuid + ntnx_vpcs: + state: present + name: MinVPC + external_subnets: + - subnet_uuid: 57959f75-6b21-431c-b76b-516447d52622 + register: result + ignore_errors: true +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + - result.status_code=="500" + success_msg: " Success: returned error as expected " + fail_msg: " Fail VPC created successfully with unknown subnet name " ####################################################### - - name: Create VPC with same routable_ips - ntnx_vpcs: - state: present - name: vpc_with_routable_ips - routable_ips: - - network_ip: 192.168.0.1 - network_prefix: 24 - - network_ip: 192.168.0.1 - network_prefix: 24 - register: result - ignore_errors: True +- name: Create VPC with same routable_ips + ntnx_vpcs: + state: present + name: vpc_with_routable_ips + routable_ips: + - network_ip: 192.168.0.1 + network_prefix: 24 + - network_ip: 192.168.0.1 + network_prefix: 24 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - - result.status_code=="500" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VPC created successfully with unknown subnet name ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + - 
result.status_code=="500" + success_msg: " Success: returned error as expected " + fail_msg: " Fail VPC created successfully with unknown subnet name " ####################################################### - - name: Delete vpc with unknown uuid - ntnx_vpcs: - state: absent - vpc_uuid: 5 - ignore_errors: True - register: result - - name: Creation Status - assert: - that: - - result.status_code==400 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting vpc with unknown uuid ' +- name: Delete vpc with unknown uuid + ntnx_vpcs: + state: absent + vpc_uuid: 5 + ignore_errors: true + register: result +- name: Creation Status + ansible.builtin.assert: + that: + - result.status_code==400 + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting vpc with unknown uuid " ####################################################### - - name: Delete vpc with missing uuid - ntnx_vpcs: - state: absent - ignore_errors: True - register: result +- name: Delete vpc with missing uuid + ntnx_vpcs: + state: absent + ignore_errors: true + register: result - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting vpc with missing uuid ' +- name: Creation Status + ansible.builtin.assert: + that: + - result.failed==true + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting vpc with missing uuid " diff --git a/tests/integration/targets/nutanix_vpcs_info/tasks/list_vpcs.yml b/tests/integration/targets/nutanix_vpcs_info/tasks/list_vpcs.yml index f21cb3ec2..539f04602 100644 --- a/tests/integration/targets/nutanix_vpcs_info/tasks/list_vpcs.yml +++ b/tests/integration/targets/nutanix_vpcs_info/tasks/list_vpcs.yml @@ -3,10 +3,10 @@ filter: name: "{{ vpc.name }}" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " 
Unable to list vpcs " @@ -18,10 +18,10 @@ offset: 1 check_mode: true register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list vpcs " @@ -32,10 +32,10 @@ sort_order: "DESCENDING" sort_attribute: "name" register: result - ignore_errors: True + ignore_errors: true - name: Listing Status - assert: + ansible.builtin.assert: that: - result.response is defined fail_msg: " Unable to list vpcs " diff --git a/tests/integration/targets/nutanix_vpcs_info/tasks/main.yml b/tests/integration/targets/nutanix_vpcs_info/tasks/main.yml index 549edc380..8dad6362f 100644 --- a/tests/integration/targets/nutanix_vpcs_info/tasks/main.yml +++ b/tests/integration/targets/nutanix_vpcs_info/tasks/main.yml @@ -1,9 +1,11 @@ --- -- module_defaults: +- name: Initializing variables + module_defaults: group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "list_vpcs.yml" + - name: Import list_vpcs.yml + ansible.builtin.import_tasks: "list_vpcs.yml" diff --git a/tests/integration/targets/prepare_env/playbooks/cleanup.yml b/tests/integration/targets/prepare_env/playbooks/cleanup.yml index 1cbf6ea5b..d25ffb7f6 100644 --- a/tests/integration/targets/prepare_env/playbooks/cleanup.yml +++ b/tests/integration/targets/prepare_env/playbooks/cleanup.yml @@ -64,4 +64,4 @@ register: result ansible.builtin.file: path: "{{ disk_image.dest }}" - state: absent \ No newline at end of file + state: absent diff --git a/tests/integration/targets/prepare_env/playbooks/prepare_env.yml b/tests/integration/targets/prepare_env/playbooks/prepare_env.yml index 6bdbd503c..1159d1c79 100644 --- 
a/tests/integration/targets/prepare_env/playbooks/prepare_env.yml +++ b/tests/integration/targets/prepare_env/playbooks/prepare_env.yml @@ -202,11 +202,23 @@ mode: "0644" url: "{{ disk_image.url }}" dest: "{{ disk_image.dest }}" -# # - name: create address group for network security policy related tests -# # nutanix.ncp.ntnx_address_groups: -# # state: present -# # name: dest -# # desc: dest -# # subnets: -# # - network_ip: "10.1.1.0" -# # network_prefix: 24 \ No newline at end of file + # # - name: create address group for network security policy related tests + # # nutanix.ncp.ntnx_address_groups: + # # state: present + # # name: dest + # # desc: dest + # # subnets: + # # - network_ip: "10.1.1.0" + # # network_prefix: 24 + + - name: Downloading xml content for saml tests + ansible.builtin.get_url: + mode: "0644" + url: "{{ xml_content.url }}" + dest: "{{ xml_content.dest }}" + + - name: Downloading unattendxml file for windows tests + ansible.builtin.get_url: + mode: "0644" + url: "{{ unattendxml.url }}" + dest: "{{ unattendxml.dest }}" diff --git a/tests/integration/targets/prepare_env/vars/main.yml b/tests/integration/targets/prepare_env/vars/main.yml index 2fb8d27fd..f311e9ab9 100644 --- a/tests/integration/targets/prepare_env/vars/main.yml +++ b/tests/integration/targets/prepare_env/vars/main.yml @@ -142,4 +142,4 @@ # recovery_ip1: TEST_RECOVERY_IP1 # recovery_ip2: TEST_RECOVERY_IP2 # gateway_ip: TEST_GATEWAY_IP -# prefix: TEST_PREFIX \ No newline at end of file +# prefix: TEST_PREFIX diff --git a/tests/integration/targets/prepare_fc_env/playbooks/prepare_fc_env.yml b/tests/integration/targets/prepare_fc_env/playbooks/prepare_fc_env.yml index 131dead38..3b5f92614 100644 --- a/tests/integration/targets/prepare_fc_env/playbooks/prepare_fc_env.yml +++ b/tests/integration/targets/prepare_fc_env/playbooks/prepare_fc_env.yml @@ -5,4 +5,4 @@ tasks: - name: Include var file - ansible.builtin.include_vars: ../vars/main.yml \ No newline at end of file + 
ansible.builtin.include_vars: ../vars/main.yml diff --git a/tests/integration/targets/prepare_foundation_env/playbooks/cleanup.yml b/tests/integration/targets/prepare_foundation_env/playbooks/cleanup.yml index 5a4d0704c..bc4aefe85 100644 --- a/tests/integration/targets/prepare_foundation_env/playbooks/cleanup.yml +++ b/tests/integration/targets/prepare_foundation_env/playbooks/cleanup.yml @@ -9,4 +9,4 @@ # file: # path: "{{ source }}" # state: absent - # ignore_errors: true \ No newline at end of file + # ignore_errors: true diff --git a/tests/integration/targets/prepare_foundation_env/playbooks/prepare_foundation_env.yml b/tests/integration/targets/prepare_foundation_env/playbooks/prepare_foundation_env.yml index 131dead38..3b5f92614 100644 --- a/tests/integration/targets/prepare_foundation_env/playbooks/prepare_foundation_env.yml +++ b/tests/integration/targets/prepare_foundation_env/playbooks/prepare_foundation_env.yml @@ -5,4 +5,4 @@ tasks: - name: Include var file - ansible.builtin.include_vars: ../vars/main.yml \ No newline at end of file + ansible.builtin.include_vars: ../vars/main.yml diff --git a/tests/integration/targets/prepare_foundation_env/vars/main.yml b/tests/integration/targets/prepare_foundation_env/vars/main.yml index 8abb799a2..5004c9a67 100644 --- a/tests/integration/targets/prepare_foundation_env/vars/main.yml +++ b/tests/integration/targets/prepare_foundation_env/vars/main.yml @@ -92,4 +92,4 @@ # ipmi_netmask: "TEST_IPMI_NETMASK_BMC" # ipmi_gateway: TEST_IPMI_GATEWAY_BMC # ipmi_mac: "TEST_IPMI_MAC" -# ipmi_ip: TEST_IPMI_IP_BMC \ No newline at end of file +# ipmi_ip: TEST_IPMI_IP_BMC diff --git a/tests/integration/targets/prepare_ndb_env/paybooks/prepare_env.yml b/tests/integration/targets/prepare_ndb_env/paybooks/prepare_env.yml index a4f4fb3a3..2d4e34cf9 100644 --- a/tests/integration/targets/prepare_ndb_env/paybooks/prepare_env.yml +++ b/tests/integration/targets/prepare_ndb_env/paybooks/prepare_env.yml @@ -18,4 +18,4 @@ block: | ndb_ip: 
"{{ lookup('env', 'NDB_HOST') }}" ndb_username: "{{ lookup('env', 'NDB_USERNAME') }}" - ndb_password: "{{ lookup('env', 'NDB_PASSWORD') }}" \ No newline at end of file + ndb_password: "{{ lookup('env', 'NDB_PASSWORD') }}" diff --git a/tests/integration/targets/prepare_ndb_env/playbooks/prepare_env.yml b/tests/integration/targets/prepare_ndb_env/playbooks/prepare_env.yml new file mode 100644 index 000000000..2d4e34cf9 --- /dev/null +++ b/tests/integration/targets/prepare_ndb_env/playbooks/prepare_env.yml @@ -0,0 +1,21 @@ +--- +- name: Prepare the environment for ndb + hosts: localhost + gather_facts: false + + tasks: + - name: Include var file + ansible.builtin.include_vars: ../vars/main.yml + - name: Set environment variables + ansible.builtin.set_fact: + ndb_ip: "{{ lookup('env', 'NDB_HOST') }}" + ndb_username: "{{ lookup('env', 'NDB_USERNAME') }}" + ndb_password: "{{ lookup('env', 'NDB_PASSWORD') }}" + - name: Insert credentials block to vars + ansible.builtin.blockinfile: + path: ../vars/main.yml + marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0" + block: | + ndb_ip: "{{ lookup('env', 'NDB_HOST') }}" + ndb_username: "{{ lookup('env', 'NDB_USERNAME') }}" + ndb_password: "{{ lookup('env', 'NDB_PASSWORD') }}" diff --git a/tests/integration/targets/prepare_ndb_env/playbooks/tmp/.gitkeep b/tests/integration/targets/prepare_ndb_env/playbooks/tmp/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/prepare_ndb_env/vars/main.yml b/tests/integration/targets/prepare_ndb_env/vars/main.yml index 12f9a1d2e..32dbe924b 100644 --- a/tests/integration/targets/prepare_ndb_env/vars/main.yml +++ b/tests/integration/targets/prepare_ndb_env/vars/main.yml @@ -181,4 +181,4 @@ # updated_primary_dns: "TEST_UPDATED_PRIMARY_DNS" # updated_secondary_dns: "TEST_UPDATED_SECONDARY_DNS" -# todelete: [] \ No newline at end of file +# todelete: [] diff --git a/tests/unit/plugins/module_utils/test_entity.py 
b/tests/unit/plugins/module_utils/test_entity.py index 419263a2b..f065cfae7 100644 --- a/tests/unit/plugins/module_utils/test_entity.py +++ b/tests/unit/plugins/module_utils/test_entity.py @@ -4,7 +4,7 @@ from base64 import b64encode from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible_collections.nutanix.ncp.plugins.module_utils.entity import Entity +from ansible_collections.nutanix.ncp.plugins.module_utils.v3.entity import Entity from ansible_collections.nutanix.ncp.tests.unit.plugins.modules.utils import ( AnsibleExitJson, AnsibleFailJson,