diff --git a/README.md b/README.md
index 3e59c706a..d32b1714c 100644
--- a/README.md
+++ b/README.md
@@ -260,6 +260,56 @@ We glady welcome contributions from the community. From updating the documentati
 * [Contributing Guide](CONTRIBUTING.md)
 * [Code of Conduct](CODE_OF_CONDUCT.md)
 
+# Testing
+
+## Integration Testing for Nutanix Ansible Modules
+
+To run integration tests for a specific Ansible module, such as the `ntnx_vms` module, follow these steps:
+
+### Prerequisites
+- Ensure you are in the installed collection directory where the module is located. For example:
+`/Users/mac.user1/.ansible/collections/ansible_collections/nutanix/ncp`
+
+### Setting up Variables
+1. Navigate to the `tests/integration/targets` directory within the collection.
+
+2. Define the necessary variables in the feature-specific vars files, such as `tests/integration/targets/prepare_env/vars/main.yml`, `tests/integration/targets/prepare_foundation_env/vars/main.yml`, `tests/integration/targets/prepare_ndb_env/vars/main.yml`, etc.
+
+Note: For Karbon and FC tests, use the PC vars exclusively, as these features rely on the PC setup. Not all variables are mandatory; define only the variables required for the particular feature under test.
+
+3. Run the test setup playbook for the specific feature you intend to test; it creates the entities the tests rely on:
+    - For PC, NDB, and Foundation tests, execute the relevant command:
+      ```bash
+      ansible-playbook prepare_env/tasks/prepare_env.yml
+      ansible-playbook prepare_ndb_env/tasks/prepare_env.yml
+      ansible-playbook prepare_foundation_env/tasks/prepare_foundation_env.yml
+      ```
+
+### Running Integration Tests
+1. Run integration tests for all modules using:
+    ```bash
+    ansible-test integration
+    ```
+
+2. To run integration tests for a specific module:
+    ```bash
+    ansible-test integration module_test_name
+    ```
+    Replace `module_test_name` with the name of the test directory under `tests/integration/targets`.
+
+### Cleanup
+1. After completing the integration tests, perform the cleanup specific to the tested feature:
+    - For PC tests, execute the command:
+      ```bash
+      ansible-playbook prepare_env/tasks/clean_up.yml
+      ```
+    - For Foundation tests, execute the command:
+      ```bash
+      ansible-playbook prepare_foundation_env/tasks/clean_up.yml
+      ```
+
+By following these steps, you can run comprehensive integration tests for the specified Ansible module and leave a clean test environment afterward.
+
 # Examples
 
 ## Playbook for IaaS provisioning on Nutanix
diff --git a/tests/integration/targets/prepare_env/vars/main.yml b/tests/integration/targets/prepare_env/vars/main.yml
index e69de29bb..508f92c27 100644
--- a/tests/integration/targets/prepare_env/vars/main.yml
+++ b/tests/integration/targets/prepare_env/vars/main.yml
@@ -0,0 +1,145 @@
+ip: TEST_PC_IP
+username: TEST_PC_USERNAME
+password: TEST_PC_PASSWORD
+validate_certs: TEST_VALIDATE_CERT_FLAG
+
+# for NKE related tests
+k8s_version: TEST_K8S_VERSION
+host_os: TEST_HOST_OS
+nutanix_cluster_password: TEST_CLUSTER_PASSWORD
+nutanix_cluster_username: TEST_CLUSTER_USERNAME
+cni:
+  node_cidr_mask_size: "TEST_MASK_CIDR"
+  service_ipv4_cidr: "TEST_SERVICE_CIDR"
+  pod_ipv4_cidr: "TEST_POD_CIDR"
+
+# docker registry URL for NKE tests
+url: TEST_REGISTRIES_URL
+
+# cluster reference for several tests
+cluster:
+  name: TEST_CLUSTER_NAME
+  uuid: TEST_CLUSTER_UUID
+
+# for tests like vms, etc.
+network:
+  dhcp:
+    name: TEST_VLAN_NAME
+    uuid: TEST_VLAN_UUID
+  static:
+    ip: TEST_STATIC_IP
+
+vm_owner:
+  name: TEST_OWNER_NAME
+  uuid: TEST_OWNER_UUID
+
+# can be skipped
+todelete: []
+
+# for ntnx_image tests
+disk_image:
+  url: TEST_IMAGE_URL
+  dest: TEST_DEST_PATH
+  checksum: TEST_CHECKSUM
+  centos: "TEST_CENTOS"
+
+# used in test setup for creation of networks and some networking tests
+external_nat_subnets:
+  name: TEST_NAT_NAME
+  vlan_id: TEST_VLAN_ID
+  gateway_ip_address: TEST_GATEWAY_IP
+  network_prefix: TEST_PREFIX
+  network_ip: TEST_NETWORK_IP
+  dhcp:
+    start_address: TEST_START_DHCP
+    end_address: TEST_END_DHCP
+  static:
+    start_address: TEST_START_STATIC
+    end_address: TEST_END_STATIC
+
+# details for resources created during test setup; the same are used in related tests
+vpc_name: TEST_VPC_NAME
+vm_name: TEST_VM_NAME
+static_subnet_name: TEST_STATIC_SUBNET
+overlay_subnet:
+  name: TEST_OVERLAY_NAME
+  network_ip: TEST_OVERLAY_NETWORK_IP
+  network_prefix: TEST_OVERLAY_PREFIX
+  gateway_ip: TEST_OVERLAY_GATEWAY_IP
+  private_ip: TEST_OVERLAY_PRIVATE_IP
+
+# security rule related tests
+quarantine_rule_uuid: TEST_QUARANTINE_UUID
+categories:
+  apptiers:
+    - Default
+    - sub_app_1
+
+# names of images in setup for vms tests
+centos: "TEST_CENTOS"
+ubuntu: "TEST_UBUNTU"
+
+# virtual switch reference primarily for vms tests
+virtual_switch:
+  name: TEST_SWITCH_NAME
+  uuid: TEST_SWITCH_UUID
+
+# storage reference primarily for vms tests
+storage_container:
+  name: TEST_CONTAINER_NAME
+  uuid: TEST_CONTAINER_UUID
+
+# users and user groups from AD for user and user groups tests
+users:
+  - TEST_USER_UUID_1
+  - TEST_USER_UUID_2
+user_groups:
+  - TEST_GROUP_UUID
+
+# project reference for vms tests
+project:
+  name: TEST_PROJECT_NAME
+  uuid: TEST_PROJECT_UUID
+
+# list of roles for advanced projects tests
+roles:
+  - "TEST_ROLES_NAME_1"
+  - "TEST_ROLES_NAME_2"
+  - "TEST_ROLES_NAME_3"
+  - "TEST_ROLES_NAME_4"
+
+# account reference for advanced projects related tests
+accounts:
+  - name: "NTNX_LOCAL_AZ"
+    uuid: "TEST_ACCOUNT_UUID"
+
+# user and user group for projects tests when new users are to be added
+new_user: "TEST_NEW_USER_EMAIL"
+new_user_group: "TEST_NEW_USER_GROUP_DN"
+
+# ACP tests related vars
+acp:
+  role:
+    name: "Project Admin"
+    uuid: TEST_ROLE_UUID
+  user_uuid: TEST_USER_UUID
+  user_group_uuid: TEST_GROUP_UUID
+
+# auth details for projects, ACPs, user and user groups related tests
+distinguished_name: "TEST_DISTINGUISHED_NAME"
+principal_name: "TEST_PRINCIPAL_NAME"
+directory_service_uuid: "TEST_DIRECTORY_UUID"
+identity_provider_uuid: "TEST_IDENTITY_UUID"
+
+# VM created during test setup for DR tests
+dr_vm_name: TEST_VM_NAME
+
+# DR related test vars
+dr:
+  primary_az_url: TEST_PRIMARY_URL
+  recovery_az_url: TEST_RECOVERY_URL
+  recovery_site_network: "TEST_RECOVERY_NETWORK"
+  recovery_ip1: TEST_RECOVERY_IP1
+  recovery_ip2: TEST_RECOVERY_IP2
+  gateway_ip: TEST_GATEWAY_IP
+  prefix: TEST_PREFIX
diff --git a/tests/integration/targets/prepare_foundation_env/vars/main.yml b/tests/integration/targets/prepare_foundation_env/vars/main.yml
new file mode 100644
index 000000000..268a046dc
--- /dev/null
+++ b/tests/integration/targets/prepare_foundation_env/vars/main.yml
@@ -0,0 +1,95 @@
+# foundation host to be used in tests
+foundation_host: "TEST_FOUNDATION_HOST"
+
+# the local directory where the test setup downloads the NOS image, which the image upload tests then pick up
+source: 'TEST_SOURCE_PATH'
+
+# NOS image URL for image tests
+image_url: "TEST_IMAGE_URL"
+
+# NOS package name
+nos_package: "TEST_NOS_PACKAGE"
+
+# common details for all nodes used under test
+cvm_gateway: "TEST_CVM_GATEWAY"
+cvm_netmask: "TEST_CVM_NETMASK"
+hypervisor_gateway: "TEST_HYPERVISOR_GATEWAY"
+hypervisor_netmask: "TEST_HYPERVISOR_NETMASK"
+default_ipmi_user: "TEST_IPMI_USER"
+
+# IBIS_node is just a random name we used for our tests
+# these nodes were used for tests related to bare-metal, DOS- and AOS-installed nodes.
+IBIS_node:
+  block_id: "TEST_BLOCK_ID"
+  node1:
+    node_serial: "TEST_NODE_SERIAL_1"
+    hypervisor_hostname: "TEST_HYPERVISOR_HOSTNAME_1"
+    cvm_ip: TEST_CVM_IP_1
+    hypervisor_ip: TEST_HYPERVISOR_IP_1
+    node_position: "TEST_NODE_POSITION_1"
+    hypervisor: "TEST_HYPERVISOR_1"
+    ipmi_password: "TEST_IPMI_PASSWORD_1"
+    ipmi_ip: TEST_IPMI_IP_1
+    ipmi_netmask: "TEST_IPMI_NETMASK_1"
+    ipmi_gateway: TEST_IPMI_GATEWAY_1
+  node2:
+    node_serial: "TEST_NODE_SERIAL_2"
+    hypervisor_hostname: "TEST_HYPERVISOR_HOSTNAME_2"
+    cvm_ip: TEST_CVM_IP_2
+    hypervisor_ip: TEST_HYPERVISOR_IP_2
+    node_position: "TEST_NODE_POSITION_2"
+    hypervisor: "TEST_HYPERVISOR_2"
+    ipmi_password: "TEST_IPMI_PASSWORD_2"
+    ipmi_ip: TEST_IPMI_IP_2
+  node3:
+    node_serial: "TEST_NODE_SERIAL_3"
+    hypervisor_hostname: "TEST_HYPERVISOR_HOSTNAME_3"
+    cvm_ip: TEST_CVM_IP_3
+    hypervisor_ip: TEST_HYPERVISOR_IP_3
+    node_position: "TEST_NODE_POSITION_3"
+    hypervisor: "TEST_HYPERVISOR_3"
+    ipmi_password: "TEST_IPMI_PASSWORD_3"
+    ipmi_ip: TEST_IPMI_IP_3
+  node4:
+    node_serial: "TEST_NODE_SERIAL_4"
+    hypervisor_hostname: "TEST_HYPERVISOR_HOSTNAME_4"
+    cvm_ip: TEST_CVM_IP_4
+    hypervisor_ip: TEST_HYPERVISOR_IP_4
+    node_position: "TEST_NODE_POSITION_4"
+    hypervisor: "TEST_HYPERVISOR_4"
+    ipmi_password: "TEST_IPMI_PASSWORD_4"
+    ipmi_ip: TEST_IPMI_IP_4
+
+# we used these nodes for our sanity tests.
+nodes:
+  current_cvm_vlan_tag: "TEST_CVM_VLAN_TAG"
+  block_id: "TEST_BLOCK_ID_NODES"
+  node1:
+    cvm_ip: TEST_CVM_IP_NODE1
+    hypervisor: "TEST_HYPERVISOR_NODE1"
+    hypervisor_ip: TEST_HYPERVISOR_IP_NODE1
+    node_position: "TEST_NODE_POSITION_NODE1"
+    hypervisor_hostname: "TEST_HYPERVISOR_HOSTNAME_NODE1"
+    node_serial: "TEST_NODE_SERIAL_NODE1"
+    ipmi_password: "TEST_IPMI_PASSWORD_NODE1"
+    ipmi_ip: TEST_IPMI_IP_NODE1
+    ipmi_netmask: "TEST_IPMI_NETMASK_NODE1"
+    ipmi_gateway: TEST_IPMI_GATEWAY_NODE1
+  ntp_servers:
+    - "TEST_NTP_SERVER_1"
+    - "TEST_NTP_SERVER_2"
+  dns_servers:
+    - "TEST_DNS_SERVER_1"
+    - "TEST_DNS_SERVER_2"
+  backplane_vlan: "TEST_BACKPLANE_VLAN"
+  backplane_subnet: "TEST_BACKPLANE_SUBNET"
+  backplane_netmask: "TEST_BACKPLANE_NETMASK"
+
+# this is for BMC IPMI config related tests
+bmc:
+  ipmi_user: "TEST_IPMI_USER_BMC"
+  ipmi_password: "TEST_IPMI_PASSWORD_BMC"
+  ipmi_netmask: "TEST_IPMI_NETMASK_BMC"
+  ipmi_gateway: TEST_IPMI_GATEWAY_BMC
+  ipmi_mac: "TEST_IPMI_MAC"
+  ipmi_ip: TEST_IPMI_IP_BMC
diff --git a/tests/integration/targets/prepare_ndb_env/vars/main.yml b/tests/integration/targets/prepare_ndb_env/vars/main.yml
index e69de29bb..bc3e181b2 100644
--- a/tests/integration/targets/prepare_ndb_env/vars/main.yml
+++ b/tests/integration/targets/prepare_ndb_env/vars/main.yml
@@ -0,0 +1,184 @@
+# details of the NDB setup to be used in tests
+ndb_ip: "NDB_HOST_IP"
+ndb_username: "NDB_USERNAME"
+ndb_password: "NDB_PASSWORD"
+
+# network profiles for various NDB tests. All vars refer to the NDB setup.
+network_profile:
+  name: "TEST_NETWORK_PROFILE_NAME"
+  uuid: "TEST_NETWORK_PROFILE_UUID"
+  single:
+    cluster:
+      name: "TEST_CLUSTER_NAME"
+      vlan_name: "TEST_VLAN_NAME"
+  HA:
+    cluster1:
+      name: "TEST_CLUSTER1_NAME"
+      vlan_name: "TEST_VLAN_NAME"
+      vlan_name2: "TEST_VLAN_NAME"
+    cluster2:
+      name: "TEST_CLUSTER2_NAME"
+      vlan_name: "TEST_VLAN_NAME"
+      vlan_name2: "TEST_VLAN_NAME"
+
+# profiles for single-node Postgres database instances
+software_profile:
+  name: "TEST_SOFTWARE_PROFILE_NAME"
+  uuid: "TEST_SOFTWARE_PROFILE_UUID"
+  latest_version_id: "TEST_LATEST_VERSION_ID"
+
+compute_profile:
+  name: "TEST_COMPUTE_PROFILE_NAME"
+  uuid: "TEST_COMPUTE_PROFILE_UUID"
+
+db_params_profile:
+  name: "TEST_DB_PARAMS_PROFILE_NAME"
+  uuid: "TEST_DB_PARAMS_PROFILE_UUID"
+
+static_network_profile:
+  name: "TEST_STATIC_NETWORK_PROFILE_NAME"
+  uuid: "TEST_STATIC_NETWORK_PROFILE_UUID"
+
+# for Postgres HA related tests, which require different profiles than the single-node ones.
+postgres_ha_profiles:
+  static_network_profile:
+    name: "TEST_STATIC_HA_STATIC_NAME"
+    uuid: "TEST_STATIC_HA_STATIC_UUID"
+
+  multicluster_network_profile:
+    name: "TEST_HA_MULTICLUSTER_NAME"
+    uuid: "TEST_HA_MULTICLUSTER_UUID"
+
+  software_profile:
+    name: "TEST_POSTGRES_SOFTWARE_PROFILE_NAME"
+    uuid: "TEST_POSTGRES_SOFTWARE_PROFILE_UUID"
+    latest_version_id: "TEST_LATEST_VERSION_ID"
+
+  compute_profile:
+    name: "TEST_COMPUTE_PROFILE_NAME"
+    uuid: "TEST_COMPUTE_PROFILE_UUID"
+
+  db_params_profile:
+    name: "TEST_POSTGRES_HA_PARAMS_NAME"
+    uuid: "TEST_POSTGRES_HA_PARAMS_UUID"
+
+public_ssh_key: "TEST_PUBLIC_SSH_KEY"
+
+# reference to an existing maintenance window to be attached to certain entities, such as database instances
+maintenance:
+  window_name: "TEST_MAINTENANCE_WINDOW_NAME"
+  window_uuid: "TEST_MAINTENANCE_WINDOW_UUID"
+
+# reference to an existing SLA to be used by certain entities, such as database instances
+sla:
+  name: "TEST_SLA_NAME"
+  uuid: "TEST_SLA_UUID"
+
+# reference to another existing SLA to be used by certain entities, such as database instances
+sla2:
+  name: "TEST_SLA2_NAME"
+  uuid: "TEST_SLA2_UUID"
+
+# references to clusters for various tests
+# cluster3 here is used for cluster CRUD tests
+cluster:
+  cluster1:
+    name: "TEST_CLUSTER1_NAME"
+    uuid: "TEST_CLUSTER1_UUID"
+  cluster2:
+    name: "TEST_CLUSTER2_NAME"
+    uuid: "TEST_CLUSTER2_UUID"
+  cluster3:
+    name: "TEST_CLUSTER3_NAME"
+    uuid: ""
+    desc: "TEST_CLUSTER3_DESC"
+    name_prefix: "TEST_CLUSTER3_NAME_PREFIX"
+    cluster_ip: "TEST_CLUSTER3_IP"
+    update_cluster_ip: "TEST_UPDATE_CLUSTER_IP"
+    cluster_credentials:
+      username: "TEST_CLUSTER_CREDENTIALS_USERNAME"
+      password: "TEST_CLUSTER_CREDENTIALS_PASSWORD"
+    agent_network:
+      dns_servers:
+        - "TEST_DNS_SERVER_1"
+        - "TEST_DNS_SERVER_2"
+      ntp_servers:
+        - "TEST_NTP_SERVER_1"
+        - "TEST_NTP_SERVER_2"
+        - "TEST_NTP_SERVER_3"
+        - "TEST_NTP_SERVER_4"
+    vlan_access:
+      prism_vlan:
+        vlan_name: "TEST_PRISM_VLAN_NAME"
+        vlan_type: "TEST_VLAN_TYPE"
+        static_ip: "TEST_STATIC_IP"
+        gateway: "TEST_GATEWAY"
+        subnet_mask: "TEST_SUBNET_MASK"
+    storage_container: "TEST_STORAGE_CONTAINER"
+
+# free IPs from cluster.cluster1 for HA related tests
+cluster_ips:
+  ip1: "TEST_IP_1"
+  ip2: "TEST_IP_2"
+  ip3: "TEST_IP_3"
+  ip4: "TEST_IP_4"
+  ip5: "TEST_IP_5"
+  vip: "TEST_VIP"
+
+# for new db server vms
+vm_password: "TEST_VM_PASSWORD"
+vm_username: "TEST_VM_USERNAME"
+
+# existing db server VM reference for software profile tests
+db_server_vm:
+  name: "TEST_DB_SERVER_VM_NAME"
+  uuid: "TEST_DB_SERVER_VM_UUID"
+
+# existing tags for various entities
+tags:
+  db_server_vm:
+    name: "TEST_DB_SERVER_VM_TAGS_NAME"
+    uuid: "TEST_DB_SERVER_VM_TAGS_UUID"
+  databases:
+    name: "TEST_DATABASES_NAME"
+    uuid: "TEST_DATABASES_UUID"
+  clones:
+    name: "TEST_CLONES_NAME"
+    uuid: "TEST_CLONES_UUID"
+
+# existing time machine info for data access management and snapshot related tests
+time_machine:
+  name: "TEST_TIME_MACHINE_NAME"
+  uuid: "TEST_TIME_MACHINE_UUID"
+  snapshot_uuid: ""
+
+# for brownfield import of vms
+postgres:
+  software_home: "/usr/pgsql-10.4"
+
+# IP for creating a database VM using a static network
+vm_ip: "TEST_VM_IP"
+
+# info for VLAN related tests
+ndb_vlan:
+  name: "TEST_VLAN_IN_CLUSTER"
+  ip_pools:
+    - start_ip: "TEST_START_IP_1"
+      end_ip: "TEST_END_IP_1"
+    - start_ip: "TEST_START_IP_2"
+      end_ip: "TEST_END_IP_2"
+    - start_ip: "TEST_START_IP_3"
+      end_ip: "TEST_END_IP_3"
+    - start_ip: "TEST_START_IP_4"
+      end_ip: "TEST_END_IP_4"
+  gateway: "TEST_GATEWAY"
+  subnet_mask: "TEST_SUBNET_MASK"
+  primary_dns: "TEST_PRIMARY_DNS"
+  secondary_dns: "TEST_SECONDARY_DNS"
+  dns_domain: "TEST_DNS_DOMAIN"
+  updated_gateway: "TEST_UPDATED_GATEWAY"
+  updated_subnet_mask: "TEST_UPDATED_SUBNET_MASK"
+  updated_primary_dns: "TEST_UPDATED_PRIMARY_DNS"
+  updated_secondary_dns: "TEST_UPDATED_SECONDARY_DNS"
+
+todelete: []
\ No newline at end of file
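
For orientation, here is a minimal, purely illustrative sketch of how a few of the `prepare_env` placeholders above might be filled before running, for example, `ansible-test integration ntnx_vms` from the collection root. Every value below is made up for the example; substitute the details of your own Prism Central setup.

```yaml
# tests/integration/targets/prepare_env/vars/main.yml -- illustrative values only
ip: 10.10.10.10                  # TEST_PC_IP: Prism Central address
username: admin                  # TEST_PC_USERNAME
password: example-password       # TEST_PC_PASSWORD
validate_certs: false            # TEST_VALIDATE_CERT_FLAG

# cluster reference for several tests
cluster:
  name: example-cluster          # TEST_CLUSTER_NAME
  uuid: 00000000-0000-0000-0000-000000000000   # TEST_CLUSTER_UUID

# for tests like vms, etc.
network:
  dhcp:
    name: example-dhcp-vlan      # TEST_VLAN_NAME
    uuid: 00000000-0000-0000-0000-000000000001  # TEST_VLAN_UUID
  static:
    ip: 10.10.10.50              # TEST_STATIC_IP

todelete: []
```

Only the variables consumed by the targets you actually run need real values; unused placeholders can stay as they are.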