From e6aa9be0bc8b106d165dc15165d7f5dae17cd6ae Mon Sep 17 00:00:00 2001
From: Satheesh Rajendran
Date: Wed, 20 Jan 2021 20:39:31 +0530
Subject: [PATCH 1/3] Add multivm fusedcore test conf

Add a multivm fusedcore KVM test configuration file for libvirt guests.

Signed-off-by: Satheesh Rajendran
---
 config/tests/guest/libvirt/fusedcore.cfg | 146 +++++++++++++++++++++++
 1 file changed, 146 insertions(+)
 create mode 100644 config/tests/guest/libvirt/fusedcore.cfg

diff --git a/config/tests/guest/libvirt/fusedcore.cfg b/config/tests/guest/libvirt/fusedcore.cfg
new file mode 100644
index 0000000..7465cf2
--- /dev/null
+++ b/config/tests/guest/libvirt/fusedcore.cfg
@@ -0,0 +1,146 @@
+include tests-shared.cfg
+username = root
+password = 123456
+test_timeout=360000
+main_vm = virt-tests-vm1
+vms = virt-tests-vm1
+nettype = bridge
+netdst = virbr0
+# Using text mode of installation
+display = 'nographic'
+take_regular_screendumps = no
+keep_screendumps_on_error = no
+keep_screendumps = no
+vm_info_delay = 60
+store_vm_info = yes
+vm_info_cmds = "registers,pic"
+virt_install_binary = /usr/bin/virt-install
+qemu_img_binary = /usr/bin/qemu-img
+hvm_or_pv = hvm
+machine_type = pseries
+only bridge
+no xen, lxc, esx, ovmf
+# Filter out unwanted disk types
+no ide,xenblk,lsi_scsi,ahci,sd
+no qed,qcow2v3,raw_dd,vmdk,usb2
+no e1000-82540em,e1000-82545em,e1000-82544gc,xennet,nic_custom
+only no_virtio_rng
+only smp2
+only no_9p_export
+only no_pci_assignable
+only (image_backend = filesystem)
+only smallpages
+vcpu_maxcpus = 16
+vcpu_threads = 1
+vcpu_cores = 16
+smp = 16
+mem = 4096
+sockets = 1
+vcpu_sockets = 1
+setvcpus_max = 16
+only virtio_net
+only virtio_scsi
+only qcow2
+main_vm = vm1
+create_vm_libvirt = yes
+kill_vm_libvirt = yes
+env_cleanup = yes
+backup_image_before_testing = no
+restore_image_after_testing = no
+vms = "vm1 vm2 vm3 vm5 vm6 vm7"
+# xics, hash, smt8, power9 and power10 guests removed for now
+#vms = "vm1 vm2 vm3 vm4 vm5 vm6 vm7 vm8"
+host_stress_args = "--cpu 60 --io 6 --vm 6 --vm-bytes 256M --timeout 7000s"
+qemu_binary=/home/sath/qemu/build/qemu-system-ppc64
+emulator_path=/home/sath/qemu/build/qemu-system-ppc64
+kernel=/home/sath/linux/vmlinux
+initrd=''
+kernel_args='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0 powersave=off'
+variants:
+    - mixedmode_multivm:
+        kernel_args_vm3='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0 disable_radix powersave=off'
+        kernel_args_vm4='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0 disable_radix powersave=off'
+        kernel_args_vm7='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0 disable_radix powersave=off'
+        kernel_args_vm8='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0 disable_radix powersave=off'
+        virtinstall_qemu_cmdline_vm1 = " -M pseries,vsmt=1,ic-mode=xics,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm2 = " -M pseries,vsmt=2,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm3 = " -M pseries,vsmt=4,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm4 = " -M pseries,vsmt=8,ic-mode=xics,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm5 = " -M pseries,vsmt=1,max-cpu-compat=power9,ic-mode=xics,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm6 = " -M pseries,vsmt=2,max-cpu-compat=power9,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm7 = " -M pseries,vsmt=4,max-cpu-compat=power9,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        virtinstall_qemu_cmdline_vm8 = " -M pseries,max-cpu-compat=power9,ic-mode=xics,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        stress_args = '--cpu 12 --io 15 --vm 4 --vm-bytes 512M --timeout 7200s'
+        host_stress = no
+        only multivm_cpustress.custom_host_events.custom_vm_events
+        stress_itrs = 50
+        guest_stress = yes
+        cores_vm2 = 8
+        threads_vm2 = 2
+        vcpu_cores_vm2 = 8
+        vcpu_threads_vm2 = 2
+        cores_vm3 = 4
+        threads_vm3 = 4
+        vcpu_cores_vm3 = 4
+        vcpu_threads_vm3 = 4
+        cores_vm4 = 2
+        threads_vm4 = 8
+        vcpu_cores_vm4 = 2
+        vcpu_threads_vm4 = 8
+        cores_vm6 = 8
+        threads_vm6 = 2
+        vcpu_cores_vm6 = 8
+        vcpu_threads_vm6 = 2
+        cores_vm7 = 4
+        threads_vm7 = 4
+        vcpu_cores_vm7 = 4
+        vcpu_threads_vm7 = 4
+        cores_vm8 = 2
+        threads_vm8 = 8
+        vcpu_cores_vm8 = 2
+        vcpu_threads_vm8 = 8
+        variants:
+            - only_vcpuhotplug:
+                stress_events = "cpuhotplug"
+                smp = 8
+            - only_suspend:
+                stress_events = "suspend"
+            - guestreboot:
+                mem = 2048
+                stress_events = "reboot"
+                guest_stress = no
+                stress_itrs = 100
+            - only_vcpupin:
+                stress_events = "vcpupin,emulatorpin"
+            - pin_suspend:
+                stress_events = "vcpupin,emulatorpin,suspend"
+            - pin_vcpuhotplug:
+                stress_events = "vcpupin,emulatorpin,cpuhotplug"
+                smp = 8
+
+
+    - default_multivm:
+        virtinstall_qemu_cmdline = " -M pseries,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off"
+        host_stress = no
+        stress_args = '--cpu 12 --io 15 --vm 5 --vm-bytes 512M --timeout 7200s'
+        only multivm_cpustress.with_hoststress.without_hoststress_events.custom_vm_events
+        stress_itrs = 50
+        variants:
+            - only_vcpuhotplug:
+                stress_events = "cpuhotplug"
+                smp = 8
+            - only_suspend:
+                stress_events = "suspend"
+            - guestreboot:
+                mem = 2048
+                stress_events = "reboot"
+                guest_stress = no
+                stress_itrs = 100
+            - only_vcpupin:
+                stress_events = "vcpupin,emulatorpin"
+            - pin_suspend:
+                stress_events = "vcpupin,emulatorpin,suspend"
+            - pin_vcpuhotplug:
+                stress_events = "vcpupin,emulatorpin,cpuhotplug"
+                smp = 8
+

From 1cb070231db641c136b45959984fd444ab21418e Mon Sep 17 00:00:00 2001
From: Satheesh Rajendran
Date: Wed, 20 Jan 2021 20:48:27 +0530
Subject: [PATCH 2/3] Add multivm test config for secure kvm guests

Add a multivm test config for secure (PEF) kvm guests.

Signed-off-by: Satheesh Rajendran
---
 config/tests/guest/libvirt/pef.cfg | 276 +++++++++++++++++++++++++++++
 1 file changed, 276 insertions(+)
 create mode 100644 config/tests/guest/libvirt/pef.cfg

diff --git a/config/tests/guest/libvirt/pef.cfg b/config/tests/guest/libvirt/pef.cfg
new file mode 100644
index 0000000..5faee65
--- /dev/null
+++ b/config/tests/guest/libvirt/pef.cfg
@@ -0,0 +1,276 @@
+include tests-shared.cfg
+username = root
+password = 123456
+main_vm = vm1
+vms = vm1
+vm_type = libvirt
+virt_install_binary = /usr/bin/virt-install
+qemu_img_binary = /usr/bin/qemu-img
+hvm_or_pv = hvm
+machine_type = pseries
+use_os_variant = no
+use_os_type = no
+only bridge
+only smp2
+only no_9p_export
+only no_virtio_rng
+only no_pci_assignable
+only (image_backend=filesystem)
+only smallpages
+kill_vm = yes
+kill_vm_libvirt = yes
+env_cleanup = yes
+create_vm_libvirt = yes
+vga = None
+display = nographic
+take_regular_screendumps = no
+backup_image_before_testing = no
+avocado_reinstall = False
+test_timeout = 3600000
+only qcow2
+only virtio_scsi
+only virtio_net
+mem = 8192
+qemu_binary = /usr/share/avocado-plugins-vt/bin/install_root/bin/qemu-system-ppc64
+emulator_path = /usr/share/avocado-plugins-vt/bin/install_root/bin/qemu-system-ppc64
+bios_path = /usr/share/avocado-plugins-vt/bin/install_root/share/qemu/slof.bin
+
+variants:
+    - secure:
+        # this guest image has a modified initrd built using svm-tools
+        # and the additional kernel cmdline options svm=on xive=off to enable
+        # svm functionality.
+        image_name = images/rhel8-devel-ppc64le-svm
+        # https://bugzilla.linux.ibm.com/show_bug.cgi?id=186761
+        tpm_device_path = "/dev/tpmrm0"
+        tpm_model = "spapr-tpm-proxy"
+        virtinstall_qemu_cmdline=" -M pseries,x-svm-allowed=on -global virtio-scsi-pci.disable-legacy=on -global virtio-scsi-pci.disable-modern=off -global virtio-scsi-pci.iommu_platform=on -global virtio-blk-pci.disable-legacy=on -global virtio-blk-pci.disable-modern=off -global virtio-blk-pci.iommu_platform=on -global virtio-net-pci.disable-legacy=on -global virtio-net-pci.disable-modern=off -global virtio-net-pci.iommu_platform=on -global virtio-serial-pci.disable-legacy=on -global virtio-serial-pci.disable-modern=off -global virtio-serial-pci.iommu_platform=on -global virtio-balloon-pci.disable-legacy=on -global virtio-balloon-pci.disable-modern=off -global virtio-balloon-pci.iommu_platform=on"
+        smp = 32
+        mem = 81920
+        vcpu_cores = 32
+        vcpu_threads = 1
+        vcpu_sockets = 1
+        vcpu_maxcpus = 32
+        variants:
+            - singlevm:
+                variants:
+                    - reboottest:
+                        only io-github-autotest-qemu.reboot
+                        reboot_count = 100
+                        variants:
+                            - 512M:
+                                smp = 1
+                                vcpu_maxcpus = 1
+                                vcpu_threads = 1
+                                vcpu_maxcpus = 1
+                                vcpu_cores = 1
+                                mem = 512
+                            - 1G:
+                                mem = 1024
+                            - 2G:
+                                mem = 2048
+                            - 4G:
+                                mem = 4096
+                            - 8G:
+                                mem = 8192
+                            - 16G:
+                                mem = 16384
+                            - 32G:
+                                vcpu_cores = 16
+                                smp = 16
+                                vcpu_maxcpus = 16
+                                mem = 32768
+                            - 64G:
+                                vcpu_cores = 16
+                                smp = 16
+                                vcpu_maxcpus = 16
+                                mem = 65536
+                                login_timeout = 450
+                            - 128G:
+                                vcpu_cores = 32
+                                smp = 32
+                                vcpu_maxcpus = 32
+                                mem = 131072
+                                login_timeout = 660
+
+                    - vcpuhotplug:
+                        start_vm = yes
+                        test_itr = 2
+                        vcpu_max_timeout = 480
+                        vcpu_sockets = 1
+                        vcpu_maxcpus = 64
+                        topology_correction = "no"
+                        variants:
+                            - 1thread:
+                                vcpu_cores = 64
+                                smp = 1
+                                vcpu_threads = 1
+                                vcpu_current_num = 1
+                                vcpu_plug_num = 64
+                                vcpu_unplug_num = 1
+                                vcpu_max_num = 64
+                                only libvirt_vcpu_plug_unplug..with_maxvcpu
+                            - 2threads:
+                                smp = 2
+                                vcpu_cores = 32
+                                vcpu_threads = 2
+                                vcpu_current_num = 2
+                                vcpu_plug_num = 64
+                                vcpu_unplug_num = 2
+                                vcpu_max_num = 64
+                                only libvirt_vcpu_plug_unplug..with_maxvcpu
+                            - 4threads:
+                                smp = 4
+                                vcpu_cores = 16
+                                vcpu_threads = 4
+                                vcpu_current_num = 4
+                                vcpu_plug_num = 64
+                                vcpu_unplug_num = 4
+                                vcpu_max_num = 64
+                                only libvirt_vcpu_plug_unplug..with_maxvcpu
+                            - 8threads:
+                                smp = 8
+                                vcpu_cores = 8
+                                vcpu_threads = 8
+                                vcpu_current_num = 8
+                                vcpu_plug_num = 64
+                                vcpu_unplug_num = 8
+                                vcpu_max_num = 64
+                                only libvirt_vcpu_plug_unplug..with_maxvcpu
+                    - memoryhotplug:
+                        attach_times = 15
+                        only libvirt_mem.positive_test.memory.hot.unplug.max_slots.with_rand_reboot
+                    - guestdump:
+                        only virsh.dump.positive_test.non_acl.live_dump
+                    - diskhotplug:
+                        at_dt_disk_device_target = sdb
+                        only virsh.attach_detach_disk_matrix..at_option_live
+                        no dt_option_live_config,dt_option_config,dt_option_default,at_okay_dt_error,at_error_dt_error,pre_vm_state_shutoff,pre_vm_state_paused,pre_vm_state_transient,dt_option_current
+                    - guesttests:
+                        variants:
+                            - memory:
+                                mem = 8192
+                                only avocado_guest.memhotplug,avocado_guest.eatmemory,avocado_guest.memintegrity
+                            - generic:
+                                test_timeout = 600000
+                                only avocado_guest.ltp
+                                # kselftest needs a src rpm for the distro
+                                #only avocado_guest.ltp,avocado_guest.kselftest
+                            - cpu:
+                                vcpu_cores = 4
+                                vcpu_threads = 8
+                                vcpu_sockets = 1
+                                vcpu_maxcpus = 32
+                                only avocado_guest.ebizzy,avocado_guest.ppc64_cpu_test
+                            - io:
+                                vcpu_cores = 16
+                                vcpu_threads = 2
+                                vcpu_sockets = 1
+                                vcpu_maxcpus = 32
+                                only avocado_guest.fiotest
+
+            - multivm:
+                # this has to be set based on the host SVM memory; in this case the host has 512G of SVM memory
+                #mem = 4096
+                mem = 8192
+                #mem = 12288
+                #mem = 65536
+                login_timeout = 500
+                vms = "vm1 vm2 vm3 vm4"
+                vms = "vm1"
+                drive_format_vm2=virtio
+                scsi_hba_vm2=""
+                libvirt_controller_vm2=""
+                smp_vm1 = 32
+                vcpu_cores_vm1 = 32
+                vcpu_threads_vm1 = 1
+                vcpu_sockets_vm1 = 1
+                vcpu_maxcpus_vm1 = 32
+                smp_vm2 = 32
+                vcpu_cores_vm2 = 16
+                vcpu_threads_vm2 = 2
+                vcpu_sockets_vm2 = 1
+                vcpu_maxcpus_vm2 = 32
+                smp_vm3 = 32
+                vcpu_cores_vm3 = 8
+                vcpu_threads_vm3 = 4
+                vcpu_sockets_vm3 = 1
+                vcpu_maxcpus_vm3 = 32
+                smp_vm4 = 32
+                vcpu_cores_vm4 = 4
+                vcpu_threads_vm4 = 8
+                vcpu_sockets_vm4 = 1
+                vcpu_maxcpus_vm4 = 32
+                reboot_count = 6
+                variants:
+                    - reboottest:
+                        guest_stress = no
+                        host_stress = no
+                        host_stress_events = ""
+                        stress_events = "reboot"
+                        stress_itrs = 100
+                        test_timeout = 1800000
+                        event_sleep_time = 30
+                        itr_sleep_time = 30
+                        only multivm_cpustress.custom_host_events.custom_vm_events
+                    - cpupin_withstress:
+                        guest_stress = yes
+                        host_stress = no
+                        host_stress_events = ""
+                        stress_events = "vcpupin,emulatorpin"
+                        stress_itrs = 500
+                        test_timeout = 6000000
+                        event_sleep_time = 30
+                        itr_sleep_time = 30
+                        only multivm_cpustress.custom_host_events.custom_vm_events
+
+    - nonsecure:
+        image_name = images/rhel8-devel-ppc64le
+        virtinstall_qemu_cmdline=" -M pseries,x-svm-allowed=off -global virtio-scsi-pci.disable-legacy=on -global virtio-scsi-pci.disable-modern=off -global virtio-scsi-pci.iommu_platform=on -global virtio-blk-pci.disable-legacy=on -global virtio-blk-pci.disable-modern=off -global virtio-blk-pci.iommu_platform=on -global virtio-net-pci.disable-legacy=on -global virtio-net-pci.disable-modern=off -global virtio-net-pci.iommu_platform=on -global virtio-serial-pci.disable-legacy=on -global virtio-serial-pci.disable-modern=off -global virtio-serial-pci.iommu_platform=on -global virtio-balloon-pci.disable-legacy=on -global virtio-balloon-pci.disable-modern=off -global virtio-balloon-pci.iommu_platform=on"
+        smp = 80
+        mem = 81920
+        vcpu_cores = 80
+        vcpu_threads = 1
+        vcpu_sockets = 1
+        vcpu_maxcpus = 80
+        variants:
+            - singlevm:
+                variants:
+                    - reboottest:
+                        only io-github-autotest-qemu.reboot
+                        reboot_count = 6
+            - multivm:
+                mem = 8192
+                vms = "vm1 vm2 vm3 vm4"
+                smp_vm1 = 16
+                vcpu_cores_vm1 = 16
+                vcpu_threads_vm1 = 1
+                vcpu_sockets_vm1 = 1
+                vcpu_maxcpus_vm1 = 16
+                smp_vm2 = 16
+                vcpu_cores_vm2 = 8
+                vcpu_threads_vm2 = 2
+                vcpu_sockets_vm2 = 1
+                vcpu_maxcpus_vm2 = 16
+                smp_vm3 = 16
+                vcpu_cores_vm3 = 4
+                vcpu_threads_vm3 = 4
+                vcpu_sockets_vm3 = 1
+                vcpu_maxcpus_vm3 = 16
+                smp_vm4 = 16
+                vcpu_cores_vm4 = 2
+                vcpu_threads_vm4 = 8
+                vcpu_sockets_vm4 = 1
+                vcpu_maxcpus_vm4 = 16
+                variants:
+                    - reboottest:
+                        guest_stress = no
+                        host_stress = no
+                        host_stress_events = ""
+                        stress_events = "reboot"
+                        stress_itrs = 6
+                        test_timeout = 6000000
+                        event_sleep_time = 30
+                        itr_sleep_time = 30
+                        only multivm_cpustress.custom_host_events.custom_vm_events
+

From 99af4377270fc2ced7627c0d2b9db84993f48660 Mon Sep 17 00:00:00 2001
From: Satheesh Rajendran
Date: Wed, 20 Jan 2021 20:50:23 +0530
Subject: [PATCH 3/3] Add test config for nested kvm guests

Add test config for nested kvm (L2) guests.

Signed-off-by: Satheesh Rajendran
---
 config/tests/guest/libvirt/nested.cfg | 286 ++++++++++++++++++++++++++
 1 file changed, 286 insertions(+)
 create mode 100644 config/tests/guest/libvirt/nested.cfg

diff --git a/config/tests/guest/libvirt/nested.cfg b/config/tests/guest/libvirt/nested.cfg
new file mode 100644
index 0000000..bd1bd9f
--- /dev/null
+++ b/config/tests/guest/libvirt/nested.cfg
@@ -0,0 +1,286 @@
+include tests-shared.cfg
+username = root
+password = 123456
+main_vm = vm1
+vms = vm1
+nettype = bridge
+netdst=virbr0
+display = nographic
+take_regular_screendumps = no
+keep_screendumps_on_error = no
+keep_screendumps = no
+store_vm_register = no
+restore_image_after_testing=no
+vga=none
+virt_install_binary = /usr/bin/virt-install
+hvm_or_pv = hvm
+machine_type = pseries
+only bridge
+no xen, lxc, esx, ovmf
+# Filter out unwanted disk types
+no qed,qcow2v3,raw_dd,vmdk,usb2
+only no_virtio_rng
+only smp2
+only no_9p_export
+only no_pci_assignable
+only (image_backend=filesystem)
+create_vm_libvirt=yes
+kill_vm=yes
+kill_vm_libvirt=yes
+env_cleanup=yes
+mem=8192
+smp=8
+threads=1
+cores=8
+sockets=1
+vcpu_sockets=1
+vcpu_cores=8
+vcpu_threads=1
+setvcpus_max = 8
+vcpu_maxcpus = 8
+qemu_binary=/usr/local/lib/python3.9/site-packages/virttest/bin/install_root/bin/qemu-system-ppc64
+emulator_path=/usr/local/lib/python3.9/site-packages/virttest/bin/install_root/bin/qemu-system-ppc64
+kernel=/home/epcci/linux/vmlinux
+initrd=''
+use_serial_login=yes
+test_timeout = 72000
+
+variants:
+    - vmkernel:
+        only virtio_net
+        only qcow2
+        only smallpages
+        only virtio_scsi
+        pre_command = "rm -rf /home/epcci/linux;git clone --depth 1 https://github.com/torvalds/linux /home/epcci/linux/ && cd /home/epcci/linux/ && git log -1 && make ppc64le_guest_defconfig && make -j 100 -s"
+        kernel_args='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0'
+        only guest_test.isa_serial_operations
+
+    - vmstart:
+        only boot
+        no virsh.boot
+        only virtio_net
+        only qcow2
+        variants:
+            - @:
+            - with_numa:
+                only with_smt1
+                numa=yes
+                smp=8
+                threads=1
+                cores=8
+                sockets=1
+                setvcpus_max = 8
+                vcpu_maxcpus = 8
+                vcpu_cores = 8
+                vcpu_threads = 1
+                guest_numa_nodes="node0 node1 node2 node3"
+                variants:
+                    - symmetric_nodes:
+                        numa_mem_node0="2097152"
+                        numa_cpus_node0="0-1"
+                        numa_nodeid_node0="0"
+                        numa_mem_node1="2097152"
+                        numa_cpus_node1="2-3"
+                        numa_nodeid_node1="1"
+                        numa_mem_node2="2097152"
+                        numa_cpus_node2="4-5"
+                        numa_nodeid_node2="2"
+                        numa_mem_node3="2097152"
+                        numa_cpus_node3="6-7"
+                        numa_nodeid_node3="3"
+                    - asymmetric_nodes:
+                        numa_mem_node0="1048576"
+                        numa_cpus_node0="1,3,7"
+                        numa_nodeid_node0="0"
+                        numa_mem_node1="2097152"
+                        numa_cpus_node1="0,4-5"
+                        numa_nodeid_node1="1"
+                        numa_mem_node2="2097152"
+                        numa_cpus_node2="2"
+                        numa_nodeid_node2="2"
+                        numa_mem_node3="3145728"
+                        numa_cpus_node3="6"
+                        numa_nodeid_node3="3"
+        variants:
+            - @:
+            - with_smt1:
+                smp=8
+                threads=1
+                cores=8
+                sockets=1
+                setvcpus_max=8
+                vcpu_maxcpus=8
+                vcpu_cores=8
+                vcpu_threads=1
+                vcpu_sockets=1
+            - with_smt2:
+                smp=8
+                threads=2
+                cores=4
+                sockets=1
+                setvcpus_max=8
+                vcpu_maxcpus=8
+                vcpu_cores=4
+                vcpu_threads=2
+                vcpu_sockets=1
+            - with_smt4:
+                smp=8
+                threads=4
+                cores=2
+                sockets=1
+                setvcpus_max=8
+                vcpu_maxcpus=8
+                vcpu_cores=2
+                vcpu_threads=4
+                vcpu_sockets=1
+            - with_smt8:
+                smp=8
+                threads=8
+                cores=1
+                sockets=1
+                setvcpus_max=8
+                vcpu_maxcpus=8
+                vcpu_cores=1
+                vcpu_threads=8
+                vcpu_sockets=1
+            - with_lowmem:
+                no with_numa
+                mem=2048
+            - with_singlecpu:
+                only with_smt1
+                smp=1
+                threads=1
+                cores=1
+                sockets=1
+                setvcpus_max=1
+                vcpu_maxcpus=1
+                vcpu_cores=1
+                vcpu_threads=1
+                vcpu_sockets=1
+        variants:
+            - @:
+            - with_intc:
+                only with_smt4
+                variants:
+                    - xics:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=xics"
+                    - xive:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=xive"
+                    - dual:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=dual"
+                    - xics_kernel_irqchip_off:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=xics,kernel-irqchip=off"
+                    - xive_kernel_irqchip_off:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=xive,kernel-irqchip=off"
+                    - dual_kernel_irqchip_off:
+                        virtinstall_qemu_cmdline_vm1=" -M pseries,ic-mode=dual,kernel-irqchip=off"
+            - with_vsmt:
+                mem = 20480
+                no with_intc,mem_merge,with_numa
+                only with_smt1
+                vms = "vm1"
+                cores_vm1=1
+                threads_vm1=8
+                sockets=1
+                vcpu_cores_vm1=1
+                vcpu_threads_vm1=8
+                vcpu_sockets_vm1=1
+                virtinstall_qemu_cmdline_vm1=" -M pseries,vsmt=8"
+            - with_nested_cap:
+                no with_vsmt,with_intc,mem_merge
+                only with_smt4,with_smt8
+                virtinstall_qemu_cmdline_vm1=" -M pseries,cap-nested-hv=on"
+        variants:
+            - with_virtio_scsi:
+                kernel_args='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0'
+                no with_virtio_blk
+                only virtio_scsi
+            - with_virtio_blk:
+                kernel_args='root=/dev/vda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0'
+                no with_virtio_scsi
+                only virtio_blk
+
+    - vm:
+        kernel_args='root=/dev/sda2 rw console=tty0 console=ttyS0,115200 init=/sbin/init initcall_debug selinux=0'
+        only virtio_scsi
+        only virtio_net
+        only qcow2
+        variants:
+            - guesttests:
+                variants:
+                    - memory:
+                        only avocado_guest.memhotplug,avocado_guest.eatmemory,avocado_guest.memintegrity
+                    - generic:
+                        only smallpages
+                        only avocado_guest.ltp,avocado_guest.kselftest
+                    - cpu:
+                        only smallpages
+                        vcpu_cores = 4
+                        vcpu_threads = 8
+                        vcpu_sockets = 1
+                        vcpu_maxcpus = 32
+                        only avocado_guest.ebizzy,avocado_guest.ppc64_cpu_test
+                    - io:
+                        vcpu_cores = 16
+                        vcpu_threads = 2
+                        vcpu_sockets = 1
+                        vcpu_maxcpus = 32
+                        only avocado_guest.fiotest
+
+            - guestdump:
+                only virsh.dump.positive_test.non_acl.live_dump
+            - diskhotplug:
+                at_dt_disk_device_target = sdb
+                only virsh.attach_detach_disk_matrix..at_option_live
+                no dt_option_live_config,dt_option_config,dt_option_default,at_okay_dt_error,at_error_dt_error,pre_vm_state_shutoff,pre_vm_state_paused,pre_vm_state_transient,dt_option_current
+            - memoryhotplug:
+                # FIXME
+                only libvirt_mem.positive_test.memory.hot.unplug.max_slots.with_rand_reboot
+            - vcpuhotplug:
+                start_vm = yes
+                test_itr = 2
+                mem = 81920
+                vcpu_max_timeout = 480
+                vcpu_sockets = 1
+                vcpu_maxcpus = 64
+                topology_correction = "no"
+                variants:
+                    - 1thread:
+                        no hugepages
+                        vcpu_cores = 64
+                        smp = 1
+                        vcpu_threads = 1
+                        vcpu_current_num = 1
+                        vcpu_plug_num = 64
+                        vcpu_unplug_num = 1
+                        vcpu_max_num = 64
+                        only libvirt_vcpu_plug_unplug..with_maxvcpu
+                    - 2threads:
+                        no hugepages
+                        smp = 2
+                        vcpu_cores = 32
+                        vcpu_threads = 2
+                        vcpu_current_num = 2
+                        vcpu_plug_num = 64
+                        vcpu_unplug_num = 2
+                        vcpu_max_num = 64
+                        only libvirt_vcpu_plug_unplug..with_maxvcpu
+                    - 4threads:
+                        smp = 4
+                        vcpu_cores = 16
+                        vcpu_threads = 4
+                        vcpu_current_num = 4
+                        vcpu_plug_num = 64
+                        vcpu_unplug_num = 4
+                        vcpu_max_num = 64
+                        only libvirt_vcpu_plug_unplug..with_maxvcpu
+                    - 8threads:
+                        no hugepages
+                        smp = 8
+                        vcpu_cores = 8
+                        vcpu_threads = 8
+                        vcpu_current_num = 8
+                        vcpu_plug_num = 64
+                        vcpu_unplug_num = 8
+                        vcpu_max_num = 64
+                        only libvirt_vcpu_plug_unplug..with_maxvcpu
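
A note on exercising these configs: all three files are avocado-vt Cartesian configs, so the variant tree they define can be expanded and inspected before any guest is started. The snippet below is a minimal sketch, assuming avocado-vt is installed and exposes virttest.cartesian_config.Parser with parse_file(), only_filter() and get_dicts() as in current releases; the filter string is only an example and can be swapped for any variant name defined above. Note that the "include tests-shared.cfg" line has to be resolvable from wherever the parser runs (or be commented out for a quick standalone check).

    #!/usr/bin/env python3
    # Expand nested.cfg and list the variants that survive an "only" filter,
    # without defining or starting any guests.
    from virttest import cartesian_config

    parser = cartesian_config.Parser()
    parser.parse_file("config/tests/guest/libvirt/nested.cfg")
    # Example filter: keep only the symmetric-NUMA boot variants.
    parser.only_filter("symmetric_nodes")

    for params in parser.get_dicts():
        # Each dict is one fully expanded variant; "name" holds the variant path.
        print(params["name"])
        print("    smp=%s vcpu_cores=%s vcpu_threads=%s" % (
            params.get("smp"), params.get("vcpu_cores"), params.get("vcpu_threads")))

The same check works for fusedcore.cfg and pef.cfg, and it is a cheap way to confirm that per-VM overrides such as vcpu_cores_vmN/vcpu_threads_vmN end up in the expected dictionaries before committing to a long multi-VM stress run.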