diff --git a/qemu/deps/key_keycode/key_to_keycode_win.json b/deps/key_keycode/key_to_keycode_win.json similarity index 99% rename from qemu/deps/key_keycode/key_to_keycode_win.json rename to deps/key_keycode/key_to_keycode_win.json index f5a04cdc91de36731c2f28113bb8f78a9a375fbd..282f596e4da3ac8a43107835332bf2f87caefe7a 100644 --- a/qemu/deps/key_keycode/key_to_keycode_win.json +++ b/deps/key_keycode/key_to_keycode_win.json @@ -1,4 +1,4 @@ -{ +{ "space" : "KEY_SPACE", "apostrophe" : "KEY_APOSTROPHE", "comma" : "KEY_COMMA", diff --git a/deps/kvm_unit_tests/kvm-unit-tests.tar.gz b/deps/kvm_unit_tests/kvm-unit-tests.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..7785c35a6aeb84913c3399e58a722089f7ca8d1f Binary files /dev/null and b/deps/kvm_unit_tests/kvm-unit-tests.tar.gz differ diff --git a/deps/pjdfstest/pjdfstest-0.1.tar.bz2 b/deps/pjdfstest/pjdfstest-0.1.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..66a4a3db29629d2880e97ef112ee20a2c596d715 Binary files /dev/null and b/deps/pjdfstest/pjdfstest-0.1.tar.bz2 differ diff --git a/deps/rdrand/RdRand.exe b/deps/rdrand/RdRand.exe new file mode 100755 index 0000000000000000000000000000000000000000..f06abb9900e4b8a4139d756710ce4a181f858c00 Binary files /dev/null and b/deps/rdrand/RdRand.exe differ diff --git a/deps/rdrand/rdrand.c b/deps/rdrand/rdrand.c new file mode 100644 index 0000000000000000000000000000000000000000..12ab36b5ce93d619f65988409aeade499cc57ca3 --- /dev/null +++ b/deps/rdrand/rdrand.c @@ -0,0 +1,15 @@ +#include <stdio.h> +#include <stdlib.h> +#include <time.h> + +float randoms(float min, float max) +{ + return (float)(rand())/RAND_MAX*(max - min) + min; +} + +int main() +{ + srand((unsigned int)time(0)); + printf("%f\n",randoms(-100.001, 100.001)); + return 0; +} diff --git a/generic/tests/cfg/unattended_install.cfg b/generic/tests/cfg/unattended_install.cfg index 47f70dceac1afa2d7cc4bd803f1844dca5539860..b19f29849aaad6eebc9656c48281b76be1ad3208 100644 --- a/generic/tests/cfg/unattended_install.cfg +++ b/generic/tests/cfg/unattended_install.cfg @@ -89,6 +89,8 @@ image_size_equal = 1G ahci: images = "image1 stg stg2 stg3 stg4 stg5" + default_bios..Windows: + images = "image1 stg stg2 stg3 stg4 stg5" - large_image: only unattended_install.cdrom only qcow2 qcow2v3 @@ -189,6 +191,11 @@ boot_once = d medium = cdrom redirs += " unattended_install" + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= # Install guest from http/ftp url - url: only Linux diff --git a/generic/tests/ping.py b/generic/tests/ping.py index 1983e0f936a60bc560d7d752791d13d7d065f1a5..1ae700a5ff97814b6c1c207df6bed2a8e971b7f0 100644 --- a/generic/tests/ping.py +++ b/generic/tests/ping.py @@ -68,13 +68,14 @@ def run(test, params, env): if ping_ext_host: ext_host = params.get("ext_host", "") ext_host_get_cmd = params.get("ext_host_get_cmd", "") - try: - ext_host = process.system_output(ext_host_get_cmd, shell=True) - ext_host = ext_host.decode() - except process.CmdError: - logging.warn("Can't get specified host with cmd '%s'," - " Fallback to default host '%s'", - ext_host_get_cmd, ext_host) + if ext_host_get_cmd: + try: + ext_host = process.system_output(ext_host_get_cmd, shell=True) + ext_host = ext_host.decode() + except process.CmdError: + logging.warn("Can't get specified host with cmd '%s'," - " Fallback to default host '%s'", + ext_host_get_cmd, ext_host) dest_ips = [ext_host] sessions = [session] interfaces = [None]
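The ping.py hunk above makes the external-host lookup conditional: the shell lookup now only runs when ext_host_get_cmd is actually configured, so a plain ext_host parameter no longer triggers a spurious shell call and warning. A minimal sketch of the resulting resolution order (the literal dict is an illustrative stand-in for the real test-parameters object):

from avocado.utils import process

def resolve_ext_host(params):
    ext_host = params.get("ext_host", "")           # configured default
    ext_host_get_cmd = params.get("ext_host_get_cmd", "")
    if ext_host_get_cmd:                            # shell out only when asked to
        try:
            ext_host = process.system_output(ext_host_get_cmd,
                                             shell=True).decode()
        except process.CmdError:
            pass                                    # keep the configured default
    return ext_host

# resolve_ext_host({"ext_host": "10.0.0.1"}) -> "10.0.0.1", no shell call

diff --git a/generic/tests/pktgen_perf.py b/generic/tests/pktgen_perf.py index 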
99737efeb87f6a30824e6f0aa2b64810a26262ca..6d5e193f7a6d124af45f9a9930fe87c30ee93173 100644 --- a/generic/tests/pktgen_perf.py +++ b/generic/tests/pktgen_perf.py @@ -12,7 +12,6 @@ from virttest import utils_test from virttest import utils_misc from virttest import error_context - _system_output = functools.partial(process.system_output, shell=True) @@ -93,6 +92,29 @@ def run(test, params, env): for record in record_list.split(): record_line += "%s|" % format_result(record) + def install_package(ver, session=None): + """ install the kernel-modules-internal package, which provides the pktgen module """ + + output_cmd = _system_output + kernel_ver = "kernel-modules-internal-%s" % ver + cmd_download = "cd /tmp && brew download-build %s --rpm" % kernel_ver + cmd_install = "cd /tmp && rpm -ivh %s.rpm --force --nodeps" % kernel_ver + output_cmd(cmd_download).decode() + cmd_clean = "rm -rf /tmp/%s.rpm" % kernel_ver + if session: + output_cmd = session.cmd_output + local_path = "/tmp/%s.rpm" % kernel_ver + remote_path = "/tmp/" + vm.copy_files_to(local_path, remote_path) + output_cmd(cmd_install) + output_cmd(cmd_clean) + + check_cmd = "uname -r |grep el8" + if process.run(check_cmd, shell=True, ignore_status=True).exit_status == 0: + install_package(host_ver) + if session.cmd_status(check_cmd) == 0: + install_package(guest_ver.strip(), session=session) + # get result tested by each scenario for pkt_cate in category.split(): result_file.write("Category:%s\n" % pkt_cate) diff --git a/generic/tests/pxe_boot.py b/generic/tests/pxe_boot.py index d089d4a377394016cc6f738e32a12610c9d4c890..89ea1d6b7fc1badf122c15a97c39b60e33e3dacb 100644 --- a/generic/tests/pxe_boot.py +++ b/generic/tests/pxe_boot.py @@ -24,7 +24,7 @@ def run(test, params, env): timeout = int(params.get("pxe_timeout", 60)) error_context.context("Snoop packet in the tap device", logging.info) - tcpdump_cmd = "tcpdump -nli %s" % vm.get_ifname() + tcpdump_cmd = "tcpdump -nli %s port '(tftp or bootps)'" % vm.get_ifname() try: tcpdump_process = aexpect.run_bg(command=tcpdump_cmd, output_func=logging.debug, diff --git a/provider/backup_utils.py b/provider/backup_utils.py index 25615f1cfdddd48b3319074765225cc2e9c53d81..3c57e4dc9bf1c2db644724ea8f05e3e84d1415ca 100644 --- a/provider/backup_utils.py +++ b/provider/backup_utils.py @@ -77,13 +77,12 @@ def generate_tempfile(vm, root_dir, filename, size="10M", timeout=720): md5_cmd = "certutil -hashfile %s MD5 > %s.md5" % (file_path, file_path) else: file_path = "%s/%s" % (root_dir, filename) - size_str = int( + count = int( utils_numeric.normalize_data_size( size, - order_magnitude="K", + order_magnitude="M", factor=1024)) - count = size_str // 4 - mk_file_cmd = "dd if=/dev/urandom of=%s bs=4k count=%s oflag=direct" % ( + mk_file_cmd = "dd if=/dev/urandom of=%s bs=1M count=%s oflag=direct" % ( file_path, count) md5_cmd = "md5sum %s > %s.md5 && sync" % (file_path, file_path) try: @@ -143,6 +142,8 @@ def blockdev_mirror_qmp_cmd(source, target, **extra_options): "speed", "copy-mode", "buf-size", + "on-source-error", + "on-target-error", "unmap"] arguments = copy_out_dict_if_exists(extra_options, options) arguments["device"] = source @@ -228,11 +229,17 @@ def blockdev_snapshot(vm, source, target, **extra_options): @fail_on -def blockdev_mirror(vm, source, target, **extra_options): +def blockdev_mirror_nowait(vm, source, target, **extra_options): + """Do block-mirror and don't wait till the job completes, return the job id""" cmd, arguments = blockdev_mirror_qmp_cmd(source, target, **extra_options) - timeout = int(extra_options.pop("timeout", 600)) vm.monitor.cmd(cmd, arguments) - job_id = 
arguments.get("job-id", source) + return arguments.get("job-id", source) + + +@fail_on +def blockdev_mirror(vm, source, target, **extra_options): + timeout = int(extra_options.pop("timeout", 600)) + job_id = blockdev_mirror_nowait(vm, source, target, **extra_options) job_utils.wait_until_block_job_completed(vm, job_id, timeout) @@ -246,11 +253,18 @@ def block_commit(vm, device, **extra_options): @fail_on -def blockdev_stream(vm, device, **extra_options): - timeout = int(extra_options.pop("timeout", 600)) +def blockdev_stream_nowait(vm, device, **extra_options): + """Do block-stream and don't wait stream completed, return job id""" cmd, arguments = blockdev_stream_qmp_cmd(device, **extra_options) vm.monitor.cmd(cmd, arguments) - job_id = arguments.get("job-id", device) + return arguments.get("job-id", device) + + +@fail_on +def blockdev_stream(vm, device, **extra_options): + """Do block-stream and wait stream completed""" + timeout = int(extra_options.pop("timeout", 600)) + job_id = blockdev_stream_nowait(vm, device, **extra_options) job_utils.wait_until_block_job_completed(vm, job_id, timeout) @@ -294,6 +308,15 @@ def blockdev_batch_backup(vm, source_lst, target_lst, timeout = int(extra_options.pop("timeout", 600)) completion_mode = extra_options.pop("completion_mode", None) sync_mode = extra_options.get("sync") + + # we can disable dirty-map in a transaction + bitmap_disable_cmd = "block-dirty-bitmap-disable" + disabled_bitmap_lst = extra_options.pop("disabled_bitmaps", None) + + # sometimes the job will never complete, e.g. backup in pull mode, + # export fleecing image by internal nbd server + wait_job_complete = extra_options.pop("wait_job_complete", True) + for idx, src in enumerate(source_lst): if sync_mode in ["incremental", "bitmap"]: assert len(bitmap_lst) == len( @@ -305,7 +328,7 @@ def blockdev_batch_backup(vm, source_lst, target_lst, jobs_id.append(job_id) actions.append({"type": backup_cmd, "data": arguments}) - if bitmap_lst and sync_mode == 'full': + if bitmap_lst and (sync_mode == 'full' or sync_mode == 'none'): bitmap_data = {"node": source_lst[idx], "name": bitmap_lst[idx]} granularity = extra_options.get("granularity") persistent = extra_options.get("persistent") @@ -315,11 +338,20 @@ def blockdev_batch_backup(vm, source_lst, target_lst, bitmap_data["persistent"] = persistent actions.append({"type": bitmap_add_cmd, "data": bitmap_data}) + if disabled_bitmap_lst: + bitmap_data = {"node": source_lst[idx], + "name": disabled_bitmap_lst[idx]} + actions.append({"type": bitmap_disable_cmd, "data": bitmap_data}) + arguments = {"actions": actions} if completion_mode == 'grouped': arguments['properties'] = {"completion-mode": "grouped"} vm.monitor.cmd("transaction", arguments) - list(map(lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), jobs_id)) + + if wait_job_complete: + list(map( + lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), + jobs_id)) @fail_on diff --git a/provider/block_devices_plug.py b/provider/block_devices_plug.py index f74ff0d9362dc831c898baa7bddf6369b52d3022..5a17b98542c862db5e1c772a2f404f03f0a15173 100644 --- a/provider/block_devices_plug.py +++ b/provider/block_devices_plug.py @@ -29,6 +29,7 @@ from virttest.qemu_capabilities import Flags from virttest.qemu_devices import qdevices from virttest.qemu_devices.utils import (DeviceError, DeviceHotplugError, DeviceUnplugError) +from virttest.qemu_monitor import MonitorLockError HOTPLUG, UNPLUG = ('hotplug', 'unplug') HOTPLUGGED_HBAS = {} @@ -37,6 +38,7 @@ DISK = {'name': 
'images', 'media': 'disk'} CDROM = {'name': 'cdroms', 'media': 'cdrom'} _LOCK = threading.Lock() +_QMP_OUTPUT = {} def _verify_plugged_num(action): @@ -157,6 +159,9 @@ class BlockDevicesPlug(object): The Block Devices Plug. """ + ACQUIRE_LOCK_TIMEOUT = 20 + VERIFY_UNPLUG_TIMEOUT = 60 + def __init__(self, vm): self.vm = vm self._imgs = vm.params.get("images").split()[1:] @@ -167,7 +172,6 @@ class BlockDevicesPlug(object): self._plugged_disks = [] self._orig_disks = set() self._all_disks = set() - self._qmp_outputs = {} self._event_devs = [] self._dev_type = DISK self._qdev_type = qdevices.QBlockdevNode if vm.check_capability( @@ -200,10 +204,15 @@ class BlockDevicesPlug(object): def _check_qmp_outputs(self, action): """ Check the output of qmp commands. """ - for dev_id in list(self._qmp_outputs.keys()): - output = self._qmp_outputs.pop(dev_id) + for dev_id in list(_QMP_OUTPUT.keys()): + output = _QMP_OUTPUT.pop(dev_id) if output[1] is False: - raise TestError("Failed to %s device %s." % (action, dev_id)) + err = "Failed to %s device %s. " % (action, dev_id) + if not output[0] and action == 'unplug': + err += 'No deleted event generated and %s still in qtree' % dev_id + else: + err += output[0] + raise TestError(err) def _get_events_deleted(self): """ Get the device deleted events. """ @@ -225,7 +234,7 @@ class BlockDevicesPlug(object): 'No \"DEVICE DELETED\" event generated after unplug \"%s\".' % (';'.join(self.event_devs))) - def _create_devices(self, images): + def _create_devices(self, images, pci_bus={"aobject": "pci.0"}): """ Create the block devcies. """ self._hotplugged_devs.clear() for img in images: @@ -234,10 +243,14 @@ class BlockDevicesPlug(object): img_params = self.vm.params.object_params(img) devices_created = getattr( self.vm.devices, '%s_define_by_params' % self._dev_type['name'])( - img, img_params, self._dev_type['media']) + img, img_params, self._dev_type['media'], pci_bus=pci_bus) for dev in reversed(devices_created): - if dev.get_qid().endswith(img): + qid = dev.get_qid() + if isinstance(dev, qdevices.QObject) and dev.get_param( + 'backend') == 'secret' and qid.startswith('%s_' % img): + self._hotplugged_devs[img].insert(0, dev) + elif qid.endswith('_%s' % img) or qid == img: self._hotplugged_devs[img].insert(0, dev) bus = dev.get_param('bus') if bus: @@ -246,13 +259,23 @@ class BlockDevicesPlug(object): elif bus_name == dev.get_qid() and dev not in self.vm.devices: self._hotplugged_devs[img].insert(-1, dev) HOTPLUGGED_HBAS[img] = dev - break + + def _plug(self, plug_func, monitor): + end = time.time() + self.ACQUIRE_LOCK_TIMEOUT + while time.time() < end: + try: + return plug_func(monitor) + except MonitorLockError: + pass + else: + return plug_func(monitor) def _hotplug_atomic(self, device, monitor, bus=None): """ Function hot plug device to devices representation. 
""" with _LOCK: self.vm.devices.set_dirty() + qdev_out = '' if isinstance(device, qdevices.QDevice): dev_bus = device.get_param('bus') if bus is None: @@ -278,7 +301,7 @@ class BlockDevicesPlug(object): bus.prepare_hotplug(device) qdev_out = self.vm.devices.insert(device) - out = device.hotplug(monitor) + out = self._plug(device.hotplug, monitor) ver_out = device.verify_hotplug(out, monitor) if ver_out is False: with _LOCK: @@ -307,9 +330,10 @@ class BlockDevicesPlug(object): with _LOCK: self.vm.devices.set_dirty() - out = device.unplug(monitor) - if not utils_misc.wait_for(lambda: device.verify_unplug( - out, monitor) is True, first=1, step=5, timeout=60): + out = self._plug(device.unplug, monitor) + if not utils_misc.wait_for( + lambda: device.verify_unplug(out, monitor) is True, + first=1, step=5, timeout=self.VERIFY_UNPLUG_TIMEOUT): with _LOCK: self.vm.devices.set_clean() return out, device.verify_unplug(out, monitor) @@ -320,18 +344,27 @@ class BlockDevicesPlug(object): drive = device.get_param("drive") if drive: if self.vm.check_capability(Flags.BLOCKDEV): - format_node = self.vm.devices[drive] - nodes = [format_node] - nodes.extend((n for n in format_node.get_child_nodes())) + # top node + node = self.vm.devices[drive] + nodes = [node] + + # Build the full nodes list + for node in nodes: + child_nodes = node.get_child_nodes() + nodes.extend(child_nodes) + for node in nodes: - if not node.verify_unplug(node.unplug(monitor), monitor): + parent_node = node.get_parent_node() + child_nodes = node.get_child_nodes() + recursive = True if len(child_nodes) > 0 else False + if not node.verify_unplug( + self._plug(node.unplug, monitor), monitor): raise DeviceUnplugError( node, "Failed to unplug blockdev node.", self) with _LOCK: - self.vm.devices.remove(node, True if isinstance( - node, qdevices.QBlockdevFormatNode) else False) - if not isinstance(node, qdevices.QBlockdevFormatNode): - format_node.del_child_node(node) + self.vm.devices.remove(node, recursive) + if parent_node: + parent_node.del_child_node(node) else: with _LOCK: self.vm.devices.remove(drive) @@ -354,8 +387,12 @@ class BlockDevicesPlug(object): """ Plug devices. """ for img, devices in devices_dict.items(): for device in devices: - args = (device, monitor) if bus is None else (device, monitor, bus) - self._qmp_outputs[device.get_qid()] = getattr( + args = (device, monitor) + if (isinstance(device, qdevices.QDevice) and + bus is not None and + self.vm.devices.is_pci_device(device['driver'])): + args += (bus,) + _QMP_OUTPUT[device.get_qid()] = getattr( self, '_%s_atomic' % action)(*args) time.sleep(self._interval) @@ -365,7 +402,8 @@ class BlockDevicesPlug(object): """ logging.info("Start to hotplug devices \"%s\" by monitor %s." % ( ' '.join(images), monitor.name)) - self._create_devices(images) + args = (images, {'aobject': 'pci.0' if bus is None else bus.aobject}) + self._create_devices(*args) self._plug_devs(HOTPLUG, self._hotplugged_devs, monitor, bus) def _unplug_devs(self, images, monitor): @@ -373,19 +411,22 @@ class BlockDevicesPlug(object): Unplug the block devices which are defined by images. 
""" self._unplugged_devs.clear() - devs = [dev for dev in self.vm.devices if isinstance(dev, qdevices.QDevice)] + devs = [dev for dev in self.vm.devices if isinstance( + dev, (qdevices.QDevice, qdevices.QObject))] for img in images: self._unplugged_devs[img] = [] for dev in devs: - if dev.get_qid() == img: - self._unplugged_devs[img].append(dev) - break + qid = dev.get_qid() + if qid == img or qid.startswith('%s_' % img): + self._unplugged_devs[img].insert(0, dev) + if qid == img: + break else: raise TestError('No such device \'%s\' in VM\'s devices.' % img) # Search the corresponding HBA device to be unplugged. for img in list(self._unplugged_devs.keys()): - _dev = self._unplugged_devs[img][0] + _dev = next((_ for _ in self._unplugged_devs[img] if _.get_qid() == img)) _dev_bus = _dev.get_param('bus') if _dev_bus: bus_name = _dev_bus.rsplit('.')[0] diff --git a/provider/block_dirty_bitmap.py b/provider/block_dirty_bitmap.py index 6a9a9a21ad132fa6a08c9d5697657b693710a46b..66c9553c69bb620cb3ad524f9a8978dda420e15d 100644 --- a/provider/block_dirty_bitmap.py +++ b/provider/block_dirty_bitmap.py @@ -186,3 +186,58 @@ def get_bitmaps_in_device(vm, device): out = vm.monitor.cmd("query-block") bitmaps = get_bitmaps(out) return bitmaps.get(device, list()) + + +@fail_on +def handle_block_dirty_bitmap_transaction(vm, disabled_params=None, + added_params=None, + merged_params=None): + """ + Add/disable/merge bitmaps in one transaction. + :param vm: an active VM object + :param disabled_params: dict for bitmaps to be disabled, + required: bitmap_device_node, bitmap_name + optional: bitmap_disable_cmd + :param added_params: dict for bitmaps to be added, + required: bitmap_device_node, bitmap_name + optional: bitmap_add_cmd, bitmap_granularity, + bitmap_persistent, bitmap_disabled + :param merged_params: dict for bitmaps to be merged + required: bitmap_device_node, bitmap_target, bitmap_sources + optional: bitmap_merge_cmd + """ + actions = [] + + if disabled_params: + bitmap_disable_cmd = disabled_params.get('bitmap_disable_cmd', + 'block-dirty-bitmap-disable') + bitmap_data = {"node": disabled_params['bitmap_device_node'], + "name": disabled_params['bitmap_name']} + actions.append({"type": bitmap_disable_cmd, "data": bitmap_data}) + + if added_params: + bitmap_add_cmd = added_params.get('bitmap_add_cmd', + 'block-dirty-bitmap-add') + bitmap_data = {"node": added_params['bitmap_device_node'], + "name": added_params['bitmap_name']} + if added_params.get('bitmap_granularity'): + bitmap_data['granularity'] = added_params['granularity'] + + mapping = {'on': True, 'yes': True, 'off': False, 'no': False} + if added_params.get('bitmap_persistent'): + bitmap_data['persistent'] = mapping[added_params['bitmap_persistent']] + if added_params.get('bitmap_disabled'): + bitmap_data['disabled'] = mapping[added_params['bitmap_disabled']] + actions.append({'type': bitmap_add_cmd, 'data': bitmap_data}) + + if merged_params: + bitmap_merge_cmd = merged_params.get('bitmap_merge_cmd', + 'block-dirty-bitmap-merge') + bitmap_data = {'node': merged_params['bitmap_device_node'], + 'target': merged_params['bitmap_target'], + 'bitmaps': merged_params['bitmap_sources']} + actions.append({'type': bitmap_merge_cmd, 'data': bitmap_data}) + + if actions: + arguments = {"actions": actions} + vm.monitor.cmd("transaction", arguments) diff --git a/provider/blockdev_base.py b/provider/blockdev_base.py index 91f2946a820c46fc3ae28983ce9696dcc4bd6f46..308d7101547b360dd196eb724204e85e871501ed 100644 --- a/provider/blockdev_base.py +++ 
b/provider/blockdev_base.py @@ -1,4 +1,7 @@ import logging +import time + +from functools import partial from avocado.core import exceptions from avocado.utils import memory @@ -12,6 +15,7 @@ from virttest import utils_disk from virttest.qemu_capabilities import Flags from provider import backup_utils +from provider.job_utils import get_block_job_by_id from provider.virt_storage.storage_admin import sp_admin @@ -56,6 +60,13 @@ class BlockdevBaseTest(object): def preprocess_data_disks(self): for tag in self.params.objects("source_images"): params = self.params.object_params(tag) + if params.get("force_create_image") == "yes": + # vt takes care of the image creation + continue + elif params.get("image_create_support") == "no": + # image creation is not supported, e.g. nbd storage + continue + if params.get("random_cluster_size") == "yes": blacklist = list( map(int, params.objects("cluster_size_blacklist"))) @@ -95,7 +106,11 @@ class BlockdevBaseTest(object): timeout = params.get_numeric("create_tempfile_timeout", 720) backup_utils.generate_tempfile( self.main_vm, self.disks_info[tag][1], filename, image_size, timeout) - self.files_info[tag] = [filename] + + if tag not in self.files_info: + self.files_info[tag] = [filename] + else: + self.files_info[tag].append(filename) def prepare_data_disk(self, tag): """ @@ -157,7 +172,7 @@ class BlockdevBaseTest(object): error_context.context("Create target disk") for tag in self.params.objects("source_images"): image_params = self.params.object_params(tag) - for img in image_params.objects("image_chain"): + for img in image_params.objects("image_backup_chain"): disk = self.target_disk_define_by_params(self.params, img) disk.hotplug(self.main_vm) self.trash.append(disk) @@ -199,6 +214,94 @@ class BlockdevBaseTest(object): """ for img in set(self.trash): try: + # A QemuImg object img.remove() + except AttributeError: + # A StorageVolume object + sp_admin.remove_volume(img) except Exception as e: logging.warn(str(e)) + + def is_block_job_started(self, jobid, tmo=10): + """ + offset should greater than 0 when block job starts, + return True if offset > 0 in tmo, or return False + """ + for i in range(tmo): + time.sleep(1) + job = get_block_job_by_id(self.main_vm, jobid) + if not job: + logging.warn('job %s cancelled unexpectedly' % jobid) + break + elif job['offset'] > 0: + return True + else: + logging.warn('block job %s never starts in %s' % (jobid, tmo)) + return False + + def check_block_jobs_started(self, jobid_list, tmo=10): + """ + Test failed if any block job failed to start + """ + func = partial(self.is_block_job_started, tmo=tmo) + if not all(list(map(func, jobid_list))): + self.test.fail('Not all block jobs start successfully') + + def is_block_job_running(self, jobid, tmo=200): + """ + offset should increase when block job keeps running, + return True if offset increases in tmo, or return False + """ + offset = None + for i in range(tmo): + job = get_block_job_by_id(self.main_vm, jobid) + if not job: + logging.warn('job %s cancelled unexpectedly' % jobid) + break + elif offset is None: + offset = job['offset'] + elif job['offset'] > offset: + return True + time.sleep(1) + else: + logging.warn('offset never changed for block job %s in %s' + % (jobid, tmo)) + return False + + def check_block_jobs_running(self, jobid_list, tmo=200): + """ + Test failed if any block job's offset never increased + """ + func = partial(self.is_block_job_running, tmo=tmo) + if not all(list(map(func, jobid_list))): + self.test.fail('Not all block jobs are running') + + 
def is_block_job_paused(self, jobid, tmo=50): + """ + offset should stay the same when mirror job paused, + return True if offset never changed in tmo, or return False + """ + offset = None + time.sleep(10) + + for i in range(tmo): + time.sleep(1) + job = get_block_job_by_id(self.main_vm, jobid) + if not job: + logging.warn('job %s cancelled unexpectedly' % jobid) + break + elif offset is None: + offset = job['offset'] + elif offset != job['offset']: + logging.warn('offset %s changed for job %s in %s' + % (offset, jobid, tmo)) + return False + return True + + def check_block_jobs_paused(self, jobid_list, tmo=50): + """ + Test failed if any block job's offset changed + """ + func = partial(self.is_block_job_paused, tmo=tmo) + if not all(list(map(func, jobid_list))): + self.test.fail('Not all block jobs are paused') diff --git a/provider/blockdev_commit_base.py b/provider/blockdev_commit_base.py new file mode 100644 index 0000000000000000000000000000000000000000..d5cb87960126bb5614497798abf70c71e4c0d119 --- /dev/null +++ b/provider/blockdev_commit_base.py @@ -0,0 +1,165 @@ +import logging + +from virttest import qemu_storage +from virttest import data_dir +from virttest import utils_disk + +from provider import backup_utils +from provider import job_utils + +from provider.virt_storage.storage_admin import sp_admin + + +class BlockDevCommitTest(object): + + def __init__(self, test, params, env): + self.env = env + self.test = test + self.params = params + self.disks_info = list() + self.files_info = list() + self.main_vm = self.prepare_main_vm() + + @staticmethod + def get_node_name(tag): + return "drive_%s" % tag + + def prepare_main_vm(self): + return self.env.get_vm(self.params["main_vm"]) + + def get_image_by_tag(self, name): + image_dir = data_dir.get_data_dir() + image_params = self.params.object_params(name) + return qemu_storage.QemuImg(image_params, image_dir, name) + + def prepare_snapshot_file(self, snapshot_tags): + self.snapshot_images = list( + map(self.get_image_by_tag, snapshot_tags)) + params = self.params.copy() + params.setdefault("target_path", data_dir.get_data_dir()) + for tag in snapshot_tags: + image = sp_admin.volume_define_by_params(tag, params) + image.hotplug(self.main_vm) + + def verify_data_file(self): + for info in self.files_info: + mount_point, filename = info[0], info[1] + backup_utils.verify_file_md5( + self.main_vm, mount_point, filename) + + def create_snapshots(self, snapshot_tags, device): + options = ["node", "overlay"] + cmd = "blockdev-snapshot" + for idx, tag in enumerate(snapshot_tags): + params = self.params.object_params(tag) + arguments = params.copy_from_keys(options) + arguments["overlay"] = self.get_node_name(tag) + if idx == 0: + arguments["node"] = self.device_node + else: + arguments["node"] = self.get_node_name( + snapshot_tags[idx - 1]) + self.main_vm.monitor.cmd(cmd, dict(arguments)) + for info in self.disks_info: + if device in info: + self.generate_tempfile(info[1], tag) + + def commit_snapshots(self): + job_id_list = [] + for device in self.params["device_tag"].split(): + device_params = self.params.object_params(device) + snapshot_tags = device_params["snapshot_tags"].split() + self.device_node = self.get_node_name(device) + options = ["base-node", "top-node", "speed"] + arguments = self.params.copy_from_keys(options) + arguments["base-node"] = self.get_node_name(device) + arguments["top-node"] = self.get_node_name(snapshot_tags[-2]) + device = self.get_node_name(snapshot_tags[-1]) + if len(self.params["device_tag"].split()) == 1: + 
backup_utils.block_commit(self.main_vm, device, **arguments) + else: + commit_cmd = backup_utils.block_commit_qmp_cmd + cmd, args = commit_cmd(device, **arguments) + job_id = args.get("job-id", device) + job_id_list.append(job_id) + self.main_vm.monitor.cmd(cmd, args) + for job_id in job_id_list: + job_utils.wait_until_block_job_completed(self.main_vm, job_id) + + @staticmethod + def get_linux_disk_path(session, disk_size): + disks = utils_disk.get_linux_disks(session, True) + for kname, attr in disks.items(): + if attr[1] == disk_size and attr[2] == "disk": + return kname + return None + + def configure_disk(self, tag): + """ + support configuration on both system and data disks + """ + if tag == self.params["images"].split()[0]: + self.configure_system_disk(tag) + else: + self.configure_data_disk(tag) + + def configure_system_disk(self, tag): + self.disks_info.append(["", self.params["mount_point"], tag]) + + def configure_data_disk(self, tag): + os_type = self.params["os_type"] + disk_params = self.params.object_params(tag) + disk_size = disk_params["image_size"] + session = self.main_vm.wait_for_login() + try: + if os_type != "windows": + disk_id = self.get_linux_disk_path(session, disk_size) + assert disk_id, "Disk not found in guest!" + mount_point = utils_disk.configure_empty_linux_disk( + session, disk_id, disk_size)[0] + self.disks_info.append([ + r"/dev/%s1" % + disk_id, mount_point, tag]) + else: + disk_id = utils_disk.get_windows_disks_index( + session, disk_size) + drive_letter = utils_disk.configure_empty_windows_disk( + session, disk_id, disk_size)[0] + mount_point = r"%s:\\" % drive_letter + self.disks_info.append([disk_id, mount_point, tag]) + finally: + session.close() + + def generate_tempfile(self, root_dir, filename="data", + size="10M", timeout=360): + backup_utils.generate_tempfile( + self.main_vm, root_dir, filename, size, timeout) + self.files_info.append([root_dir, filename]) + + def pre_test(self): + if not self.main_vm.is_alive(): + self.main_vm.create() + self.main_vm.verify_alive() + for device in self.params["device_tag"].split(): + device_params = self.params.object_params(device) + snapshot_tags = device_params["snapshot_tags"].split() + self.device_node = self.get_node_name(device) + self.configure_disk(device) + self.prepare_snapshot_file(snapshot_tags) + self.create_snapshots(snapshot_tags, device) + + def post_test(self): + try: + self.main_vm.destroy() + for image in self.snapshot_images: + image.remove() + except Exception as error: + logging.error(str(error)) + + def run_test(self): + self.pre_test() + try: + self.commit_snapshots() + self.verify_data_file() + finally: + self.post_test()
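BlockDevCommitTest above is designed to be driven from a thin test entry point; a minimal sketch of such a wrapper, following the usual tp-qemu run() convention (no extra setup assumed):

def run(test, params, env):
    """Create snapshot chains, commit them and verify guest data."""
    commit_test = BlockDevCommitTest(test, params, env)
    commit_test.run_test()

diff --git a/provider/blockdev_mirror_base.py b/provider/blockdev_mirror_base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf4f551f34a3c5b5b25847f1206eedb03db3cd05 --- /dev/null +++ b/provider/blockdev_mirror_base.py @@ -0,0 +1,146 @@ +""" +Module for providing a framework for block-mirror test cases. + +The test strategy for block-mirror test cases: + 1. prepare data disks to create files, take md5sum + 2. hotplug the target disks for block-mirror + 3. do block-mirror + 4. check the target disks are attached + 5. restart vm with the target disks + 6. check the files and md5sum + 7. remove files created on system disk + +Note: + 1. blockdev_mirror must be implemented for different mirror test scenarios. + 2. 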
There are three specific modules derived from this module, which cover + almost all test cases: + + blockdev_mirror_wait: do block-mirror and wait job completed, note that + this module is used for mirroring a single disk + in our testing scenarios. + blockdev_mirror_nowait: do block-mirror for disks one by one, and never + wait job completed. + blockdev_mirror_parallel: do block-mirror for several disks, as well as + other tests in parallel, for block jobs, wait + till all jobs completed. + +""" + +import six + +from provider import blockdev_base + + +class BlockdevMirrorBaseTest(blockdev_base.BlockdevBaseTest): + """ + block-mirror basic test class + """ + + def __init__(self, test, params, env): + super(BlockdevMirrorBaseTest, self).__init__(test, params, env) + self.clone_vm = None + self._source_images = params.objects("source_images") + self._target_images = params.objects("target_images") + self._source_nodes = ["drive_%s" % src for src in self._source_images] + self._target_nodes = ["drive_%s" % tgt for tgt in self._target_images] + self._backup_options = list(map(self._get_backup_options, + self._source_images)) + + def _get_backup_options(self, source_image): + params = self.params.object_params(source_image) + opts = params.objects("backup_options") + backup_options = params.copy_from_keys(opts) + + for k, v in six.iteritems(backup_options): + if v in ("yes", "true", "on"): + backup_options[k] = True + elif v in ("no", "false", "off"): + backup_options[k] = False + + return backup_options + + def _configure_system_disk(self, tag): + self.disks_info[tag] = [ + "system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] + + def _configure_data_disk(self, tag): + self.format_data_disk(tag) + + def remove_files_from_system_image(self, tmo=60): + """Remove testing files from system image""" + tag_dir_list = [(t, d[1]) for t, d in six.iteritems(self.disks_info) if d[0] == "system"] + if tag_dir_list: + tag, root_dir = tag_dir_list[0] + files = ["%s/%s" % (root_dir, f) for f in self.files_info[tag]] + rm_cmd = "rm -f %s" % " ".join(files) + + # restart main vm for the original system image is offlined + # and the mirror image is attached after block-mirror + self.prepare_main_vm() + session = self.main_vm.wait_for_login() + try: + session.cmd(rm_cmd, timeout=tmo) + finally: + session.close() + + def prepare_data_disk(self, tag): + """ + data disk can be a system disk or a non-system disk + """ + if tag == self.params["images"].split()[0]: + self._configure_system_disk(tag) + else: + self._configure_data_disk(tag) + self.generate_data_file(tag) + + def clone_vm_with_mirrored_images(self): + """Boot VM with mirrored data disks""" + if self.main_vm.is_alive(): + self.main_vm.destroy() + + params = self.main_vm.params.copy() + system_image = params.objects("images")[0] + images = [system_image] + \ + self._target_images if self._source_images[0] != system_image else self._target_images + params["images"] = " ".join(images) + + self.clone_vm = self.main_vm.clone(params=params) + self.clone_vm.create() + self.clone_vm.verify_alive() + + self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + + def add_target_data_disks(self): + """Hot plug target disks to VM with qmp monitor""" + for tag in self._target_images: + disk = self.target_disk_define_by_params( + self.params.object_params(tag), tag) + disk.hotplug(self.main_vm) + self.trash.append(disk) + + def _check_mirrored_block_node_attached(self, source_qdev, target_node): + out = self.main_vm.monitor.query("block") + for item in 
out: + if (item["qdev"] == source_qdev + and item["inserted"].get("node-name") == target_node): + break + else: + self.test.fail("Device(%s) is not attached to target node(%s)" + % (source_qdev, target_node)) + + def check_mirrored_block_nodes_attached(self): + """All source devices attach to the mirrored nodes""" + for idx, target in enumerate(self._target_nodes): + self._check_mirrored_block_node_attached( + self._source_images[idx], target) + + def blockdev_mirror(self): + """Need to be implemented in specific test case""" + raise NotImplementedError + + def do_test(self): + self.blockdev_mirror() + self.check_mirrored_block_nodes_attached() + self.clone_vm_with_mirrored_images() + self.verify_data_files() + self.remove_files_from_system_image() diff --git a/provider/blockdev_mirror_nowait.py b/provider/blockdev_mirror_nowait.py new file mode 100644 index 0000000000000000000000000000000000000000..f9843ee909ed0227e161658f904225cc439b2005 --- /dev/null +++ b/provider/blockdev_mirror_nowait.py @@ -0,0 +1,44 @@ +""" +Module specified for test cases that don't need to wait job done, +please refer to blockdev_mirror_base for detailed test strategy. +""" + +from functools import partial + +from avocado.utils import memory + +from virttest import utils_misc + +from provider import backup_utils +from provider import blockdev_mirror_base +from provider import job_utils + + +class BlockdevMirrorNowaitTest(blockdev_mirror_base.BlockdevMirrorBaseTest): + """ + block-mirror test module without waiting mirror job done + """ + + def __init__(self, test, params, env): + super(BlockdevMirrorNowaitTest, self).__init__(test, params, env) + self._jobs = [] + + def blockdev_mirror(self): + """Run block-mirror without waiting job completed""" + for idx, source_node in enumerate(self._source_nodes): + self._jobs.append( + backup_utils.blockdev_mirror_nowait( + self.main_vm, source_node, + self._target_nodes[idx], + **self._backup_options[idx] + ) + ) + + def wait_mirror_jobs_completed(self): + """Wait till all mirror jobs completed in parallel""" + targets = [partial(job_utils.wait_until_block_job_completed, + vm=self.main_vm, job_id=j) for j in self._jobs] + try: + utils_misc.parallel(targets) + finally: + memory.drop_caches() diff --git a/provider/blockdev_mirror_parallel.py b/provider/blockdev_mirror_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..6a311ef68731cd9a0a106a4ab65c9398edc0a1d9 --- /dev/null +++ b/provider/blockdev_mirror_parallel.py @@ -0,0 +1,43 @@ +""" +Module specified for parallel test cases, i.e. +running block-mirror and some other tests in parallel, and wait +till all mirror jobs done. +Please refer to blockdev_mirror_base for detailed test strategy. +""" + +from functools import partial + +from avocado.utils import memory + +from virttest import utils_misc + +from provider import backup_utils +from provider import blockdev_mirror_base + + +class BlockdevMirrorParallelTest(blockdev_mirror_base.BlockdevMirrorBaseTest): + """ + block-mirror parallel test module + """ + + def blockdev_mirror(self): + """Run block-mirror and other operations in parallel""" + # parallel_tests includes function names separated by space + # e.g. 
parallel_tests = 'stress_test', we should define stress_test + # function with no argument + parallel_tests = self.params.objects("parallel_tests") + targets = list([getattr(self, t) + for t in parallel_tests if hasattr(self, t)]) + + # block-mirror on all source nodes is in parallel too + for idx, source_node in enumerate(self._source_nodes): + targets.append( + partial(backup_utils.blockdev_mirror, vm=self.main_vm, + source=source_node, target=self._target_nodes[idx], + **self._backup_options[idx]) + ) + + try: + utils_misc.parallel(targets) + finally: + memory.drop_caches() diff --git a/provider/blockdev_mirror_wait.py b/provider/blockdev_mirror_wait.py new file mode 100644 index 0000000000000000000000000000000000000000..51c85f4d9f26f36730c18cd93630d54d0250eb1c --- /dev/null +++ b/provider/blockdev_mirror_wait.py @@ -0,0 +1,25 @@ +""" +Module specified for non-parallel test cases that need to wait job done, +please refer to blockdev_mirror_base for detailed test strategy. +""" + +from avocado.utils import memory + +from provider import backup_utils +from provider import blockdev_mirror_base + + +class BlockdevMirrorWaitTest(blockdev_mirror_base.BlockdevMirrorBaseTest): + """ + block-mirror test module, waiting mirror job done + """ + + def blockdev_mirror(self): + """Run block-mirror and wait job done""" + try: + for idx, source_node in enumerate(self._source_nodes): + backup_utils.blockdev_mirror(self.main_vm, source_node, + self._target_nodes[idx], + **self._backup_options[idx]) + finally: + memory.drop_caches() diff --git a/provider/blockdev_stream_base.py b/provider/blockdev_stream_base.py index dc2a501977357f20006c9bba49dbb01940e15103..1c7060f2df022728112f83e9aac040edc524654c 100644 --- a/provider/blockdev_stream_base.py +++ b/provider/blockdev_stream_base.py @@ -7,6 +7,27 @@ from provider.blockdev_snapshot_base import BlockDevSnapshotTest class BlockDevStreamTest(BlockDevSnapshotTest): + def __init__(self, test, params, env): + super(BlockDevStreamTest, self).__init__(test, params, env) + self._stream_options = {} + self._top_device = "drive_%s" % self.snapshot_tag + self._init_stream_options() + if self.base_tag == self.params.objects("images")[0]: + self.disks_info.append( + ["system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] + ) + + def _init_stream_options(self): + if self.params.get("speed"): + self._stream_options["speed"] = int(self.params["speed"]) + if self.params.get("base_node"): + self._stream_options["base-node"] = self.params["base_node"] + if self.params.get("on_error"): + self._stream_options["on-error"] = self.params["on_error"] + if self.params.get("block_stream_timeout"): + self._stream_options["timeout"] = int( + self.params["block_stream_timeout"]) + def snapshot_test(self): for info in self.disks_info: self.generate_tempfile(info[1], filename="base") @@ -15,14 +36,11 @@ class BlockDevStreamTest(BlockDevSnapshotTest): self.generate_tempfile(info[1], filename="sn1") def blockdev_stream(self): - arguments = {} - if self.is_blockdev_mode(): - device = "drive_%s" % self.snapshot_tag - else: - device = self.params["device"] - arguments["base"] = self.base_image.image_filename - arguments["speed"] = int(self.params.get("speed", 0)) - backup_utils.blockdev_stream(self.main_vm, device, **arguments) + if not self.is_blockdev_mode(): + self._stream_options["base"] = self.base_image.image_filename + self._top_device = self.params["device"] + backup_utils.blockdev_stream(self.main_vm, self._top_device, + **self._stream_options) time.sleep(0.5) def 
check_backing_file(self): @@ -32,14 +50,37 @@ class BlockDevStreamTest(BlockDevSnapshotTest): backing_file = info.get("backing-filename") assert not backing_file, "Unexpect backing file(%s) found!" % backing_file + def mount_data_disks(self): + if self.base_tag != self.params.objects("images")[0]: + super(BlockDevStreamTest, self).mount_data_disks() + + def remove_files_from_system_image(self, tmo=60): + """Remove testing files from system image""" + if self.base_tag == self.params.objects("images")[0]: + files = ["%s/%s" % (info[0], info[1]) for info in self.files_info] + if files: + self.main_vm = self.main_vm.clone() + self.main_vm.create() + self.main_vm.verify_alive() + + try: + session = self.main_vm.wait_for_login() + session.cmd("rm -f %s" % " ".join(files), timeout=tmo) + session.close() + finally: + self.main_vm.destroy() + + def do_test(self): + self.snapshot_test() + self.blockdev_stream() + self.check_backing_file() + self.clone_vm.create() + self.mount_data_disks() + self.verify_data_file() + def run_test(self): self.pre_test() try: - self.snapshot_test() - self.blockdev_stream() - self.check_backing_file() - self.clone_vm.create() - self.mount_data_disks() - self.verify_data_file() + self.do_test() finally: self.post_test() diff --git a/provider/blockdev_stream_nowait.py b/provider/blockdev_stream_nowait.py new file mode 100644 index 0000000000000000000000000000000000000000..439b3737e6a4925b2987603ea4ba0397d871e077 --- /dev/null +++ b/provider/blockdev_stream_nowait.py @@ -0,0 +1,32 @@ +""" +Module specified for stream test cases that don't need to wait job done +""" + +from avocado.utils import memory + +from provider import backup_utils +from provider import blockdev_stream_base +from provider import job_utils + + +class BlockdevStreamNowaitTest(blockdev_stream_base.BlockDevStreamTest): + """ + block-stream test module without waiting job done + """ + + def __init__(self, test, params, env): + super(BlockdevStreamNowaitTest, self).__init__(test, params, env) + self._job = None + + def blockdev_stream(self): + """Run block-stream without waiting job completed""" + self._job = backup_utils.blockdev_stream_nowait(self.main_vm, + self._top_device, + **self._stream_options) + + def wait_stream_job_completed(self): + """Wait till the stream job completed""" + try: + job_utils.wait_until_block_job_completed(self.main_vm, self._job) + finally: + memory.drop_caches() diff --git a/provider/blockdev_stream_parallel.py b/provider/blockdev_stream_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a75cbe33bd8aa347c291e32ae1cb26740c326f --- /dev/null +++ b/provider/blockdev_stream_parallel.py @@ -0,0 +1,41 @@ +""" +Module specified for block stream parallel test cases, i.e. +running block-stream and some other tests in parallel, and wait +till all block jobs done. +""" + +from functools import partial + +from avocado.utils import memory + +from virttest import utils_misc + +from provider import backup_utils +from provider import blockdev_stream_base + + +class BlockdevStreamParallelTest(blockdev_stream_base.BlockDevStreamTest): + """ + block-stream parallel test module + """ + + def blockdev_stream(self): + """ + Run block-stream and other operations in parallel + + parallel_tests includes function names separated by space + e.g. 
parallel_tests = 'stress_test', we should define stress_test + function with no argument + """ + parallel_tests = self.params.objects("parallel_tests") + targets = list([getattr(self, t) + for t in parallel_tests if hasattr(self, t)]) + targets.append( + partial(backup_utils.blockdev_stream, vm=self.main_vm, + device=self._top_device, **self._stream_options) + ) + + try: + utils_misc.parallel(targets) + finally: + memory.drop_caches() diff --git a/provider/cpu_utils.py b/provider/cpu_utils.py index 8edc25886ae43145c9e01b87012f60beed1cd8fe..c1fc45a01abf881d640aa31d87e7fbda813ceca1 100644 --- a/provider/cpu_utils.py +++ b/provider/cpu_utils.py @@ -1,7 +1,10 @@ import re import logging -from virttest.utils_test import VMStress +from avocado.utils import process + +from virttest import utils_misc +from virttest.utils_test import VMStress, StressError class VMStressBinding(VMStress): @@ -11,8 +14,7 @@ def __init__(self, vm, params, stress_args=""): super(VMStressBinding, self).__init__(vm, "stress", params, stress_args=stress_args) - self.install() # pylint: disable=E0203 - self.install = lambda: None + self.install() def load_stress_tool(self, cpu_id): """ @@ -20,8 +22,17 @@ :param cpu_id: CPU id you want to bind """ - self.stress_cmds = "taskset -c %s stress" % cpu_id # pylint: disable=W0201 - super(VMStressBinding, self).load_stress_tool() + cmd = "setsid taskset -c {} {} {} > /dev/null".format(cpu_id, + self.stress_cmds, + self.stress_args) + logging.info("Launch stress with command: %s", cmd) + self.cmd_launch(cmd) + # wait for stress to start and then check, if not raise StressError + if not utils_misc.wait_for(self.app_running, + self.stress_wait_for_timeout, + first=2.0, step=1.0, + text="wait for stress app to start"): + raise StressError("Stress app is not running as expected.") def get_guest_cpu_ids(session, os_type): @@ -38,7 +49,8 @@ return set() cmd = "grep processor /proc/cpuinfo" output = session.cmd_output(cmd) - return set(map(int, re.findall(r"(\d+)$", output, re.M))) + return set(map(int, re.findall(r"processor\s+(?::\s)?(\d+)", + output, re.M))) def check_guest_cpu_topology(session, os_type, cpuinfo): @@ -89,3 +101,32 @@ logging.debug("CPU infomation of guest:\n%s", out) return is_matched + + +def check_cpu_flags(params, flags, test, session=None): + """ + Check cpu flags on host or guest.(only for Linux now) + :param params: Dictionary with the test parameters + :param flags: checked flags + :param test: QEMU test object + :param session: guest session + """ + cmd = "lscpu | grep Flags | awk -F ':' '{print $2}'" + func = process.getoutput + if session: + func = session.cmd_output + out = func(cmd).split() + missing = [f for f in flags.split() if f not in out] + if session: + logging.info("Check cpu flags inside guest") + if missing: + test.fail("Flag %s not in guest" % missing) + no_flags = params.get("no_flags") + if no_flags: + err_flags = [f for f in no_flags.split() if f in out] + if err_flags: + test.fail("Flag %s should not be present in guest" % err_flags) + else: + logging.info("Check cpu flags on host") + if missing: + test.cancel("This host doesn't support flag %s" % missing)
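check_cpu_flags above cancels the test when the host lacks a required flag and fails when the guest misses it (or exposes a flag listed in no_flags); a short usage sketch from a test body, where the rdrand flag and the vm object are illustrative:

# Host side first: cancel early if the host CPU cannot provide the flag.
check_cpu_flags(params, "rdrand", test)

# Then verify the very same flag is visible inside the guest.
session = vm.wait_for_login()
try:
    check_cpu_flags(params, "rdrand", test, session=session)
finally:
    session.close()

diff --git a/provider/input_tests.py b/provider/input_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..6a7c50693c63a4c80fa0658f374497fef6d9f6a3 --- /dev/null +++ b/provider/input_tests.py @@ -0,0 +1,304 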
@@ +"""Input test related functions""" + +import json +import os +import time +import logging + +from collections import Counter +from virttest import error_context +from virttest import graphical_console +from virttest import data_dir +from provider import input_event_proxy + + +def get_keycode_cfg(filename): + """ + Get keyname to keycode cfg table. + + :param filename: filename that key to keycode file. + """ + + keycode_cfg_path = os.path.join(data_dir.get_deps_dir("key_keycode"), filename) + with open(keycode_cfg_path) as f: + return json.load(f) + + +@error_context.context_aware +def key_tap_test(test, params, console, listener, wait_time): + """ + key tap test, single key press and release. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param console: graphical console. + :param listener: listening the mouse button event in guest. + :param keys_file: a file include all tested keys. + :param wait_time: wait event received in listener event queue. + """ + + keys_file = params.get("key_table_file") + keys_dict = get_keycode_cfg(keys_file) + for key in keys_dict.keys(): + error_context.context("Send %s key tap event" % key, logging.info) + console.key_tap(key) + time.sleep(wait_time) + + logging.info("Check guest received %s key event is " + "matched with expected key event" % key) + keycode = keys_dict[key] + exp_events = [(keycode, "KEYDOWN"), (keycode, "KEYUP")] + event_queue = listener.events + key_events = [] + while not event_queue.empty(): + event = event_queue.get() + if event["type"] == "POINTERMOVE": + continue + key_events.append((event["keyCode"], event["type"])) + + if key_events != exp_events: + test.fail("Received key event didn't match expected event.\n" + "Received key event as: %s\n Expected event as: %s" + % (key_events, exp_events)) + + +@error_context.context_aware +def mouse_btn_test(test, params, console, listener, wait_time): + """ + Mouse button test, include button: left, right, middle, side, extra. + Only do single button test here. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param console: graphical console. + :param listener: listening the mouse button event in guest. + :param wait_time: wait event received in listener event queue. + """ + mouse_btn_map = {'left': 'BTN_LEFT', + 'right': 'BTN_RIGHT', + 'middle': 'BTN_MIDDLE', + 'side': 'BTN_SIDE', + 'extra': 'BTN_EXTRA'} + btns = params.objects("btns") + for btn in btns: + error_context.context("Click mouse %s button" % btn, logging.info) + console.btn_click(btn) + + keycode = mouse_btn_map[btn] + exp_events = [(keycode, "KEYDOWN"), (keycode, "KEYUP")] + time.sleep(wait_time) + events_queue = listener.events + btn_event = list() + + error_context.context("Check correct button event is received", + logging.info) + while not events_queue.empty(): + events = events_queue.get() + # some windows os will return pointer move event first + # before return btn event, so filter them here. + if events["type"] == "POINTERMOVE": + continue + btn_event.append((events["keyCode"], events["type"])) + + if btn_event != exp_events: + test.fail("Received btn events don't match expected events.\n" + "Received btn events as: %s\n Expected events as: %s" + % (btn_event, exp_events)) + + +@error_context.context_aware +def mouse_scroll_test(test, params, console, listener, wait_time, count=1): + """ + Mouse scroll test. + + :param test: kvm test object. + :param params: Dictionary with the test parameters + :param console: graphical console. 
+ :param listener: listening the mouse button event in guest. + :param wait_time: wait event received in listener event queue. + :param count: wheel event counts, default count=1. + """ + scrolls = params.objects("scrolls") + exp_events = {'wheel-up': ("WHEELFORWARD", 0), + 'wheel-down': ('WHEELBACKWARD', 0)} + for scroll in scrolls: + error_context.context("Scroll mouse %s" % scroll, logging.info) + if "up" in scroll: + console.scroll_forward(count) + else: + console.scroll_backward(count) + + events_queue = listener.events + time.sleep(wait_time) + error_context.context("Check correct scroll event is received", + logging.info) + exp_event = exp_events.get(scroll) + samples = [] + while not events_queue.empty(): + event = events_queue.get() + # some windows os will return pointer move event first + # before return scroll event, so filter them here. + if event["type"] == "POINTERMOVE": + continue + samples.append((event["type"], event["hScroll"])) + + counter = Counter(samples) + num = counter.pop(exp_event, 0) + if num != count: + test.fail("Received scroll number %s doesn't match expected " + "scroll count %s" % (num, count)) + if counter: + test.fail("Received scroll events don't match expected events.\n" + "Received scroll events as: %s\n Expected events as: %s" + % (counter, exp_event)) + + +@error_context.context_aware +def mouse_move_test(test, params, console, listener, + wait_time, end_pos, absolute): + """ + Mouse move test, default move trace is uniform linear motion. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param console: graphical console. + :param listener: listening the mouse button event in guest. + :param wait_time: wait event received in listener event queue. + :param end_pos: a tuple of mouse destination position. + :param absolute: Mouse move type is absolute or relative. + """ + move_rate = int(params.get("move_rate", 80)) + move_duration = int(params.get("move_duration", 1)) + line = graphical_console.uniform_linear(move_duration, move_rate) + width, height = console.screen_size + events_queue = listener.events + event_lst = [] + start_pos = console.pointer_pos + x0, y0 = start_pos + xn, yn = end_pos + # Compute a line y=kx+b through start_pos and end_pos. + if (xn - x0) != 0: + vertical = 0 + k = (yn - y0) / (xn - x0) + b = yn - (k * xn) + else: + vertical = 1 + + error_context.context("Moving pointer from %s to %s" + % (start_pos, end_pos), logging.info) + console.pointer_move(end_pos, motion=line, absolute=absolute) + time.sleep(wait_time) + + error_context.context("Collecting all pointer move events from guest", + logging.info) + while not events_queue.empty(): + event = events_queue.get() + xpos, ypos = event["xPos"], event["yPos"] + # Filter out events beyond the screen size, + # since the OS ignores/corrects these events. + if 0 <= xpos <= width and 0 <= ypos <= height: + event_lst.append((event["xPos"], event["yPos"])) + + xn_guest, yn_guest = event_lst[-1] + tolerance = int(params.get("tolerance")) + error_context.context("Compare if pointer moved to destination pos (%s, %s); " + "the missed value should be in tolerance scope." + % end_pos, logging.info) + if (abs(xn - xn_guest) > tolerance) or (abs(yn - yn_guest) > tolerance): + test.fail("pointer did not move to destination position. " 
+ "it move to pos (%s, %s) in guest, but exepected pos is" + "(%s, %s)" % (xn_guest, yn_guest, xn, yn)) + + error_context.context("Compare if pointer move trace nearby destination line," + "the missed value should in tolerance scope.", + logging.info) + + for i, (x, y) in enumerate(event_lst): + if not vertical: + if abs((k * x + b) - y) > tolerance: + test.fail("Received pointer pos beyond line's tolerance scope " + "when move from {0} to {1}. Received pos is ({2}, {3})," + "it didn't nearby the expected line " + "y={4}x+{5}.".format(start_pos, end_pos, x, y, k, b)) + elif k == 0: + # for horizontal direction line, only x value will change. + if i > 0: + dx = [x2 - x1 for x1, x2 in zip(event_lst[i-1], event_lst[i])][0] + if (xn - x0 > 0 and dx <= 0): + test.fail("pointer move direction is wrong when " + "move from {0} to {1}.".format(start_pos, end_pos)) + elif (xn - x0 < 0 and dx >= 0): + test.fail("pointer move direction is wrong when " + "move from {0} to {1}.".format(start_pos, end_pos)) + else: + # for vertical direction line, only y value will change. + if i > 0: + dy = [y2 - y1 for y1, y2 in zip(event_lst[i-1], event_lst[i])][1] + if (yn - y0 > 0 and dy <= 0) or (yn - y0 < 0 and dy >= 0): + test.fail("pointer move to incorrect direction when " + "move from {0} to {1}.".format(start_pos, end_pos)) + + +def query_mice_status(vm, mice_name): + """ + Query which mice enabled currently in guest. + + :param vm: VM object + :param mice_name: query mice name + """ + events = vm.monitor.query_mice() + for event in events: + if event['name'] == mice_name: + return event + + +def keyboard_test(test, params, vm, wait_time): + """ + Keyboard tests, only include key_tap_test currently. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param vm: VM object + :param wait_time: wait event received in listener event queue. + """ + console = graphical_console.GraphicalConsole(vm) + listener = input_event_proxy.EventListener(vm) + key_tap_test(test, params, console, listener, wait_time) + listener.clear_events() + listener.cleanup() + + +def mouse_test(test, params, vm, wait_time, count=1): + """ + Mouse test, include button test, scroll test and move test. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param vm: VM object + :param wait_time: wait event received in listener event queue. + :param count: wheel event counts, default count=1. + """ + console = graphical_console.GraphicalConsole(vm) + listener = input_event_proxy.EventListener(vm) + mice_name = params.get("mice_name", "QEMU PS/2 Mouse") + mice_info = query_mice_status(vm, mice_name) + absolute = True if mice_info["absolute"] else False + error_context.context("Check if %s device is working" % mice_name, + logging.info) + if not mice_info["current"]: + test.fail("%s does not worked currently" % mice_name) + + mouse_btn_test(test, params, console, listener, wait_time) + mouse_scroll_test(test, params, console, listener, wait_time, count=count) + if not params.get("target_pos", None): + width, height = console.screen_size + x_max, y_max = width-1, height-1 + target_pos = [(1, 0), (x_max, 0), (1, y_max), (x_max, y_max)] + else: + # suggest set target_pos if want to test one target position. 
+        target_pos = [tuple([int(i) for i in params.objects("target_pos")])]
+    for end_pos in target_pos:
+        mouse_move_test(test, params, console, listener, wait_time,
+                        end_pos, absolute)
+    listener.clear_events()
+    listener.cleanup()
diff --git a/provider/job_utils.py b/provider/job_utils.py
index 3c9a52f536988b4a7c77c0fbc709635b466030a9..06b749178dd21624f9665209b815711f033055a5 100644
--- a/provider/job_utils.py
+++ b/provider/job_utils.py
@@ -69,13 +69,16 @@
     assert finished, "wait for block job complete event timeout in %s seconds" % timeout
 
 
+@fail_on
+def job_complete(vm, job_id, timeout=120):
+    """Complete the job once it reaches the ready status"""
+    wait_until_job_status_match(vm, "ready", job_id, timeout)
+    arguments = {"id": job_id}
+    vm.monitor.cmd("job-complete", arguments)
+
+
 @fail_on
 def block_job_complete(vm, job_id, timeout=120):
-    info = get_job_by_id(vm, job_id)
-    if info.get("type") == "mirror":
-        wait_until_job_status_match(vm, "ready", job_id, timeout)
-    arguments = {"device": job_id}
-    vm.monitor.cmd("block-job-complete", arguments)
+    job_complete(vm, job_id, timeout)
 
 
 @fail_on
@@ -85,7 +88,7 @@ def block_job_dismiss(vm, job_id, timeout=120):
     """
     job = get_block_job_by_id(vm, job_id)
     if job.get("auto-dismiss", True) is False:
-        _job_dismiss(vm, job_id, True, timeout)
+        _job_dismiss(vm, job_id, timeout)
         time.sleep(0.1)
         job = get_block_job_by_id(vm, job_id)
         assert not job, "Block job '%s' exists" % job_id
@@ -94,40 +97,37 @@
 @fail_on
 def job_dismiss(vm, job_id, timeout=120):
     """dismiss job when job status is concluded"""
-    _job_dismiss(vm, job_id, False, timeout)
+    _job_dismiss(vm, job_id, timeout)
     time.sleep(0.1)
     job = get_job_by_id(vm, job_id)
     assert not job, "Job '%s' exists" % job_id
 
 
-@fail_on
-def _job_dismiss(vm, job_id, is_block_job=False, timeout=120):
+def _job_dismiss(vm, job_id, timeout=120):
     """dismiss job when job status is concluded"""
     wait_until_job_status_match(vm, "concluded", job_id, timeout)
-    cmd = "block-job-dismiss" if is_block_job else "job-dismiss"
     arguments = {"id": job_id}
-    return vm.monitor.cmd(cmd, arguments)
+    return vm.monitor.cmd("job-dismiss", arguments)
 
 
 def block_job_finalize(vm, job_id, timeout=120):
     """Finalize block job when job in pending state"""
     job = get_block_job_by_id(vm, job_id)
     if job.get("auto-finalize", True) is False:
-        return _job_finalize(vm, job_id, True, timeout)
+        return _job_finalize(vm, job_id, timeout)
 
 
 def job_finalize(vm, job_id, timeout=120):
     """Finalize job when job in pending state"""
-    return _job_finalize(vm, job_id, False, timeout)
+    return _job_finalize(vm, job_id, timeout)
 
 
 @fail_on
-def _job_finalize(vm, job_id, is_block_job=False, timeout=120):
+def _job_finalize(vm, job_id, timeout=120):
     """Finalize job when job in pending state"""
     wait_until_job_status_match(vm, "pending", job_id, timeout)
-    cmd = "block-job-finalize" if is_block_job else "job-finalize"
     arguments = {"id": job_id}
-    vm.monitor.cmd(cmd, arguments)
+    vm.monitor.cmd("job-finalize", arguments)
 
 
 @fail_on
diff --git a/provider/nbd_image_export.py b/provider/nbd_image_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..e89702a1865e5018273e9295ebfda5c27ec9ece6
--- /dev/null
+++ b/provider/nbd_image_export.py
@@ -0,0 +1,217 @@
+"""
+Module for providing interfaces for exporting a local image with
+qemu-nbd or the qemu internal nbd server.
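+
+A minimal usage sketch for the qemu-nbd exporter (the image tag 'stg'
+and its params are illustrative, not fixed by this module):
+
+    exporter = QemuNBDExportImage(params, 'stg')
+    exporter.create_image()
+    exporter.export_image()
+    # ... access the image over NBD ...
+    exporter.stop_export()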
+
+Available classes:
+- QemuNBDExportImage: Export a local image with qemu-nbd
+- InternalNBDExportImage: Export an image with the vm qemu internal nbd server
+
+Available methods:
+- create_image: Create a local image with qemu-img or
+                with a user defined command
+- export_image: Export the local image
+- stop_export: Stop exporting the image
+- list_exported_image: List the nbd image with qemu-nbd
+- hotplug_tls: Hotplug the tls creds object for the internal nbd server
+- hotplug_image: Hotplug the local image to be exported
+- get_export_name: Get the export name for an internal nbd export
+- start_nbd_server: Start the internal nbd server
+- add_nbd_image: Add an image to the internal nbd server
+- remove_nbd_image: Remove an image from the internal nbd server
+- stop_nbd_server: Stop the internal nbd server
+"""
+
+import os
+import signal
+import logging
+
+from avocado.utils import process
+from avocado.core import exceptions
+
+from virttest import nbd
+from virttest import data_dir
+from virttest import qemu_storage
+from virttest import utils_misc
+from virttest import qemu_devices
+
+
+class NBDExportImage(object):
+    """NBD local image export base class"""
+
+    def __init__(self, params, local_image):
+        """
+        Initialize the object.
+        :param local_image: local image tag
+        :param params: dictionary containing all test parameters.
+        """
+        self._tag = local_image
+        self._params = params
+        self._image_params = self._params.object_params(self._tag)
+
+    def create_image(self):
+        # result stays None when image creation is skipped entirely
+        result = None
+        if self._image_params.get('create_image_cmd'):
+            result = process.run(self._image_params['create_image_cmd'],
+                                 ignore_status=True, shell=True)
+        elif not self._image_params.get_boolean("force_create_image"):
+            _, result = qemu_storage.QemuImg(
+                self._image_params,
+                data_dir.get_data_dir(),
+                self._tag
+            ).create(self._image_params)
+
+        if result is not None and result.exit_status != 0:
+            raise exceptions.TestFail('Failed to create image, error: %s'
+                                      % result.stderr.decode())
+
+    def export_image(self):
+        raise NotImplementedError()
+
+    def stop_export(self):
+        raise NotImplementedError()
+
+
+class QemuNBDExportImage(NBDExportImage):
+    """Export a local image with the qemu-nbd command"""
+
+    def __init__(self, params, local_image):
+        super(QemuNBDExportImage, self).__init__(params, local_image)
+        self._qemu_nbd = utils_misc.get_qemu_nbd_binary(self._params)
+        filename_repr = 'json' if self._image_params.get(
+            'nbd_export_format') == 'luks' else 'filename'
+        self._local_filename = qemu_storage.get_image_repr(
+            self._tag, self._image_params,
+            data_dir.get_data_dir(), filename_repr)
+        self._nbd_server_pid = None
+
+    def export_image(self):
+        logging.info("Export image with qemu-nbd")
+        self._nbd_server_pid = nbd.export_image(self._qemu_nbd,
+                                                self._local_filename,
+                                                self._tag, self._image_params)
+        if self._nbd_server_pid is None:
+            raise exceptions.TestFail('Failed to export image')
+
+    def list_exported_image(self, nbd_image, nbd_image_params):
+        logging.info("List the nbd image with qemu-nbd")
+        result = nbd.list_exported_image(self._qemu_nbd, nbd_image,
+                                         nbd_image_params)
+        if result.exit_status != 0:
+            raise exceptions.TestFail('Failed to list nbd image: %s'
+                                      % result.stderr.decode())
+
+    def stop_export(self):
+        if self._nbd_server_pid is not None:
+            try:
+                # tolerate the case where qemu-nbd already crashed or
+                # exited unexpectedly and the pid no longer exists
+                os.kill(self._nbd_server_pid, signal.SIGKILL)
+            except Exception as e:
+                logging.warn("Error occurred when killing nbd server: %s"
+                             % str(e))
+            finally:
+                self._nbd_server_pid = None
+
+
+class InternalNBDExportImage(NBDExportImage):
+    """Export an image with the qemu internal nbd
server""" + + def __init__(self, vm, params, local_image): + super(InternalNBDExportImage, self).__init__(params, local_image) + self._tls_creds_id = None + self._node_name = None + self._image_devices = None + self._vm = vm + + def get_export_name(self): + """export name is the node name if nbd_export_name is not set""" + return self._image_params['nbd_export_name'] if self._image_params.get( + 'nbd_export_name') else self._node_name + + def hotplug_image(self): + """Hotplug the image to be exported""" + devices = self._vm.devices.images_define_by_params(self._tag, + self._image_params, + 'disk') + + # Only hotplug protocol and format node and the related objects + devices.pop() + self._node_name = devices[-1].get_qid() + self._image_devices = devices + + logging.info("Plug devices(without image device driver)") + for dev in devices: + ret = self._vm.devices.simple_hotplug(dev, self._vm.monitor) + if not ret[1]: + raise exceptions.TestFail("Failed to hotplug device '%s': %s." + % (dev, ret[0])) + + def hotplug_tls(self): + """Hotplug tls creds object for nbd server""" + if self._image_params.get('nbd_unix_socket'): + logging.info('TLS is only supported with IP') + elif self._image_params.get('nbd_server_tls_creds'): + logging.info("Plug server tls creds device") + self._tls_creds_id = '%s_server_tls_creds' % self._tag + dev = qemu_devices.qdevices.QObject('tls-creds-x509') + dev.set_param("id", self._tls_creds_id) + dev.set_param("endpoint", "server") + dev.set_param("dir", self._image_params['nbd_server_tls_creds']) + ret = self._vm.devices.simple_hotplug(dev, self._vm.monitor) + if not ret[1]: + raise exceptions.TestFail("Failed to hotplug device '%s': %s." + % (dev, ret[0])) + + def start_nbd_server(self): + """Start internal nbd server""" + server = { + 'type': 'unix', + 'path': self._image_params['nbd_unix_socket'] + } if self._image_params.get('nbd_unix_socket') else { + 'type': 'inet', + 'host': '0.0.0.0', + 'port': self._image_params.get('nbd_port', '10809') + } + + logging.info("Start internal nbd server") + return self._vm.monitor.nbd_server_start(server, self._tls_creds_id) + + def add_nbd_image(self, node_name=None): + """ + Add an image(to be exported) to internal nbd server. + :param node_name: block node name, the node might be hotplugged + by other utils, or the node has already been + present in VM. + """ + if node_name: + self._node_name = node_name + + logging.info("Add image node to nbd server") + return self._vm.monitor.nbd_server_add( + self._node_name, + self._image_params.get('nbd_export_name'), + self._image_params.get('nbd_export_writable'), + self._image_params.get('nbd_export_bitmap')) + + def remove_nbd_image(self): + """Remove the exported image from internal nbd server""" + logging.info("Remove image from nbd server") + return self._vm.monitor.nbd_server_remove( + self.get_export_name(), + self._image_params.get('nbd_remove_mode') + ) + + def stop_nbd_server(self): + """Stop internal nbd server, it also unregisters all devices""" + logging.info("Stop nbd server") + return self._vm.monitor.nbd_server_stop() + + def export_image(self): + """ + For internal nbd server, in order to export an image, start the + internal nbd server first, then add a local image to server. 
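+
+        A typical call sequence for a hotplugged image would be (method
+        names as defined in this class; a sketch, not the only order):
+
+            exporter.hotplug_tls()     # optional, when TLS creds are set
+            exporter.hotplug_image()
+            exporter.export_image()    # nbd-server-start + nbd-server-add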
+ """ + self.start_nbd_server() + self.add_nbd_image() + + def stop_export(self): + self.remove_nbd_image() + self.stop_nbd_server() diff --git a/provider/throttle_utils.py b/provider/throttle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..990384a753774739d69164d617ab841b76a81da7 --- /dev/null +++ b/provider/throttle_utils.py @@ -0,0 +1,727 @@ +""" +Module for IO throttling relevant interfaces. +""" +import copy +import json +import logging +import random +import re +import string +import tempfile +from math import ceil +from multiprocessing.pool import ThreadPool +from time import sleep + +from virttest.utils_misc import get_linux_drive_path + +from virttest.qemu_monitor import QMPCmdError + +from virttest.qemu_devices.qdevices import QThrottleGroup + + +class ThrottleError(Exception): + """ General Throttle error""" + pass + + +class ThrottleGroupManager(object): + """ + General operations for Throttle group. + """ + + def __init__(self, vm): + """ + :param vm:VM object. + """ + self._vm = vm + self._monitor = vm.monitor + + def set_monitor(self, monitor): + """ + Set the default monitor. + + :param monitor: QMPMonitor monitor. + """ + self._monitor = monitor + + # object-add + def add_throttle_group(self, group_id, props): + """ + hot-plug throttle group object. + + :param group_id: Throttle group id. + :param props: Dict of throttle group properties. + :return: QThrottleGroup object. + """ + + dev = QThrottleGroup(group_id, props) + try: + self._vm.devices.simple_hotplug(dev, self._monitor) + return dev + except QMPCmdError: + self._vm.devices.remove(dev) + + # object-del + def delete_throttle_group(self, group_id): + """ + hot-unplug throttle group object. + + :param group_id: Throttle group id. + :return: True for succeed. + """ + + dev = self.get_throttle_group(group_id) + if dev: + self._vm.devices.simple_unplug(dev, self._monitor) + return True + else: + logging.error("Can not find throttle group") + return False + + def get_throttle_group(self, group_id): + """ + Search throttle group in vm devices. + + :param group_id: Throttle group id. + :return: QThrottleGroup object. None for not found or something wrong. + """ + + devs = self._vm.devices.get_by_qid(group_id) + if len(devs) != 1: + logging.error("There are %d devices %s" % (len(devs), group_id)) + return None + return devs[0] + + def get_throttle_group_props(self, group_id): + """ + Get the attributes of throttle group object via qmp command. + + :param group_id: Throttle group id. + :return: Dictionary of throttle group properties. + """ + + try: + return self._monitor.qom_get(group_id, "limits") + except QMPCmdError as e: + logging.error("qom_get %s %s " % (group_id, str(e))) + + # qom-set + def update_throttle_group(self, group_id, props): + """ + Update throttle group properties. + + :param group_id: Throttle group id. + :param props: New throttle group properties. + """ + + dev = self.get_throttle_group(group_id) + if dev: + tmp_dev = QThrottleGroup(group_id, props) + self._monitor.qom_set(group_id, "limits", tmp_dev.raw_limits) + dev.raw_limits = tmp_dev.raw_limits + else: + raise ThrottleError("Can not find throttle group") + + # x-blockdev-reopen + def change_throttle_group(self, image, group_id): + """ + Change image to other throttle group. + + :param image: Image name of disk. + :param group_id: New throttle group id. 
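+
+        Example (hypothetical names): change_throttle_group("stg", "group2")
+        reopens the node "drive_stg" with throttle-group "group2" via
+        x-blockdev-reopen.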
+ """ + + node_name = "drive_" + image + + throttle_blockdev = self._vm.devices.get_by_qid(node_name)[0] + + old_throttle_group = self._vm.devices.get_by_qid( + throttle_blockdev.get_param("throttle-group"))[0] + new_throttle_group = self._vm.devices.get_by_qid(group_id)[0] + file = throttle_blockdev.get_param("file") + args = {"driver": "throttle", "node-name": node_name, "file": file, + "throttle-group": group_id} + self._monitor.x_blockdev_reopen(args) + + for bus in old_throttle_group.child_bus: + bus.remove(throttle_blockdev) + + throttle_blockdev.parent_bus = ( + {"busid": group_id}, {"type": "ThrottleGroup"}) + throttle_blockdev.set_param("throttle-group", group_id) + + for bus in new_throttle_group.child_bus: + bus.insert(throttle_blockdev) + + +def _online_disk_windows(session, index, timeout=360): + """ + Online disk in windows guest. + + :param session: Session object connect to guest. + :param index: Physical disk index. + :param timeout: Timeout for cmd execution in seconds. + :return: The output of cmd + """ + + disk = "disk_" + ''.join( + random.sample(string.ascii_letters + string.digits, 4)) + online_cmd = "echo select disk %s > " + disk + online_cmd += " && echo online disk noerr >> " + disk + online_cmd += " && echo clean >> " + disk + online_cmd += " && echo attributes disk clear readonly >> " + disk + online_cmd += " && echo detail disk >> " + disk + online_cmd += " && diskpart /s " + disk + online_cmd += " && del /f " + disk + return session.cmd(online_cmd % index, timeout=timeout) + + +def _get_drive_path(session, params, image): + """ + Get the disk name by image serial in guest. + + :param session: Session object connect to guest. + :param params: params of running ENV. + :param image: image name of disk in qemu. + :return: The disk path in guest + """ + + image_params = params.object_params(image) + os_type = params['os_type'] + extra_params = image_params["blk_extra_params"] + serial = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) + if os_type == "windows": + cmd = "wmic diskdrive where SerialNumber='%s' get Index,Name" + disks = session.cmd_output(cmd % serial) + info = disks.splitlines() + if len(info) > 1: + attr = info[1].split() + _online_disk_windows(session, attr[0]) + return attr[1] + + return get_linux_drive_path(session, serial) + + +class ThrottleTester(object): + """ + FIO test for in throttle group disks, It contains building general fio + command and check the result of fio command output. + Example of usage: + ... + fio = generate_instance(params, vm, 'fio') + tt = ThrottleTester("group1",["img1","img2"]) + tt.set_fio(fio) + tt.build_default_option() + tt.build_images_fio_option() + tt.start() + + """ + # Default data struct of expected result. + raw_expected = { + "burst": {"read": 0, "write": 0, "total": 0, "burst_time": 0, + "burst_empty_time": 0}, + "normal": {"read": 0, "write": 0, "total": 0}} + # Default data struct of raw image data. + raw_image_data = {"name": "", "fio_option": "", "output": {}} + + def __init__(self, test, params, vm, session, group, images=None): + """ + + :param test: Context of test. + :param params: params of running ENV. + :param vm: VM object. + :param session: Session object connect to guest. + :param group: Throttle group name. + :param images: list of relevant images names. 
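+
+        A hypothetical full instantiation (argument values illustrative):
+            ThrottleTester(test, params, vm, session, "group1",
+                           ["stg1", "stg2"])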
+        """
+
+        self._test = test
+        self._vm = vm
+        self._session = session
+        self._monitor = vm.monitor
+        self._fio = None
+        self._params = params
+        self.group = group
+        # shared fio option without --filename
+        self._fio_option = ""
+        self.images = images.copy() if images else []
+        self._throttle = {
+            "images": {image: copy.deepcopy(ThrottleTester.raw_image_data)
+                       for image in self.images},
+            "expected": copy.deepcopy(ThrottleTester.raw_expected)}
+        self._margin = 0.3
+
+    @staticmethod
+    def _generate_output_by_json(output):
+        """
+        Convert fio command output to a dict object.
+
+        :param output: fio command output with option --output-format=json.
+        :return: dict of fio command output.
+        """
+
+        with tempfile.TemporaryFile(mode="w+") as tmp:
+            tmp.write(output)
+            tmp.seek(0)
+            line = tmp.readline()
+            begin_flag = False
+            block_index = 1
+            block = {}
+            data = ""
+            while line:
+                if line == "{\n":
+                    if not begin_flag:
+                        begin_flag = True
+                    else:
+                        # error
+                        break
+                if begin_flag:
+                    data += line
+                if line == "}\n":
+                    if begin_flag:
+                        begin_flag = False
+                    else:
+                        # error
+                        break
+                    block[block_index] = json.loads(data)
+                    data = ""
+                    block_index += 1
+
+                line = tmp.readline()
+
+            if begin_flag:
+                logging.error("Wrong data format")
+                return {}
+            return block
+
+    def set_fio(self, fio):
+        """
+        Set the fio instance.
+
+        :param fio: fio instance.
+        """
+
+        self._fio = fio
+
+    def run_fio(self, *args):
+        """
+        Run the fio command in the guest.
+
+        :param args: image data; the data struct refers to raw_image_data.
+        :return: fio command output.
+        """
+
+        if not self._fio:
+            self._test.error("Please set fio first")
+        image_info = args[0]
+        fio_option = image_info["fio_option"]
+        session = self._vm.wait_for_login()
+        cmd = ' '.join((self._fio.cfg.fio_path, fio_option))
+        burst = self._throttle["expected"]["burst"]
+        expected_burst = burst["read"] + burst["write"] + burst["total"]
+        if expected_burst:
+            # run fio twice: the first pass measures the burst rate and
+            # the second pass the sustained rate.
+            cmd += " && " + cmd
+        logging.info("run_fio: " + cmd)
+        out = session.cmd(cmd, 1800)
+        image_info["output"] = self._generate_output_by_json(out)
+        return image_info["output"]
+
+    def check_output(self, images):
+        """
+        Check whether the output matches the expected result.
+
+        :param images: list of participating images.
+        :return: True on success.
+        """
+
+        burst = self._throttle["expected"]["burst"]
+        expected_burst = burst["read"] + burst["write"] + burst["total"]
+
+        normal = self._throttle["expected"]["normal"]
+        expected_normal = normal["read"] + normal["write"] + normal["total"]
+
+        # no throttling configured at all
+        if expected_normal == 0:
+            logging.warning("Skipping checking on the empty throttle")
+            return True
+
+        sum_burst = 0
+        sum_normal = 0
+        num_images = len(images)
+        for image in images:
+            output = self._throttle["images"][image]["output"]  # type: dict
+            num_samples = len(output)
+            logging.debug("Check %s in total %d images."
                          % (image, num_images))
+            if expected_burst:
+                if num_samples < 2:
+                    self._test.error(
+                        "At least 2 data samples are needed, got %d"
+                        % num_samples)
+                read = output[1]["jobs"][0]["read"]["iops"]
+                write = output[1]["jobs"][0]["write"]["iops"]
+                total = read + write
+                sum_burst += total
+            else:
+                if num_samples < 1:
+                    self._test.error(
+                        "At least 1 data sample is needed, got %d"
+                        % num_samples)
+
+            read = output[num_samples]["jobs"][0]["read"]["iops"]
+            write = output[num_samples]["jobs"][0]["write"]["iops"]
+            total = read + write
+            sum_normal += total
+
+        logging.debug("expected_burst:%d %d expected_normal:%d %d" % (
+            expected_burst, sum_burst, expected_normal, sum_normal))
+        if expected_burst:
+            real_gap = abs(expected_burst - sum_burst)
+            if real_gap <= expected_burst * self._margin:
+                logging.debug(
+                    "Passed burst %d %d" % (expected_burst, sum_burst))
+            else:
+                self._test.fail(
+                    "Failed burst %d %d" % (expected_burst, sum_burst))
+
+        if abs(expected_normal - sum_normal) <= expected_normal * self._margin:
+            logging.debug("Passed normal verification %d %d" % (
+                expected_normal, sum_normal))
+        else:
+            self._test.fail(
+                "Failed normal %d %d" % (expected_normal, sum_normal))
+
+        return True
+
+    def start_one_image_test(self, image):
+        """
+        Process throttle testing for one disk.
+
+        :param image: name of image
+        :return: True on success; False, or a test error raised, on failure.
+        """
+
+        logging.debug("Start one image run_fio: " + image)
+        self.run_fio(self._throttle["images"][image])
+        return self.check_output([image])
+
+    def start_all_images_test(self):
+        """
+        Process throttle testing for multiple disks in parallel.
+
+        :return: True on success; False, or a test error raised, on failure.
+        """
+
+        num = len(self.images)
+        pool = ThreadPool(num)
+
+        for img in self.images:
+            logging.debug("Start all images run_fio: " + img)
+            pool.apply_async(self.run_fio, (self._throttle["images"][img],))
+        pool.close()
+        pool.join()
+        return self.check_output(self.images)
+
+    def start(self):
+        """
+        Process one-disk and then multi-disk throttle testing.
+
+        :return: True on success; False, or a test error raised, on failure.
+        """
+
+        ret = False
+        num = len(self.images)
+        if num:
+            ret = self.start_one_image_test(self.images[0])
+            if ret and num > 1:
+                self.wait_empty_burst()
+                ret = self.start_all_images_test()
+
+        return ret
+
+    def wait_empty_burst(self):
+        """
+        Wait some time for the burst allowance to drain.
+        """
+        burst = self._throttle["expected"]["burst"]
+        if "burst_empty_time" in burst.keys():
+            logging.debug("Wait empty %d" % burst["burst_empty_time"])
+            sleep(burst["burst_empty_time"])
+
+    def set_image_fio_option(self, image, option):
+        """
+        Set the fio option for a specific image.
+
+        :param image: image name
+        :param option: full fio option for the image, executed by run_fio
+        """
+
+        self._throttle["images"][image]["fio_option"] = option
+
+    def set_throttle_expected(self, expected, reset=False):
+        """
+        Set the expected result for testing. It is stored in
+        throttle["expected"]; the key-value pairs refer to raw_expected.
+
+        :param expected: Dict of the expected result
+        :param reset: True to reset the data before updating.
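+
+        Example (hypothetical values, matching the raw_expected layout):
+            set_throttle_expected({"burst": {"read": 200, "burst_time": 10}})
+            set_throttle_expected({"normal": {"total": 100}}, reset=True)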
+ """ + + if reset: + self._throttle["expected"] = copy.deepcopy( + ThrottleTester.raw_expected) + if expected: + for k, v in expected.items(): + if isinstance(v, dict): + self._throttle["expected"][k].update(expected[k]) + else: + self._throttle["expected"][k] = expected[k] + + def set_fio_option(self, option): + """ + Set the default fio option for all images without --filename + + :param option: the fio option + """ + + self._fio_option = option + + def build_default_option(self): + """ + Generate default fio option for all images. It also generates expected + result according to throttle group property. + """ + + tgm = ThrottleGroupManager(self._vm) + attrs = tgm.get_throttle_group_props(self.group) + + option = "--direct=1 --name=test --iodepth=1 --thread" + option += " --output-format=json " + + iops_size = attrs["iops-size"] + iops_size = 4096 if iops_size == 0 else iops_size + + bps_read = attrs["bps-read"] + bps_read_max = attrs["bps-read-max"] + bps_read_max_length = attrs["bps-read-max-length"] + bps_total = attrs["bps-total"] + bps_total_max = attrs["bps-total-max"] + bps_total_max_length = attrs["bps-total-max-length"] + bps_write = attrs["bps-write"] + bps_write_max = attrs["bps-write-max"] + bps_write_max_length = attrs["bps-write-max-length"] + iops_read = attrs["iops-read"] + iops_read_max = attrs["iops-read-max"] + iops_read_max_length = attrs["iops-read-max-length"] + iops_total = attrs["iops-total"] + iops_total_max = attrs["iops-total-max"] + iops_total_max_length = attrs["iops-total-max-length"] + iops_write = attrs["iops-write"] + iops_write_max = attrs["iops-write-max"] + iops_write_max_length = attrs["iops-write-max-length"] + + burst_read_iops = 0 + burst_write_iops = 0 + burst_total_iops = 0 + normal_read_iops = 0 + normal_write_iops = 0 + normal_total_iops = 0 + + burst_time = 0 + burst_empty_time = 0 + + # reset expected result + self.set_throttle_expected(None, True) + + def _count_normal_iops(variables, iops_type): + iops_val = variables["iops_%s" % iops_type] + bps_val = variables["bps_%s" % iops_type] + normal_iops = 0 + if iops_val != 0 or bps_val != 0: + bps = int(bps_val / iops_size) + iops = iops_val + if (iops >= bps != 0) or iops == 0: + normal_iops = bps + elif (bps >= iops != 0) or bps == 0: + normal_iops = iops + self.set_throttle_expected({"normal": {iops_type: normal_iops}}) + return normal_iops + + def _count_burst_iops(variables, iops_type): + iops_max = variables["iops_%s_max" % iops_type] + iops_length = variables["iops_%s_max_length" % iops_type] + bps_max = variables["bps_%s_max" % iops_type] + bps_length = variables["bps_%s_max_length" % iops_type] + normal_iops = variables["normal_%s_iops" % iops_type] + burst_iops = 0 + empty_time = burst_empty_time + full_time = burst_time + if iops_max != 0 or bps_max != 0: + bps = int(bps_max * bps_length / iops_size) + iops = iops_max * iops_length + burst_full = 0 + if (iops >= bps != 0) or iops == 0: + burst_full = bps + burst_iops = int(bps_max / iops_size) + elif (bps >= iops != 0) or bps == 0: + burst_full = iops + burst_iops = iops_max + + empty_time = burst_full / normal_iops + full_time = burst_full / (burst_iops - normal_iops) + empty_time = ceil(max(empty_time, burst_empty_time)) + full_time = int(max(full_time, burst_time)) + + self.set_throttle_expected({"burst": {iops_type: burst_iops}}) + return burst_iops, empty_time, full_time + + # count normal property + local_vars = locals() + normal_write_iops = _count_normal_iops(local_vars, "write") + normal_read_iops = 
_count_normal_iops(local_vars, "read")
+        normal_total_iops = _count_normal_iops(local_vars, "total")
+
+        # count the burst properties
+        local_vars = locals()
+        burst_write_iops, burst_empty_time, burst_time = _count_burst_iops(
+            local_vars, "write")
+        burst_read_iops, burst_empty_time, burst_time = _count_burst_iops(
+            local_vars, "read")
+        burst_total_iops, burst_empty_time, burst_time = _count_burst_iops(
+            local_vars, "total")
+
+        runtime = 60
+        if burst_time:
+            runtime = burst_time
+        self.set_throttle_expected({"burst": {
+            "burst_time": burst_time,
+            "burst_empty_time": burst_empty_time}})
+
+        if (normal_read_iops and normal_write_iops) or normal_total_iops:
+            mode = "randrw"
+        elif normal_read_iops:
+            mode = "randread"
+        elif normal_write_iops:
+            mode = "randwrite"
+        else:
+            mode = "randrw"
+
+        option += " --rw=%s --bs=%d --runtime=%d" % (mode, iops_size, runtime)
+
+        logging.debug(self._throttle["expected"])
+        logging.debug("fio_option: " + option)
+        self._fio_option = option
+
+    def build_image_fio_option(self, image):
+        """
+        Build the fio relevant info for one image.
+
+        :param image: name of image
+        :return: dict of image relevant data.
+        """
+
+        if image not in self._throttle["images"].keys():
+            self._throttle["images"].update({image: copy.deepcopy(
+                ThrottleTester.raw_image_data)})
+
+        name = _get_drive_path(self._session, self._params, image)
+        image_data = self._throttle["images"][image]
+        image_data["name"] = name
+        image_data["fio_option"] = self._fio_option + " --filename=%s" % name
+        return image_data
+
+    def build_images_fio_option(self):
+        """
+        Build the fio relevant info for all images.
+
+        :return: dict of all images relevant data.
+        """
+
+        for image in self.images:
+            self.build_image_fio_option(image)
+        return self._throttle["images"]
+
+    def attach_image(self, image):
+        """
+        Attach a new image to the throttle group.
+
+        :param image: image name.
+        """
+
+        self.images.append(image)
+
+    def detach_image(self, image):
+        """
+        Detach an image from the throttle group.
+
+        :param image: image name.
+        """
+
+        self.images.remove(image)
+
+
+class ThrottleGroupsTester(object):
+    """
+    This class mainly tests multiple throttle groups in parallel, or one
+    specified group.
+    Example of usage:
+        ...
+        fio = generate_instance(params, vm, 'fio')
+        t1 = ThrottleTester("group1", ["img1", "img2"])
+        t1.set_fio(fio)
+        t1.build_default_option()
+        t1.build_images_fio_option()
+        t2 = ThrottleTester("group2", ["img3", "img4"])
+        t2.set_fio(fio)
+        t2.build_default_option()
+        t2.build_images_fio_option()
+        testers = ThrottleGroupsTester([t1, t2])
+        testers.start()
+    """
+
+    def __init__(self, testers):
+        self.testers = testers.copy()
+
+    @staticmethod
+    def proc_wrapper(func):
+        """Wrapper to log exceptions raised by a tester"""
+        try:
+            return func()
+        except Exception as e:
+            logging.exception(e)
+            raise
+
+    def start_group_test(self, group):
+        """
+        Start testing for one group.
+
+        :param group: group name
+        """
+        for tester in self.testers:
+            if tester.group == group:
+                tester.start()
+                break
+        else:
+            raise ThrottleError("Cannot find the corresponding group tester.")
+
+    def start(self):
+        """
+        Start testing for multiple groups in parallel.
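+
+        Each tester's start() runs in its own worker thread; per-group
+        results are collected and a ThrottleError is raised if any
+        group fails.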
+        """
+        num = len(self.testers)
+        pool = ThreadPool(num)
+
+        results = {}
+        for tester in self.testers:
+            logging.debug("Start tester: " + tester.group)
+            result = pool.apply_async(self.proc_wrapper, (tester.start,))
+            results[tester.group] = result
+        pool.close()
+        pool.join()
+
+        success = True
+        for group, result in results.items():
+            if not result.successful():
+                logging.error("Found unexpected result on %s", group)
+                success = False
+
+        if not success:
+            raise ThrottleError("Throttle testing failed, please check the log.")
+
+        logging.debug("ThrottleGroupsTester end")
diff --git a/provider/virt_storage/storage_volume.py b/provider/virt_storage/storage_volume.py
index bf1f00635abb2c675563632fe59df97f05836113..5ffd682c6c9b9bfe6c50321d460db082560ae05e 100644
--- a/provider/virt_storage/storage_volume.py
+++ b/provider/virt_storage/storage_volume.py
@@ -137,6 +137,10 @@ class StorageVolume(object):
             self.format.set_param("backing", backing_node)
             self.format.set_param("file", self.protocol.get_param("node-name"))
 
+        # keep the same setting as libvirt when blockdev-add'ing a format node
+        readonly = params.get("image_readonly", "off")
+        self.format.set_param("read-only", readonly)
+
     def refresh_protocol_by_params(self, params):
         if self.protocol.TYPE == "file":
             aio = params.get("image_aio", "threads")
@@ -145,6 +149,12 @@
         else:
             raise NotImplementedError
 
+        # keep the same setting as libvirt when blockdev-add'ing a protocol node
+        auto_readonly = params.get("image_auto_readonly", "on")
+        discard = params.get("image_discard_request", "unmap")
+        self.protocol.set_param("auto-read-only", auto_readonly)
+        self.protocol.set_param("discard", discard)
+
     def info(self):
         out = dict()
         out["name"] = self.name
diff --git a/qemu/deps/Meinberg_NTP/install_ntp.ini b/qemu/deps/Meinberg_NTP/install_ntp.ini
new file mode 100644
index 0000000000000000000000000000000000000000..7a4e5e7b106c09433dd72a53d08d3373e27a439d
--- /dev/null
+++ b/qemu/deps/Meinberg_NTP/install_ntp.ini
@@ -0,0 +1,20 @@
+[Installer]
+InstallDir=C:\NTP
+UpgradeMode=Reinstall
+Logfile=C:\Meinberg_NTP\ntp_silent.log
+Silent=Yes
+[Components]
+InstallTools=yes
+InstallDocs=no
+InstallOpenSSL=yes
+CreateStartMenuEntries=yes
+[Service]
+StartAfterInstallation=yes
+AutoStart=yes
+ServiceAccount=@SYSTEM
+DisableOthers=yes
+AllowBigInitialTimestep=yes
+EnableMMTimer=yes
+ModifyFirewall=yes
+[Configuration]
+UseConfigFile=C:\Meinberg_NTP\ntp.conf
diff --git a/qemu/deps/Meinberg_NTP/ntp-4.2.8p14-win32-setup.exe b/qemu/deps/Meinberg_NTP/ntp-4.2.8p14-win32-setup.exe
new file mode 100644
index 0000000000000000000000000000000000000000..06bcf54e767d57cdc590850a5325f0a8a793524b
Binary files /dev/null and b/qemu/deps/Meinberg_NTP/ntp-4.2.8p14-win32-setup.exe differ
diff --git a/qemu/deps/Meinberg_NTP/ntp.conf b/qemu/deps/Meinberg_NTP/ntp.conf
new file mode 100644
index 0000000000000000000000000000000000000000..a7374ad5e856671dcd9f6843ca0a60ac88569454
--- /dev/null
+++ b/qemu/deps/Meinberg_NTP/ntp.conf
@@ -0,0 +1,9 @@
+restrict default nomodify notrap nopeer noquery
+restrict -6 default nomodify notrap nopeer noquery
+restrict 127.0.0.1
+restrict -6 ::1
+
+driftfile "C:\NTP\etc\ntp.drift"
+
+# Use specific NTP servers
+server clock.redhat.com iburst minpoll 6 maxpoll 7
diff --git a/qemu/deps/dgreadiness/DG_Readiness_Tool_v3.6.ps1 b/qemu/deps/dgreadiness/DG_Readiness_Tool_v3.6.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..a7f91114e7de8b025d708bcba02a1b40dd0447b1
--- /dev/null
+++ 
b/qemu/deps/dgreadiness/DG_Readiness_Tool_v3.6.ps1 @@ -0,0 +1,1530 @@ +# Script to find out if machine is Device Guard compliant +# requires driver verifier on system. +param([switch]$Capable, [switch]$Ready, [switch]$Enable, [switch]$Disable, $SIPolicyPath, [switch]$AutoReboot, [switch]$DG, [switch]$CG, [switch]$HVCI, [switch]$HLK, [switch]$Clear, [switch]$ResetVerifier) + +$path = "C:\DGLogs\" +$LogFile = $path + "DeviceGuardCheckLog.txt" + +$CompatibleModules = New-Object System.Text.StringBuilder +$FailingModules = New-Object System.Text.StringBuilder +$FailingExecuteWriteCheck = New-Object System.Text.StringBuilder + +$DGVerifyCrit = New-Object System.Text.StringBuilder +$DGVerifyWarn = New-Object System.Text.StringBuilder +$DGVerifySuccess = New-Object System.Text.StringBuilder + +$Sys32Path = "$env:windir\system32" +$DriverPath = "$env:windir\system32\drivers" + +#generated by certutil -encode +$SIPolicy_Encoded = "BQAAAA43RKLJRAZMtVH2AW5WMHbk9wcuTBkgTbfJb0SmxaI0BACNkAgAAAAAAAAA +HQAAAAIAAAAAAAAAAAAKAEAAAAAMAAAAAQorBgEEAYI3CgMGDAAAAAEKKwYBBAGC +NwoDBQwAAAABCisGAQQBgjc9BAEMAAAAAQorBgEEAYI3PQUBDAAAAAEKKwYBBAGC +NwoDFQwAAAABCisGAQQBgjdMAwEMAAAAAQorBgEEAYI3TAUBDAAAAAEKKwYBBAGC +N0wLAQEAAAAGAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AQAAAAYAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA +BgAAAAEAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAA +AQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAABAAAA +AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAEAAAABAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAgAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAEAAAADAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAABAAAADgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAEAAAAOAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AQAAAA4AAAABAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA +DgAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAA +AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAA4AAAABAAAA +AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAEAAAADAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAQAAAAEAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAQAAAABAAAAAQAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAPye3j3MoJGGstO/m3OKIFDLGlVN +otyttV8/cu4XchN4AQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AQAAAAYAAAABAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA +DgAAAAEAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAA +AQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAoAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAABAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAQAAAAYAAAABAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAABAAAABwAAAAEAAAAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAABAAAAFAAAAIMAAAAAAAAADIAAAAsAAAAAAAAAAAAAAAEAAAAAAAAA +AgAAAAAAAAADAAAAAAAAAAQAAAAAAAAABQAAAAAAAAALAAAAAAAAAAwAAAAAAAAA +DQAAAAAAAAAOAAAAAAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAMAAAAAAAAAAyAAAASAAAABgAAAAAAAAAHAAAAAAAAAAgAAAAAAAAA +CQAAAAAAAAAKAAAAAAAAABMAAAAAAAAADwAAAAAAAAAQAAAAAAAAABEAAAAAAAAA +EgAAAAAAAAAUAAAAAAAAABUAAAAAAAAAGgAAAAAAAAAbAAAAAAAAABwAAAAAAAAA +FgAAAAAAAAAXAAAAAAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAgAAABQAAABQAG8AbABpAGMAeQBJAG4AZgBvAAAAAAAWAAAA 
+SQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAQAAABJAGQAAAAAAAMAAAAMAAAA +MAAzADEAMAAxADcAAAAAABQAAABQAG8AbABpAGMAeQBJAG4AZgBvAAAAAAAWAAAA +SQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAgAAABOAGEAbQBlAAAAAAADAAAA +JgAAAEQAZQBmAGEAdQBsAHQAVwBpAG4AZABvAHcAcwBBAHUAZABpAHQAAAAAAAAA +AwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAA +BQAAAAYAAAA=" + +$HSTITest_Encoded = "TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1vZGUuDQ0KJAAAAAAAAADxXZfstTz5v7U8+b+1PPm/2GH4vrc8+b+8RGq/ojz5v9hh+r63PPm/2GH9vr48+b+1PPi/qjz5v9hh+b60PPm/2GHwvrc8+b/YYfu+tDz5v1JpY2i1PPm/AAAAAAAAAABQRQAAZIYFAGt3EVgAAAAAAAAAAPAAIiALAg4AABIAAAAaAAAAAAAAkBsAAAAQAAAAAACAAQAAAAAQAAAAAgAACgAAAAoAAAAKAAAAAAAAAABwAAAABAAAxcwAAAMAYEEAAAQAAAAAAAAQAAAAAAAAAAAQAAAAAAAAEAAAAAAAAAAAAAAQAAAAEDkAAGQAAAB0OQAABAEAAAAAAAAAAAAAAFAAACABAAAAAAAAAAAAAABgAAAYAAAAwDUAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQMAAA0AAAAAAAAAAAAAAA4DAAAEgBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAudGV4dAAAAMURAAAAEAAAABIAAAAEAAAAAAAAAAAAAAAAAAAgAABgLnJkYXRhAAB4DwAAADAAAAAQAAAAFgAAAAAAAAAAAAAAAAAAQAAAQC5kYXRhAAAAwAUAAABAAAAAAgAAACYAAAAAAAAAAAAAAAAAAEAAAMAucGRhdGEAACABAAAAUAAAAAIAAAAoAAAAAAAAAAAAAAAAAABAAABALnJlbG9jAAAYAAAAAGAAAAACAAAAKgAAAAAAAAAAAAAAAAAAQAAAQgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIiVwkCFVWV0FWQVdIi+xIg+wwM/9IjUU4TIv5iX1ISI1NSIl9QEUzyYl9OEyNRUBIiUQkIDPS6AwJAACL2D1XAAeAD4WrAAAAi0VASGnYDCIAAP8V/yAAAI13CEyLw0iLyIvW/xX2IAAATIvwSIXAdQe7DgAHgOtxi104/xXWIAAARIvDi9ZIi8j/FdAgAABIi/BIhcB1B7sOAAeA6x5IjUU4TIvOTI1FQEiJRCQgSYvWSI1NSOiNCAAAi9j/FZUgAABNi8Yz0kiLyP8VlyAAAEiF9nQU/xV8IAAATIvGM9JIi8j/FX4gAAA5fUhAD5THQYk/i8NIi1wkYEiDxDBBX0FeX15dw8zMzMzMzMzMzOkzCAAAzMzMzMzMzEiJXCQYSIl0JCBXSIHscAEAAEiLBbsuAABIM8RIiYQkYAEAAA8QBRkhAACL8kiL+TPSSI1MJGBBuPQAAADzD39EJFDo6g4AAEiDZCQwAEiNTCRQg2QkQABFM8nHRCQogAAAALoAAABAx0QkIAMAAABFjUEB/xWSHwAASIvYSIP4/3RGQbkCAAAARTPAM9JIi8j/FX0fAACD+P90HkiDZCQgAEyNTCRARIvGSIvXSIvL/xVmHwAAhcB1Bv8VPB8AAEiLy/8VYx8AAEiLjCRgAQAASDPM6AsLAABMjZwkcAEAAEmLWyBJi3MoSYvjX8PMzMzMzMxIg+woM9JMi8lIhcl0Hrr///9/M8BEi8I4AXQJSP/BSYPoAXXzTYXAdSEz0rhXAAeAM8mFwEgPScp4C41RAUmLyejG/v//SIPEKMNJK9Dr4czMzMzMzMzMSIlcJAhIiXQkEFdIg+wgQYvZSYv4SIvy6Iv///+L00iLz+iN/v//SIvOSItcJDBIi3QkOEiDxCBf6Wr////MzMzMzMyJVCQQSIPsKAkRSI0Nsx8AAOhO////ugQAAABIjUwkOOhL/v//SI0NqB8AAOgz////SIPEKMPMzMzMzMxAVVNWV0FUQVVBVkFXSI1sJOFIgeyYAAAASIsF6CwAAEgzxEiJRQ9FM/ZIiVXnM9JIiU3vRIl1p0GL3kiJXbdJi8BIiUXXTYvpRIl1r0GL/kSJdfdFi+ZIiVX7RYv+SIlVA0yJdc9IhckPhBEFAABIhcAPhAgFAABNhckPhP8EAABBgzkBdBHHRaeAAAAAvwJAAIDp7QQAAEiNDQkfAADohP7//0WLfQREiX2/SWnfDCIAAP8Vtx0AAEyLw7oIAAAASIvI/xWuHQAATIvgSIXAdShIjQ3vHgAA6Er+////FUwdAAAPt/iBzwAAB4CFwA9O+EmL3umLBAAASI0N9x4AAOgi/v//RIl1s0WF/w+EiwIAAEmNXQhIiV3HSY20JAwCAABIjQ32HgAA6Pn9//+LQwiJhvT9//+FwHktPbsAAMB1EUiNDe4eAADo2f3//+kaAgAASI0N/R4AAOjI/f//g02nQOkFAgAAixtJA92DOwN0Gw+6bacIugEAAABIjY78/f//6Dv+///p4AEAAEyNhgD+//+6BA
AAAEmLwEiNSwgPEAFIjYmAAAAADxEASI2AgAAAAA8QSZAPEUiQDxBBoA8RQKAPEEmwDxFIsA8QQcAPEUDADxBJ0A8RSNAPEEHgDxFA4A8QSfAPEUjwSIPqAXWuQbkAAgAASI0VgB4AAEiNDYEeAADodP3//4uLCAIAALoAEAAAQYv+TI0ES0iBwQwCAABMA8FIi85MK8ZIjYL+7/9/SIXAdBdBD7cECGaFwHQNZokBSIPBAkiD6gF13UiF0nUJSIPpAr96AAeAZkSJMUiNFSYeAABIjQ0nHgAAQbkAIAAATIvG6AH9//9MjXMEQYsOjUH/g/gDD4fDAAAA/0SN90iNFQMeAACJjvj9//9BuQQAAABIjQ34HQAATYvG6Mj8//9BiwaDfIX3AXZESI2O/P3//7oEAAAA6PH8//9Biw6D6QF0JYPpAXQag+kBdA+D+QEPhaIAAACDTacI63eDTacE63GDTacC62uDTacB62WD+AF1YIuDCAIAAEyNRa9BuQQAAACJRa9IjRWTHQAASI0NrB0AAOhP/P//RTP2RDl1r3UOD7ptpwlBjVYI6TX+//9IjYMMAgAASIlFz+sZD7ptpwlIjY78/f//ugIAAADoWfz//0Uz9otFs0iBxgwiAABIg0XHDP/AiUWzQTvHcxdIi13H6ZP9//+/BUAAgEiLXbfp5wEAAEQ5dad0DkiNDU0dAADoePv//+vji12v/xW1GgAARIvDuggAAABIi8j/FawaAABIiUW3SIvYSIXAdRZIjQ1JHQAA6ET7//+/FwAA0OmXAQAASI0NYx0AAOgu+///i0WvRI2wBgEAAEaNNHBEiXWzRYX/D4TFAAAASY1cJAhJjXUISI0N+xsAAOj++v//gXv4uwAAwHUOSI0N/hsAAOjp+v//63xEOXYEcxS6EAAAAA+6bacJSIvL6Gv7///rYosOSQPNi4EIAgAAO0WvdAe6CAAAAOvaRTPATI0MQUyNFAhEOUWvdjpMi3W3Qw+2jBAMAgAA99FDhIwIDAIAAHQID7ptpwmDCyBDioQIDAIAAEMIBDBB/8BEO0Wvcs5Ei3WzSIPGDEiBwwwiAABJg+8BD4VM////RIt9v0iLXbdFM/ZEOXWndBFIjQ0OHAAA6Dn6///pkQAAAEGL9kQ5da8PhoQAAABMi3W3TIttz0iNDYgcAADoE/r//4vGTI1Fq0G5AQAAAEiNFZgcAABCigwwSo0cKCILiE2rSI0NlBwAAOg/+v//QbkBAAAASI0VkhwAAEyLw0iNDZgcAADoI/r//4oDOEWrdBBIjQ2dHAAA6Lj5//+DTacg/8Y7da9yjuly+///v1cAB4BIjQ2sHAAA6Jf5//9BuQQAAABMjUWnSI0VphwAAEiNDa8cAADo0vn//02F5HRdTIt150iLdddNhfZ0NEQ5PnIvSI0NnBwAAOhX+f//QYvHSYvUTGnADCIAAEmLzuh0BwAASI0NmxwAAOg2+f//6wW/VwAHgESJPv8VbhgAAE2LxDPSSIvI/xVwGAAASIXbdBT/FVUYAABMi8Mz0kiLyP8VVxgAAEiLRe9IhcB0BYtNp4kIi8dIi00PSDPM6NMDAABIgcSYAAAAQV9BXkFdQVxfXltdw8zMzMzMzMxIi8RIiVgISIloEEiJcBhXQVZBV0iD7DCDYNgATYvxSYv4TI1I2EiL8kyL+UUzwDPSuaYAAAD/FWwYAACL2D0EAADAdAkPuusc6dkAAACDfCQgFHMKuwVAAIDpyAAAAItcJCD/FacXAABEi8O6CAAAAEiLyP8VnhcAAEiL6EiFwHUKuw4AB4DpmwAAAESLRCQgRTPJSIvQuaYAAAD/FQYYAACL2IXAeQYPuusc6zdIjQ2TGwAA6A74//+LVCQgSIvN6A73//9IjQ2LGwAA6Pb3//9Mi81Mi8dIi9ZJi8/ovfj//4vYSIt8JHCLdCQgSIX/dBk5N3IVTYX2dBBEi8ZIi9VJi87o8AUAAOsFu1cAB4CJN/8V9xYAAEyLxTPSSIvI/xX5FgAASItsJFiLw0iLXCRQSIt0JGBIg8QwQV9BXl/DzMzMzMzMSIlcJAhXSIPsIIP6AXU8SI0VmhcAAEiNDYsXAADoaAMAAIXAdAczwOmjAAAASI0VbBcAAEiNDV0XAADoVgMAAP8FKiUAAOmAAAAAhdJ1fDkVUyUAAHRtSIsNGiUAAOgxAgAASIsNFiUAAEiL+OgiAgAASI1Y+OsXSIsL6BQCAABIhcB0Bv8VBRcAAEiD6whIO99z5IM9DSUAAAR2FP8VJRYAAEyLxzPSSIvI/xUnFgAA6O4BAABIiQXDJAAASIkFtCQAAIMlpSQAAAC4AQAAAEiLXCQwSIPEIF/DzMzMzMzMzMzMzMzMzMzMzMzMzMzMSIlcJAhIiXQkEFdIg+wgSYv4i9pIi/GD+gF1BeijAQAATIvHi9NIi85Ii1wkMEiLdCQ4SIPEIF/pBwAAAMzMzMzMzMxMiUQkGIlUJBBIiUwkCFNWV0iB7JAAAACL+kiL8cdEJCABAAAAhdJ1EzkVDSQAAHULM9uJXCQg6d8AAACNQv+D+AF3MkyLhCTAAAAA6Hv+//+L2IlEJCDrFTPbiVwkIIu8JLgAAABIi7QksAAAAIXbD4SlAAAATIuEJMAAAACL10iLzujoAQAAi9iJRCQg6xUz24lcJCCLvCS4AAAASIu0JLAAAACD/wF1SIXbdURFM8Az0kiLzui1AQAA6xOLvCS4AAAASIu0JLAAAACLXCQgRTPAM9JIi87o7/3//+sTi7wkuAAAAEiLtCSwAAAAi1wkIIX/dAWD/wN1IEyLhCTAAAAAi9dIi87ov/3//4vYiUQkIOsGM9uJXCQgi8NIgcSQAAAAX15bw8zMzMzMzMzMzMxmZg8fhAAAAAAASDsN6SIAAHUQSMHBEGb3wf//dQHDSMHJEOmSAQAAzMzMzMzMSP8ltRQAAMzMzMzMzMzMzDPJSP8lmxQAAMzMzMzMzMxIiVwkIFVIi+xIg+wgSINlGABIuzKi3y2ZKwAASIsFiSIAAEg7ww+FjwAAAEiNTRj/FU4UAABIi0UYSIlFEP8VABQAAIvASDFFEP8V/BMAAIvASDFFEP8VIBQAAIvASMHgGEgxRRD/FRAUAACLwEiNTRBIM0UQSDPBSI1NIEiJRRD/FeUTAACLRSBIuf///////wAASMHgIEgzRSBIM0UQSCPBSLkzot8tmSsAAEg7w0gPRMFIiQXxIQAASItcJEhI99BIiQXqIQAASIPEIF3DzMzMzMzM/yXYEgAAzMzMzMzM/yXEEgAAzMzMzMzMzMxIg+wog/oBdQb/FTUTAAC4AQAAAEiDxCjDzMzMzMzMzMzMzMzMzMzMzMzMzMIAAMzMzMzMzMzMzEBTSIPsIEiL2TPJ/xWTEgAASIvL/xWCEgAA/xUUEwAASIvIugkEAMBIg8QgW0j/JfgSAADMzMzMzMzMzMzMzMzMzMzMSIlMJAhIgeyIAAAASI0NHSIAAP8VLxMAAEiLBQgjAABIiUQkSEUzwEiNVCRQSItMJEj/FSATAABIi
UQkQEiDfCRAAHRCSMdEJDgAAAAASI1EJFhIiUQkMEiNRCRgSIlEJChIjQXHIQAASIlEJCBMi0wkQEyLRCRISItUJFAzyf8VyxIAAOsjSIsFOiIAAEiLAEiJBZAiAABIiwUpIgAASIPACEiJBR4iAABIiwV3IgAASIkF6CAAAEiLhCSQAAAASIkF6SEAAMcFvyAAAAkEAMDHBbkgAAABAAAAxwXDIAAAAwAAALgIAAAASGvAAEiNDbsgAABIxwQBAgAAALgIAAAASGvAAUiNDaMgAABIixUsIAAASIkUAbgIAAAASGvAAkiNDYggAABIixUZIAAASIkUAbgIAAAASGvAAEiLDf0fAABIiUwEaLgIAAAASGvAAUiLDfAfAABIiUwEaEiNDdwPAADoU/7//0iBxIgAAADDzMzMzMzMzMzMzMzMzMzMzMzMzMzM/yWUEAAAzMzMzMzM/yWQEAAAzMzMzMzM/yWMEAAAzMzMzMzMzMxIg+woTYtBOEiLykmL0egRAAAAuAEAAABIg8Qow8zMzMzMzMxAU0WLGEiL2kGD4/hMi8lB9gAETIvRdBNBi0AITWNQBPfYTAPRSGPITCPRSWPDSosUEEiLQxCLSAhIA0sI9kEDD3QMD7ZBA4Pg8EiYTAPITDPKSYvJW+kl/P//zMzMzMzMzMzMzMxmZg8fhAAAAAAA/+DMzMzMzMxAVUiD7CBIi+pIiU04SIsBixCJVSRIiU1AM8BIg8QgXcPMQFVIg+wgSIvqSIlNSEiLAYsQiVUoSIlNUDPASIPEIF3DzEBVSIPsIEiL6kiJTVhIiwGLEIlVLEiJTWAzwEiDxCBdw8xAVUiD7CBIi+pIiU1oSIsBixCJVTBIiU1wM8BIg8QgXcPMQFVIg+wgSIvqSIlNeEiLAYsQiVU0SImNgAAAADPASIPEIF3DzEBVSIPsIEiL6kiDxCBdw8wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFBAAIABAAAA8EAAgAEAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAgAEAAAAAAAAAAAAAAAAAAAAAAAAAKDIAgAEAAAAwMgCAAQAAAFgyAIABAAAABQAAAAAAAAAANQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeD4AAAAAAABkPwAAAAAAAG4/AAAAAAAAAAAAAAAAAADOOwAAAAAAAMA7AAAAAAAAAAAAAAAAAAAQPQAAAAAAACw9AAAAAAAA6j4AAAAAAAAAAAAAAAAAAPo+AAAAAAAA2D4AAAAAAADMPgAAAAAAAAAAAAAAAAAACD8AAAAAAAAAAAAAAAAAAFI8AAAAAAAAFj8AAAAAAABGPAAAAAAAAAAAAAAAAAAA9DwAAAAAAAAAAAAAAAAAAJ48AAAAAAAAtDwAAAAAAABePQAAAAAAAEo9AAAAAAAAAAAAAAAAAACEPAAAAAAAAAAAAAAAAAAA5DwAAAAAAADKPAAAAAAAAAAAAAAAAAAAZDwAAAAAAAB0PAAAAAAAAAAAAAAAAAAAsD4AAAAAAAD6OwAAAAAAACg8AAAAAAAADjwAAAAAAAAAAAAAAAAAAHAeAIABAAAAACEAgAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQAAAgEQAAkBsAAHAeAADAHgAAAAAAAC5caHN0aXRyYWNlLmxvZwAgUHJvdmlkZXJFcnJvcjoAOlByb3ZpZGVyRXJyb3IgAERldGVybWluaW5nIENvdW50LiAAAAAAAAAAAAAAAAAAICEhISBFcnJvciBidWZmZXIgZmFpbGVkIGFsbG9jYXRpb24gISEhIAAAAAAAAAAARGV0ZXJtaW5lIFNlY3VyaXR5RmVhdHVyZXNTaXplLiAAAAAAAAAAAExvb3AuLi4gAAAAAAAAAAAAAAAAAAAAACBVbnN1cHBvcnRlZCBBSVAgaWdub3JlZCAAAAAAAAAAICEhISBVRUZJIFByb3RvY29sIEVycm9yIERldGVjdGVkICEhISAAADpJRCAAAAAAIElEOgAAAAA6RVJST1IgACBFUlJPUjoAOlJPTEUgAAAgUk9MRToAAAAAAAAAAAAAOnNlY3VyaXR5RmVhdHVyZXNTaXplIAAAAAAAAAAAAAAgc2VjdXJpdHlGZWF0dXJlc1NpemU6AAAAAAAAAAAAACAhISEgRXJyb3IgZGV0ZWN0ZWQsIGJhaWxpbmcgb3V0ICEhISAAAAAAAAAAAAAAAFZlcmlmaWVkIGJ1ZmZlciBhbGxvY2F0aW9uIGZhaWxlZC4AAAAAAAAAAAAAAAAAAExvb3Bpbmcgb24gcHJvdmlkZXJzIHRvIGFjY3VtdWxhdGUgaW1wbGVtZW50ZWQgYW5kIHZlcmlmaWVkLgAAAABDb21wYXJpbmcgcmVxdWlyZWQgYnl0ZSB0byB2ZXJpZmllZC4uLgAAOlZFUklGSUVEIAAAAAAAACBWRVJJRklFRDoAAAAAAAA6UkVRVUlSRUQgAAAAAAAAIFJFUVVJUkVEOgAAAAAAAAAAAAAAAAAAISEhIHZlcmlmaWVkIGJ5dGUgZG9lcyBub3QgbWF0Y2ggcmVxdWlyZWQgISEhAAAAQ0xFQU5VUCAAAAAAAAAAADpPVkVSQUxMAAAAAAAAAABPVkVSQUxMOgAAAAAAAAAAUHJvdmlkZXIgRXJyb3JzIGNvcHkgc3RhcnQAAAAAAABQcm92aWRlciBFcnJvcnMgY29weSBlbmQAAAAAAAAAAEJMT0IgU3RhcnQ6AAAAAAA6QkxPQiBFbmQgIAAAAAAAAAAAAGt3EVgAAAAAAgAAACUAAAD4NQAA+BsAAAAAAABrdxFYAAAAAA0AAACgAQAAIDYAACAcAABSU0RT1J4Ttoijw0G4zY0uYG3g7wEAAABIc3RpVGVzdC5wZGIAAAAAR0NUTAAQAADwEAAALnRleHQkbW4AAAAA8CAAABIAAAAudGV4dCRtbiQwMAACIQAAwwAAAC50ZXh0JHgAADAAAOAAAAAucmRhdGEkYnJjAADgMAAASAEAAC5pZGF0YSQ1AAAAACgyAAAQAAAALjAwY2ZnAAA4MgAACAAAAC5DUlQkWENBAAAAAEAyAAAIAAAALkNSVCRYQ1oAAAAASDIAAAgAAAAuQ1JUJFhJQQAAAABQMgAACAAAAC5DUlQkWElaAAAAAFgyAAAYAAAALmNmZ3VhcmQAAAAAcDIAAIgDAAAucmRhdGEAAPg1AADIAQAALnJkYXRhJHp6emRiZwAAAMA3AABQAQAALnhkYXRhAAAQOQAAZAAAAC5lZGF0YQAAdDkAAPAAAAAuaWRhdGEkMgAAAABkOgAAFAAAAC5pZGF0YSQz
AAAAAHg6AABIAQAALmlkYXRhJDQAAAAAwDsAALgDAAAuaWRhdGEkNgAAAAAAQAAAEAAAAC5kYXRhAAAAEEAAALAFAAAuYnNzAAAAAABQAAAgAQAALnBkYXRhAAABEwgAEzQMABNSDPAK4AhwB2AGUBkkBwASZDMAEjQyABIBLgALcAAAbCAAAGABAAABBAEABEIAAAEPBgAPZAcADzQGAA8yC3ABCAEACEIAABknCgAZARMADfAL4AnQB8AFcARgAzACUGwgAACIAAAAARgKABhkDAAYVAsAGDQKABhSFPAS4BBwGRgFABgBEgARcBBgDzAAAEYgAAAGAAAAGBwAAC0cAAAIIQAALRwAAEocAABkHAAAKiEAAGQcAACCHAAAkRwAAEwhAACRHAAApBwAALMcAABuIQAAsxwAAM8cAADpHAAAkCEAAOkcAAD5GwAA7xwAALUhAAAAAAAAAQYCAAYyAlABCgQACjQGAAoyBnAAAAAAAQAAAAENBAANNAkADTIGUAEGAgAGMgIwAQwCAAwBEQABAAAAAQIBAAIwAAAAAAAAAAAAAAAAAAAAAAAAd24RWAAAAABMOQAAAQAAAAIAAAACAAAAODkAAEA5AABIOQAAEBAAACARAABZOQAAYzkAAAAAAQBIU1RJVEVTVC5kbGwAUXVlcnlIU1RJAFF1ZXJ5SFNUSWRldGFpbHMAmDoAAAAAAAAAAAAA2jsAAAAxAACYOwAAAAAAAAAAAAA8PAAAADIAAAA7AAAAAAAAAAAAAHI9AABoMQAAgDsAAAAAAAAAAAAAkj0AAOgxAABYOwAAAAAAAAAAAACyPQAAwDEAADA7AAAAAAAAAAAAANY9AACYMQAAaDsAAAAAAAAAAAAAAD4AANAxAAAgOwAAAAAAAAAAAAAkPgAAiDEAALA6AAAAAAAAAAAAAE4+AAAYMQAAeDoAAAAAAAAAAAAAkD4AAOAwAADQOgAAAAAAAAAAAAAiPwAAODEAAPA6AAAAAAAAAAAAAEI/AABYMQAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4PgAAAAAAAGQ/AAAAAAAAbj8AAAAAAAAAAAAAAAAAAM47AAAAAAAAwDsAAAAAAAAAAAAAAAAAABA9AAAAAAAALD0AAAAAAADqPgAAAAAAAAAAAAAAAAAA+j4AAAAAAADYPgAAAAAAAMw+AAAAAAAAAAAAAAAAAAAIPwAAAAAAAAAAAAAAAAAAUjwAAAAAAAAWPwAAAAAAAEY8AAAAAAAAAAAAAAAAAAD0PAAAAAAAAAAAAAAAAAAAnjwAAAAAAAC0PAAAAAAAAF49AAAAAAAASj0AAAAAAAAAAAAAAAAAAIQ8AAAAAAAAAAAAAAAAAADkPAAAAAAAAMo8AAAAAAAAAAAAAAAAAABkPAAAAAAAAHQ8AAAAAAAAAAAAAAAAAACwPgAAAAAAAPo7AAAAAAAAKDwAAAAAAAAOPAAAAAAAAAAAAAAAAAAABwBfaW5pdHRlcm1fZQAGAF9pbml0dGVybQBhcGktbXMtd2luLWNvcmUtY3J0LWwyLTEtMC5kbGwAANACUnRsQ2FwdHVyZUNvbnRleHQAjQRSdGxMb29rdXBGdW5jdGlvbkVudHJ5AAC3BVJ0bFZpcnR1YWxVbndpbmQAAG50ZGxsLmRsbAAGAEhlYXBGcmVlAAAAAEdldFByb2Nlc3NIZWFwAAAEAEVuY29kZVBvaW50ZXIAAQBEZWNvZGVQb2ludGVyAAAAUXVlcnlQZXJmb3JtYW5jZUNvdW50ZXIADQBHZXRDdXJyZW50UHJvY2Vzc0lkABEAR2V0Q3VycmVudFRocmVhZElkAAAUAEdldFN5c3RlbVRpbWVBc0ZpbGVUaW1lABgAR2V0VGlja0NvdW50AAABAERpc2FibGVUaHJlYWRMaWJyYXJ5Q2FsbHMAEQBVbmhhbmRsZWRFeGNlcHRpb25GaWx0ZXIAAA8AU2V0VW5oYW5kbGVkRXhjZXB0aW9uRmlsdGVyAAwAR2V0Q3VycmVudFByb2Nlc3MATQBUZXJtaW5hdGVQcm9jZXNzAABhcGktbXMtd2luLWNvcmUtaGVhcC1sMS0yLTAuZGxsAGFwaS1tcy13aW4tY29yZS11dGlsLWwxLTEtMC5kbGwAYXBpLW1zLXdpbi1jb3JlLXByb2ZpbGUtbDEtMS0wLmRsbAAAYXBpLW1zLXdpbi1jb3JlLXByb2Nlc3N0aHJlYWRzLWwxLTEtMi5kbGwAYXBpLW1zLXdpbi1jb3JlLXN5c2luZm8tbDEtMi0xLmRsbAAAYXBpLW1zLXdpbi1jb3JlLWxpYnJhcnlsb2FkZXItbDEtMi0wLmRsbAAAYXBpLW1zLXdpbi1jb3JlLWVycm9yaGFuZGxpbmctbDEtMS0xLmRsbAAAAABfX0Nfc3BlY2lmaWNfaGFuZGxlcgAAYXBpLW1zLXdpbi1jb3JlLWNydC1sMS0xLTAuZGxsAADbAU50UXVlcnlTeXN0ZW1JbmZvcm1hdGlvbgAAWQBXcml0ZUZpbGUAUwBTZXRGaWxlUG9pbnRlcgAABQBHZXRMYXN0RXJyb3IAAAUAQ3JlYXRlRmlsZUEAAABDbG9zZUhhbmRsZQACAEhlYXBBbGxvYwBhcGktbXMtd2luLWNvcmUtZmlsZS1sMS0yLTEuZGxsAGFwaS1tcy13aW4tY29yZS1oYW5kbGUtbDEtMS0wLmRsbAAzAG1lbWNweQAANwBtZW1zZXQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyot8tmSsAAM1dINJm1P//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQAAAXEQAAwDcAACwRAAAaEgAA1DcAACASAABwEgAA8DcAAHgSAAC2EgAA+DcAALwSAADyEgAACDgAAPgSAABRGQAAEDgAAFgZAACaGgAAMDgAAKAaAAB7GwAAyDgAAJAbAADNGwAA+DcAANQbAAD8HAAASDgAABAdAAAuHQAA2DgAAFQdAAAkHgAA3DgAAEQeAABdHgAA8DcAAHweAACwHgAA6DgAAMAeAAAxIAAA8DgAAGwgAACJIAAA8DcAAJAgAADrIAAA/DgAAAAhAAACIQAA+DgAAAghAAAqIQAAwDgAACohAABMIQAAwDgAAEwhAABuIQAAwDgAAG4hAACQIQAAwDgAAJAhAAC1IQAAwDgAALUhAADFIQAAwDgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAABgAAAAAoAigaKCAoIigkKAoojCiAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + +function Log($message) +{ + $message | Out-File $LogFile -Append -Force +} + +function LogAndConsole($message) +{ + Write-Host $message + Log $message +} + +function LogAndConsoleWarning($message) +{ + Write-Host $message -foregroundcolor "Yellow" + Log $message +} + +function LogAndConsoleSuccess($message) +{ + Write-Host $message -foregroundcolor "Green" + Log $message +} + +function LogAndConsoleError($message) +{ + Write-Host $message -foregroundcolor "Red" + Log $message +} + +function IsExempted([System.IO.FileInfo] $item) +{ + $cert = (Get-AuthenticodeSignature $item.FullName).SignerCertificate + if($cert.ToString().Contains("CN=Microsoft Windows, O=Microsoft Corporation, L=Redmond, S=Washington, C=US")) + { + Log $item.FullName + "MS Exempted" + return 1 + } + else + { + Log $item.FullName + "Not-exempted" + Log $cert.ToString() + return 0 + } +} + +function CheckExemption($_ModName) +{ + $mod1 = Get-ChildItem $Sys32Path $_ModName + $mod2 = Get-ChildItem $DriverPath $_ModName + if($mod1) + { + Log "NonDriver module" + $mod1.FullName + return IsExempted($mod1) + } + elseif($mod2) + { + Log "Driver Module" + $mod2.FullName + return IsExempted($mod2) + } + +} + +function CheckFailedDriver($_ModName, $CIStats) +{ + Log "Module: " $_ModName.Trim() + if(CheckExemption($_ModName.Trim()) - eq 1) + { + $CompatibleModules.AppendLine("Windows Signed: " + $_ModName.Trim()) | Out-Null + return + } + $index = $CIStats.IndexOf("execute pool type count:".ToLower()) + if($index -eq -1) + { + return + } + $_tempStr = $CIStats.Substring($index) + $Result = "PASS" + $separator = "`r`n","" + $option = [System.StringSplitOptions]::RemoveEmptyEntries + $stats = $_tempStr.Split($separator,$option) + Log $stats.Count + + $FailingStat = "" + foreach( $stat in $stats) + { + $_t =$stat.Split(":") + if($_t.Count -eq 2 -and $_t[1].trim() -ne "0") + { + $Result = "FAIL" + $FailingStat = $stat + break + } + } + if($Result.Contains("PASS")) + { + $CompatibleModules.AppendLine($_ModName.Trim()) | Out-Null + } + elseif($FailingStat.Trim().Contains("execute-write")) + { + 
$FailingExecuteWriteCheck.AppendLine("Module: "+ $_ModName.Trim() + "`r`n`tReason: " + $FailingStat.Trim() ) | Out-Null + } + else + { + $FailingModules.AppendLine("Module: "+ $_ModName.Trim() + "`r`n`tReason: " + $FailingStat.Trim() ) | Out-Null + } + Log "Result: " $Result +} + +function ListCIStats($_ModName, $str1) +{ + $i1 = $str1.IndexOf("Code Integrity Statistics:".ToLower()) + if($i1 -eq -1 ) + { + Log "String := " $str1 + Log "Warning! CI Stats are missing for " $_ModName + return + } + $temp_str1 = $str1.Substring($i1) + $CIStats = $temp_str1.Substring(0).Trim() + + CheckFailedDriver $_ModName $CIStats +} + +function ListDrivers($str) +{ + $_tempStr= $str + + $separator = "module:","" + $option = [System.StringSplitOptions]::RemoveEmptyEntries + $index1 = $_tempStr.IndexOf("MODULE:".ToLower()) + if($index1 -lt 0) + { + return + } + $_tempStr = $_tempStr.Substring($Index1) + $_SplitStr = $_tempStr.Split($separator,$option) + + Log $_SplitStr.Count + LogAndConsole "Verifying each module please wait .... " + foreach($ModuleDetail in $_Splitstr) + { + #LogAndConsole $Module + $Index2 = $ModuleDetail.IndexOf("(") + if($Index2 -eq -1) + { + "Skipping .." + continue + } + $ModName = $ModuleDetail.Substring(0,$Index2-1) + Log "Driver: " $ModName + Log "Processing module: " $ModName + ListCIStats $ModName $ModuleDetail + } + + $DriverScanCompletedMessage = "Completed scan. List of Compatible Modules can be found at " + $LogFile + LogAndConsole $DriverScanCompletedMessage + + if($FailingModules.Length -gt 0 -or $FailingExecuteWriteCheck.Length -gt 0 ) + { + $WarningMessage = "Incompatible HVCI Kernel Driver Modules found" + if($HLK) + { + LogAndConsoleError $WarningMessage + } + else + { + LogAndConsoleWarning $WarningMessage + } + + LogAndConsoleError $FailingExecuteWriteCheck.ToString() + if($HLK) + { + LogAndConsoleError $FailingModules.ToString() + } + else + { + LogAndConsoleWarning $FailingModules.ToString() + } + if($FailingModules.Length -ne 0 -or $FailingExecuteWriteCheck.Length -ne 0 ) + { + if($HLK) + { + $DGVerifyCrit.AppendLine($WarningMessage) | Out-Null + } + else + { + $DGVerifyWarn.AppendLine($WarningMessage) | Out-Null + } + } + } + else + { + LogAndConsoleSuccess "No Incompatible Drivers found" + } +} + +function ListSummary() +{ + if($DGVerifyCrit.Length -ne 0 ) + { + LogAndConsoleError "Machine is not Device Guard / Credential Guard compatible because of the following:" + LogAndConsoleError $DGVerifyCrit.ToString() + LogAndConsoleWarning $DGVerifyWarn.ToString() + if(!$HVCI -and !$DG) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "CG_Capable" /t REG_DWORD /d 0 /f ' + } + if(!$CG) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "DG_Capable" /t REG_DWORD /d 0 /f ' + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "HVCI_Capable" /t REG_DWORD /d 0 /f ' + } + + } + elseif ($DGVerifyWarn.Length -ne 0 ) + { + LogAndConsoleSuccess "Device Guard / Credential Guard can be enabled on this machine.`n" + LogAndConsoleWarning "The following additional qualifications, if present, can enhance the security of Device Guard / Credential Guard on this system:" + LogAndConsoleWarning $DGVerifyWarn.ToString() + if(!$HVCI -and !$DG) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "CG_Capable" /t REG_DWORD /d 1 /f ' + } + if(!$CG) + { + ExecuteCommandAndLog 'REG ADD 
"HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "DG_Capable" /t REG_DWORD /d 1 /f ' + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "HVCI_Capable" /t REG_DWORD /d 1 /f ' + } + } + else + { + LogAndConsoleSuccess "Machine is Device Guard / Credential Guard Ready.`n" + if(!$HVCI -and !$DG) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "CG_Capable" /t REG_DWORD /d 2 /f ' + } + if(!$CG) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "DG_Capable" /t REG_DWORD /d 2 /f ' + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "HVCI_Capable" /t REG_DWORD /d 2 /f ' + } + } +} + +function Instantiate-Kernel32 { + try + { + Add-Type -TypeDefinition @" + using System; + using System.Diagnostics; + using System.Runtime.InteropServices; + + public static class Kernel32 + { + [DllImport("kernel32", SetLastError=true, CharSet = CharSet.Ansi)] + public static extern IntPtr LoadLibrary( + [MarshalAs(UnmanagedType.LPStr)]string lpFileName); + + [DllImport("kernel32", CharSet=CharSet.Ansi, ExactSpelling=true, SetLastError=true)] + public static extern IntPtr GetProcAddress( + IntPtr hModule, + string procName); + } + +"@ + } + catch + { + Log $_.Exception.Message + LogAndConsole "Instantiate-Kernel32 failed" + } +} + +function Instantiate-HSTI { + try + { + Add-Type -TypeDefinition @" + using System; + using System.Diagnostics; + using System.Runtime.InteropServices; + using System.Net; + + public static class HstiTest3 + { + [DllImport("hstitest.dll", CharSet = CharSet.Unicode)] + public static extern int QueryHSTIdetails( + ref HstiOverallError pHstiOverallError, + [In, Out] HstiProviderErrorDuple[] pHstiProviderErrors, + ref uint pHstiProviderErrorsCount, + byte[] hstiPlatformSecurityBlob, + ref uint pHstiPlatformSecurityBlobBytes); + + [DllImport("hstitest.dll", CharSet = CharSet.Unicode)] + public static extern int QueryHSTI(ref bool Pass); + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct HstiProviderErrorDuple + { + internal uint protocolError; + internal uint role; + internal HstiProviderErrors providerError; + [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 256)] + internal string ID; + [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 4096)] + internal string ErrorString; + } + + [FlagsAttribute] + public enum HstiProviderErrors : int + { + None = 0x00000000, + VersionMismatch = 0x00000001, + RoleUnknown = 0x00000002, + RoleDuplicated = 0x00000004, + SecurityFeatureSizeMismatch = 0x00000008, + SizeTooSmall = 0x00000010, + VerifiedMoreThanImplemented = 0x00000020, + VerifiedNotMatchImplemented = 0x00000040 + } + + [FlagsAttribute] + public enum HstiOverallError : int + { + None = 0x00000000, + RoleTooManyPlatformReference = 0x00000001, + RoleTooManyIbv = 0x00000002, + RoleTooManyOem = 0x00000004, + RoleTooManyOdm = 0x00000008, + RoleMissingPlatformReference = 0x00000010, + VerifiedIncomplete = 0x00000020, + ProtocolErrors = 0x00000040, + BlobVersionMismatch = 0x00000080, + PlatformSecurityVersionMismatch = 0x00000100, + ProviderError = 0x00000200 + } + } +"@ + + $LibHandle = [Kernel32]::LoadLibrary("C:\Windows\System32\hstitest.dll") + $FuncHandle = [Kernel32]::GetProcAddress($LibHandle, "QueryHSTIdetails") + $FuncHandle2 = [Kernel32]::GetProcAddress($LibHandle, "QueryHSTI") + + if ([System.IntPtr]::Size -eq 8) + { + #assuming 64 bit + 
Log "`nKernel32::LoadLibrary 64bit --> 0x$("{0:X16}" -f $LibHandle.ToInt64())" + Log "HstiTest2::QueryHSTIdetails 64bit --> 0x$("{0:X16}" -f $FuncHandle.ToInt64())" + } + else + { + return + } + $overallError = New-Object HstiTest3+HstiOverallError + $providerErrorDupleCount = New-Object int + $blobByteSize = New-Object int + $hr = [HstiTest3]::QueryHSTIdetails([ref] $overallError, $null, [ref] $providerErrorDupleCount, $null, [ref] $blobByteSize) + + [byte[]]$blob = New-Object byte[] $blobByteSize + [HstiTest3+HstiProviderErrorDuple[]]$providerErrors = New-Object HstiTest3+HstiProviderErrorDuple[] $providerErrorDupleCount + $hr = [HstiTest3]::QueryHSTIdetails([ref] $overallError, $providerErrors, [ref] $providerErrorDupleCount, $blob, [ref] $blobByteSize) + $string = $null + $blob | foreach { $string = $string + $_.ToString("X2")+"," } + + $hstiStatus = New-Object bool + $hr = [HstiTest3]::QueryHSTI([ref] $hstiStatus) + + LogAndConsole "HSTI Duple Count: $providerErrorDupleCount" + LogAndConsole "HSTI Blob size: $blobByteSize" + LogAndConsole "String: $string" + LogAndConsole "HSTIStatus: $hstiStatus" + if(($blobByteSize -gt 512) -and ($providerErrorDupleCount -gt 0) -and $hstiStatus) + { + LogAndConsoleSuccess "HSTI validation successful" + } + elseif(($providerErrorDupleCount -eq 0) -or ($blobByteSize -le 512)) + { + LogAndConsoleWarning "HSTI is absent" + $DGVerifyWarn.AppendLine("HSTI is absent") | Out-Null + } + else + { + $ErrorMessage = "HSTI validation failed" + if($HLK) + { + LogAndConsoleError $ErrorMessage + $DGVerifyCrit.AppendLine($ErrorMessage) | Out-Null + } + else + { + LogAndConsoleWarning $ErrorMessage + $DGVerifyWarn.AppendLine("HSTI is absent") | Out-Null + } + } + + } + catch + { + LogAndConsoleError $_.Exception.Message + LogAndConsoleError "Instantiate-HSTI failed" + } +} + +function CheckDGRunning($_val) +{ + $DGObj = Get-CimInstance -classname Win32_DeviceGuard -namespace root\Microsoft\Windows\DeviceGuard + for($i=0; $i -lt $DGObj.SecurityServicesRunning.length; $i++) + { + if($DGObj.SecurityServicesRunning[$i] -eq $_val) + { + return 1 + } + + } + return 0 +} + +function CheckDGFeatures($_val) +{ + $DGObj = Get-CimInstance -classname Win32_DeviceGuard -namespace root\Microsoft\Windows\DeviceGuard + Log "DG_obj $DG_obj" + Log "DG_obj.AvailableSecurityProperties.length $DG_obj.AvailableSecurityProperties.length" + for($i=0; $i -lt $DGObj.AvailableSecurityProperties.length; $i++) + { + if($DGObj.AvailableSecurityProperties[$i] -eq $_val) + { + return 1 + } + + } + return 0 +} + +function PrintConfigCIDetails($_ConfigCIState) +{ + $_ConfigCIRunning = "Config-CI is enabled and running." + $_ConfigCIDisabled = "Config-CI is not running." + $_ConfigCIMode = "Not Enabled" + switch ($_ConfigCIState) + { + 0 { $_ConfigCIMode = "Not Enabled" } + 1 { $_ConfigCIMode = "Audit mode" } + 2 { $_ConfigCIMode = "Enforced mode" } + default { $_ConfigCIMode = "Not Enabled" } + } + + if($_ConfigCIState -ge 1) + { + LogAndConsoleSuccess "$_ConfigCIRunning ($_ConfigCIMode)" + } + else + { + LogAndConsoleWarning "$_ConfigCIDisabled ($_ConfigCIMode)" + } +} + +function PrintHVCIDetails($_HVCIState) +{ + $_HvciRunning = "HVCI is enabled and running." + $_HvciDisabled = "HVCI is not running." + + if($_HVCIState) + { + LogAndConsoleSuccess $_HvciRunning + } + else + { + LogAndConsoleWarning $_HvciDisabled + } +} + +function PrintCGDetails ($_CGState) +{ + $_CGRunning = "Credential-Guard is enabled and running." + $_CGDisabled = "Credential-Guard is not running." 
+ + if($_CGState) + { + LogAndConsoleSuccess $_CGRunning + } + else + { + LogAndConsoleWarning $_CGDisabled + } +} + +if(![IO.Directory]::Exists($path)) +{ + New-Item -ItemType directory -Path $path +} +else +{ + #Do Nothing!! +} + +function IsRedstone +{ + $_osVersion = [environment]::OSVersion.Version + Log $_osVersion + #Check if build Major is Windows 10 + if($_osVersion.Major -lt 10) + { + return 0 + } + #Check if the build is post Threshold2 (1511 release) => Redstone + if($_osVersion.Build -gt 10586) + { + return 1 + } + #default return False + return 0 +} + +function ExecuteCommandAndLog($_cmd) +{ + try + { + Log "Executing: $_cmd" + $CmdOutput = Invoke-Expression $_cmd | Out-String + Log "Output: $CmdOutput" + } + catch + { + Log "Exception while exectuing $_cmd" + Log $_.Exception.Message + } +} + +function PrintRebootWarning +{ + LogAndConsoleWarning "Please reboot the machine, for settings to be applied." +} + +function AutoRebootHelper +{ + if($AutoReboot) + { + LogAndConsole "PC will restart in 30 seconds" + ExecuteCommandAndLog 'shutdown /r /t 30' + } + else + { + PrintRebootWarning + } + +} + +function VerifierReset +{ + $verifier_state = verifier /query | Out-String + if(!$verifier_state.ToString().Contains("No drivers are currently verified.")) + { + ExecuteCommandAndLog 'verifier.exe /reset' + } + AutoRebootHelper +} + +function PrintHardwareReq +{ + LogAndConsole "###########################################################################" + LogAndConsole "OS and Hardware requirements for enabling Device Guard and Credential Guard" + LogAndConsole " 1. OS SKUs: Available only on these OS Skus - Enterprise, Server, Education, Enterprise IoT, Pro, and Home" + LogAndConsole " 2. Hardware: Recent hardware that supports virtualization extension with SLAT" + LogAndConsole "To learn more please visit: https://aka.ms/dgwhcr" + LogAndConsole "########################################################################### `n" +} + +function CheckDriverCompat +{ + $_HVCIState = CheckDGRunning(2) + if($_HVCIState) + { + LogAndConsoleWarning "HVCI is already enabled on this machine, driver compat list might not be complete." + LogAndConsoleWarning "Please disable HVCI and run the script again..." + } + $verifier_state = verifier /query | Out-String + if($verifier_state.ToString().Contains("No drivers are currently verified.")) + { + LogAndConsole "Enabling Driver verifier" + verifier.exe /flags 0x02000000 /all /log.code_integrity + + LogAndConsole "Enabling Driver Verifier and Rebooting system" + Log $verifier_state + LogAndConsole "Please re-execute this script after reboot...." + if($AutoReboot) + { + LogAndConsole "PC will restart in 30 seconds" + ExecuteCommandAndLog 'shutdown /r /t 30' + } + else + { + LogAndConsole "Please reboot manually and run the script again...." 
+ } + exit + } + else + { + LogAndConsole "Driver verifier already enabled" + Log $verifier_state + ListDrivers($verifier_state.Trim().ToLowerInvariant()) + } +} +function IsDomainController +{ + $_isDC = 0 + $CompConfig = Get-WmiObject Win32_ComputerSystem + foreach ($ObjItem in $CompConfig) + { + $Role = $ObjItem.DomainRole + Log "Role=$Role" + Switch ($Role) + { + 0 { Log "Standalone Workstation" } + 1 { Log "Member Workstation" } + 2 { Log "Standalone Server" } + 3 { Log "Member Server" } + 4 + { + Log "Backup Domain Controller" + $_isDC=1 + break + } + 5 + { + Log "Primary Domain Controller" + $_isDC=1 + break + } + default { Log "Unknown Domain Role" } + } + } + return $_isDC +} + +function CheckOSSKU +{ + $osname = $((gwmi win32_operatingsystem).Name).ToLower() + $_SKUSupported = 0 + Log "OSNAME:$osname" + $SKUarray = @("Enterprise", "Education", "IoT", "Windows Server", "Pro", "Home") + $HLKAllowed = @("microsoft windows 10 pro") + foreach ($SKUent in $SKUarray) + { + if($osname.ToString().Contains($SKUent.ToLower())) + { + $_SKUSupported = 1 + break + } + } + + # For running HLK tests only, professional SKU's are marked as supported. + if($HLK) + { + if($osname.ToString().Contains($HLKAllowed.ToLower())) + { + $_SKUSupported = 1 + } + } + $_isDomainController = IsDomainController + if($_SKUSupported) + { + LogAndConsoleSuccess "This PC edition is Supported for DeviceGuard"; + if(($_isDomainController -eq 1) -and !$HVCI -and !$DG) + { + LogAndConsoleError "This PC is configured as a Domain Controller, Credential Guard is not supported on DC." + } + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "OSSKU" /t REG_DWORD /d 2 /f ' + } + else + { + LogAndConsoleError "This PC edition is Unsupported for Device Guard" + $DGVerifyCrit.AppendLine("OS SKU unsupported") | Out-Null + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "OSSKU" /t REG_DWORD /d 0 /f ' + } +} + +function CheckOSArchitecture +{ + $OSArch = $(gwmi win32_operatingsystem).OSArchitecture + Log $OSArch + if($OSArch.Contains("64-bit")) + { + LogAndConsoleSuccess "64 bit arch....." + } + elseif($OSArch.Contains("32-bit")) + { + LogAndConsoleError "32 bit arch...." + $DGVerifyCrit.AppendLine("32 Bit OS, OS Architecture failure..") | Out-Null + } + else + { + LogAndConsoleError "Unknown architecture" + $DGVerifyCrit.AppendLine("Unknown OS, OS Architecture failure..") | Out-Null + } +} + +function CheckSecureBootState +{ + $_secureBoot = Confirm-SecureBootUEFI + Log $_secureBoot + if($_secureBoot) + { + LogAndConsoleSuccess "Secure Boot is present" + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SecureBoot" /t REG_DWORD /d 2 /f ' + } + else + { + LogAndConsoleError "Secure Boot is absent / not enabled." + LogAndConsoleError "If Secure Boot is supported on the system, enable Secure Boot in the BIOS and run the script again." 
+ ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SecureBoot" /t REG_DWORD /d 0 /f ' + $DGVerifyCrit.AppendLine("Secure boot validation failed.") | Out-Null + } +} + +function CheckVirtualization +{ + $_vmmExtension = $(gwmi -Class Win32_processor).VMMonitorModeExtensions + $_vmFirmwareExtension = $(gwmi -Class Win32_processor).VirtualizationFirmwareEnabled + $_vmHyperVPresent = (gcim -Class Win32_ComputerSystem).HypervisorPresent + Log "VMMonitorModeExtensions $_vmmExtension" + Log "VirtualizationFirmwareEnabled $_vmFirmwareExtension" + Log "HyperVisorPresent $_vmHyperVPresent" + + #success if either processor supports and enabled or if hyper-v is present + if(($_vmmExtension -and $_vmFirmwareExtension) -or $_vmHyperVPresent ) + { + LogAndConsoleSuccess "Virtualization firmware check passed" + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "Virtualization" /t REG_DWORD /d 2 /f ' + } + else + { + LogAndConsoleError "Virtualization firmware check failed." + LogAndConsoleError "If Virtualization extensions are supported on the system, enable hardware virtualization (Intel Virtualization Technology, Intel VT-x, Virtualization Extensions, or similar) in the BIOS and run the script again." + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "Virtualization" /t REG_DWORD /d 0 /f ' + $DGVerifyCrit.AppendLine("Virtualization firmware check failed.") | Out-Null + } +} + +function CheckTPM +{ + $TPMLockout = $(get-tpm).LockoutCount + + if($TPMLockout) + { + + if($TPMLockout.ToString().Contains("Not Supported for TPM 1.2")) + { + if($HLK) + { + LogAndConsoleSuccess "TPM 1.2 is present." + } + else + { + $WarningMsg = "TPM 1.2 is Present. TPM 2.0 is Preferred." + LogAndConsoleWarning $WarningMsg + $DGVerifyWarn.AppendLine($WarningMsg) | Out-Null + } + } + else + { + LogAndConsoleSuccess "TPM 2.0 is present." 
+ } + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "TPM" /t REG_DWORD /d 2 /f ' + } + else + { + $WarningMsg = "TPM is absent or not ready for use" + if($HLK) + { + LogAndConsoleError $WarningMsg + $DGVerifyCrit.AppendLine($WarningMsg) | Out-Null + } + else + { + LogAndConsoleWarning $WarningMsg + $DGVerifyWarn.AppendLine($WarningMsg) | Out-Null + } + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "TPM" /t REG_DWORD /d 0 /f ' + } +} + +function CheckSecureMOR +{ + $isSecureMOR = CheckDGFeatures(4) + Log "isSecureMOR= $isSecureMOR " + if($isSecureMOR -eq 1) + { + LogAndConsoleSuccess "Secure MOR is available" + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SecureMOR" /t REG_DWORD /d 2 /f ' + } + else + { + $WarningMsg = "Secure MOR is absent" + if($HLK) + { + LogAndConsoleError $WarningMsg + $DGVerifyCrit.AppendLine($WarningMsg) | Out-Null + } + else + { + LogAndConsoleWarning $WarningMsg + $DGVerifyWarn.AppendLine($WarningMsg) | Out-Null + } + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SecureMOR" /t REG_DWORD /d 0 /f ' + } +} + +function CheckNXProtection +{ + $isNXProtected = CheckDGFeatures(5) + Log "isNXProtected= $isNXProtected " + if($isNXProtected -eq 1) + { + LogAndConsoleSuccess "NX Protector is available" + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "UEFINX" /t REG_DWORD /d 2 /f ' + } + else + { + LogAndConsoleWarning "NX Protector is absent" + $DGVerifyWarn.AppendLine("NX Protector is absent") | Out-Null + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "UEFINX" /t REG_DWORD /d 0 /f ' + } +} + +function CheckSMMProtection +{ + $isSMMMitigated = CheckDGFeatures(6) + Log "isSMMMitigated= $isSMMMitigated " + if($isSMMMitigated -eq 1) + { + LogAndConsoleSuccess "SMM Mitigation is available" + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SMMProtections" /t REG_DWORD /d 2 /f ' + } + else + { + LogAndConsoleWarning "SMM Mitigation is absent" + $DGVerifyWarn.AppendLine("SMM Mitigation is absent") | Out-Null + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "SMMProtections" /t REG_DWORD /d 0 /f ' + } +} + +function CheckHSTI +{ + LogAndConsole "Copying HSTITest.dll" + try + { + $HSTITest_Decoded = [System.Convert]::FromBase64String($HSTITest_Encoded) + [System.IO.File]::WriteAllBytes("$env:windir\System32\hstitest.dll",$HSTITest_Decoded) + + } + catch + { + LogAndConsole $_.Exception.Message + LogAndConsole "Copying and loading HSTITest.dll failed" + } + + Instantiate-Kernel32 + Instantiate-HSTI +} + +function PrintToolVersion +{ + LogAndConsole "###########################################################################" + LogAndConsole "Readiness Tool Version 3.4 Release. `nTool to check if your device is capable to run Device Guard and Credential Guard." + LogAndConsole "###########################################################################" + +} + +PrintToolVersion + +if(!($Ready) -and !($Capable) -and !($Enable) -and !($Disable) -and !($Clear) -and !($ResetVerifier)) +{ + #Print Usage if none of the options are specified + LogAndConsoleWarning "How to read the output:" + LogAndConsoleWarning " 1. 
Red Errors: Basic things are missing that will prevent enabling and using DG/CG" + LogAndConsoleWarning " 2. Yellow Warnings: This device can be used to enable and use DG/CG, but additional security benefits will be absent. To learn more please go through: https://aka.ms/dgwhcr" + LogAndConsoleWarning " 3. Green Messages: This device is fully compliant with DG/CG requirements`n" + LogAndConsoleWarning "###########################################################################" + LogAndConsoleWarning "Hardware requirements for enabling Device Guard and Credential Guard" + LogAndConsoleWarning " 1. Hardware: Recent hardware that supports virtualization extension with SLAT" + LogAndConsoleWarning "########################################################################### `n" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -[Capable/Ready/Enable/Disable/Clear] -[DG/CG/HVCI] -[AutoReboot] -Path" + LogAndConsoleWarning "Log file with details is found here: C:\DGLogs `n" + + LogAndConsoleWarning "To Enable DG/CG. If you have a custom SIPolicy.p7b then use the -Path parameter else the hardcoded default policy is used" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Enable OR DG_Readiness.ps1 -Enable -Path `n" + + LogAndConsoleWarning "To Enable only HVCI" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Enable -HVCI `n" + + LogAndConsoleWarning "To Enable only CG" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Enable -CG `n" + + LogAndConsoleWarning "To Verify if DG/CG is enabled" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Ready `n" + + LogAndConsoleWarning "To Disable DG/CG." + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Disable `n" + + LogAndConsoleWarning "To Verify if DG/CG is disabled" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Ready `n" + + LogAndConsoleWarning "To Verify if this device is DG/CG Capable" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Capable" + + LogAndConsoleWarning "To Verify if this device is HVCI Capable" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -Capable -HVCI" + + LogAndConsoleWarning "To Auto reboot with each option" + LogAndConsoleWarning "Usage: DG_Readiness.ps1 -[Capable/Enable/Disable] -AutoReboot" + LogAndConsoleWarning "###########################################################################" + LogAndConsoleWarning "Readiness Tool with '-capable' is run the following RegKey values are set:" + LogAndConsoleWarning "HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities" + LogAndConsoleWarning "CG_Capable" + LogAndConsoleWarning "DG_Capable" + LogAndConsoleWarning "HVCI_Capable" + LogAndConsoleWarning "Value 0 = not possible to enable DG/CG/HVCI on this device" + LogAndConsoleWarning "Value 1 = not fully compatible but has sufficient firmware/hardware/software features to enable DG/CG/HVCI" + LogAndConsoleWarning "Value 2 = fully compatible for DG/CG/HVCI" + LogAndConsoleWarning "########################################################################### `n" +} + +$user = [Security.Principal.WindowsIdentity]::GetCurrent(); +$TestForAdmin = (New-Object Security.Principal.WindowsPrincipal $user).IsInRole([Security.Principal.WindowsBuiltinRole]::Administrator) + +if(!$TestForAdmin) +{ + LogAndConsoleError "Not an Admin user, pls execute this script as an Admin user exiting..." + exit +} + +$isRunningOnVM = (get-wmiobject win32_computersystem).model +if($isRunningOnVM.Contains("Virtual")) +{ + LogAndConsoleWarning "Running on a Virtual Machine. 
DG/CG is supported only if both guest VM and host machine are running with Windows 10, version 1703 or later with English localization." +} + +<# Check the DG status if enabled or disabled, meaning if the device is ready or not #> +if($Ready) +{ + PrintHardwareReq + + $DGRunning = $(Get-CimInstance -classname Win32_DeviceGuard -namespace root\Microsoft\Windows\DeviceGuard).SecurityServicesRunning + $_ConfigCIState = $(Get-CimInstance -classname Win32_DeviceGuard -namespace root\Microsoft\Windows\DeviceGuard).CodeIntegrityPolicyEnforcementStatus + Log "Current DGRunning = $DGRunning, ConfigCI= $_ConfigCIState" + $_HVCIState = CheckDGRunning(2) + $_CGState = CheckDGRunning(1) + + if($HVCI) + { + Log "_HVCIState: $_HVCIState" + PrintHVCIDetails $_HVCIState + } + elseif($CG) + { + Log "_CGState: $_CGState" + PrintCGDetails $_CGState + + if($_CGState) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "CG_Running" /t REG_DWORD /d 1 /f' + } + else + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "CG_Running" /t REG_DWORD /d 0 /f' + } + } + elseif($DG) + { + Log "_HVCIState: $_HVCIState, _ConfigCIState: $_ConfigCIState" + + PrintHVCIDetails $_HVCIState + PrintConfigCIDetails $_ConfigCIState + + if($_ConfigCIState -and $_HVCIState) + { + LogAndConsoleSuccess "HVCI, and Config-CI are enabled and running." + + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "DG_Running" /t REG_DWORD /d 1 /f' + } + else + { + LogAndConsoleWarning "Not all services are running." + + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "DG_Running" /t REG_DWORD /d 0 /f' + } + } + else + { + Log "_CGState: $_CGState, _HVCIState: $_HVCIState, _ConfigCIState: $_ConfigCIState" + + PrintCGDetails $_CGState + PrintHVCIDetails $_HVCIState + PrintConfigCIDetails $_ConfigCIState + + if(($DGRunning.Length -ge 2) -and ($_CGState) -and ($_HVCIState) -and ($_ConfigCIState -ge 1)) + { + LogAndConsoleSuccess "HVCI, Credential-Guard, and Config-CI are enabled and running." + } + else + { + LogAndConsoleWarning "Not all services are running." 
+ } + } +} + +<# Enable and Disable #> +if($Enable) +{ + PrintHardwareReq + + LogAndConsole "Enabling Device Guard and Credential Guard" + LogAndConsole "Setting RegKeys to enable DG/CG" + + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "EnableVirtualizationBasedSecurity" /t REG_DWORD /d 1 /f' + #Only SecureBoot is required as part of RequirePlatformSecurityFeatures + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "RequirePlatformSecurityFeatures" /t REG_DWORD /d 1 /f' + + $_isRedstone = IsRedstone + if(!$_isRedstone) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "Unlocked" /t REG_DWORD /d 1 /f' + } + else + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "Locked" /t REG_DWORD /d 0 /f' + } + + if(!$HVCI -and !$DG) + { + # value is 2 for both Th2 and RS1 + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\Lsa" /v "LsaCfgFlags" /t REG_DWORD /d 2 /f' + } + if(!$CG) + { + if(!$_isRedstone) + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "HypervisorEnforcedCodeIntegrity" /t REG_DWORD /d 1 /f' + } + else + { + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Scenarios\HypervisorEnforcedCodeIntegrity" /v "Enabled" /t REG_DWORD /d 1 /f' + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Scenarios\HypervisorEnforcedCodeIntegrity" /v "Locked" /t REG_DWORD /d 0 /f' + } + } + + try + { + if(!$HVCI -and !$CG) + { + if(!$SIPolicyPath) + { + Log "Writing Decoded SIPolicy.p7b" + $SIPolicy_Decoded = [System.Convert]::FromBase64String($SIPolicy_Encoded) + [System.IO.File]::WriteAllBytes("$env:windir\System32\CodeIntegrity\SIPolicy.p7b",$SIPolicy_Decoded) + } + else + { + LogAndConsole "Copying user provided SIpolicy.p7b" + $CmdOutput = Copy-Item $SIPolicyPath "$env:windir\System32\CodeIntegrity\SIPolicy.p7b" | Out-String + Log $CmdOutput + } + } + } + catch + { + LogAndConsole "Writing SIPolicy.p7b file failed" + } + + LogAndConsole "Enabling Hyper-V and IOMMU" + $_isRedstone = IsRedstone + if(!$_isRedstone) + { + LogAndConsole "OS Not Redstone, enabling IsolatedUserMode separately" + #Enable/Disable IOMMU seperately + ExecuteCommandAndLog 'DISM.EXE /Online /Enable-Feature:IsolatedUserMode /NoRestart' + } + $CmdOutput = DISM.EXE /Online /Enable-Feature:Microsoft-Hyper-V-Hypervisor /All /NoRestart | Out-String + if(!$CmdOutput.Contains("The operation completed successfully.")) + { + $CmdOutput = DISM.EXE /Online /Enable-Feature:Microsoft-Hyper-V-Online /All /NoRestart | Out-String + } + + Log $CmdOutput + if($CmdOutput.Contains("The operation completed successfully.")) + { + LogAndConsoleSuccess "Enabling Hyper-V and IOMMU successful" + #Reg key for HLK validation of DISM.EXE step + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "HyperVEnabled" /t REG_DWORD /d 1 /f' + } + else + { + LogAndConsoleWarning "Enabling Hyper-V failed please check the log file" + #Reg key for HLK validation of DISM.EXE step + ExecuteCommandAndLog 'REG ADD "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities\" /v "HyperVEnabled" /t REG_DWORD /d 0 /f' + } + AutoRebootHelper +} + +if($Disable) +{ + LogAndConsole "Disabling Device Guard and Credential Guard" + LogAndConsole "Deleting RegKeys to disable DG/CG" + + ExecuteCommandAndLog 'REG DELETE 
"HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "EnableVirtualizationBasedSecurity" /f' + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "RequirePlatformSecurityFeatures" /f' + + $_isRedstone = IsRedstone + if(!$_isRedstone) + { + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "NoLock" /f' + } + else + { + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "Locked" /f' + } + + if(!$CG) + { + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard" /v "HypervisorEnforcedCodeIntegrity" /f' + if($_isRedstone) + { + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Scenarios\HypervisorEnforcedCodeIntegrity" /f' + } + } + + if(!$HVCI -and !$DG) + { + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\Lsa" /v "LsaCfgFlags" /f' + } + + if(!$HVCI -and !$CG) + { + ExecuteCommandAndLog 'del "$env:windir\System32\CodeIntegrity\SIPolicy.p7b"' + } + + if(!$HVCI -and !$DG -and !$CG) + { + LogAndConsole "Disabling Hyper-V and IOMMU" + $_isRedstone = IsRedstone + if(!$_isRedstone) + { + LogAndConsole "OS Not Redstone, disabling IsolatedUserMode separately" + #Enable/Disable IOMMU seperately + ExecuteCommandAndLog 'DISM.EXE /Online /disable-Feature /FeatureName:IsolatedUserMode /NoRestart' + } + $CmdOutput = DISM.EXE /Online /disable-Feature /FeatureName:Microsoft-Hyper-V-Hypervisor /NoRestart | Out-String + if(!$CmdOutput.Contains("The operation completed successfully.")) + { + $CmdOutput = DISM.EXE /Online /disable-Feature /FeatureName:Microsoft-Hyper-V-Online /NoRestart | Out-String + } + Log $CmdOutput + if($CmdOutput.Contains("The operation completed successfully.")) + { + LogAndConsoleSuccess "Disabling Hyper-V and IOMMU successful" + } + else + { + LogAndConsoleWarning "Disabling Hyper-V failed please check the log file" + } + + #set of commands to run SecConfig.efi to delete UEFI variables if were set in pre OS + #these steps can be performed even if the UEFI variables were not set - if not set it will lead to No-Op but this can be run in general always + #this requires a reboot and accepting the prompt in the Pre-OS which is self explanatory in the message that is displayed in pre-OS + $FreeDrive = ls function:[s-z]: -n | ?{ !(test-path $_) } | random + Log "FreeDrive=$FreeDrive" + ExecuteCommandAndLog 'mountvol $FreeDrive /s' + $CmdOutput = Copy-Item "$env:windir\System32\SecConfig.efi" $FreeDrive\EFI\Microsoft\Boot\SecConfig.efi -Force | Out-String + LogAndConsole $CmdOutput + ExecuteCommandAndLog 'bcdedit /create "{0cb3b571-2f2e-4343-a879-d86a476d7215}" /d DGOptOut /application osloader' + ExecuteCommandAndLog 'bcdedit /set "{0cb3b571-2f2e-4343-a879-d86a476d7215}" path \EFI\Microsoft\Boot\SecConfig.efi' + ExecuteCommandAndLog 'bcdedit /set "{bootmgr}" bootsequence "{0cb3b571-2f2e-4343-a879-d86a476d7215}"' + ExecuteCommandAndLog 'bcdedit /set "{0cb3b571-2f2e-4343-a879-d86a476d7215}" loadoptions DISABLE-LSA-ISO,DISABLE-VBS' + ExecuteCommandAndLog 'bcdedit /set "{0cb3b571-2f2e-4343-a879-d86a476d7215}" device partition=$FreeDrive' + ExecuteCommandAndLog 'mountvol $FreeDrive /d' + #steps complete + + } + AutoRebootHelper +} + +if($Clear) +{ + ExecuteCommandAndLog 'REG DELETE "HKLM\SYSTEM\CurrentControlSet\Control\DeviceGuard\Capabilities" /f' + VerifierReset +} + +if($ResetVerifier) +{ + VerifierReset +} + +<# Is machine Device Guard / Cred Guard Capable and Verify #> +if($Capable) +{ + 
PrintHardwareReq + + LogAndConsole "Checking if the device is DG/CG Capable" + + $_isRedstone = IsRedstone + if(!$_isRedstone) + { + LogAndConsoleWarning "Capable is currently fully supported in Redstone only.." + } + $_StepCount = 1 + if(!$CG) + { + LogAndConsole " ====================== Step $_StepCount Driver Compat ====================== " + $_StepCount++ + CheckDriverCompat + } + + LogAndConsole " ====================== Step $_StepCount Secure boot present ====================== " + $_StepCount++ + CheckSecureBootState + + if(!$HVCI -and !$DG -and !$CG) + { + #check only if sub-options are absent + LogAndConsole " ====================== Step $_StepCount MS UEFI HSTI tests ====================== " + $_StepCount++ + CheckHSTI + } + + LogAndConsole " ====================== Step $_StepCount OS Architecture ====================== " + $_StepCount++ + CheckOSArchitecture + + LogAndConsole " ====================== Step $_StepCount Supported OS SKU ====================== " + $_StepCount++ + CheckOSSKU + + LogAndConsole " ====================== Step $_StepCount Virtualization Firmware ====================== " + $_StepCount++ + CheckVirtualization + + if(!$HVCI -and !$DG) + { + LogAndConsole " ====================== Step $_StepCount TPM version ====================== " + $_StepCount++ + CheckTPM + + LogAndConsole " ====================== Step $_StepCount Secure MOR ====================== " + $_StepCount++ + CheckSecureMOR + } + + LogAndConsole " ====================== Step $_StepCount NX Protector ====================== " + $_StepCount++ + CheckNXProtection + + LogAndConsole " ====================== Step $_StepCount SMM Mitigation ====================== " + $_StepCount++ + CheckSMMProtection + + LogAndConsole " ====================== End Check ====================== " + + LogAndConsole " ====================== Summary ====================== " + ListSummary + LogAndConsole "To learn more about required hardware and software please visit: https://aka.ms/dgwhcr" +} + + +# SIG # Begin signature block +# MIIkiAYJKoZIhvcNAQcCoIIkeTCCJHUCAQExDzANBglghkgBZQMEAgEFADB5Bgor +# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG +# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCCHxqndWS8H7Xe0 +# yxglOZe0uxNr5UZCwWfGVssI1GUNHaCCDYEwggX/MIID56ADAgECAhMzAAABA14l +# HJkfox64AAAAAAEDMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNVBAYTAlVTMRMwEQYD +# VQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNy +# b3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25p +# bmcgUENBIDIwMTEwHhcNMTgwNzEyMjAwODQ4WhcNMTkwNzI2MjAwODQ4WjB0MQsw +# CQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9u +# ZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMR4wHAYDVQQDExVNaWNy +# b3NvZnQgQ29ycG9yYXRpb24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +# AQDRlHY25oarNv5p+UZ8i4hQy5Bwf7BVqSQdfjnnBZ8PrHuXss5zCvvUmyRcFrU5 +# 3Rt+M2wR/Dsm85iqXVNrqsPsE7jS789Xf8xly69NLjKxVitONAeJ/mkhvT5E+94S +# nYW/fHaGfXKxdpth5opkTEbOttU6jHeTd2chnLZaBl5HhvU80QnKDT3NsumhUHjR +# hIjiATwi/K+WCMxdmcDt66VamJL1yEBOanOv3uN0etNfRpe84mcod5mswQ4xFo8A +# DwH+S15UD8rEZT8K46NG2/YsAzoZvmgFFpzmfzS/p4eNZTkmyWPU78XdvSX+/Sj0 +# NIZ5rCrVXzCRO+QUauuxygQjAgMBAAGjggF+MIIBejAfBgNVHSUEGDAWBgorBgEE +# AYI3TAgBBggrBgEFBQcDAzAdBgNVHQ4EFgQUR77Ay+GmP/1l1jjyA123r3f3QP8w +# UAYDVR0RBEkwR6RFMEMxKTAnBgNVBAsTIE1pY3Jvc29mdCBPcGVyYXRpb25zIFB1 +# ZXJ0byBSaWNvMRYwFAYDVQQFEw0yMzAwMTIrNDM3OTY1MB8GA1UdIwQYMBaAFEhu +# ZOVQBdOCqhc3NyK1bajKdQKVMFQGA1UdHwRNMEswSaBHoEWGQ2h0dHA6Ly93d3cu +# bWljcm9zb2Z0LmNvbS9wa2lvcHMvY3JsL01pY0NvZFNpZ1BDQTIwMTFfMjAxMS0w +# 
Ny0wOC5jcmwwYQYIKwYBBQUHAQEEVTBTMFEGCCsGAQUFBzAChkVodHRwOi8vd3d3 +# Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY0NvZFNpZ1BDQTIwMTFfMjAx +# MS0wNy0wOC5jcnQwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAgEAn/XJ +# Uw0/DSbsokTYDdGfY5YGSz8eXMUzo6TDbK8fwAG662XsnjMQD6esW9S9kGEX5zHn +# wya0rPUn00iThoj+EjWRZCLRay07qCwVlCnSN5bmNf8MzsgGFhaeJLHiOfluDnjY +# DBu2KWAndjQkm925l3XLATutghIWIoCJFYS7mFAgsBcmhkmvzn1FFUM0ls+BXBgs +# 1JPyZ6vic8g9o838Mh5gHOmwGzD7LLsHLpaEk0UoVFzNlv2g24HYtjDKQ7HzSMCy +# RhxdXnYqWJ/U7vL0+khMtWGLsIxB6aq4nZD0/2pCD7k+6Q7slPyNgLt44yOneFuy +# bR/5WcF9ttE5yXnggxxgCto9sNHtNr9FB+kbNm7lPTsFA6fUpyUSj+Z2oxOzRVpD +# MYLa2ISuubAfdfX2HX1RETcn6LU1hHH3V6qu+olxyZjSnlpkdr6Mw30VapHxFPTy +# 2TUxuNty+rR1yIibar+YRcdmstf/zpKQdeTr5obSyBvbJ8BblW9Jb1hdaSreU0v4 +# 6Mp79mwV+QMZDxGFqk+av6pX3WDG9XEg9FGomsrp0es0Rz11+iLsVT9qGTlrEOla +# P470I3gwsvKmOMs1jaqYWSRAuDpnpAdfoP7YO0kT+wzh7Qttg1DO8H8+4NkI6Iwh +# SkHC3uuOW+4Dwx1ubuZUNWZncnwa6lL2IsRyP64wggd6MIIFYqADAgECAgphDpDS +# AAAAAAADMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJVUzETMBEGA1UECBMK +# V2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0 +# IENvcnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUm9vdCBDZXJ0aWZpY2F0 +# ZSBBdXRob3JpdHkgMjAxMTAeFw0xMTA3MDgyMDU5MDlaFw0yNjA3MDgyMTA5MDla +# MH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdS +# ZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMT +# H01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTEwggIiMA0GCSqGSIb3DQEB +# AQUAA4ICDwAwggIKAoICAQCr8PpyEBwurdhuqoIQTTS68rZYIZ9CGypr6VpQqrgG +# OBoESbp/wwwe3TdrxhLYC/A4wpkGsMg51QEUMULTiQ15ZId+lGAkbK+eSZzpaF7S +# 35tTsgosw6/ZqSuuegmv15ZZymAaBelmdugyUiYSL+erCFDPs0S3XdjELgN1q2jz +# y23zOlyhFvRGuuA4ZKxuZDV4pqBjDy3TQJP4494HDdVceaVJKecNvqATd76UPe/7 +# 4ytaEB9NViiienLgEjq3SV7Y7e1DkYPZe7J7hhvZPrGMXeiJT4Qa8qEvWeSQOy2u +# M1jFtz7+MtOzAz2xsq+SOH7SnYAs9U5WkSE1JcM5bmR/U7qcD60ZI4TL9LoDho33 +# X/DQUr+MlIe8wCF0JV8YKLbMJyg4JZg5SjbPfLGSrhwjp6lm7GEfauEoSZ1fiOIl +# XdMhSz5SxLVXPyQD8NF6Wy/VI+NwXQ9RRnez+ADhvKwCgl/bwBWzvRvUVUvnOaEP +# 6SNJvBi4RHxF5MHDcnrgcuck379GmcXvwhxX24ON7E1JMKerjt/sW5+v/N2wZuLB +# l4F77dbtS+dJKacTKKanfWeA5opieF+yL4TXV5xcv3coKPHtbcMojyyPQDdPweGF +# RInECUzF1KVDL3SV9274eCBYLBNdYJWaPk8zhNqwiBfenk70lrC8RqBsmNLg1oiM +# CwIDAQABo4IB7TCCAekwEAYJKwYBBAGCNxUBBAMCAQAwHQYDVR0OBBYEFEhuZOVQ +# BdOCqhc3NyK1bajKdQKVMBkGCSsGAQQBgjcUAgQMHgoAUwB1AGIAQwBBMAsGA1Ud +# DwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFHItOgIxkEO5FAVO +# 4eqnxzHRI4k0MFoGA1UdHwRTMFEwT6BNoEuGSWh0dHA6Ly9jcmwubWljcm9zb2Z0 +# LmNvbS9wa2kvY3JsL3Byb2R1Y3RzL01pY1Jvb0NlckF1dDIwMTFfMjAxMV8wM18y +# Mi5jcmwwXgYIKwYBBQUHAQEEUjBQME4GCCsGAQUFBzAChkJodHRwOi8vd3d3Lm1p +# Y3Jvc29mdC5jb20vcGtpL2NlcnRzL01pY1Jvb0NlckF1dDIwMTFfMjAxMV8wM18y +# Mi5jcnQwgZ8GA1UdIASBlzCBlDCBkQYJKwYBBAGCNy4DMIGDMD8GCCsGAQUFBwIB +# FjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2RvY3MvcHJpbWFyeWNw +# cy5odG0wQAYIKwYBBQUHAgIwNB4yIB0ATABlAGcAYQBsAF8AcABvAGwAaQBjAHkA +# XwBzAHQAYQB0AGUAbQBlAG4AdAAuIB0wDQYJKoZIhvcNAQELBQADggIBAGfyhqWY +# 4FR5Gi7T2HRnIpsLlhHhY5KZQpZ90nkMkMFlXy4sPvjDctFtg/6+P+gKyju/R6mj +# 82nbY78iNaWXXWWEkH2LRlBV2AySfNIaSxzzPEKLUtCw/WvjPgcuKZvmPRul1LUd +# d5Q54ulkyUQ9eHoj8xN9ppB0g430yyYCRirCihC7pKkFDJvtaPpoLpWgKj8qa1hJ +# Yx8JaW5amJbkg/TAj/NGK978O9C9Ne9uJa7lryft0N3zDq+ZKJeYTQ49C/IIidYf +# wzIY4vDFLc5bnrRJOQrGCsLGra7lstnbFYhRRVg4MnEnGn+x9Cf43iw6IGmYslmJ +# aG5vp7d0w0AFBqYBKig+gj8TTWYLwLNN9eGPfxxvFX1Fp3blQCplo8NdUmKGwx1j +# NpeG39rz+PIWoZon4c2ll9DuXWNB41sHnIc+BncG0QaxdR8UvmFhtfDcxhsEvt9B +# xw4o7t5lL+yX9qFcltgA1qFGvVnzl6UJS0gQmYAf0AApxbGbpT9Fdx41xtKiop96 +# eiL6SJUfq/tHI4D1nvi/a7dLl+LrdXga7Oo3mXkYS//WsyNodeav+vyL6wuA6mk7 +# 
r/ww7QRMjt/fdW1jkT3RnVZOT7+AVyKheBEyIXrvQQqxP/uozKRdwaGIm1dxVk5I +# RcBCyZt2WwqASGv9eZ/BvW1taslScxMNelDNMYIWXTCCFlkCAQEwgZUwfjELMAkG +# A1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQx +# HjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEoMCYGA1UEAxMfTWljcm9z +# b2Z0IENvZGUgU2lnbmluZyBQQ0EgMjAxMQITMwAAAQNeJRyZH6MeuAAAAAABAzAN +# BglghkgBZQMEAgEFAKCB3jAZBgkqhkiG9w0BCQMxDAYKKwYBBAGCNwIBBDAcBgor +# BgEEAYI3AgELMQ4wDAYKKwYBBAGCNwIBFTAvBgkqhkiG9w0BCQQxIgQg7JuFbMny +# prWUtQLsUQtPig0r2O7J0E6gX1e2E8EVS74wcgYKKwYBBAGCNwIBDDFkMGKgJIAi +# AEQARwBfAFIAZQBhAGQAaQBuAGUAcwBzAF8AdgAzAC4ANqE6gDhcXHdpbnNlY3Rc +# c2NyYXRjaFxhbmRyZXdkYVxkZ3JlYWRpbmVzc1xkZ3JlYWRpbmVzc192My42IDAN +# BgkqhkiG9w0BAQEFAASCAQAGOszu1Q5CcLNirOE4ii1niIXKYuNI1TwR75E2Nz5N +# /2R9A8Y7nBA6spI3CU789yMxeXcnL+miNVvGfJ9/TZbEJOHjpdI9/pcAbi2hWgS1 +# 4IMOKlSK5BydA5TiWIEeUcUl+T9VF07RJ1bo3FcJjnlqa3CMOUeWRQTr0e1PwVrW +# B/fOpEVJMH+1MmPhXzGORPhzAoifT/2RebgK89bi41bpZQoqiuUwlRecGC6DJuVq +# U1Ozu1WkKlQqimzWtcNfk87IPrDnSfOdfSYBS9glAFZPEjNjUCuaYhme7f3Neudp +# 40e/7FjmKk9xDEyUs8rSHHR0baT6Q3Stb3uP2fh8rP55oYITtzCCE7MGCisGAQQB +# gjcDAwExghOjMIITnwYJKoZIhvcNAQcCoIITkDCCE4wCAQMxDzANBglghkgBZQME +# AgEFADCCAVgGCyqGSIb3DQEJEAEEoIIBRwSCAUMwggE/AgEBBgorBgEEAYRZCgMB +# MDEwDQYJYIZIAWUDBAIBBQAEIPLbteV6DqdVXi1De0Js60tbDSgFRatUr2vRlQHn +# NARGAgZbrQRCU6gYEzIwMTgxMDE5MjIxNzQyLjY0NlowBwIBAYACAfSggdSkgdEw +# gc4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdS +# ZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsT +# IE1pY3Jvc29mdCBPcGVyYXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFs +# ZXMgVFNTIEVTTjo3MjhELUM0NUYtRjlFQjElMCMGA1UEAxMcTWljcm9zb2Z0IFRp +# bWUtU3RhbXAgU2VydmljZaCCDx8wggT1MIID3aADAgECAhMzAAAA09CUVp0OvYMG +# AAAAAADTMA0GCSqGSIb3DQEBCwUAMHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpX +# YXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQg +# Q29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAy +# MDEwMB4XDTE4MDgyMzIwMjY0MFoXDTE5MTEyMzIwMjY0MFowgc4xCzAJBgNVBAYT +# AlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYD +# VQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsTIE1pY3Jvc29mdCBP +# cGVyYXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFsZXMgVFNTIEVTTjo3 +# MjhELUM0NUYtRjlFQjElMCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAgU2Vy +# dmljZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK7ynC6AF22joS/v +# TPZsIG82oovZ8kXNQcF6/17dZtRllU6pCGV8zMxSQOXTWD2MZRJ/OqfHUSYCNTPa +# knetNsrZhstlFNT09QBjjeVXayDG/aI8JPy91P5riOAFk/gvjnQCdcoV65OBF286 +# bs2lgUa6rc2qKHwDVpR1w+2jXrS8Jtz6omUgfB7CMpw1ZwMeQ/+Fb43EAIxeNXB5 +# uq/ZYPDA+iMitkdhrjQJgPKKQqhPiYcz3KdrAk34V6y/zUw8FuJ9Zi89actfoS0e +# AdSdWYDATi6oIiPAioWYQuwx6ZY+e5U8HcjGiA1bg9pnufqcnVLzInBxr8DVp1im +# mAhtkfUCAwEAAaOCARswggEXMB0GA1UdDgQWBBQoUcoPr2oQO5sHaVpYVKDsatRn +# eDAfBgNVHSMEGDAWgBTVYzpcijGQ80N7fEYbxTNoWoVtVTBWBgNVHR8ETzBNMEug +# SaBHhkVodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9N +# aWNUaW1TdGFQQ0FfMjAxMC0wNy0wMS5jcmwwWgYIKwYBBQUHAQEETjBMMEoGCCsG +# AQUFBzAChj5odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL2NlcnRzL01pY1Rp +# bVN0YVBDQV8yMDEwLTA3LTAxLmNydDAMBgNVHRMBAf8EAjAAMBMGA1UdJQQMMAoG +# CCsGAQUFBwMIMA0GCSqGSIb3DQEBCwUAA4IBAQA9YvD9FBa0sIj/Q8252GXwW0qQ +# aEm/oZXTh4eI6htIKASVxX8y1g4IVeD6O8YyXdBlzQUgr76B70pDgqyynwmJK6KB +# pg2bf6KOeHImc4pmofFc9EhYLZgXPXwqHJY1Rgwt4X1kCNNK6PTGeFlJproYry38 +# a8AuUm0oLJpf46TLC4wQv89vfyEhBed/Wv95Ro5fqn/tAQc8S/c0eq1CAdkMDzsJ +# q7lZmiEAMaVF0vKrcRvtVu7T5BZcTmP6bHNtzcDxnn7rB6TUgSREnWP5Di46Z9P6 +# 0XraNff0Ttit5Msy8ivsrcEa2CIxUgscbYDxAaWR8Ghb/rTVIEEWYBAVrF9vMIIG +# cTCCBFmgAwIBAgIKYQmBKgAAAAAAAjANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UE +# 
BhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAc +# BgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0 +# IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTAwHhcNMTAwNzAxMjEzNjU1 +# WhcNMjUwNzAxMjE0NjU1WjB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGlu +# Z3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBv +# cmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDCC +# ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKkdDbx3EYo6IOz8E5f1+n9p +# lGt0VBDVpQoAgoX77XxoSyxfxcPlYcJ2tz5mK1vwFVMnBDEfQRsalR3OCROOfGEw +# WbEwRA/xYIiEVEMM1024OAizQt2TrNZzMFcmgqNFDdDq9UeBzb8kYDJYYEbyWEeG +# MoQedGFnkV+BVLHPk0ySwcSmXdFhE24oxhr5hoC732H8RsEnHSRnEnIaIYqvS2SJ +# UGKxXf13Hz3wV3WsvYpCTUBR0Q+cBj5nf/VmwAOWRH7v0Ev9buWayrGo8noqCjHw +# 2k4GkbaICDXoeByw6ZnNPOcvRLqn9NxkvaQBwSAJk3jN/LzAyURdXhacAQVPIk0C +# AwEAAaOCAeYwggHiMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBTVYzpcijGQ +# 80N7fEYbxTNoWoVtVTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTALBgNVHQ8E +# BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBTV9lbLj+iiXGJo0T2U +# kFvXzpoYxDBWBgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3JsLm1pY3Jvc29mdC5j +# b20vcGtpL2NybC9wcm9kdWN0cy9NaWNSb29DZXJBdXRfMjAxMC0wNi0yMy5jcmww +# WgYIKwYBBQUHAQEETjBMMEoGCCsGAQUFBzAChj5odHRwOi8vd3d3Lm1pY3Jvc29m +# dC5jb20vcGtpL2NlcnRzL01pY1Jvb0NlckF1dF8yMDEwLTA2LTIzLmNydDCBoAYD +# VR0gAQH/BIGVMIGSMIGPBgkrBgEEAYI3LgMwgYEwPQYIKwYBBQUHAgEWMWh0dHA6 +# Ly93d3cubWljcm9zb2Z0LmNvbS9QS0kvZG9jcy9DUFMvZGVmYXVsdC5odG0wQAYI +# KwYBBQUHAgIwNB4yIB0ATABlAGcAYQBsAF8AUABvAGwAaQBjAHkAXwBTAHQAYQB0 +# AGUAbQBlAG4AdAAuIB0wDQYJKoZIhvcNAQELBQADggIBAAfmiFEN4sbgmD+BcQM9 +# naOhIW+z66bM9TG+zwXiqf76V20ZMLPCxWbJat/15/B4vceoniXj+bzta1RXCCtR +# gkQS+7lTjMz0YBKKdsxAQEGb3FwX/1z5Xhc1mCRWS3TvQhDIr79/xn/yN31aPxzy +# mXlKkVIArzgPF/UveYFl2am1a+THzvbKegBvSzBEJCI8z+0DpZaPWSm8tv0E4XCf +# Mkon/VWvL/625Y4zu2JfmttXQOnxzplmkIz/amJ/3cVKC5Em4jnsGUpxY517IW3D +# nKOiPPp/fZZqkHimbdLhnPkd/DjYlPTGpQqWhqS9nhquBEKDuLWAmyI4ILUl5WTs +# 9/S/fmNZJQ96LjlXdqJxqgaKD4kWumGnEcua2A5HmoDF0M2n0O99g/DhO3EJ3110 +# mCIIYdqwUB5vvfHhAN/nMQekkzr3ZUd46PioSKv33nJ+YWtvd6mBy6cJrDm77MbL +# 2IK0cs0d9LiFAR6A+xuJKlQ5slvayA1VmXqHczsI5pgt6o3gMy4SKfXAL1QnIffI +# rE7aKLixqduWsqdCosnPGUFN4Ib5KpqjEWYw07t0MkvfY3v1mYovG8chr1m1rtxE +# PJdQcdeh0sVV42neV8HR3jDA/czmTfsNv11P6Z0eGTgvvM9YBS7vDaBQNdrvCScc +# 1bN+NR4Iuto229Nfj950iEkSoYIDrTCCApUCAQEwgf6hgdSkgdEwgc4xCzAJBgNV +# BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4w +# HAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKTAnBgNVBAsTIE1pY3Jvc29m +# dCBPcGVyYXRpb25zIFB1ZXJ0byBSaWNvMSYwJAYDVQQLEx1UaGFsZXMgVFNTIEVT +# Tjo3MjhELUM0NUYtRjlFQjElMCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAg +# U2VydmljZaIlCgEBMAkGBSsOAwIaBQADFQBnQlpxrvQi2lklNcOL1G5qmRJdZ6CB +# 3jCB26SB2DCB1TELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAO +# BgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEp +# MCcGA1UECxMgTWljcm9zb2Z0IE9wZXJhdGlvbnMgUHVlcnRvIFJpY28xJzAlBgNV +# BAsTHm5DaXBoZXIgTlRTIEVTTjo1N0Y2LUMxRTAtNTU0QzErMCkGA1UEAxMiTWlj +# cm9zb2Z0IFRpbWUgU291cmNlIE1hc3RlciBDbG9jazANBgkqhkiG9w0BAQUFAAIF +# AN90i2AwIhgPMjAxODEwMjAwMDU3MDRaGA8yMDE4MTAyMTAwNTcwNFowdDA6Bgor +# BgEEAYRZCgQBMSwwKjAKAgUA33SLYAIBADAHAgEAAgIbuDAHAgEAAgIVjTAKAgUA +# 33Xc4AIBADA2BgorBgEEAYRZCgQCMSgwJjAMBgorBgEEAYRZCgMBoAowCAIBAAID +# FuNgoQowCAIBAAIDB6EgMA0GCSqGSIb3DQEBBQUAA4IBAQBN4fOQLT2kR2D+iliM +# qCyd/OohEKUylqoyzmbzHUcPoCOffrEfwv62lLeZGdZt+IvIcPK1SflWPZIhGkl1 +# aCiGf1R505R28926nkarMfFt6C7z4ZrpoEyt3N4YGtl1183lVrRh2xLAh4iPdisc +# bumnWk/n/SPjD5VYBulF6G/TQ0AeVH/d3YjH/DqSZ2W19kvLQEGaTFtGPsUr27zt +# V3O1Wv8Tmyz61GjZYQYFfOrNkemP6aq9JN4nls3np+dcjRUCMZMM7hSsIAirZw4v +# 
LHR/0b2LbjCZLJyql8SVseNMta3SChDy66eU5VxCK7V2WM2odfVkftCA5GoZYKxj +# YXf2MYIC9TCCAvECAQEwgZMwfDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hp +# bmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jw +# b3JhdGlvbjEmMCQGA1UEAxMdTWljcm9zb2Z0IFRpbWUtU3RhbXAgUENBIDIwMTAC +# EzMAAADT0JRWnQ69gwYAAAAAANMwDQYJYIZIAWUDBAIBBQCgggEyMBoGCSqGSIb3 +# DQEJAzENBgsqhkiG9w0BCRABBDAvBgkqhkiG9w0BCQQxIgQgGjZ7SYiSQ2SycmjH +# mRU95/+zeeGqS6SRp9VT53z7lqIwgeIGCyqGSIb3DQEJEAIMMYHSMIHPMIHMMIGx +# BBRnQlpxrvQi2lklNcOL1G5qmRJdZzCBmDCBgKR+MHwxCzAJBgNVBAYTAlVTMRMw +# EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVN +# aWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0 +# YW1wIFBDQSAyMDEwAhMzAAAA09CUVp0OvYMGAAAAAADTMBYEFFy/e6WhhXZMooPh +# kcyhgus6M/jaMA0GCSqGSIb3DQEBCwUABIIBAJaEchohlJu1U88lEtvq9B34RuR+ +# GcmQ+wI6es1eEc+DSbh+glwks1I/yb8KHh6PPu0PiGuVmEH760Bt6JxmgD16yxF0 +# 9rzwnq5RkxrXQxxGVfvBBio5PsFOahIv2qqENqUb62ikL2sPxZpHdNe7JzcJVvqy +# eOr9YbK7w75Jl2gbdzar4loQVLLD9bojPaPciSztqRIUsVuiqj2Dq1Vvpujpc8ny +# MZAsWZ0Y5u2lQaVd6z150snjbOMqedGa32j/khlp0sWaOei3kS+tYddPIIAvTKso +# ScY1cd1u4uFUxk6uJEasG1IrsSmsgqowwQyjOv0mFEBBtxdWxfX5ewzysyg= +# SIG # End signature block diff --git a/qemu/deps/diskspd/diskspd.exe b/qemu/deps/diskspd/diskspd.exe new file mode 100644 index 0000000000000000000000000000000000000000..43f2f800a66af7cd09c80a885b567626617a0b8b Binary files /dev/null and b/qemu/deps/diskspd/diskspd.exe differ diff --git a/qemu/deps/performance/MoonGen.zip b/qemu/deps/performance/MoonGen.zip new file mode 100644 index 0000000000000000000000000000000000000000..85f2b03cb0b72342d82ead24197e6af6a0d8b09d Binary files /dev/null and b/qemu/deps/performance/MoonGen.zip differ diff --git a/qemu/deps/performance/start_testpmd.py b/qemu/deps/performance/start_testpmd.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1a70aa0541253d73de1dc27b20cdaabeebd5f8 --- /dev/null +++ b/qemu/deps/performance/start_testpmd.py @@ -0,0 +1,148 @@ +import logging +import sys +import time +import locale +import pexpect + +from six import string_types + + +nic1_driver = sys.argv[1] +nic2_driver = sys.argv[2] +nic1 = sys.argv[3] +nic2 = sys.argv[4] +cores = int(sys.argv[5]) +queues = int(sys.argv[6]) +running_time = int(sys.argv[7]) + +ENCODING = locale.getpreferredencoding() + + +class TestPMD(object): + + def __init__(self): + + self.proc = None + testpmd_cmd = "/usr/bin/testpmd " + self.testpmd_exec = testpmd_cmd + + def launch(self, nic1_driver, nic2_driver, nic1, nic2, cores, queues): + + cmd = ("-l 1,2,3 -n 4 -d %s -d %s" + " -w %s -w %s " + " -- " + " -i --nb-cores=%d " + " --disable-rss --rxd=512 --txd=512 " + " --rxq=%d --txq=%d" % ( + nic1_driver, nic2_driver, nic1, nic2, cores, queues, queues)) + cmd_str = self.testpmd_exec + cmd + logging.info("[cmd] %s" % cmd_str) + try: + self.proc = pexpect.spawn(cmd_str) + self.proc.expect("testpmd>") + except pexpect.ExceptionPexpect as e: + logging.error(e) + return False + + def start(self): + self.command("start") + + def stop(self): + self.command("stop") + + def quit(self): + self.proc.sendline("quit") + logging.info("testpmd> quit") + print("testpmd> quit") + self.proc.expect("Bye...") + logging.info(self.proc.before) + line_list = to_text(self.proc.before).split("\n") + for subline in line_list: + if len(subline.strip()) > 0: + print(subline) + return to_text(self.proc.before) + + def set_port_stats(self): + self.command("show port stats all") + + def set_portlist(self, portlist): + self.command("set portlist %s" % portlist) + + def 
get_config_fwd(self): + self.command("show config fwd") + + def set_fwd_mac_retry(self): + self.command('set fwd mac retry') + + def set_vlan_0(self): + self.command('vlan set strip on 0') + + def set_vlan_1(self): + self.command('vlan set strip on 1') + + def command(self, cmd): + self.proc.sendline(cmd) + self.proc.expect("testpmd>") + logging.info("testpmd> %s" % cmd) + print("testpmd> %s" % cmd) + logging.info(self.proc.before) + line_list = to_text(self.proc.before).split("\n") + for subline in line_list: + if len(subline.strip()) > 0: + print(subline) + + return to_text(self.proc.before) + + +def start_testpmd(nic1_driver, nic2_driver, nic1, nic2, cores, queues): + + my_testpmd = TestPMD() + my_testpmd.launch( + nic1_driver=nic1_driver, + nic2_driver=nic2_driver, + nic1=nic1, + nic2=nic2, + cores=cores, + queues=queues) + + my_testpmd.set_fwd_mac_retry() + my_testpmd.set_vlan_0() + my_testpmd.set_vlan_1() + + my_testpmd.start() + my_testpmd.set_port_stats() + + # testmpd will quit after running_time + start_time = time.time() + end_time = start_time + running_time + while time.time() < end_time: + time.sleep(10) + print("time.time=%s" % time.time) + my_testpmd.stop() + my_testpmd.set_port_stats() + my_testpmd.quit() + + +def to_text(data): + """ + Convert anything to text decoded text + + When the data is bytes, it's decoded. When it's not of string types + it's re-formatted into text and returned. Otherwise (it's string) + it's returned unchanged. + + :param data: data to be transformed into text + :type data: either bytes or other data that will be returned + unchanged + """ + if isinstance(data, bytes): + return data.decode(ENCODING) + elif not isinstance(data, string_types): + if sys.version_info[0] < 3: + return unicode(data) # pylint: disable=E0602 + else: + return str(data) + return data + + +start_testpmd(nic1_driver, nic2_driver, nic1, nic2, cores, queues) diff --git a/qemu/deps/playbook.yml b/qemu/deps/playbook.yml index f5f571f87172313716c5d97eafd9b545bfa245b5..851f19f3b162e1aec224a036b0eb6c047179df4b 100644 --- a/qemu/deps/playbook.yml +++ b/qemu/deps/playbook.yml @@ -11,7 +11,7 @@ bs_options: "{{bootstrap_options}}" host_log_dir: "{{host_log_files_dir}}" cmd_line: "{{command_line}}" - flag: True + acc_cancel: "{{accept_cancel}}" tasks: @@ -40,18 +40,35 @@ /usr/bin/update-ca-trust extract when: dnld_cert_output.changed -# The following two tasks need to be modified, check if kar already deployed, if true, skip. 
- - name: clone kar - git: - repo: '{{kar_repo}}' - dest: '{{kar_local}}' - force: yes + - name: Check kar folder exists + stat: + path: '{{kar_local}}' + register: kar_folder - - name: Run bootstrap script - command: ./Bootstrap.sh {{bs_options}} - args: - chdir: '{{kar_local}}' - register: bootstrap_result + - block: + - name: Clone kar + git: + repo: '{{kar_repo}}' + dest: '{{kar_local}}' + when: not kar_folder.stat.exists + + - name: Run Bootstrap script + command: ./Bootstrap.sh {{bs_options}} + args: + chdir: '{{kar_local}}' + when: not kar_folder.stat.exists + register: bootstrap_result + rescue: + - name: Remove kar folder when bootstrap failed + file: + path: '{{kar_local}}' + state: absent + - name: Clean env when bootstrap failed + pip: + name: aexpect, avocado-framework, avocado-framework-plugin-vt, avocado-framework-plugin-result-html + state: absent + - fail: + msg: "bootstrap script failed" - name: Run case shell: @@ -70,6 +87,14 @@ msg: "Log dir is created" when: guest_log_dir.stdout != "" + - name: Get results.json file from guest vm + slurp: + src: '{{ guest_log_dir.stdout}}/latest/results.json' + register: results_json + + - set_fact: + results_json: "{{ results_json.content|b64decode|from_json }}" + - name: Compress log directory to log.bz2 archive: path: '{{guest_log_dir.stdout}}/latest/' @@ -89,3 +114,12 @@ shell: echo "Run test case failed" failed_when: run_case_result is failed + + - debug: + var: results_json.cancel + + - name: Handle cancelled test cases + fail: + msg: "Test failed! There are unexpected cancelled test cases." + when: + - results_json.cancel != 0 and not acc_cancel|bool diff --git a/qemu/deps/virtio-trace/Makefile b/qemu/deps/virtio-trace/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7843ebcda71dec9d0616663675eabd40106fdbfc --- /dev/null +++ b/qemu/deps/virtio-trace/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +CC = gcc +CFLAGS = -O2 -Wall -pthread + +all: trace-agent + +.c.o: + $(CC) $(CFLAGS) -c $^ -o $@ + +trace-agent: trace-agent.o trace-agent-ctl.o trace-agent-rw.o + $(CC) $(CFLAGS) -o $@ $^ + +clean: + rm -f *.o trace-agent diff --git a/qemu/deps/virtio-trace/README b/qemu/deps/virtio-trace/README new file mode 100644 index 0000000000000000000000000000000000000000..b64845b823abde33b051563585d4c718a051a501 --- /dev/null +++ b/qemu/deps/virtio-trace/README @@ -0,0 +1,118 @@ +Trace Agent for virtio-trace +============================ + +Trace agent is a user tool for sending trace data of a guest to a Host in low +overhead. Trace agent has the following functions: + - splice a page of ring-buffer to read_pipe without memory copying + - splice the page from write_pipe to virtio-console without memory copying + - write trace data to stdout by using -o option + - controlled by start/stop orders from a Host + +The trace agent operates as follows: + 1) Initialize all structures. + 2) Create a read/write thread per CPU. Each thread is bound to a CPU. + The read/write threads hold it. + 3) A controller thread does poll() for a start order of a host. + 4) After the controller of the trace agent receives a start order from a host, + the controller wake read/write threads. + 5) The read/write threads start to read trace data from ring-buffers and + write the data to virtio-serial. + 6) If the controller receives a stop order from a host, the read/write threads + stop to read trace data. 
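+
+For illustration, the data path in step 5) reduces to a splice() chain per
+CPU. The following is a minimal sketch only (error handling omitted; trace_fd
+and vport_fd are placeholder descriptors for the per-CPU trace_pipe_raw file
+and the virtio-serial port); the real implementation is in trace-agent-rw.c:
+
+	int pfd[2];	/* pfd[0]: read end, pfd[1]: write end */
+	pipe2(pfd, O_NONBLOCK);
+	/* move a page from the ftrace ring-buffer into the pipe ... */
+	ssize_t n = splice(trace_fd, NULL, pfd[1], NULL, pipe_size,
+			   SPLICE_F_MOVE | SPLICE_F_MORE);
+	/* ... and from the pipe out to virtio-serial, without copying */
+	if (n > 0)
+		splice(pfd[0], NULL, vport_fd, NULL, n,
+		       SPLICE_F_MOVE | SPLICE_F_MORE);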
+
+
+Files
+=====
+
+README: this file
+Makefile: Makefile of trace agent for virtio-trace
+trace-agent.c: includes main function, sets up for operating trace agent
+trace-agent.h: includes all structures and some macros
+trace-agent-ctl.c: includes controller function for read/write threads
+trace-agent-rw.c: includes read/write threads function
+
+
+Setup
+=====
+
+To use this trace agent for virtio-trace, we need to prepare some virtio-serial
+I/Fs.
+
+1) Make FIFOs in a host
+   virtio-trace uses virtio-serial pipes as trace data paths, one pipe per
+guest CPU plus one control path, so FIFOs (named pipes) should be created as
+follows:
+   # mkdir /tmp/virtio-trace/
+   # mkfifo /tmp/virtio-trace/trace-path-cpu{0,1,2,...,X}.{in,out}
+   # mkfifo /tmp/virtio-trace/agent-ctl-path.{in,out}
+
+For example, if a guest uses three CPUs, the names are
+   trace-path-cpu{0,1,2}.{in,out}
+and
+   agent-ctl-path.{in,out}.
+
+2) Set up virtio-serial pipes in a host
+   Add the following qemu options to use the virtio-serial pipes.
+
+   ##virtio-serial device##
+   -device virtio-serial-pci,id=virtio-serial0\
+   ##control path##
+   -chardev pipe,id=charchannel0,path=/tmp/virtio-trace/agent-ctl-path\
+   -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,\
+    id=channel0,name=agent-ctl-path\
+   ##data path##
+   -chardev pipe,id=charchannel1,path=/tmp/virtio-trace/trace-path-cpu0\
+   -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,\
+    id=channel1,name=trace-path-cpu0\
+   ...
+
+If you manage guests with libvirt, add the following tags to domain XML files.
+Then, libvirt passes the same command options to qemu.
+
+	<channel type='pipe'>
+	   <source path='/tmp/virtio-trace/agent-ctl-path'/>
+	   <target type='virtio' name='agent-ctl-path'/>
+	   <address type='virtio-serial' controller='0' bus='0' port='0'/>
+	</channel>
+	<channel type='pipe'>
+	   <source path='/tmp/virtio-trace/trace-path-cpu0'/>
+	   <target type='virtio' name='trace-path-cpu0'/>
+	   <address type='virtio-serial' controller='0' bus='0' port='1'/>
+	</channel>
+ + ... +Here, chardev names are restricted to trace-path-cpuX and agent-ctl-path. For +example, if a guest use three CPUs, chardev names should be trace-path-cpu0, +trace-path-cpu1, trace-path-cpu2, and agent-ctl-path. + +3) Boot the guest + You can find some chardev in /dev/virtio-ports/ in the guest. + + +Run +=== + +0) Build trace agent in a guest + $ make + +1) Enable ftrace in the guest + + # echo 1 > /sys/kernel/debug/tracing/events/sched/enable + +2) Run trace agent in the guest + This agent must be operated as root. + # ./trace-agent +read/write threads in the agent wait for start order from host. If you add -o +option, trace data are output via stdout in the guest. + +3) Open FIFO in a host + # cat /tmp/virtio-trace/trace-path-cpu0.out +If a host does not open these, trace data get stuck in buffers of virtio. Then, +the guest will stop by specification of chardev in QEMU. This blocking mode may +be solved in the future. + +4) Start to read trace data by ordering from a host + A host injects read start order to the guest via virtio-serial. + # echo 1 > /tmp/virtio-trace/agent-ctl-path.in + +5) Stop to read trace data by ordering from a host + A host injects read stop order to the guest via virtio-serial. + # echo 0 > /tmp/virtio-trace/agent-ctl-path.in diff --git a/qemu/deps/virtio-trace/trace-agent-ctl.c b/qemu/deps/virtio-trace/trace-agent-ctl.c new file mode 100644 index 0000000000000000000000000000000000000000..73d253d4b559a7274dc20c35c801f3a9ac669054 --- /dev/null +++ b/qemu/deps/virtio-trace/trace-agent-ctl.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Controller of read/write threads for virtio-trace + * + * Copyright (C) 2012 Hitachi, Ltd. + * Created by Yoshihiro Yunomae + * Masami Hiramatsu + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "trace-agent.h" + +#define HOST_MSG_SIZE 256 +#define EVENT_WAIT_MSEC 100 + +static volatile sig_atomic_t global_signal_val; +bool global_sig_receive; /* default false */ +bool global_run_operation; /* default false*/ + +/* Handle SIGTERM/SIGINT/SIGQUIT to exit */ +static void signal_handler(int sig) +{ + global_signal_val = sig; +} + +int rw_ctl_init(const char *ctl_path) +{ + int ctl_fd; + + ctl_fd = open(ctl_path, O_RDONLY); + if (ctl_fd == -1) { + pr_err("Cannot open ctl_fd\n"); + goto error; + } + + return ctl_fd; + +error: + exit(EXIT_FAILURE); +} + +static int wait_order(int ctl_fd) +{ + struct pollfd poll_fd; + int ret = 0; + + while (!global_sig_receive) { + poll_fd.fd = ctl_fd; + poll_fd.events = POLLIN; + + ret = poll(&poll_fd, 1, EVENT_WAIT_MSEC); + + if (global_signal_val) { + global_sig_receive = true; + pr_info("Receive interrupt %d\n", global_signal_val); + + /* Wakes rw-threads when they are sleeping */ + if (!global_run_operation) + pthread_cond_broadcast(&cond_wakeup); + + ret = -1; + break; + } + + if (ret < 0) { + pr_err("Polling error\n"); + goto error; + } + + if (ret) + break; + }; + + return ret; + +error: + exit(EXIT_FAILURE); +} + +/* + * contol read/write threads by handling global_run_operation + */ +void *rw_ctl_loop(int ctl_fd) +{ + ssize_t rlen; + char buf[HOST_MSG_SIZE]; + int ret; + + /* Setup signal handlers */ + signal(SIGTERM, signal_handler); + signal(SIGINT, signal_handler); + signal(SIGQUIT, signal_handler); + + while (!global_sig_receive) { + + ret = wait_order(ctl_fd); + if (ret < 0) + break; + + rlen = read(ctl_fd, buf, sizeof(buf)); + if (rlen < 0) { + pr_err("read data error in ctl thread\n"); + goto error; + } + + if 
(rlen == 2 && buf[0] == '1') { + /* + * If host writes '1' to a control path, + * this controller wakes all read/write threads. + */ + global_run_operation = true; + pthread_cond_broadcast(&cond_wakeup); + pr_debug("Wake up all read/write threads\n"); + } else if (rlen == 2 && buf[0] == '0') { + /* + * If host writes '0' to a control path, read/write + * threads will wait for notification from Host. + */ + global_run_operation = false; + pr_debug("Stop all read/write threads\n"); + } else + pr_info("Invalid host notification: %s\n", buf); + } + + return NULL; + +error: + exit(EXIT_FAILURE); +} diff --git a/qemu/deps/virtio-trace/trace-agent-rw.c b/qemu/deps/virtio-trace/trace-agent-rw.c new file mode 100644 index 0000000000000000000000000000000000000000..ddfe7875eb16a861203b13661eb4d929c8d5b8a4 --- /dev/null +++ b/qemu/deps/virtio-trace/trace-agent-rw.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Read/write thread of a guest agent for virtio-trace + * + * Copyright (C) 2012 Hitachi, Ltd. + * Created by Yoshihiro Yunomae + * Masami Hiramatsu + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include "trace-agent.h" + +#define READ_WAIT_USEC 100000 + +void *rw_thread_info_new(void) +{ + struct rw_thread_info *rw_ti; + + rw_ti = zalloc(sizeof(struct rw_thread_info)); + if (rw_ti == NULL) { + pr_err("rw_thread_info zalloc error\n"); + exit(EXIT_FAILURE); + } + + rw_ti->cpu_num = -1; + rw_ti->in_fd = -1; + rw_ti->out_fd = -1; + rw_ti->read_pipe = -1; + rw_ti->write_pipe = -1; + rw_ti->pipe_size = PIPE_INIT; + + return rw_ti; +} + +void *rw_thread_init(int cpu, const char *in_path, const char *out_path, + bool stdout_flag, unsigned long pipe_size, + struct rw_thread_info *rw_ti) +{ + int data_pipe[2]; + + rw_ti->cpu_num = cpu; + + /* set read(input) fd */ + rw_ti->in_fd = open(in_path, O_RDONLY); + if (rw_ti->in_fd == -1) { + pr_err("Could not open in_fd (CPU:%d)\n", cpu); + goto error; + } + + /* set write(output) fd */ + if (!stdout_flag) { + /* virtio-serial output mode */ + rw_ti->out_fd = open(out_path, O_WRONLY); + if (rw_ti->out_fd == -1) { + pr_err("Could not open out_fd (CPU:%d)\n", cpu); + goto error; + } + } else + /* stdout mode */ + rw_ti->out_fd = STDOUT_FILENO; + + if (pipe2(data_pipe, O_NONBLOCK) < 0) { + pr_err("Could not create pipe in rw-thread(%d)\n", cpu); + goto error; + } + + /* + * Size of pipe is 64kB in default based on fs/pipe.c. + * To read/write trace data speedy, pipe size is changed. 
+ */ + if (fcntl(*data_pipe, F_SETPIPE_SZ, pipe_size) < 0) { + pr_err("Could not change pipe size in rw-thread(%d)\n", cpu); + goto error; + } + + rw_ti->read_pipe = data_pipe[1]; + rw_ti->write_pipe = data_pipe[0]; + rw_ti->pipe_size = pipe_size; + + return NULL; + +error: + exit(EXIT_FAILURE); +} + +/* Bind a thread to a cpu */ +static void bind_cpu(int cpu_num) +{ + cpu_set_t mask; + + CPU_ZERO(&mask); + CPU_SET(cpu_num, &mask); + + /* bind my thread to cpu_num by assigning zero to the first argument */ + if (sched_setaffinity(0, sizeof(mask), &mask) == -1) + pr_err("Could not set CPU#%d affinity\n", (int)cpu_num); +} + +static void *rw_thread_main(void *thread_info) +{ + ssize_t rlen, wlen; + ssize_t ret; + struct rw_thread_info *ts = (struct rw_thread_info *)thread_info; + + bind_cpu(ts->cpu_num); + + while (1) { + /* Wait for a read order of trace data by Host OS */ + if (!global_run_operation) { + pthread_mutex_lock(&mutex_notify); + pthread_cond_wait(&cond_wakeup, &mutex_notify); + pthread_mutex_unlock(&mutex_notify); + } + + if (global_sig_receive) + break; + + /* + * Each thread read trace_pipe_raw of each cpu bounding the + * thread, so contention of multi-threads does not occur. + */ + rlen = splice(ts->in_fd, NULL, ts->read_pipe, NULL, + ts->pipe_size, SPLICE_F_MOVE | SPLICE_F_MORE); + + if (rlen < 0) { + pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num); + goto error; + } else if (rlen == 0) { + /* + * If trace data do not exist or are unreadable not + * for exceeding the page size, splice_read returns + * NULL. Then, this waits for being filled the data in a + * ring-buffer. + */ + usleep(READ_WAIT_USEC); + pr_debug("Read retry(cpu:%d)\n", ts->cpu_num); + continue; + } + + wlen = 0; + + do { + ret = splice(ts->write_pipe, NULL, ts->out_fd, NULL, + rlen - wlen, + SPLICE_F_MOVE | SPLICE_F_MORE); + + if (ret < 0) { + pr_err("Splice_write in rw-thread(%d)\n", + ts->cpu_num); + goto error; + } else if (ret == 0) + /* + * When host reader is not in time for reading + * trace data, guest will be stopped. This is + * because char dev in QEMU is not supported + * non-blocking mode. Then, writer might be + * sleep in that case. + * This sleep will be removed by supporting + * non-blocking mode. + */ + sleep(1); + wlen += ret; + } while (wlen < rlen); + } + + return NULL; + +error: + exit(EXIT_FAILURE); +} + + +pthread_t rw_thread_run(struct rw_thread_info *rw_ti) +{ + int ret; + pthread_t rw_thread_per_cpu; + + ret = pthread_create(&rw_thread_per_cpu, NULL, rw_thread_main, rw_ti); + if (ret != 0) { + pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num); + exit(EXIT_FAILURE); + } + + return rw_thread_per_cpu; +} diff --git a/qemu/deps/virtio-trace/trace-agent.c b/qemu/deps/virtio-trace/trace-agent.c new file mode 100644 index 0000000000000000000000000000000000000000..cdfe77c2b4c8fa561671b1671a9788d457b973c4 --- /dev/null +++ b/qemu/deps/virtio-trace/trace-agent.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Guest agent for virtio-trace + * + * Copyright (C) 2012 Hitachi, Ltd. 
+ * Created by Yoshihiro Yunomae + * Masami Hiramatsu + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include "trace-agent.h" + +#define PAGE_SIZE (sysconf(_SC_PAGE_SIZE)) +#define PIPE_DEF_BUFS 16 +#define PIPE_MIN_SIZE (PAGE_SIZE*PIPE_DEF_BUFS) +#define PIPE_MAX_SIZE (1024*1024) +#define READ_PATH_FMT \ + "/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw" +#define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d" +#define CTL_PATH "/dev/virtio-ports/agent-ctl-path" + +pthread_mutex_t mutex_notify = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cond_wakeup = PTHREAD_COND_INITIALIZER; + +static int get_total_cpus(void) +{ + int nr_cpus = (int)sysconf(_SC_NPROCESSORS_CONF); + + if (nr_cpus <= 0) { + pr_err("Could not read cpus\n"); + goto error; + } else if (nr_cpus > MAX_CPUS) { + pr_err("Exceed max cpus(%d)\n", (int)MAX_CPUS); + goto error; + } + + return nr_cpus; + +error: + exit(EXIT_FAILURE); +} + +static void *agent_info_new(void) +{ + struct agent_info *s; + int i; + + s = zalloc(sizeof(struct agent_info)); + if (s == NULL) { + pr_err("agent_info zalloc error\n"); + exit(EXIT_FAILURE); + } + + s->pipe_size = PIPE_INIT; + s->use_stdout = false; + s->cpus = get_total_cpus(); + s->ctl_fd = -1; + + /* read/write threads init */ + for (i = 0; i < s->cpus; i++) + s->rw_ti[i] = rw_thread_info_new(); + + return s; +} + +static unsigned long parse_size(const char *arg) +{ + unsigned long value, round; + char *ptr; + + value = strtoul(arg, &ptr, 10); + switch (*ptr) { + case 'K': case 'k': + value <<= 10; + break; + case 'M': case 'm': + value <<= 20; + break; + default: + break; + } + + if (value > PIPE_MAX_SIZE) { + pr_err("Pipe size must be less than 1MB\n"); + goto error; + } else if (value < PIPE_MIN_SIZE) { + pr_err("Pipe size must be over 64KB\n"); + goto error; + } + + /* Align buffer size with page unit */ + round = value & (PAGE_SIZE - 1); + value = value - round; + + return value; +error: + return 0; +} + +static void usage(char const *prg) +{ + pr_err("usage: %s [-h] [-o] [-s ]\n", prg); +} + +static const char *make_path(int cpu_num, bool this_is_write_path) +{ + int ret; + char *buf; + + buf = zalloc(PATH_MAX); + if (buf == NULL) { + pr_err("Could not allocate buffer\n"); + goto error; + } + + if (this_is_write_path) + /* write(output) path */ + ret = snprintf(buf, PATH_MAX, WRITE_PATH_FMT, cpu_num); + else + /* read(input) path */ + ret = snprintf(buf, PATH_MAX, READ_PATH_FMT, cpu_num); + + if (ret <= 0) { + pr_err("Failed to generate %s path(CPU#%d):%d\n", + this_is_write_path ? 
"read" : "write", cpu_num, ret); + goto error; + } + + return buf; + +error: + free(buf); + return NULL; +} + +static const char *make_input_path(int cpu_num) +{ + return make_path(cpu_num, false); +} + +static const char *make_output_path(int cpu_num) +{ + return make_path(cpu_num, true); +} + +static void *agent_info_init(struct agent_info *s) +{ + int cpu; + const char *in_path = NULL; + const char *out_path = NULL; + + /* init read/write threads */ + for (cpu = 0; cpu < s->cpus; cpu++) { + /* set read(input) path per read/write thread */ + in_path = make_input_path(cpu); + if (in_path == NULL) + goto error; + + /* set write(output) path per read/write thread*/ + if (!s->use_stdout) { + out_path = make_output_path(cpu); + if (out_path == NULL) + goto error; + } else + /* stdout mode */ + pr_debug("stdout mode\n"); + + rw_thread_init(cpu, in_path, out_path, s->use_stdout, + s->pipe_size, s->rw_ti[cpu]); + } + + /* init controller of read/write threads */ + s->ctl_fd = rw_ctl_init((const char *)CTL_PATH); + + return NULL; + +error: + exit(EXIT_FAILURE); +} + +static void *parse_args(int argc, char *argv[], struct agent_info *s) +{ + int cmd; + unsigned long size; + + while ((cmd = getopt(argc, argv, "hos:")) != -1) { + switch (cmd) { + /* stdout mode */ + case 'o': + s->use_stdout = true; + break; + /* size of pipe */ + case 's': + size = parse_size(optarg); + if (size == 0) + goto error; + s->pipe_size = size; + break; + case 'h': + default: + usage(argv[0]); + goto error; + } + } + + agent_info_init(s); + + return NULL; + +error: + exit(EXIT_FAILURE); +} + +static void agent_main_loop(struct agent_info *s) +{ + int cpu; + pthread_t rw_thread_per_cpu[MAX_CPUS]; + + /* Start all read/write threads */ + for (cpu = 0; cpu < s->cpus; cpu++) + rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); + + rw_ctl_loop(s->ctl_fd); + + /* Finish all read/write threads */ + for (cpu = 0; cpu < s->cpus; cpu++) { + int ret; + + ret = pthread_join(rw_thread_per_cpu[cpu], NULL); + if (ret != 0) { + pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu); + exit(EXIT_FAILURE); + } + } +} + +static void agent_info_free(struct agent_info *s) +{ + int i; + + close(s->ctl_fd); + for (i = 0; i < s->cpus; i++) { + close(s->rw_ti[i]->in_fd); + close(s->rw_ti[i]->out_fd); + close(s->rw_ti[i]->read_pipe); + close(s->rw_ti[i]->write_pipe); + free(s->rw_ti[i]); + } + free(s); +} + +int main(int argc, char *argv[]) +{ + struct agent_info *s = NULL; + + s = agent_info_new(); + parse_args(argc, argv, s); + + agent_main_loop(s); + + agent_info_free(s); + + return 0; +} diff --git a/qemu/deps/virtio-trace/trace-agent.h b/qemu/deps/virtio-trace/trace-agent.h new file mode 100644 index 0000000000000000000000000000000000000000..e67885969f0e1ee2704844ecfe5b7df0c029d416 --- /dev/null +++ b/qemu/deps/virtio-trace/trace-agent.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TRACE_AGENT_H__ +#define __TRACE_AGENT_H__ +#include +#include + +#define MAX_CPUS 256 +#define PIPE_INIT (1024*1024) + +/* + * agent_info - structure managing total information of guest agent + * @pipe_size: size of pipe (default 1MB) + * @use_stdout: set to true when o option is added (default false) + * @cpus: total number of CPUs + * @ctl_fd: fd of control path, /dev/virtio-ports/agent-ctl-path + * @rw_ti: structure managing information of read/write threads + */ +struct agent_info { + unsigned long pipe_size; + bool use_stdout; + int cpus; + int ctl_fd; + struct rw_thread_info *rw_ti[MAX_CPUS]; +}; + +/* + * rw_thread_info - 
structure managing a read/write thread a cpu + * @cpu_num: cpu number operating this read/write thread + * @in_fd: fd of reading trace data path in cpu_num + * @out_fd: fd of writing trace data path in cpu_num + * @read_pipe: fd of read pipe + * @write_pipe: fd of write pipe + * @pipe_size: size of pipe (default 1MB) + */ +struct rw_thread_info { + int cpu_num; + int in_fd; + int out_fd; + int read_pipe; + int write_pipe; + unsigned long pipe_size; +}; + +/* use for stopping rw threads */ +extern bool global_sig_receive; + +/* use for notification */ +extern bool global_run_operation; +extern pthread_mutex_t mutex_notify; +extern pthread_cond_t cond_wakeup; + +/* for controller of read/write threads */ +extern int rw_ctl_init(const char *ctl_path); +extern void *rw_ctl_loop(int ctl_fd); + +/* for trace read/write thread */ +extern void *rw_thread_info_new(void); +extern void *rw_thread_init(int cpu, const char *in_path, const char *out_path, + bool stdout_flag, unsigned long pipe_size, + struct rw_thread_info *rw_ti); +extern pthread_t rw_thread_run(struct rw_thread_info *rw_ti); + +static inline void *zalloc(size_t size) +{ + return calloc(1, size); +} + +#define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__) +#define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__) +#ifdef DEBUG +#define pr_debug(format, ...) fprintf(stderr, format, ## __VA_ARGS__) +#else +#define pr_debug(format, ...) do {} while (0) +#endif + +#endif /*__TRACE_AGENT_H__*/ diff --git a/qemu/deps/watchdog/deadlock_test/Makefile b/qemu/deps/watchdog/deadlock_test/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..63f0b1de9cb40d82094efbe1dc1cf59cce3b57c6 --- /dev/null +++ b/qemu/deps/watchdog/deadlock_test/Makefile @@ -0,0 +1,11 @@ +obj-m := deadlock_test.o + +KVERS = $(shell uname -r) + +build: kernel_modules + +kernel_modules: + make -C /lib/modules/$(KVERS)/build M=$(CURDIR) modules + +clean: + make -C /lib/modules/$(KVERS)/build M=$(CURDIR) clean diff --git a/qemu/deps/watchdog/deadlock_test/deadlock_test.c b/qemu/deps/watchdog/deadlock_test/deadlock_test.c new file mode 100644 index 0000000000000000000000000000000000000000..de2886b646646c305e03dab65eb04fd2ec6bb79e --- /dev/null +++ b/qemu/deps/watchdog/deadlock_test/deadlock_test.c @@ -0,0 +1,33 @@ +#include +#include +#include + +static int __init hello_init(void) +{ + unsigned long current_i, stamp_30; + printk(KERN_INFO "insmod deadlock_test!!!\n"); + current_i = jiffies; + stamp_30 = current_i + 30*HZ; + printk(KERN_INFO "%lu\n", current_i); + printk(KERN_INFO "%lu\n", stamp_30); + local_irq_disable(); + while(current_i != stamp_30) + { + current_i = jiffies; + } + printk(KERN_INFO "30s is over!!!\n"); + return 0; +} + +module_init(hello_init); + +static void __exit hello_exit(void) +{ + printk(KERN_INFO "deadlockup test exit\n"); +} +module_exit(hello_exit); + +MODULE_AUTHOR("zhk "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("deadlockup test module"); +MODULE_ALIAS("a simplest module"); diff --git a/qemu/tests/aio_test.py b/qemu/tests/aio_test.py index 6f13c235818237b47a6c0922fde9b7f0fac9a718..4405e054ea16f8e3d109af36bd1ee48e7cdbe9d1 100644 --- a/qemu/tests/aio_test.py +++ b/qemu/tests/aio_test.py @@ -95,7 +95,7 @@ def run_aio_tests(target): with chcwd(path): logging.debug("compile source code of QEMU") process.run("./configure") - cpu_count = cpu.online_cpus_count() + cpu_count = cpu.online_count() aio_path = "tests/test-aio" make_cmd = "make {aio_path} -j{cpu_count}".format( aio_path=aio_path, 
cpu_count=cpu_count) diff --git a/qemu/tests/ansible_test.py b/qemu/tests/ansible_test.py new file mode 100644 index 0000000000000000000000000000000000000000..60a0eef43b2b18f616d47a8f0672b97512057b4b --- /dev/null +++ b/qemu/tests/ansible_test.py @@ -0,0 +1,84 @@ +import os +import json +import logging + +from avocado.utils import process +from avocado.utils import software_manager + +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Ansible playbook basic test: + 1) Check ansible package exists + 2) Launch the guest + 3) Clone an ansible playbook repo + 4) Generate the ansible-playbook command + 5) Execute the playbook and verify the return status + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + sm = software_manager.SoftwareManager() + if not (sm.check_installed("ansible") or sm.install("ansible")): + test.cancel("ansible package install failed") + + guest_user = params["username"] + guest_passwd = params["password"] + ansible_callback_plugin = params.get("ansible_callback_plugin") + ansible_addl_opts = params.get("ansible_addl_opts", "") + ansible_ssh_extra_args = params["ansible_ssh_extra_args"] + ansible_extra_vars = params.get("ansible_extra_vars", "{}") + playbook_repo = params["playbook_repo"] + playbook_timeout = params.get_numeric("playbook_timeout") + playbook_dir = params.get("playbook_dir", + os.path.join(test.workdir, "ansible_playbook")) + toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) + # Use this directory to copy some logs back from the guest + test_harness_log_dir = test.logdir + + guest_ip_list = [] + for vm in env.get_all_vms(): + vm.verify_alive() + vm.wait_for_login() + guest_ip_list.append(vm.get_address()) + + logging.info("Cloning %s", playbook_repo) + process.run("git clone {src} {dst}".format(src=playbook_repo, + dst=playbook_dir), verbose=False) + + error_context.base_context("Generate playbook related options.", + logging.info) + extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, + "ansible_ssh_pass": guest_passwd, + "test_harness_log_dir": test_harness_log_dir} + extra_vars.update(json.loads(ansible_extra_vars)) + + ansible_cmd_options = ["ansible-playbook", + "-u {}".format(guest_user), + "-i {},".format(",".join(guest_ip_list)), + "-e '{}'".format(json.dumps(extra_vars)), + ansible_addl_opts, + toplevel_playbook] + ansible_cmd = r" ".join(ansible_cmd_options) + + error_context.context("Execute the ansible playbook.", logging.info) + env_vars = ({"ANSIBLE_STDOUT_CALLBACK": ansible_callback_plugin} + if ansible_callback_plugin else None) + logging.info("Command of ansible playbook: '%s'", ansible_cmd) + play_s, play_o = process.getstatusoutput(ansible_cmd, + timeout=playbook_timeout, + shell=False, env=env_vars) + ansible_log = "ansible_playbook.log" + with open(os.path.join(test_harness_log_dir, ansible_log), "w") as log_file: + log_file.write(play_o) + log_file.flush() + + if play_s != 0: + test.fail("Ansible playbook execution failed, please check the {} " + "for details.".format(ansible_log)) + logging.info("Ansible playbook execution passed.") diff --git a/qemu/tests/apicv_test.py b/qemu/tests/apicv_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7b99f703bcf440a0790e3d95554ce7b0b3d4ccb7 --- /dev/null +++ b/qemu/tests/apicv_test.py @@ -0,0 +1,109 @@ +import re +import os +import time +import logging + +from avocado.utils import process 
+from virttest import error_context +from virttest import env_process +from virttest import data_dir +from virttest import utils_net +from virttest import utils_netperf + + +@error_context.context_aware +def run(test, params, env): + """ + apicv test: + 1) Check if apicv is enabled on host, if not, enable it + 2) Boot guest and run netperf inside guest + 3) Record throughput and shutdown guest + 4) Disable apicv on host + 5) Boot guest and run netperf inside guest again + 6) Compare benchmark scores with step 3) + 7) Restore env, set apicv back + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def reload_module(value): + """ + Reload module + """ + process.system("rmmod %s" % module) + cmd = "modprobe %s %s=%s" % (module, mod_param, value) + process.system(cmd) + + def run_netperf(): + """ + Run netperf test, return average throughput + """ + error_context.context("Run netperf test", logging.info) + n_server.start() + n_client.session = session + throughput = 0 + for i in range(repeat_times): + output = n_client.start(server_address=host_ip, + test_option=params.get("test_option")) + throughput += float(re.findall(r"580\s+\d+\.?\d+\s+(\d+\.?\d+)", + output)[0]) + time.sleep(1) + n_server.stop() + return throughput/repeat_times + + module = params["module_name"] + mod_param = params["mod_param"] + error_context.context("Enable apicv on host", logging.info) + cmd = "cat /sys/module/%s/parameters/%s" % (module, mod_param) + ori_apicv = process.getoutput(cmd) + if ori_apicv != 'Y': + reload_module("Y") + + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params['main_vm']) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + + host_ip = utils_net.get_host_ip_address(params) + n_server = utils_netperf.NetperfServer( + address=host_ip, + netperf_path=params["server_path"], + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_server_link")), + username=params.get("host_username", "root"), + password=params.get("host_password")) + + n_client = utils_netperf.NetperfClient( + address=vm.get_address(), + netperf_path=params["client_path"], + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_client_link")), + client=params.get("shell_client", "ssh"), + port=params.get("shell_port"), + username=params.get("username"), + password=params.get("password"), + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#")) + + repeat_times = params.get_numeric("repeat_times", 10) + try: + value_on = run_netperf() + logging.info("When enable apicv, average throughput is %s", value_on) + vm.destroy() + + error_context.context("Disable apicv on host", logging.info) + reload_module("N") + vm.create(params=params) + session = vm.wait_for_login() + value_off = run_netperf() + logging.info("When disable apicv, average throughput is %s", value_off) + threshold = float(params.get("threshold", 0.9)) + if value_on <= value_off*threshold: + test.fail("Throughput is smaller when apicv is on than off") + finally: + n_server.cleanup(True) + n_client.cleanup(True) + session.close() + vm.destroy() + reload_module(ori_apicv) diff --git a/qemu/tests/block_discard_hotplug.py b/qemu/tests/block_discard_hotplug.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb4a8ff11c1070bebf190e887811f50f261447e --- /dev/null +++ b/qemu/tests/block_discard_hotplug.py @@ -0,0 +1,110 @@ +""" +hot-plug 
discard disk testing +""" +import logging + +from avocado.utils import process + +from virttest import storage +from virttest import error_context +from provider.block_devices_plug import BlockDevicesPlug +from virttest import data_dir + + +@error_context.context_aware +def run(test, params, env): + """ + Qemu discard hotplug support test: + + 1) Boot vm + 2) Hot-plug the data disk with discard option + 3) Format the data disk and mount it in guest + 4) Execute dd command to the mounted disk in guest + 5) Check disk size allocation + 6) Execute rm and fstrim command in guest + 7) Check disk size allocation + 8) Hot-unplug the data disk + 9) Reboot vm + + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + def get_scsi_debug_disk(): + """" + Get scsi debug disk on host which created as scsi-block. + """ + cmd = "lsblk -S -n -p|grep scsi_debug" + status, output = process.getstatusoutput(cmd) + + if status != 0: + test.fail("Can not find scsi_debug disk") + + return output.split()[0] + + def check_disk_allocation(): + """ + Get the disk size allocation + """ + if scsi_debug == "yes": + cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map" + output = process.system_output(cmd).decode().split(",") + return sum([abs(eval(i)) for i in output if i != ""]) + + cmd = "stat -c %b " + disk_name + return int(process.system_output(cmd).decode()) + + vm_name = params["main_vm"] + scsi_debug = params.get("scsi_debug", "no") + data_tag = params["data_tag"] + + vm = env.get_vm(vm_name) + vm.verify_alive() + + if scsi_debug == "yes": + disk_name = get_scsi_debug_disk() + vm.params["image_name_%s" % data_tag] = disk_name + else: + image_params = params.object_params(data_tag) + disk_name = storage.get_image_filename(image_params, + data_dir.get_data_dir()) + + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + plug = BlockDevicesPlug(vm) + error_context.context("Hot-plug discarded disk in guest.", logging.info) + plug.hotplug_devs_serial(data_tag) + guest_disk_name = '/dev/' + plug[0] + + guest_format_command = params["guest_format_command"].format( + guest_disk_name) + guest_dd_command = params["guest_dd_command"] + guest_rm_command = params["guest_rm_command"] + + error_context.context("Format disk in guest.", logging.info) + session.cmd(guest_format_command) + + error_context.context("Fill data disk in guest.", logging.info) + session.cmd(guest_dd_command, ignore_all_errors=True) + + old_count = check_disk_allocation() + error_context.context("Blocks before trim: %d" % old_count, logging.info) + + error_context.context("Remove data from disk in guest.", logging.info) + session.cmd(guest_rm_command) + + guest_fstrim_command = params["guest_fstrim_command"] + session.cmd(guest_fstrim_command) + new_count = check_disk_allocation() + error_context.context("Blocks after trim: %d" % new_count, logging.info) + if new_count >= old_count: + test.fail("Unexpected fstrim result") + + error_context.context("Hot-unplug discarded disk in guest.", logging.info) + plug.unplug_devs_serial(data_tag) + + error_context.context("Reboot guest.", logging.info) + vm.reboot() + vm.wait_for_login(timeout=timeout) diff --git a/qemu/tests/block_hotplug.py b/qemu/tests/block_hotplug.py index 9e1f964f0793be097438c590793e89bafc8e6672..01f8ffc90cc7d7813feea8d82a12e6c3967cebc1 100644 --- a/qemu/tests/block_hotplug.py +++ b/qemu/tests/block_hotplug.py @@ -11,6 +11,33 @@ from virttest.qemu_capabilities 
import Flags from virttest.qemu_devices import qdevices +def find_all_disks(session, windows): + """ Find all disks in guest. """ + global all_disks + if windows: + all_disks = set(session.cmd('wmic diskdrive get index').split()[1:]) + else: + all_disks = utils_misc.list_linux_guest_disks(session) + return all_disks + + +def wait_plug_disks(session, action, disks_before_plug, excepted_num, + windows, test): + """ Wait plug disks completely. """ + if not utils_misc.wait_for(lambda: len(disks_before_plug ^ find_all_disks( + session, windows)) == excepted_num, 60, step=1.5): + disks_info_win = ('wmic logicaldisk get drivetype,name,description ' + '& wmic diskdrive list brief /format:list') + disks_info_linux = 'lsblk -a' + disks_info = session.cmd( + disks_info_win if windows else disks_info_linux) + logging.debug("The details of disks:\n %s" % disks_info) + test.fail("Failed to {0} devices from guest, need to {0}: {1}, " + "actual {0}: {2}".format(action, excepted_num, + len(disks_before_plug ^ all_disks))) + return disks_before_plug ^ all_disks + + @error_context.context_aware def run(test, params, env): """ @@ -25,34 +52,12 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - def _find_all_disks(session): - """ Find all disks in guest. """ - global all_disks - if windows: - all_disks = set(session.cmd('wmic diskdrive get index').split()[1:]) - else: - all_disks = utils_misc.list_linux_guest_disks(session) - return all_disks def run_sub_test(test_name): """ Run subtest before/after hotplug/unplug device. """ error_context.context("Running sub test '%s'." % test_name, logging.info) utils_test.run_virt_sub_test(test, params, env, test_name) - def wait_plug_disks(session, action, disks_before_plug, excepted_num): - """ Wait plug disks completely. """ - if not utils_misc.wait_for(lambda: len(disks_before_plug ^ _find_all_disks( - session)) == excepted_num, 60, step=1.5): - disks_info_win = ('wmic logicaldisk get drivetype,name,description ' - '& wmic diskdrive list brief /format:list') - disks_info_linux = 'lsblk -a' - disks_info = session.cmd(disks_info_win if windows else disks_info_linux) - logging.debug("The details of disks:\n %s" % disks_info) - test.fail("Failed to {0} devices from guest, need to {0}: {1}, " - "actual {0}: {2}".format(action, excepted_num, - len(disks_before_plug ^ all_disks))) - return disks_before_plug ^ all_disks - def create_block_devices(image): """ Create block devices. """ return vm.devices.images_define_by_params( @@ -71,7 +76,7 @@ def run(test, params, env): error_context.context("%s block device (iteration %d)" % (action.capitalize(), iteration), logging.info) session = vm.wait_for_login(timeout=timeout) - disks_before_plug = _find_all_disks(session) + disks_before_plug = find_all_disks(session, windows) plug_devices = plug_devices if action == 'hotplug' else plug_devices[::-1] for dev in plug_devices: ret = getattr(vm.devices, 'simple_%s' % action)(dev, vm.monitor) @@ -79,7 +84,7 @@ def run(test, params, env): test.fail("Failed to %s device '%s', %s." 
% (action, dev, ret[0])) num = 1 if action == 'hotplug' else len(data_imgs) - plugged_disks = wait_plug_disks(session, action, disks_before_plug, num) + plugged_disks = wait_plug_disks(session, action, disks_before_plug, num, windows, test) session.close() return plugged_disks @@ -170,9 +175,8 @@ def run(test, params, env): vm.verify_alive() for iteration in range(int(params.get("repeat_times", 3))): - data_imgs_devs = {img: create_block_devices(img) for img in data_imgs} - for index, img in enumerate(data_imgs_devs): - data_devs = data_imgs_devs[img] + for index, img in enumerate(data_imgs): + data_devs = create_block_devices(img) if need_plug: new_disk = plug_block_devices('hotplug', data_devs).pop() diff --git a/qemu/tests/block_hotplug_in_pause.py b/qemu/tests/block_hotplug_in_pause.py index e49215814ece4212d9a8e12282cde111989132f3..84d8db45c7aeae3d76a3630095b8e98019c9a7ae 100644 --- a/qemu/tests/block_hotplug_in_pause.py +++ b/qemu/tests/block_hotplug_in_pause.py @@ -6,6 +6,7 @@ from virttest import utils_misc from virttest import utils_disk from virttest.qemu_devices import qdevices from virttest.qemu_capabilities import Flags +from virttest.qemu_devices.utils import (DeviceError, DeviceUnplugError) @error_context.context_aware @@ -76,17 +77,78 @@ def run(test, params, env): devs = [dev for dev in devs if not isinstance(dev, dtype)] return devs - def block_unplug(device_list): + def verify_deleted_event(device_list, timeout=120): + def get_deleted_event(dev_qid): + for event in vm.monitor.get_events(): + if ('DEVICE_DELETED' in event.get("event") and + 'device' in event.get('data') and + dev_qid == event.get('data')['device']): + return True + return False + + for dev in device_list: + dev_qid = dev.get_qid() + if not utils_misc.wait_for( + lambda: get_deleted_event(dev_qid), timeout, 0, 0): + test.fail('Failed to get deleted event of %s ' + 'during %s sec.' % (dev_qid, timeout)) + vm.monitor.clear_event('DEVICE_DELETED') + + def verify_unplug_devices_by_qtree(device_list, timeout=30): + """verify the unplug devices in qtree""" + for dev in device_list: + if not utils_misc.wait_for( + lambda: dev.verify_unplug('', vm.monitor), timeout, 1, 5): + test.error('The %s is still in qtree after unplugging.' % dev) + + def unplug_backend_devices(device_list): + """Unplug the backend devices""" + for dev in device_list: + try: + dev.unplug_hook() + drive = dev.get_param("drive") + if drive: + if Flags.BLOCKDEV in vm.devices.caps: + format_node = vm.devices[drive] + nodes = [format_node] + nodes.extend((n for n in format_node.get_child_nodes())) + for node in nodes: + if not node.verify_unplug( + node.unplug(vm.monitor), vm.monitor): + raise DeviceUnplugError( + node, "Failed to unplug blockdev node.", vm.devices) + vm.devices.remove(node, True if isinstance( + node, qdevices.QBlockdevFormatNode) else False) + if not isinstance(node, qdevices.QBlockdevFormatNode): + format_node.del_child_node(node) + else: + vm.devices.remove(drive) + vm.devices.remove(dev, True) + + except (DeviceError, KeyError) as exc: + dev.unplug_unhook() + raise DeviceUnplugError(dev, exc, vm.devices) + + def block_unplug(device_list, verify_del_event=True, + verify_qtree=True, unplug_backend=True): """ Unplug disks and verify it in qtree :param device_list: List of objectes for unplug disks """ for dev in reversed(device_list): - ret = vm.devices.simple_unplug(dev, vm.monitor) - if ret[1] is False: - test.fail("Failed to unplug device '%s'." 
- "Ouptut:\n%s" % (dev, ret[0])) + out = dev.unplug(vm.monitor) + if out: + test.fail("Failed to unplug device '%s'.Ouptut:\n%s" % (dev, out)) + + if verify_del_event: + verify_deleted_event(device_list) + + if verify_qtree: + verify_unplug_devices_by_qtree(device_list) + + if unplug_backend: + unplug_backend_devices(device_list) def block_check_in_guest(session, disks, blk_num, get_disk_cmd, plug_tag="hotplug"): @@ -165,6 +227,7 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() + is_vm_paused = False session = vm.wait_for_login() for iteration in range(repeat_times): @@ -178,6 +241,7 @@ def run(test, params, env): if params.get("stop_vm_before_hotplug", "no") == "yes": error_context.context("Stop VM before hotplug") vm.pause() + is_vm_paused = True for num in range(blk_num): image_name = img_list[num + 1] @@ -185,9 +249,10 @@ def run(test, params, env): if devs: device_list.extend(devs) - if vm.is_paused() and params.get("resume_vm_after_hotplug", "yes") == "yes": + if is_vm_paused and params.get("resume_vm_after_hotplug", "yes") == "yes": error_context.context("Resume vm after hotplug") vm.resume() + is_vm_paused = False block_check_in_guest(session, disks_before_plug, blk_num, get_disk_cmd) if params.get("disk_op_cmd"): @@ -202,19 +267,27 @@ def run(test, params, env): device_list.append(device) error_context.context("Unplug device", logging.info) - if not vm.is_paused(): + if not is_vm_paused: disks_before_unplug = find_disk(session, get_disk_cmd) if params.get("stop_vm_before_unplug", "yes") == "yes": error_context.context("Stop vm before unplug") vm.pause() + is_vm_paused = True else: blk_num = 0 disks_before_unplug = disks_before_plug - block_unplug(device_list) + block_unplug(device_list, not is_vm_paused, + not is_vm_paused, not is_vm_paused) - if vm.is_paused(): + if is_vm_paused: error_context.context("Resume vm after unplug") vm.resume() + is_vm_paused = False + # verify the unplugged device in qtree and unplug + # the backend only under the running status. + verify_deleted_event(device_list) + verify_unplug_devices_by_qtree(device_list) + unplug_backend_devices(device_list) block_check_in_guest(session, disks_before_unplug, blk_num, get_disk_cmd, plug_tag="unplug") diff --git a/qemu/tests/block_hotplug_passthrough.py b/qemu/tests/block_hotplug_passthrough.py new file mode 100644 index 0000000000000000000000000000000000000000..67ae10bf111c82ef92aa8032f2850e81fdcb6d39 --- /dev/null +++ b/qemu/tests/block_hotplug_passthrough.py @@ -0,0 +1,92 @@ +import logging + +from virttest import error_context +from virttest import utils_test +from virttest import utils_disk +from avocado.utils import process +from provider.storage_benchmark import generate_instance +from provider.block_devices_plug import BlockDevicesPlug + + +@error_context.context_aware +def run(test, params, env): + """ + Hotplug/unplug passthrough disk test: + 1) Create passthrough disk with scsi_debug tool. + 2) Start the guest. + 3) Hotplug this passthrough disk. + 4) Create partition on this disk and format it. + 5) Do iozone/dd test on this disk. + 6) Reboot the guest. + 7) Unplug this passthrough disk. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + + def create_path_disk(): + """Create a passthrough disk with scsi_debug """ + process.getoutput(params["pre_command"], shell=True) + disks_old = process.getoutput("ls -1d /dev/sd*", shell=True).split() + process.system_output(params["create_command"], timeout=300, + shell=True, verbose=False) + disks_new = process.getoutput("ls -1d /dev/sd*", shell=True).split() + return list(set(disks_new) - set(disks_old))[0] + + def hotplug_path_disk(vm, path_dev): + """Hotplug passthrough disk.""" + error_context.context("Hotplug passthrough device", logging.info) + vm.params["image_name_stg0"] = path_dev + plug = BlockDevicesPlug(vm) + plug.hotplug_devs_serial() + return plug[0] + + def format_plug_disk(session, did): + """Format new hotpluged disk.""" + stg_image_size = params["stg_image_size"] + ostype = params["os_type"] + if ostype == "windows": + if not utils_disk.update_windows_disk_attributes(session, did): + test.fail("Failed to clear readonly for all disks and online " + "them in guest") + partition = utils_disk.configure_empty_disk(session, did, + stg_image_size, ostype) + if not partition: + test.fail("Fail to format disks.") + return partition[0] + + def run_io_test(session, partition): + """ Run io test on the hot plugged disk. """ + iozone_options = params.get('iozone_options') + dd_test = params.get('dd_test') + if iozone_options: + error_context.context( + "Run iozone test on the plugged disk.", logging.info) + iozone = generate_instance(params, vm, 'iozone') + iozone.run(iozone_options.format(partition[0])) + if dd_test: + error_context.context( + "Do dd test on the plugged disk", logging.info) + partition = partition.split("/")[-1] + session.cmd(dd_test.format(partition)) + + def unplug_path_disk(vm): + """Unplug passthrough disk.""" + error_context.context("Unplug passthrouth device", logging.info) + plug = BlockDevicesPlug(vm) + plug.unplug_devs_serial() + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + + if params["os_type"] == "windows": + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, params["driver_name"]) + + drive_index = hotplug_path_disk(vm, create_path_disk()) + run_io_test(session, format_plug_disk(session, drive_index)) + session = vm.reboot(session) + unplug_path_disk(vm) + session.close() diff --git a/qemu/tests/block_multifunction.py b/qemu/tests/block_multifunction.py new file mode 100644 index 0000000000000000000000000000000000000000..c81a79a79d216dcd25e5f996712c3b26a4be764a --- /dev/null +++ b/qemu/tests/block_multifunction.py @@ -0,0 +1,207 @@ +import logging + +from avocado.core import exceptions + +from virttest import env_process +from virttest import error_context +from virttest import utils_misc +from virttest import utils_disk +from virttest.qemu_monitor import QMPCmdError +from virttest.qemu_devices.qdevices import QDevice, QDrive + +from qemu.tests import block_hotplug +from provider.block_devices_plug import BlockDevicesPlug + + +def set_addr(image_name, slot, function, params, multifunction='on'): + """ + Specify the multifunciton address for image device + + :param image_name: The image to be assigned address + :param slot: The slot of address + :param function: The function of addresss + :param params: Params object + :param multifunction: on/off + """ + if params['drive_format'].startswith('scsi'): + param_name = 'bus_extra_params_%s' % image_name + else: + param_name = 'blk_extra_params_%s' % image_name + if function % 8 == 0: + logging.info('Set multifunction=on for 
%s' % image_name) + params[param_name] = 'multifunction=%s' % multifunction + if function == 0: + return + addr_pattern = 'addr=%s.%s' % (hex(slot), hex(function % 8)) + logging.info('Set addr of %s to %s' % (image_name, addr_pattern)) + extra_param = params.get(param_name) + if extra_param: + params[param_name] = extra_param + ',' + addr_pattern + else: + params[param_name] = addr_pattern + + +def io_test(session, disk_op_cmd, disks, + windows=False, image_size=None): + """ + Perform io test on disks + :param session: vm session + :param disk_op_cmd: The disk operation command + :param plug_disks: The list of disks + :param windows: If it is windows guest + :param image_size: The size of images, only for windows + """ + for index, disk in enumerate(disks): + if windows: + if not utils_disk.update_windows_disk_attributes(session, disk): + raise exceptions.TestError("Failed to clear readonly for all" + " disks and online them in guest") + partition = utils_disk.configure_empty_windows_disk( + session, disk, image_size) + test_cmd = disk_op_cmd % (partition[0], partition[0]) + test_cmd = utils_misc.set_winutils_letter(session, test_cmd) + else: + test_cmd = disk_op_cmd % (disk, disk) + session.cmd(test_cmd, timeout=360) + + +@error_context.context_aware +def run(test, params, env): + """ + Test multi disk with multifunction on and off. + + 1) Boot guest with system disk(multifunction=on) + 2) Hotplug 7 disks with addr 0x0.0x1~0x0.0x7 + 3) hotplug 1 disk with multifunction=on (addr 0x0) + 4) Check disks in guest + 5) Run dd/iozone test on all data disks + 6) Reboot guest, check disks in guest + 7) Unplug disk8, and remove disk 1-7 + 8) Hotplug disk8 with multifunction=off + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
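+
+    For illustration, the 0x0.0x1~0x0.0x7 addresses above are hex
+    "slot.function" pairs as computed by set_addr() in this module; a
+    minimal standalone sketch of the numbering scheme:
+
+        def addr_pattern(slot, function):
+            # function 0 of a multifunction slot also carries multifunction=on
+            return 'addr=%s.%s' % (hex(slot), hex(function % 8))
+
+        assert addr_pattern(0, 1) == 'addr=0x0.0x1'
+        assert addr_pattern(0, 8) == 'addr=0x0.0x0'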
+ """ + + def get_image_device(qdev, img_name): + """ + Get the image device(virtio-blk-pci/virtio-scsi-pci) + :param qdev: DevContainer object + :param img_name: The image name + """ + dev = qdev.get(img_name) + devs = [dev] + if params['drive_format'].startswith('scsi'): + devs.append(qdev.get_by_properties( + {'aid': dev.get_param('bus').split('.')[0]})[0]) + return devs + + image = params.objects('images')[0] + vm_name = params['main_vm'] + set_addr(image, 0, 0, params) # Add multifunction=on option before start vm + params['start_vm'] = 'yes' + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + qdev = vm.devices + windows = params["os_type"] == 'windows' + disk_op_cmd = params.get("disk_op_cmd") + session = vm.wait_for_login() + + q35 = params['machine_type'] == 'q35' + dev_slot = 0 if q35 else 9 + parent_bus = 'pcie_extra_root_port_0' if q35 else 'pci.0' + image_size = '1G' + # Generate the data disk devices to be plugged + for i in range(1, 9): + stg = 'stg%s' % i + vm.params['images'] += ' %s' % stg + vm.params['image_name_%s' % stg] = 'images/%s' % stg + vm.params['image_size_%s' % stg] = image_size + vm.params['remove_image_%s' % stg] = 'yes' + vm.params['force_create_image_%s' % stg] = 'yes' + vm.params['boot_drive_%s' % stg] = 'no' + # Specify the address of the device, plug them into same slot + set_addr(stg, dev_slot, i, vm.params) + if params['drive_format'].startswith('scsi'): + # Create oen new scsi bus for each block device + vm.params['drive_bus_%s' % stg] = i + # To create those image files + env_process.process_images(env_process.preprocess_image, test, vm.params) + + plug = BlockDevicesPlug(vm) + parent_bus_obj = qdev.get_buses({'aobject': parent_bus})[0] + plug.hotplug_devs_serial(bus=parent_bus_obj) + + # Run io test on all the plugged disks + io_test(session, disk_op_cmd, plug, windows, image_size) + + # Reboot the guest and check if all the disks still exist + disks_before_reboot = block_hotplug.find_all_disks(session, windows) + session = vm.reboot(session) + block_hotplug.wait_plug_disks( + session, 'check', disks_before_reboot, 0, windows, test) + session.close() + + # Unplug the disk on function 7 and 0, and check if all the disks been removed + images = vm.params.objects('images') + unplug_dev = images[-1] + unplug_timeout = params['unplug_timeout'] + try: + plug.unplug_devs_serial(images=unplug_dev, timeout=unplug_timeout) + except exceptions.TestError as e: + if 'Actual: 8 disks. 
Expected: ' not in str(e): + raise + else: + test.fail('All the plugged disks should be removed when' + ' the device at function 0 is removed.') + + # replug disk 2-7 + rest_dev = images[1:-1] + # Remove them from DevContainer first, they are unplugged by qemu + # but still in DevContainer + for img in rest_dev: + devs_rm = get_image_device(qdev, img) + list(map(lambda x: qdev.remove(x, recursive=False), devs_rm)) + plug._create_devices(rest_dev, {'aobject': parent_bus}) + for img, devs in plug._hotplugged_devs.items(): + if img not in rest_dev: + continue + for dev in devs: + args = (dev, vm.monitor) + if isinstance(dev, QDevice): + pci_device = qdev.is_pci_device(dev['driver']) + if pci_device: + args += (parent_bus_obj,) + elif not dev['driver'].startswith('scsi'): + continue + elif not isinstance(dev, QDrive): + continue + try: + plug._hotplug_atomic(*args) + except NotImplementedError: + # Insert might fail for file node 1-7 not been removed + # from vm.devices, which can be ignored + pass + + # Replug disk 8 on slot 0 with multifunction='off' + set_addr(images[-1], dev_slot, 0, vm.params, multifunction='off') + plug._create_devices(unplug_dev.split(), {'aobject': parent_bus}) + for img, devs in plug._hotplugged_devs.items(): + for dev in devs: + if (img == images[-1] and + isinstance(dev, QDevice) and + qdev.is_pci_device(dev['driver'])): + dev['addr'] = hex(dev_slot) # for pci bus addr might be reset + try: + parent_bus_obj.prepare_hotplug(dev) + dev.hotplug(vm.monitor) + except QMPCmdError as e: + if 'single function' not in str(e): + raise + else: + test.fail('It should fail to hotplug a single function device' + ' to the address where multifunction already on.') + break + else: + plug._hotplug_atomic(dev, vm.monitor) diff --git a/qemu/tests/block_multifunction_scale.py b/qemu/tests/block_multifunction_scale.py new file mode 100644 index 0000000000000000000000000000000000000000..beca85c02fae6b1f96f1a6fe4614be7798559a47 --- /dev/null +++ b/qemu/tests/block_multifunction_scale.py @@ -0,0 +1,77 @@ +from virttest import env_process +from virttest import error_context + +from provider.block_devices_plug import BlockDevicesPlug +from qemu.tests.block_multifunction import set_addr, io_test + + +@error_context.context_aware +def run(test, params, env): + """ + Hotplug many disks with multifunction on. + + 1) Boot guest with system disk(multifunction=on) + 2) Hotplug disks with addr 0x0.0x1~0xn.0x7 + 3) Check disks in guest + 4) Run iozone test on all data disks for Windows guest + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
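+
+    For illustration, every slot hosts eight functions, so the disk count
+    scales as 8 * len(dev_slots); a minimal standalone sketch of the
+    per-slot image naming used below (a plain list stands in for the real
+    params object):
+
+        def slot_images(slot):
+            return ['stg%s%s' % (slot, i) for i in range(1, 9)]
+
+        assert slot_images(0)[0] == 'stg01'
+        assert len(slot_images(7)) == 8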
+ """ + + def generate_image(dev_slots, plug, params, qdev, image_size, pcie, test): + """ + Generate the data disk devices to be plugged + + :param dev_slots: All the slots to be plugged + :param plug: BlockDevicesPlug + :param params: vm.params + :param qdev: DevContainer + :param image_size: The image size to be specified + :param pcie: if itis pcie bus + """ + disks = [] + for slot in dev_slots: + scsi_bus = 1 + parent_bus = 'pcie_extra_root_port_%s' % slot if pcie else 'pci.0' + images = [] + for i in range(1, 9): + stg = 'stg%s%s' % (slot, i) + images.append(stg) + params['images'] += ' %s' % stg + params['image_name_%s' % stg] = 'images/%s' % stg + params['image_size_%s' % stg] = image_size + params['remove_image_%s' % stg] = 'yes' + params['force_create_image_%s' % stg] = 'no' + params['create_image_%s' % stg] = 'yes' + params['boot_drive_%s' % stg] = 'no' + # Specify the address of the device, plug them into same slot + addr = 0 if pcie else slot + set_addr(stg, addr, i, params) + if params['drive_format'].startswith('scsi'): + # Create oen new scsi bus for each block device + params['drive_bus_%s' % stg] = scsi_bus + scsi_bus += 1 + env_process.process_images(env_process.preprocess_image, test, params) + parent_bus_obj = qdev.get_buses({'aobject': parent_bus})[0] + plug._hotplug_devs(images, vm.monitor, bus=parent_bus_obj) + disks.extend(plug) + return disks + + image_size = '500M' + vm_name = params['main_vm'] + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + qdev = vm.devices + windows = params["os_type"] == 'windows' + disk_op_cmd = params.get("disk_op_cmd") + session = vm.wait_for_login() + pcie = params['machine_type'] == 'q35' + dev_slots = range(0, 3) if pcie else (7, 10) + + plug = BlockDevicesPlug(vm) + disks = generate_image( + dev_slots, plug, vm.params, qdev, image_size, pcie, test) + if windows: + io_test(session, disk_op_cmd, disks, windows, image_size) diff --git a/qemu/tests/block_scsi_generic_inquiry.py b/qemu/tests/block_scsi_generic_inquiry.py new file mode 100644 index 0000000000000000000000000000000000000000..30b261c8a8492777372c2c5299c8b7365caa61d3 --- /dev/null +++ b/qemu/tests/block_scsi_generic_inquiry.py @@ -0,0 +1,83 @@ +import logging + +from avocado.utils import process + +from virttest import data_dir +from virttest import env_process +from virttest import utils_misc + +from virttest.iscsi import Iscsi +from virttest.utils_disk import get_linux_disks + + +def run(test, params, env): + """ + Test to install the guest OS on the lvm device which is created + on an iSCSI target. + Steps: + 1) Setup iSCSI initiator on local host. + 2) Discovery and login the above iSCSI target. + 3) Send sg_inq to get information on the host. + 4) Boot guest with this lun as a block device as the second + disk, with scsi=on,format=raw,werror=stop,rerror=stop. + 5) In the guest, sg_inq should show similar information in + step 3. + 6) Logout iscsi server. + 7) Check the disk info with sg_inq inside guest, should show + fail information. + 8) Run dd on this disk, the guest should stop. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + def fetch_sg_info(device, session=None): + cmd = params['cmd_sg_inq'] % device + if session: + return session.cmd_output(cmd) + return process.getoutput(cmd, 60, ignore_status=False) + + iscsi = Iscsi.create_iSCSI(params, data_dir.get_data_dir()) + try: + iscsi.login() + if not utils_misc.wait_for(lambda: iscsi.get_device_name(), 60): + test.error('Can not get the iSCSI device.') + + cmd_get_disk_path = params['cmd_get_disk_path'] + disk_path = process.system_output(cmd_get_disk_path, 60, + shell=True).decode() + + host_sg_info = fetch_sg_info(disk_path) + logging.info('The scsi generic info from host: %s', host_sg_info) + + image_data_tag = params['image_data_tag'] + params['image_name_%s' % image_data_tag] = disk_path + params['image_size'] = params['emulated_image_size'] + image_params = params.object_params(image_data_tag) + env_process.preprocess_image(test, image_params, image_data_tag) + + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + + data_disk = '/dev/' + list(get_linux_disks(session).keys()).pop() + guest_sg_info = fetch_sg_info(data_disk, session) + logging.info('The scsi generic info from guest: %s', guest_sg_info) + + for info in guest_sg_info.split(): + if info not in host_sg_info: + test.fail('The guest scsi generic info is not similar to host.') + + iscsi.logout() + if params['sg_fail_info'] not in fetch_sg_info(data_disk, session): + test.fail('No found the fail information after logout iscsi server.') + + session.cmd_output(params['cmd_dd'] % data_disk) + vm_status_paused = params['vm_status_paused'] + if not utils_misc.wait_for( + lambda: vm.monitor.verify_status(vm_status_paused), 120, step=3): + test.fail('The vm status is not %s.' % vm_status_paused) + finally: + iscsi.delete_target() diff --git a/qemu/tests/block_with_iommu.py b/qemu/tests/block_with_iommu.py index 9a70c9a402fc3fabff56502c5f8badc388f9d0fc..55e76fc69b12f0754afe4636026eb84b4edecb11 100644 --- a/qemu/tests/block_with_iommu.py +++ b/qemu/tests/block_with_iommu.py @@ -1,4 +1,5 @@ import logging +import re from virttest import cpu from virttest import error_context @@ -13,12 +14,32 @@ def run(test, params, env): 1. Add "intel_iommu=on" to kernel line of q35 guest. 2. Boot a guest with virtio-scsi with iommu_platform=on. 3. Verify IOMMU enabled in the guest. + 4. Reload kernel then reboot guest. - :param test: QEMU test object - :param params: Dictionary with the test parameters + :param test: QEMU test object. + :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def _get_boot_file(cmd_get_boot_file): + """Get the boot file.""" + current_kernel = session.cmd_output(params.get('cmd_get_kernel_ver')) + boot_files = session.cmd_output(cmd_get_boot_file).splitlines() + if len(boot_files) > 1: + for boot_file in boot_files: + if current_kernel not in boot_file: + return boot_file + return boot_files[0] + + def reload_kernel(session): + """Reload kernel.""" + error_context.context('Reload kernel.', logging.info) + vmlinuz = _get_boot_file(params.get('cmd_get_boot_vmlinuz')) + initrd = _get_boot_file(params.get('cmd_get_boot_initramfs')) + orig_cmdline = session.cmd_output(params.get('cmd_get_boot_cmdline')) + new_cmdline = re.sub(r'vmlinuz\S+', vmlinuz, orig_cmdline).strip() + session.cmd(params.get('reload_kernel_cmd') % (vmlinuz, initrd, new_cmdline)) + def verify_iommu_enabled(): """ Verify whether the iommu is enabled. """ error_context.context( @@ -37,3 +58,7 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login(timeout=360) verify_iommu_enabled() + + if params.get('reload_kernel_cmd'): + reload_kernel(session) + vm.reboot(session) diff --git a/qemu/tests/block_with_write_threshold.py b/qemu/tests/block_with_write_threshold.py index 1b7704c48cae46bd6f163b3a9014f4a57fd0b172..07e279793f0104c103b5c829fb9d9210dcde30b8 100644 --- a/qemu/tests/block_with_write_threshold.py +++ b/qemu/tests/block_with_write_threshold.py @@ -5,10 +5,10 @@ from avocado.utils.wait import wait_for from virttest import data_dir from virttest import error_context -from virttest import storage from virttest import utils_disk from virttest import utils_test from virttest import utils_misc +from virttest.qemu_storage import QemuImg, get_image_json from provider.storage_benchmark import generate_instance @@ -36,10 +36,12 @@ def run(test, params, env): def get_node_name(image_tag): """ Get the node name. """ img_params = params.object_params(image_tag) - file = storage.get_image_filename(img_params, data_dir.get_data_dir()) - for block in vm.monitor.info("block"): - if file == block['inserted']['file']: - return block['inserted']['node-name'] + root_dir = data_dir.get_data_dir() + img = QemuImg(img_params, root_dir, image_tag) + filename = img.image_filename + if img.image_format == 'luks': + filename = get_image_json(image_tag, img_params, root_dir) + return vm.get_block({"filename": filename}) def set_block_write_threshold(monitor, node_name, size): """ Set block write threshold for the block drive. 
""" diff --git a/qemu/tests/blockdev_commit.py b/qemu/tests/blockdev_commit.py index aec53ca3a3510cac748a758aaa5cd153bc3343a4..379e2a22569f8c73eca73be9f7af9500ff59eb43 100644 --- a/qemu/tests/blockdev_commit.py +++ b/qemu/tests/blockdev_commit.py @@ -1,179 +1,4 @@ -import logging - -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_disk -from virttest.qemu_capabilities import Flags - -from provider import backup_utils - -from provider.virt_storage.storage_admin import sp_admin - - -class BlockDevCommitTest(object): - - def __init__(self, test, params, env): - self.env = env - self.test = test - self.params = params - self.device_node = self.get_node_name(params["device_tag"]) - self.snapshot_tags = params.objects("snapshot_tags") - self.disks_info = list() - self.files_info = list() - self.main_vm = self.prepare_main_vm() - self.snapshot_images = list( - map(self.get_image_by_tag, self.snapshot_tags)) - - @staticmethod - def get_node_name(tag): - return "drive_%s" % tag - - def is_blockdev_mode(self): - return self.main_vm.check_capability(Flags.BLOCKDEV) - - def prepare_main_vm(self): - return self.env.get_vm(self.params["main_vm"]) - - def get_image_by_tag(self, name): - image_dir = data_dir.get_data_dir() - image_params = self.params.object_params(name) - return qemu_storage.QemuImg(image_params, image_dir, name) - - def prepare_snapshot_file(self): - if self.is_blockdev_mode(): - params = self.params.copy() - params.setdefault("target_path", data_dir.get_data_dir()) - for tag in self.snapshot_tags: - image = sp_admin.volume_define_by_params(tag, params) - image.hotplug(self.main_vm) - else: - if self.params.get("mode") == "existing": - for image in self.snapshot_images: - image.create() - - def mount_data_disks(self): - if self.params["os_type"] == "windows": - return - session = self.clone_vm.wait_for_login() - try: - for info in self.disks_info: - disk_path = info[0] - mount_point = info[1] - utils_disk.mount(disk_path, mount_point, session=session) - finally: - session.close() - - def verify_data_file(self): - for idx, tag in enumerate(self.snapshot_tags): - for info in self.files_info: - mount_point, filename = info[0], info[1] - backup_utils.verify_file_md5( - self.main_vm, mount_point, filename) - - def create_snapshots(self): - if self.is_blockdev_mode(): - options = ["node", "overlay"] - cmd = "blockdev-snapshot" - else: - options = ["device", "mode", "snapshot-file", "format"] - cmd = "blockdev-snapshot-sync" - for idx, tag in enumerate(self.snapshot_tags): - params = self.params.object_params(tag) - arguments = params.copy_from_keys(options) - if not self.is_blockdev_mode(): - arguments["snapshot-file"] = self.snapshot_images[idx].image_filename - arguments["device"] = self.device_node - else: - arguments["overlay"] = self.get_node_name(tag) - if idx == 0: - arguments["node"] = self.device_node - else: - arguments["node"] = self.get_node_name( - self.snapshot_tags[idx - 1]) - self.main_vm.monitor.cmd(cmd, dict(arguments)) - for info in self.disks_info: - self.generate_tempfile(info[1], tag) - - def commit_snapshots(self): - if self.is_blockdev_mode(): - options = ["base-node", "top-node", "speed"] - arguments = self.params.copy_from_keys(options) - arguments["base-node"] = self.get_node_name( - self.params["base_tag"]) - arguments["top-node"] = self.get_node_name(self.params["top_tag"]) - device = self.get_node_name(self.snapshot_tags[-1]) - else: - options = ["base", "top", "speed"] - arguments = self.params.copy_from_keys(options) 
- base_image = self.get_image_by_tag(self.params["base_tag"]) - top_image = self.get_image_by_tag(self.params['top_tag']) - arguments["base"] = base_image.image_filename - arguments["top"] = top_image.image_filename - device = self.device_node - backup_utils.block_commit(self.main_vm, device, **arguments) - - @staticmethod - def get_linux_disk_path(session, disk_size): - disks = utils_disk.get_linux_disks(session, True) - for kname, attr in disks.items(): - if attr[1] == disk_size and attr[2] == "disk": - return kname - return None - - def configure_data_disk(self): - os_type = self.params["os_type"] - tag = self.params["device_tag"] - disk_params = self.params.object_params(tag) - disk_size = disk_params["image_size"] - session = self.main_vm.wait_for_login() - try: - if os_type != "windows": - disk_id = self.get_linux_disk_path(session, disk_size) - assert disk_id, "Disk not found in guest!" - mount_point = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] - self.disks_info.append([ - r"/dev/%s1" % - disk_id, mount_point]) - else: - disk_id = utils_disk.get_windows_disks_index( - session, disk_size) - driver_letter = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] - mount_point = r"%s:\\" % driver_letter - self.disks_info.append([disk_id, mount_point]) - finally: - session.close() - - def generate_tempfile(self, root_dir, filename="data", - size="10M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) - self.files_info.append([root_dir, filename]) - - def pre_test(self): - if not self.main_vm.is_alive(): - self.main_vm.create() - self.main_vm.verify_alive() - self.configure_data_disk() - self.prepare_snapshot_file() - - def post_test(self): - try: - self.main_vm.destroy() - for image in self.snapshot_images: - image.remove() - except Exception as error: - logging.error(str(error)) - - def run_test(self): - self.pre_test() - try: - self.create_snapshots() - self.commit_snapshots() - self.verify_data_file() - finally: - self.post_test() +from provider.blockdev_commit_base import BlockDevCommitTest def run(test, params, env): diff --git a/qemu/tests/blockdev_commit_install.py b/qemu/tests/blockdev_commit_install.py new file mode 100644 index 0000000000000000000000000000000000000000..22b12ebdf8c1cfbdb2c8f5a64147a3fb129344dd --- /dev/null +++ b/qemu/tests/blockdev_commit_install.py @@ -0,0 +1,58 @@ +import time +import logging +import random +import re + +from virttest import utils_test +from virttest import utils_misc +from virttest.tests import unattended_install + +from provider.blockdev_commit_base import BlockDevCommitTest + + +class BlockdevCommitInstall(BlockDevCommitTest): + def configure_system_disk(self, tag): + pass + + +def run(test, params, env): + """ + Block commit base Test + + 1. Install guest + 2. create 4 snapshots during guest installation + 3. commit snapshot 3 to base + 4. 
installation can be finished after commit
+    """
+    def tag_for_install(vm, tag):
+        if vm.serial_console:
+            serial_output = vm.serial_console.get_output()
+            if serial_output and re.search(tag, serial_output, re.M):
+                return True
+        logging.info("vm has not started yet")
+        return False
+
+    block_test = BlockdevCommitInstall(test, params, env)
+    args = (test, params, env)
+    bg = utils_test.BackgroundTest(unattended_install.run, args)
+    bg.start()
+    if bg.is_alive():
+        tag = params.get("tag_for_install_start", "Starting Login Service")
+        if utils_misc.wait_for(lambda: tag_for_install(block_test.main_vm, tag), 240, 10, 5):
+            logging.info("sleep a random time before doing snapshots")
+            time.sleep(random.randint(10, 120))
+            block_test.pre_test()
+            try:
+                block_test.commit_snapshots()
+                bg.join(timeout=1200)
+                reboot_method = params.get("reboot_method", "system_reset")
+                block_test.main_vm.reboot(method=reboot_method)
+            finally:
+                block_test.post_test()
+        else:
+            test.fail("Failed to install guest")
+    else:
+        test.fail("Installation failed to start")
diff --git a/qemu/tests/blockdev_commit_query_named_block_nodes.py b/qemu/tests/blockdev_commit_query_named_block_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c02d26bf910ae7c82677c3dd7dd4239b42cd49a
--- /dev/null
+++ b/qemu/tests/blockdev_commit_query_named_block_nodes.py
@@ -0,0 +1,39 @@
+from provider import backup_utils
+from provider import job_utils
+
+from provider.blockdev_commit_base import BlockDevCommitTest
+
+
+class BlockdevCommitQueryNamedBlockNodes(BlockDevCommitTest):
+
+    def commit_snapshots(self):
+        device = self.params.get("device_tag")
+        device_params = self.params.object_params(device)
+        snapshot_tags = device_params["snapshot_tags"].split()
+        self.device_node = self.get_node_name(device)
+        options = ["base-node", "top-node", "speed"]
+        arguments = self.params.copy_from_keys(options)
+        arguments["base-node"] = self.get_node_name(device)
+        arguments["top-node"] = self.get_node_name(snapshot_tags[-2])
+        device = self.get_node_name(snapshot_tags[-1])
+        commit_cmd = backup_utils.block_commit_qmp_cmd
+        cmd, args = commit_cmd(device, **arguments)
+        self.main_vm.monitor.cmd(cmd, args)
+        job_id = args.get("job-id", device)
+        self.main_vm.monitor.cmd("query-named-block-nodes")
+        job_utils.wait_until_block_job_completed(self.main_vm, job_id)
+
+
+def run(test, params, env):
+    """
+    Block commit base Test
+
+    1. boot guest with data disk
+    2. create 4 snapshots and save file in each snapshot
+    3. commit snapshot 4 to snapshot 3
+    4. during commit, query named block nodes
+    5. verify files' md5 after commit
+    """
+
+    block_test = BlockdevCommitQueryNamedBlockNodes(test, params, env)
+    block_test.run_test()
diff --git a/qemu/tests/blockdev_commit_reboot.py b/qemu/tests/blockdev_commit_reboot.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37d6545a03e8d68191804adf4151b0d933a1033
--- /dev/null
+++ b/qemu/tests/blockdev_commit_reboot.py
@@ -0,0 +1,38 @@
+from provider import backup_utils
+from provider import job_utils
+
+from provider.blockdev_commit_base import BlockDevCommitTest
+
+
+class BlockdevCommitReboot(BlockDevCommitTest):
+    def commit_snapshots(self):
+        device = self.params.get("device_tag")
+        device_params = self.params.object_params(device)
+        snapshot_tags = device_params["snapshot_tags"].split()
+        self.device_node = self.get_node_name(device)
+        options = ["base-node", "top-node", "speed"]
+        arguments = self.params.copy_from_keys(options)
+        arguments["base-node"] = self.get_node_name(device)
+        arguments["top-node"] = self.get_node_name(snapshot_tags[-2])
+        device = self.get_node_name(snapshot_tags[-1])
+        commit_cmd = backup_utils.block_commit_qmp_cmd
+        cmd, args = commit_cmd(device, **arguments)
+        self.main_vm.monitor.cmd(cmd, args)
+        job_id = args.get("job-id", device)
+        self.main_vm.reboot(method="system_reset")
+        job_utils.wait_until_block_job_completed(self.main_vm, job_id)
+
+
+def run(test, params, env):
+    """
+    Block commit base Test
+
+    1. boot guest with data disk
+    2. create 4 snapshots and save file in each snapshot
+    3. commit snapshot 4 to snapshot 3
+    4. during commit, reboot guest
+    5. verify files' md5 after commit
+    """
+
+    block_test = BlockdevCommitReboot(test, params, env)
+    block_test.run_test()
diff --git a/qemu/tests/blockdev_commit_server_down.py b/qemu/tests/blockdev_commit_server_down.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2a61286d89308c6961b95f03f97ed42584e4458
--- /dev/null
+++ b/qemu/tests/blockdev_commit_server_down.py
@@ -0,0 +1,102 @@
+import time
+import socket
+
+from provider import job_utils
+from provider import backup_utils
+
+from provider.blockdev_commit_base import BlockDevCommitTest
+from provider.nbd_image_export import QemuNBDExportImage
+
+from virttest import env_process
+
+
+class BlockdevCommitServerDown(BlockDevCommitTest):
+
+    def __init__(self, test, params, env):
+        params['nbd_export_format'] = params['image_format']
+        self.nbd_export = QemuNBDExportImage(params, params["local_image_tag"])
+        self.nbd_export.create_image()
+        self.nbd_export.export_image()
+
+        localhost = socket.gethostname()
+        params['nbd_server'] = localhost if localhost else 'localhost'
+        params['images'] += ' %s' % params['nbd_image_tag']
+        env_process.preprocess_vm(test, params, env, params["main_vm"])
+        super(BlockdevCommitServerDown, self).__init__(test, params, env)
+
+    def check_commit_running(self):
+        tmo = self.params.get_numeric('commit_start_timeout', 5)
+
+        # make sure commit is running, i.e.
offset > 0 + for i in range(tmo): + time.sleep(1) + job = job_utils.get_block_job_by_id(self.main_vm, + self.commit_job) + if job['offset'] > 0: + break + else: + self.test.fail("offset is 0 after %s seconds" % tmo) + + def check_commit_process(self): + offset = None + tmo = self.params.get_numeric('server_down_elapsed_time') + + # stop nbd server + self.nbd_export.stop_export() + + # check commit job should hang + for i in range(tmo): + time.sleep(1) + job = job_utils.get_block_job_by_id(self.main_vm, + self.commit_job) + if not job: + self.test.fail("job cancelled in %d seconds" % tmo) + if offset is None: + offset = job['offset'] + elif offset != job['offset']: + self.test.fail("offset changed: %s vs. %s" + % (offset, job['offset'])) + # resume nbd access + self.nbd_export.export_image() + + # set max speed + self.main_vm.monitor.set_block_job_speed(self.commit_job, 0) + + # commit job should complete + job_utils.wait_until_block_job_completed(self.main_vm, self.commit_job) + + def commit_snapshots(self): + device_params = self.params.object_params(self.params['nbd_image_tag']) + snapshot_tags = device_params["snapshot_tags"].split() + args = self.params.copy_from_keys(['speed']) + device = self.get_node_name(snapshot_tags[-1]) + + cmd, arguments = backup_utils.block_commit_qmp_cmd(device, **args) + self.main_vm.monitor.cmd(cmd, arguments) + job = job_utils.query_block_jobs(self.main_vm)[0] + self.commit_job = job['device'] + self.check_commit_running() + self.check_commit_process() + + def post_test(self): + self.params['images'] += ' %s' % self.params.get("local_image_tag") + self.nbd_export.stop_export() + super(BlockdevCommitServerDown, self).post_test() + + +def run(test, params, env): + """ + Block commit remote storage server down test + + 1. create a data disk and export it by qemu-nbd + 2. boot vm with the exported nbd disk as its data disk + 3. do live snapshots for the data disk + 4. create a file on data disk and do live commit + 5. stop nbd server + 6. check the commit process should hang, offset keeps the same + 7. start nbd server to export disk again + 8. live commit should complete + """ + + block_test = BlockdevCommitServerDown(test, params, env) + block_test.run_test() diff --git a/qemu/tests/blockdev_commit_stop_cont.py b/qemu/tests/blockdev_commit_stop_cont.py new file mode 100644 index 0000000000000000000000000000000000000000..d6da526b3876c8b580b4411d227fb55a914a8904 --- /dev/null +++ b/qemu/tests/blockdev_commit_stop_cont.py @@ -0,0 +1,41 @@ +from provider import backup_utils +from provider import job_utils + +from provider.blockdev_commit_base import BlockDevCommitTest + + +class BlockdevCommitStopCont(BlockDevCommitTest): + + def commit_snapshots(self): + device = self.params.get("device_tag") + device_params = self.params.object_params(device) + snapshot_tags = device_params["snapshot_tags"].split() + self.device_node = self.get_node_name(device) + options = ["base-node", "top-node", "speed"] + arguments = self.params.copy_from_keys(options) + arguments["base-node"] = self.get_node_name(device) + arguments["top-node"] = self.get_node_name(snapshot_tags[-2]) + device = self.get_node_name(snapshot_tags[-1]) + commit_cmd = backup_utils.block_commit_qmp_cmd + cmd, args = commit_cmd(device, **arguments) + self.main_vm.pause() + self.main_vm.monitor.cmd(cmd, args) + job_id = args.get("job-id", device) + self.main_vm.resume() + job_utils.wait_until_block_job_completed(self.main_vm, job_id) + + +def run(test, params, env): + """ + Block commit base Test + + 1. 
boot guest with system disk
+    2. create 4 snapshots and save file in each snapshot
+    3. stop vm
+    4. commit snapshot 3 to base
+    5. resume vm
+    6. verify files' md5 after commit
+    """
+
+    block_test = BlockdevCommitStopCont(test, params, env)
+    block_test.run_test()
diff --git a/qemu/tests/blockdev_commit_stress.py b/qemu/tests/blockdev_commit_stress.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f2f293c95470b4d4c6848fcd1e677207cd5c39c
--- /dev/null
+++ b/qemu/tests/blockdev_commit_stress.py
@@ -0,0 +1,51 @@
+from virttest import utils_test
+
+from provider.blockdev_commit_base import BlockDevCommitTest
+
+
+class BlockdevCommitStress(BlockDevCommitTest):
+    def run_stress_test(self):
+        self.stress_test = utils_test.VMStress(self.main_vm, "stress",
+                                               self.params)
+        self.stress_test.load_stress_tool()
+
+    def stress_running_check(self):
+        if not self.stress_test.app_running():
+            self.test.fail("Stress app is not running as expected")
+
+    def pre_test(self):
+        if not self.main_vm.is_alive():
+            self.main_vm.create()
+        self.main_vm.verify_alive()
+        for device in self.params["device_tag"].split():
+            device_params = self.params.object_params(device)
+            snapshot_tags = device_params["snapshot_tags"].split()
+            self.device_node = self.get_node_name(device)
+            self.configure_disk(device)
+            self.run_stress_test()
+            self.prepare_snapshot_file(snapshot_tags)
+            self.create_snapshots(snapshot_tags, device)
+
+    def run_test(self):
+        self.pre_test()
+        try:
+            self.commit_snapshots()
+            self.stress_running_check()
+            self.verify_data_file()
+        finally:
+            self.post_test()
+
+
+def run(test, params, env):
+    """
+    Block commit base Test
+
+    1. boot guest with system disk
+    2. run stress test in guest
+    3. create 4 snapshots and save file in each snapshot
+    4. commit snapshot 3 to base
+    5. verify the stress test is still running and verify files' md5 after commit
+    """
+
+    block_test = BlockdevCommitStress(test, params, env)
+    block_test.run_test()
diff --git a/qemu/tests/blockdev_commit_top.py b/qemu/tests/blockdev_commit_top.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1170094f49f4c32de3bcc12fa4ad712f90d0de8
--- /dev/null
+++ b/qemu/tests/blockdev_commit_top.py
@@ -0,0 +1,32 @@
+from provider import backup_utils
+
+from provider.blockdev_commit_base import BlockDevCommitTest
+
+
+class BlockdevCommitTop(BlockDevCommitTest):
+
+    def commit_snapshots(self):
+        device = self.params.get("device_tag")
+        device_params = self.params.object_params(device)
+        snapshot_tags = device_params["snapshot_tags"].split()
+        self.device_node = self.get_node_name(device)
+        options = ["base-node", "top-node", "speed"]
+        arguments = self.params.copy_from_keys(options)
+        arguments["base-node"] = self.get_node_name(device)
+        device = self.get_node_name(snapshot_tags[-1])
+        arguments["top-node"] = device
+        backup_utils.block_commit(self.main_vm, device, **arguments)
+
+
+def run(test, params, env):
+    """
+    Block commit base Test
+
+    1. boot guest with data disk
+    2. create 4 snapshots and save file in each snapshot
+    3. commit snapshot 4 to base
+    4.
verify files's md5 after commit + """ + + block_test = BlockdevCommitTop(test, params, env) + block_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_after_commit.py b/qemu/tests/blockdev_inc_backup_after_commit.py new file mode 100644 index 0000000000000000000000000000000000000000..dd273c33135e8c0db9c083e5a059873c9b306d19 --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_after_commit.py @@ -0,0 +1,168 @@ +from virttest.qemu_devices.qdevices import QBlockdevFormatNode + +from provider import backup_utils +from provider import blockdev_base +from provider import block_dirty_bitmap + + +class BlockdevIncbkAfterCommitTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncbkAfterCommitTest, self).__init__(test, params, env) + self._source_nodes = [] + self._full_bk_nodes = [] + self._inc_bk_nodes = [] + self._inc_bk_images = [] + self._bitmaps = [] + self._snap_images = [] + self._snap_nodes = [] + self._source_images = params.objects("source_images") + list(map(self._init_arguments_by_params, self._source_images)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self._source_nodes.append("drive_%s" % tag) + self._full_bk_nodes.append("drive_%s" % image_chain[0]) + self._inc_bk_nodes.append("drive_%s" % image_chain[1]) + self._inc_bk_images.append(image_chain[1]) + self._snap_images.append(image_params["snap_image"]) + self._snap_nodes.append("drive_%s" % self._snap_images[-1]) + self._bitmaps.append("bitmap_%s" % tag) + + # Add the full backup image only before full backup + self.params["image_backup_chain_%s" % tag] = image_chain[0] + + def add_images_for_incremental_backup(self): + """add incremental backup images with qmp command""" + for idx, tag in enumerate(self._source_images): + self.params["image_backup_chain_%s" % + tag] = self._inc_bk_images[idx] + self.add_target_data_disks() + + def add_images_for_data_image_snapshots(self): + """add snapshot images with backing:null""" + for tag in self._snap_images: + # create image with qemu-img + disk = self.source_disk_define_by_params(self.params, tag) + disk.create(self.params) + self.trash.append(disk) + + # hotplug image with blockdev-add(format and protocol only) + params = self.params.object_params(tag) + devices = self.main_vm.devices.images_define_by_params(tag, + params, + 'disk') + devices.pop() + for dev in devices: + if self.main_vm.devices.get_by_qid(dev.get_qid()): + continue + if isinstance(dev, QBlockdevFormatNode): + dev.params["backing"] = None + ret = self.main_vm.devices.simple_hotplug(dev, + self.main_vm.monitor) + if not ret[1]: + self.test.fail("Failed to hotplug '%s': %s." 
+ % (dev, ret[0])) + + def do_full_backup(self): + """full backup: data->base""" + extra_options = {"sync": "full"} + backup_utils.blockdev_batch_backup( + self.main_vm, + self._source_nodes, + self._full_bk_nodes, + self._bitmaps, + **extra_options) + + def generate_new_files(self): + return list(map(self.generate_data_file, self._source_images)) + + def do_incremental_backup(self): + """incremental backup: data->inc""" + extra_options = {"sync": "incremental"} + backup_utils.blockdev_batch_backup( + self.main_vm, + self._source_nodes, + self._inc_bk_nodes, + self._bitmaps, + **extra_options) + + def clone_vm_with_incremental_images(self): + """clone VM with incremental backup images as vm's data images""" + if self.main_vm.is_alive(): + self.main_vm.destroy() + + params = self.main_vm.params.copy() + images = [params.objects("images")[0]] + self._inc_bk_images + params["images"] = " ".join(images) + + self.clone_vm = self.main_vm.clone(params=params) + self.clone_vm.create() + self.clone_vm.verify_alive() + + self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + + def take_snapshots_on_data_images(self): + """take snapshots on data images""" + snapshot_options = {} + for idx, source_node in enumerate(self._source_nodes): + backup_utils.blockdev_snapshot(self.main_vm, source_node, + self._snap_nodes[idx], + **snapshot_options) + + def commit_snapshots_on_data_images(self): + """commit snapshots onto data images""" + commit_options = {} + for idx, snap_node in enumerate(self._snap_nodes): + backup_utils.block_commit(self.main_vm, snap_node, + **commit_options) + + def check_bitmaps(self): + for idx, bitmap in enumerate(self._bitmaps): + info = block_dirty_bitmap.get_bitmap_by_name( + self.main_vm, self._source_nodes[idx], bitmap) + if info: + if info["count"] <= 0: + self.test.fail("count in bitmap must be greater than 0") + else: + self.test.fail("Failed to find bitmap %s" % bitmap) + + def do_test(self): + self.do_full_backup() + self.generate_new_files() + self.add_images_for_data_image_snapshots() + self.take_snapshots_on_data_images() + self.generate_new_files() + self.commit_snapshots_on_data_images() + self.check_bitmaps() + self.add_images_for_incremental_backup() + self.do_incremental_backup() + self.clone_vm_with_incremental_images() + self.verify_data_files() + + +def run(test, params, env): + """ + Do incremental backup after block commit + + test steps: + 1. boot VM with a 2G data disk + 2. format data disk and mount it, create a file + 3. hotplug an image for full backup + 4. do full backup(data->base) and add non-persistent bitmap + 5. create another file + 6. add an image(backing:null) for data image snapshot + 7. create another file + 8. commit snapshot image on data image + 9. hotplug an image(backing:snapshot image) for incremental backup + 9. do incremental backup(data->inc) + 10. clone VM with inc image as its data image + 11. 
check files and md5sum + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncbkAfterCommitTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py b/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py index 49da7eb7e393ed0f1bbec334f65c237193b23ef1..d3b22ca7117e3721b91e27f42a560b4ebcced2e7 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py @@ -23,7 +23,7 @@ class BlockdevIncreamentalBackupBitmapTest(blockdev_base.BlockdevBaseTest): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) - image_chain = image_params.objects("image_chain") + image_chain = image_params.objects("image_backup_chain") self.source_images.append("drive_%s" % tag) self.full_backups.append("drive_%s" % image_chain[0]) self.inc_backups.append("drive_%s" % image_chain[1]) @@ -109,7 +109,7 @@ class BlockdevIncreamentalBackupBitmapTest(blockdev_base.BlockdevBaseTest): def _compare_image(self, src_tag): src_params = self.params.object_params(src_tag) - overlay_tag = src_params.objects("image_chain")[-1] + overlay_tag = src_params.objects("image_backup_chain")[-1] src_img = self.disk_define_by_params(self.params, src_tag) dst_img = self.disk_define_by_params(self.params, overlay_tag) result = src_img.compare_to(dst_img) diff --git a/qemu/tests/blockdev_inc_backup_inc_success.py b/qemu/tests/blockdev_inc_backup_inc_success.py new file mode 100644 index 0000000000000000000000000000000000000000..f6c781c5fd6d756c11c8a9a89c6596f1125e6abf --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_inc_success.py @@ -0,0 +1,121 @@ +from provider import backup_utils +from provider import blockdev_base +from provider import block_dirty_bitmap + +from virttest import utils_misc + + +class BlockdevIncbkIncSyncSuccBitmapTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncbkIncSyncSuccBitmapTest, self).__init__( + test, params, env) + self.source_images = [] + self.full_backups = [] + self.inc_backups = [] + self.inc_backup_tags = [] + self.bitmaps = [] + self.src_img_tags = params.objects("source_images") + self.inc_sync_mode = params["inc_sync_mode"] + self.inc_bitmap_mode = params["inc_bitmap_mode"] + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.inc_backups.append("drive_%s" % image_chain[1]) + self.inc_backup_tags.append(image_chain[1]) + self.bitmaps.append("bitmap_%s" % tag) + + def do_full_backup(self): + extra_options = {"sync": "full", "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.full_backups, + self.bitmaps, + **extra_options) + + def generate_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def do_incremental_backup(self): + extra_options = {"sync": self.inc_sync_mode, + "bitmap-mode": self.inc_bitmap_mode, + "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.inc_backups, + self.bitmaps, + **extra_options) + + def get_bitmaps_info(self): + out = [] + for idx, bitmap in enumerate(self.bitmaps): + node = self.source_images[idx] + 
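get_bitmap_by_name() used in this loop is a provider helper; conceptually it filters the reply of the QMP query-named-block-nodes command. A rough self-contained sketch, assuming monitor.cmd() returns the parsed reply in which each node may carry a "dirty-bitmaps" list:

def find_bitmap(monitor, node_name, bitmap_name):
    """Return the dirty-bitmap dict for bitmap_name on node_name, or None."""
    for node in monitor.cmd("query-named-block-nodes"):
        if node.get("node-name") == node_name:
            for bitmap in node.get("dirty-bitmaps", []):
                # entries look like {"name": ..., "count": ..., "persistent": ...}
                if bitmap.get("name") == bitmap_name:
                    return bitmap
    return None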
info = block_dirty_bitmap.get_bitmap_by_name( + self.main_vm, node, bitmap) + out.append(info) + return out + + def do_test(self): + self.do_full_backup() + self.generate_inc_files() + self.do_incremental_backup() + self.check_bitmaps() + self.clone_main_vm() + self.check_images() + + def check_bitmaps(self): + def _check_bitmaps(): + bitmaps = self.get_bitmaps_info() + if not bitmaps: + return False + + for info in bitmaps: + if info["count"] != 0: + return False + else: + return True + + refresh_timeout = self.params.get_numeric('refresh_timeout', 10) + if not utils_misc.wait_for(lambda: _check_bitmaps(), + refresh_timeout, 0, 1): + self.test.fail('count of bitmap should be 0 ' + 'after incremental backup') + + def check_images(self): + self.verify_data_files() + + def clone_main_vm(self): + self.main_vm.destroy() + imgs = [self.params['images'].split()[0]] + self.inc_backup_tags + self.params['images'] = ' '.join(imgs) + self.prepare_main_vm() + self.clone_vm = self.main_vm + + +def run(test, params, env): + """ + Blockdev incremental backup test + + test steps: + 1. boot VM with a 2G data disk + 2. format data disk and mount it, create a file + 3. add target disks for backup to VM via qmp commands + 4. do full backup and add non-persistent bitmap + 5. create another file + 6. do inc bakcup(sync: incremental, bitmap-mode: on-success) + 7. check bitmap, count should be 0 + 8. shutdown VM + 9. start VM with inc image, check md5 + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncbkIncSyncSuccBitmapTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py b/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b25eba60d1cc3893be88d3c491d20651e0d46d --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py @@ -0,0 +1,114 @@ +from provider import backup_utils +from provider import blockdev_base +from provider import block_dirty_bitmap + + +class BlockdevIncBackupNonPersistentBitmapTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncBackupNonPersistentBitmapTest, self).__init__(test, + params, + env) + self.source_images = [] + self.full_backups = [] + self.bitmaps = [] + self.src_img_tags = params.objects("source_images") + self.bitmap_count = 0 + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.bitmaps.append("bitmap_%s" % tag) + + def do_full_backup(self): + extra_options = {"sync": "full", "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.full_backups, + self.bitmaps, + **extra_options) + + def get_bitmaps_info(self): + out = [] + for idx, bitmap in enumerate(self.bitmaps): + node = self.source_images[idx] + info = block_dirty_bitmap.get_bitmap_by_name( + self.main_vm, node, bitmap) + out.append(info) + return out + + def prepare_data_disk(self, tag): + """ + Override this function, only make fs and mount it + :param tag: image tag + """ + self.format_data_disk(tag) + + def write_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def check_bitmaps(self, 
file_write=False): + bitmaps = self.get_bitmaps_info() + if not bitmaps: + self.test.fail('No bitmap was found.') + + for info in bitmaps: + # check if bitmap is non-persistent + if info['persistent']: + self.test.fail('It should be non-persistent') + + # check if count is changed after file writing + if file_write: + if info["count"] <= self.bitmap_count: + self.test.fail('count of bitmap should be greater than ' + 'the original after writing a file') + else: + self.bitmap_count = info['count'] + + def check_image_info(self): + # make sure non-persistent bitmaps never exist after VM shutdown + for tag in self.params.objects("source_images"): + params = self.params.object_params(tag) + disk = self.source_disk_define_by_params(params, tag) + out = disk.info() + + if out: + if self.params['check_bitmaps'] in out: + self.test.fail('bitmap should not be in image') + else: + self.test.error('Error when querying image info by qemu-img') + + def do_test(self): + self.do_full_backup() + self.check_bitmaps(file_write=False) + self.write_files() + self.check_bitmaps(file_write=True) + self.destroy_vms() + self.check_image_info() + + +def run(test, params, env): + """ + Blockdev incremental backup test: Add a non-persistent bitmap to image + + test steps: + 1. boot VM with a 2G data disk + 2. format data disk and mount it + 3. add target disks for backup to VM via qmp commands + 4. do full backup and add non-persistent bitmap + 5. check bitmap, persistent is False + 6. create another file + 7. check bitmap, count changed + 8. shutdown VM + 9. check non-persistent bitmaps never exist in image + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncBackupNonPersistentBitmapTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_diff.py b/qemu/tests/blockdev_inc_backup_pull_mode_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..9890391eb082a91db2b14c88c2beeef4941cdf75 --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_pull_mode_diff.py @@ -0,0 +1,361 @@ +import six +import json +import socket + +from functools import partial + +from provider import backup_utils +from provider import blockdev_base +from provider import job_utils +from provider import block_dirty_bitmap + +from provider.nbd_image_export import InternalNBDExportImage + +from virttest import qemu_storage +from virttest import utils_disk +from virttest import utils_misc + +from avocado.utils import process + + +class BlockdevIncBackupPullModeDiff(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncBackupPullModeDiff, self).__init__(test, + params, + env) + self.source_images = [] + self.fleecing_full_backups = [] + self.fleecing_inc_backups = [] + self.full_backup_tags = [] + self.inc_backup_tags = [] + self.full_backup_bitmaps = [] # added along with full backup + self.before_2nd_inc_bitmaps = [] # added before 2nd inc files + self.merged_bitmaps = [] # merge above two into this one + self.inc_backup_bitmaps = [] # added along with inc backup + self.backup_jobs = [] + self.full_backup_nbd_objs = [] + self.inc_backup_nbd_objs = [] + self.full_backup_client_images = [] + self.inc_backup_client_images = [] + self.full_backup_nbd_images = [] + self.inc_backup_nbd_images = [] + self.src_img_tags = params.objects("source_images") + localhost = socket.gethostname() + self.params['nbd_server'] = localhost if localhost else 'localhost' + 
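The fleecing images prepared below are consumed with qemu-img over NBD; when a dirty bitmap is exported alongside, the client can restrict reads to the dirty extents through json file options. A sketch of the options copyif() assembles later; the exact bitmap key comes from the dirty_bitmap_opt test parameter (commonly x-dirty-bitmap), so treat it as an assumption here:

import json

def nbd_json_filename(host, export_name, bitmap=None, port="10809",
                      bitmap_opt="x-dirty-bitmap"):
    """Build a 'json:...' filename addressing an NBD export for qemu-img."""
    opts = {
        "driver": "nbd",
        "server": {"type": "inet", "host": host, "port": port},
        "export": export_name,
    }
    if bitmap:
        # expose only the extents recorded in the named dirty bitmap
        opts[bitmap_opt] = "qemu:dirty-bitmap:%s" % bitmap
    return "json:" + json.dumps(opts)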
list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + self.source_images.append("drive_%s" % tag) + + # fleecing images + bk_tags = image_params.objects("image_backup_chain") + self.fleecing_full_backups.append("drive_%s" % bk_tags[0]) + self.fleecing_inc_backups.append("drive_%s" % bk_tags[1]) + + # bitmaps + self.full_backup_bitmaps.append("full_bitmap_%s" % tag) + self.before_2nd_inc_bitmaps.append("before_2nd_inc_bitmap_%s" % tag) + self.merged_bitmaps.append("merged_bitmap_%s" % tag) + self.inc_backup_bitmaps.append("inc_bitmap_%s" % tag) + self.params['nbd_export_bitmap_%s' % + bk_tags[1]] = self.merged_bitmaps[-1] + + # nbd images + nbd_image = self.params['nbd_image_%s' % bk_tags[0]] + self.full_backup_nbd_images.append( + self.source_disk_define_by_params(self.params, nbd_image)) + nbd_image = self.params['nbd_image_%s' % bk_tags[1]] + self.inc_backup_nbd_images.append( + self.source_disk_define_by_params(self.params, nbd_image)) + + # target 'fullbk' image, copy data from exported full bk image to it + fullbk = self.params['client_image_%s' % bk_tags[0]] + disk = self.source_disk_define_by_params(self.params, fullbk) + disk.create(disk.params) + self.trash.append(disk) + self.full_backup_client_images.append(disk) + + # target 'incbk' image, copy data from exported inc bk image to it + incbk = self.params['client_image_%s' % bk_tags[1]] + disk = self.source_disk_define_by_params(self.params, incbk) + disk.create(disk.params) + self.trash.append(disk) + self.inc_backup_client_images.append(disk) + + # Only hotplug fleecing images for full backup before full-backup + self.params['image_backup_chain_%s' % tag] = bk_tags[0] + + self.full_backup_tags.append(bk_tags[0]) + self.inc_backup_tags.append(bk_tags[1]) + + def init_nbd_exports(self): + # nbd export objects, used for exporting local images + for i, tag in enumerate(self.src_img_tags): + self.full_backup_nbd_objs.append( + InternalNBDExportImage(self.main_vm, self.params, + self.full_backup_tags[i])) + self.inc_backup_nbd_objs.append( + InternalNBDExportImage(self.main_vm, self.params, + self.inc_backup_tags[i])) + + def _copy_data_from_export(self, nbd_imgs, target_imgs, bitmaps=None): + for i, nbd_obj in enumerate(nbd_imgs): + if bitmaps is None: + self.copyif(nbd_obj, target_imgs[i]) + else: + self.copyif(nbd_obj, target_imgs[i], bitmaps[i]) + + def copy_full_data_from_export(self): + self._copy_data_from_export(self.full_backup_nbd_images, + self.full_backup_client_images) + + def copy_inc_data_from_export(self): + self._copy_data_from_export(self.inc_backup_nbd_images, + self.inc_backup_client_images, + self.merged_bitmaps) + + def copyif(self, nbd_img_obj, img_obj, bitmap=None): + qemu_img = utils_misc.get_qemu_img_binary(self.params) + qemu_io = utils_misc.get_qemu_io_binary(self.params) + + args = '' + if bitmap is None: + args = '-f %s %s' % (nbd_img_obj.image_format, + nbd_img_obj.image_filename) + else: + opts = qemu_storage.filename_to_file_opts( + nbd_img_obj.image_filename) + opts[self.params['dirty_bitmap_opt'] + ] = 'qemu:dirty-bitmap:%s' % bitmap + args = "'json:%s'" % json.dumps(opts) + + img_obj.base_image_filename = nbd_img_obj.image_filename + img_obj.base_format = nbd_img_obj.image_format + img_obj.base_tag = nbd_img_obj.tag + img_obj.rebase(img_obj.params) + + map_cmd = '{qemu_img} map --output=json {args}'.format( + qemu_img=qemu_img, args=args) + result = process.run(map_cmd, ignore_status=True, 
shell=True) + if result.exit_status != 0: + self.test.fail('Failed to run map command: %s' + % result.stderr.decode()) + + for item in json.loads(result.stdout.decode().strip()): + io_cmd = '{io} -C -c "read {s} {l}" -f {fmt} {fn}'.format( + io=qemu_io, s=item['start'], l=item['length'], + fmt=img_obj.image_format, fn=img_obj.image_filename + ) + result = process.run(io_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + self.test.fail('Failed to run qemu-io command: %s' + % result.stderr.decode()) + + img_obj.base_tag = 'null' + img_obj.rebase(img_obj.params) + + def _export_fleecing_images(self, nbd_objs, nodes): + for i, obj in enumerate(nbd_objs): + obj.start_nbd_server() + obj.add_nbd_image(nodes[i]) + + def _stop_export_fleecing_images(self, nbd_objs): + for obj in nbd_objs: + obj.stop_export() + + def export_full_bk_fleecing_imgs(self): + self._export_fleecing_images(self.full_backup_nbd_objs, + self.fleecing_full_backups) + + def stop_export_full_bk_fleecing_imgs(self): + self._stop_export_fleecing_images(self.full_backup_nbd_objs) + + def export_inc_bk_fleecing_imgs(self): + self._export_fleecing_images(self.inc_backup_nbd_objs, + self.fleecing_inc_backups) + + def stop_export_inc_bk_fleecing_imgs(self): + self._stop_export_fleecing_images(self.inc_backup_nbd_objs) + + def cancel_backup_jobs(self): + for job_id in self.backup_jobs: + arguments = {'id': job_id} + self.main_vm.monitor.cmd('job-cancel', arguments) + + def generate_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def hotplug_inc_backup_images(self): + for idx, tag in enumerate(self.src_img_tags): + self.params['image_backup_chain_%s' % + tag] = self.inc_backup_tags[idx] + self.add_target_data_disks() + + def _do_backup(self, backup_nodes, bitmaps): + extra_options = {"sync": "none", "wait_job_complete": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + backup_nodes, + bitmaps, + ** extra_options) + self.backup_jobs = [job['id'] + for job in job_utils.query_jobs(self.main_vm)] + + def do_full_backup(self): + self._do_backup(self.fleecing_full_backups, self.full_backup_bitmaps) + + def do_incremental_backup(self): + self._do_backup(self.fleecing_inc_backups, self.inc_backup_bitmaps) + + def restart_vm_with_incbk_images(self): + """restart vm with incbk as its data disk""" + self.main_vm.destroy() + images = self.params['images'] + self.params['images'] = ' '.join( + [images.split()[0]] + [o.tag for o in self.inc_backup_client_images]) + self.prepare_main_vm() + self.clone_vm = self.main_vm + self.params['images'] = images + + def rebase_inc_onto_full(self): + # rebase target 'incbk' onto target 'fullbk' + rebase_funcs = [] + for i, tag in enumerate(self.inc_backup_tags): + incbk = self.params['client_image_%s' % tag] + fullbk = self.params['client_image_%s' % self.full_backup_tags[i]] + image_params = self.params.object_params(incbk) + image_params['image_chain'] = '%s %s' % (fullbk, incbk) + disk = self.source_disk_define_by_params(image_params, incbk) + rebase_funcs.append(partial(disk.rebase, params=image_params)) + utils_misc.parallel(rebase_funcs) + + def check_data_files(self): + non_existed_files = {} + disks_info = {} + + # The last file should not exist + for i, data_img in enumerate(self.src_img_tags): + non_existed_files[data_img] = self.files_info[data_img].pop() + disks_info[data_img] = self.disks_info[data_img] + + # Check md5sum for the first three files + super(BlockdevIncBackupPullModeDiff, self).verify_data_files() + 
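copyif() above is the heart of the pull mode: the local image is temporarily rebased onto the NBD export, qemu-img map lists the extents visible through it (optionally restricted by the exported bitmap), and qemu-io -C reads each extent so copy-on-read materialises the data in the local image. A condensed sketch of that loop, assuming qemu-img/qemu-io on PATH and a local image already backed by the export:

import json
import subprocess

def pull_extents(map_args, local_fmt, local_filename):
    """Pull every extent reported by qemu-img map through copy-on-read."""
    out = subprocess.check_output(
        "qemu-img map --output=json %s" % map_args, shell=True)
    for extent in json.loads(out.decode()):
        # -C opens the image with copy-on-read, so each read copies the
        # backing (NBD) data into the local overlay as a side effect
        subprocess.check_call(
            'qemu-io -C -c "read %d %d" -f %s %s'
            % (extent["start"], extent["length"],
               local_fmt, local_filename), shell=True)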
+ # Check the files should not exist + try: + session = self.clone_vm.wait_for_login() + for tag, info in six.iteritems(disks_info): + utils_disk.mount(info[0], info[1], session=session) + file_path = "%s/%s" % (info[1], non_existed_files[tag]) + cat_cmd = "cat %s" % file_path + + s, o = session.cmd_status_output(cat_cmd) + if s == 0: + self.test.fail('File (%s) exists' % non_existed_files[tag]) + elif 'No such file' not in o.strip(): + self.test.fail('Unknown error: %s' % o) + finally: + session.close() + + def _handle_bitmaps(self, disabled_list, new_list, **extra): + for idx, bitmap in enumerate(disabled_list): + block_dirty_bitmap.block_dirty_bitmap_disable( + self.main_vm, self.source_images[idx], bitmap) + + for idx, bitmap in enumerate(new_list): + bitmap_params = {} + bitmap_params['bitmap_name'] = bitmap + bitmap_params['target_device'] = self.source_images[idx] + bitmap_params['disabled'] = extra.pop('disabled', 'off') + block_dirty_bitmap.block_dirty_bitmap_add(self.main_vm, + bitmap_params) + + merged_list = extra.pop('merged_list', []) + for idx, target in enumerate(merged_list): + src_list = [v[idx] for v in extra.values()] + block_dirty_bitmap.block_dirty_bitmap_merge( + self.main_vm, self.source_images[idx], src_list, target) + + def add_bitmaps_transaction(self): + for i, bitmap in enumerate(self.full_backup_bitmaps): + disabled_params = {'bitmap_device_node': self.source_images[i], + 'bitmap_name': bitmap} + added_params = {'bitmap_device_node': self.source_images[i], + 'bitmap_name': self.before_2nd_inc_bitmaps[i]} + block_dirty_bitmap.handle_block_dirty_bitmap_transaction( + self.main_vm, disabled_params, added_params) + + def merge_bitmaps_transaction(self): + for i, bitmap in enumerate(self.before_2nd_inc_bitmaps): + disabled_params = {'bitmap_device_node': self.source_images[i], + 'bitmap_name': bitmap} + added_params = {'bitmap_device_node': self.source_images[i], + 'bitmap_name': self.merged_bitmaps[i], + 'bitmap_disabled': 'on'} + merged_params = {'bitmap_device_node': self.source_images[i], + 'bitmap_target': self.merged_bitmaps[i], + 'bitmap_sources': [self.full_backup_bitmaps[i], + self.before_2nd_inc_bitmaps[i]]} + block_dirty_bitmap.handle_block_dirty_bitmap_transaction( + self.main_vm, disabled_params, added_params, merged_params) + + def do_test(self): + self.init_nbd_exports() + self.do_full_backup() + self.export_full_bk_fleecing_imgs() + self.generate_inc_files() + self.copy_full_data_from_export() + self.cancel_backup_jobs() + self.stop_export_full_bk_fleecing_imgs() + self.add_bitmaps_transaction() + self.generate_inc_files() + self.merge_bitmaps_transaction() + self.hotplug_inc_backup_images() + self.do_incremental_backup() + self.export_inc_bk_fleecing_imgs() + self.generate_inc_files() + self.copy_inc_data_from_export() + self.cancel_backup_jobs() + self.stop_export_inc_bk_fleecing_imgs() + self.rebase_inc_onto_full() + self.restart_vm_with_incbk_images() + self.check_data_files() + + +def run(test, params, env): + """ + Blockdev incremental backup test + + test steps: + 1. boot VM with one data disk + 2. make filesystem on data disk + 3. create file and save its md5sum on data disk + 4. add fleecing disk for full backup to VM via qmp commands + 5. do full backup(sync=none) with bitmap + 6. export the full backup image by internal nbd server + 7. create the 2nd file and save its md5sum on data disk + 8. copy data from nbd image exported in step 6 + into an image, e.g. fullbk + 9. cancel full backup job and stop nbd server + 10. 
add aother fleecing disk for inc backup to VM via qmp commands + 11. do inc backup(sync=none) with another new bitmap + as well as disable the first bitmap + 12. export the inc backup image by internal nbd server + 13. create the 3rd file and save its md5sum on data disk + 14. copy data from nbd image exported in step 12 with + the disabled bitmap into an image, e.g. incbk + 15. cancel inc backup job and stop nbd server + 16. rebase incbk onto fullbk + 17. restart vm with incbk as its data image + 18. check md5sum for the first two files on incbk, and make sure + the 3rd file doesn't exist + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncBackupPullModeDiff(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_test.py b/qemu/tests/blockdev_inc_backup_pull_mode_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c852179f7a95970b44c05bfac4af24f155d9a5f7 --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_pull_mode_test.py @@ -0,0 +1,322 @@ +import six +import json +import socket +import logging + +from provider import backup_utils +from provider import blockdev_base +from provider import job_utils + +from provider.nbd_image_export import InternalNBDExportImage +from provider.virt_storage.storage_admin import sp_admin + +from virttest import qemu_storage +from virttest import utils_disk +from virttest import utils_misc + +from avocado.utils import process + + +class BlockdevIncBackupPullModeTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncBackupPullModeTest, self).__init__(test, + params, + env) + self.source_images = [] + self.full_backups = [] + self.inc_backups = [] + self.full_backup_bitmaps = [] + self.inc_backup_bitmaps = [] + self.disabled_bitmaps = [] + self.backup_jobs = [] + self.full_backup_nbd_objs = [] + self.inc_backup_nbd_objs = [] + self.full_backup_client_images = [] + self.inc_backup_client_images = [] + self.full_backup_nbd_images = [] + self.inc_backup_nbd_images = [] + self.src_img_tags = params.objects("source_images") + localhost = socket.gethostname() + self.params['nbd_server'] = localhost if localhost else 'localhost' + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + bk_tags = image_params.objects("backup_images") + self.source_images.append("drive_%s" % tag) + + # fleecing image used for full backup, to be exported by nbd + self.full_backups.append("drive_%s" % bk_tags[0]) + self.full_backup_bitmaps.append("full_bitmap_%s" % tag) + + # fleecing image used for inc backup, to be exported by nbd + self.inc_backups.append("drive_%s" % bk_tags[1]) + self.inc_backup_bitmaps.append("inc_bitmap_%s" % tag) + + # nbd export image used full backup + nbd_image = self.params['nbd_image_%s' % bk_tags[0]] + disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), + None, nbd_image) + self.full_backup_nbd_images.append(disk) + + # nbd export image used for inc backup + nbd_image = self.params['nbd_image_%s' % bk_tags[1]] + disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), + None, nbd_image) + self.inc_backup_nbd_images.append(disk) + + # local image used for copying data from nbd export image(full backup) + client_image = self.params['client_image_%s' % bk_tags[0]] + disk = self.source_disk_define_by_params( + self.params.object_params(client_image), 
client_image) + disk.create(self.params) + self.trash.append(disk) + self.full_backup_client_images.append(disk) + + # local image used for copying data from nbd export images(inc backup) + client_image = self.params['client_image_%s' % bk_tags[1]] + disk = self.source_disk_define_by_params( + self.params.object_params(client_image), client_image) + disk.create(self.params) + self.trash.append(disk) + self.inc_backup_client_images.append(disk) + + # disable bitmap created in full backup when doing inc backup + self.disabled_bitmaps.append("full_bitmap_%s" % tag) + + def init_nbd_exports(self): + def _init_nbd_exports(tag): + bk_tags = self.params.object_params(tag).objects("backup_images") + + self.full_backup_nbd_objs.append( + InternalNBDExportImage(self.main_vm, self.params, bk_tags[0])) + + self.params['nbd_export_bitmap_%s' % + bk_tags[1]] = "full_bitmap_%s" % tag + self.inc_backup_nbd_objs.append( + InternalNBDExportImage(self.main_vm, self.params, bk_tags[1])) + + list(map(_init_nbd_exports, self.src_img_tags)) + + def full_copyif(self): + for i, nbd_obj in enumerate(self.full_backup_nbd_images): + self.copyif(nbd_obj, self.full_backup_client_images[i]) + + def inc_copyif(self): + for i, nbd_obj in enumerate(self.inc_backup_nbd_images): + self.copyif(nbd_obj, self.inc_backup_client_images[i], + self.full_backup_bitmaps[i]) + + def copyif(self, nbd_img_obj, img_obj, bitmap=None): + qemu_img = utils_misc.get_qemu_img_binary(self.params) + qemu_io = utils_misc.get_qemu_io_binary(self.params) + + args = '' + if bitmap is None: + args = '-f %s %s' % (nbd_img_obj.image_format, + nbd_img_obj.image_filename) + else: + opts = qemu_storage.filename_to_file_opts( + nbd_img_obj.image_filename) + opts[self.params['dirty_bitmap_opt'] + ] = 'qemu:dirty-bitmap:%s' % bitmap + args = "'json:%s'" % json.dumps(opts) + + img_obj.base_image_filename = nbd_img_obj.image_filename + img_obj.base_format = nbd_img_obj.image_format + img_obj.base_tag = nbd_img_obj.tag + img_obj.rebase(img_obj.params) + + map_cmd = '{qemu_img} map --output=json {args}'.format( + qemu_img=qemu_img, args=args) + result = process.run(map_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + self.test.fail('Failed to run map command: %s' + % result.stderr.decode()) + + for item in json.loads(result.stdout.decode().strip()): + io_cmd = '{io} -C -c "read {s} {l}" -f {fmt} {fn}'.format( + io=qemu_io, s=item['start'], l=item['length'], + fmt=img_obj.image_format, fn=img_obj.image_filename + ) + result = process.run(io_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + self.test.fail('Failed to run qemu-io command: %s' + % result.stderr.decode()) + + img_obj.base_tag = 'null' + img_obj.rebase(img_obj.params) + + def export_full_backups(self): + for i, obj in enumerate(self.full_backup_nbd_objs): + obj.start_nbd_server() + obj.add_nbd_image(self.full_backups[i]) + + def stop_export_full_backups(self): + for obj in self.full_backup_nbd_objs: + obj.stop_export() + + def export_inc_backups(self): + for i, obj in enumerate(self.inc_backup_nbd_objs): + obj.start_nbd_server() + obj.add_nbd_image(self.inc_backups[i]) + + def stop_export_inc_backups(self): + for obj in self.inc_backup_nbd_objs: + obj.stop_export() + + def cancel_backup_jobs(self): + for job_id in self.backup_jobs: + arguments = {'id': job_id} + self.main_vm.monitor.cmd('job-cancel', arguments) + + def do_full_backup(self): + extra_options = {"sync": "none", "wait_job_complete": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + 
self.source_images, + self.full_backups, + self.full_backup_bitmaps, + **extra_options) + self.backup_jobs = [job['id'] + for job in job_utils.query_jobs(self.main_vm)] + + def generate_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def add_target_data_disks(self, bktype='full'): + """Hot add target disk to VM with qmp monitor""" + for tag in self.params.objects("source_images"): + image_params = self.params.object_params(tag) + img = image_params['full_backup_image'] if bktype == 'full' else image_params['inc_backup_image'] + disk = self.target_disk_define_by_params(self.params, img) + disk.hotplug(self.main_vm) + self.trash.append(disk) + + def do_incremental_backup(self): + extra_options = {"sync": "none", + "disabled_bitmaps": self.disabled_bitmaps, + "wait_job_complete": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.inc_backups, + self.inc_backup_bitmaps, + **extra_options) + self.backup_jobs = [job['id'] + for job in job_utils.query_jobs(self.main_vm)] + + def restart_vm_with_backup_images(self): + """restart vm with back2 as its data disk""" + self.main_vm.destroy() + images = self.params["images"].split()[0] + for obj in self.inc_backup_client_images: + images += ' %s' % obj.tag + self.params['images'] = images + self.prepare_main_vm() + self.clone_vm = self.main_vm + + def clean_images(self): + for img in self.trash: + try: + if hasattr(img, 'remove'): + img.remove() + else: + sp_admin.remove_volume(img) + except Exception as e: + logging.warn(str(e)) + + def rebase_backup_image(self): + """rebase image back2 onto back1""" + for i, img_obj in enumerate(self.inc_backup_client_images): + target_img_obj = self.full_backup_client_images[i] + img_obj.base_image_filename = target_img_obj.image_filename + img_obj.base_format = target_img_obj.image_format + img_obj.base_tag = target_img_obj.tag + img_obj.rebase(img_obj.params) + + def verify_data_files(self): + non_existed_files = {} + disks_info = {} + + # The last file should not exist on back2 + for i, data_img in enumerate(self.src_img_tags): + non_existed_files[data_img] = self.files_info[data_img].pop() + disks_info[data_img] = self.disks_info[data_img] + + # Check md5sum for the first two files + super(BlockdevIncBackupPullModeTest, self).verify_data_files() + + # Check the files should not exist on back2 + session = self.clone_vm.wait_for_login() + try: + for tag, info in six.iteritems(disks_info): + utils_disk.mount(info[0], info[1], session=session) + file_path = "%s/%s" % (info[1], non_existed_files[tag]) + cat_cmd = "cat %s" % file_path + + logging.info('Check %s should not exist' % file_path) + s, o = session.cmd_status_output(cat_cmd) + if s == 0: + self.test.fail('File (%s) exists' % non_existed_files[tag]) + elif 'No such file' not in o.strip(): + self.test.fail('Unknown error: %s' % o) + finally: + if session: + session.close() + + def do_test(self): + self.init_nbd_exports() + self.do_full_backup() + self.export_full_backups() + self.generate_inc_files() + self.full_copyif() + self.cancel_backup_jobs() + self.stop_export_full_backups() + self.add_target_data_disks('inc') + self.do_incremental_backup() + self.export_inc_backups() + self.generate_inc_files() + self.inc_copyif() + self.cancel_backup_jobs() + self.stop_export_inc_backups() + self.rebase_backup_image() + self.restart_vm_with_backup_images() + self.verify_data_files() + + +def run(test, params, env): + """ + Blockdev incremental backup test + + test steps: + 1. 
boot VM with one data disk + 2. make filesystem on data disk + 3. create file and save its md5sum on data disk + 4. add fleecing disk for full backup to VM via qmp commands + 5. do full backup(sync=none) with bitmap + 6. export the full backup image by internal nbd server + 7. create the 2nd file and save its md5sum on data disk + 8. copy data from nbd image exported in step 6 + into an image, e.g. back1 + 9. cancel full backup job and stop nbd server + 10. add aother fleecing disk for inc backup to VM via qmp commands + 11. do inc backup(sync=none) with another new bitmap + as well as disable the first bitmap + 12. export the inc backup image by internal nbd server + 13. create the 3rd file and save its md5sum on data disk + 14. copy data from nbd image exported in step 12 with + the disabled bitmap into an image, e.g. back2 + 15. cancel inc backup job and stop nbd server + 16. rebase back2 onto back1 + 17. restart vm with back2 as its data image + 18. check md5sum for the first two files on back2, and make sure + the 3rd file doesn't exist + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncBackupPullModeTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_resize.py b/qemu/tests/blockdev_inc_backup_resize.py new file mode 100644 index 0000000000000000000000000000000000000000..da50e34854db08cec07d47e8b334bf320c4fab1a --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_resize.py @@ -0,0 +1,130 @@ +from virttest import utils_numeric + +from provider import backup_utils +from provider import blockdev_base +from provider import block_dirty_bitmap + + +class BlockdevIncBackupResizeTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncBackupResizeTest, self).__init__(test, params, env) + self.source_images = [] + self.full_backups = [] + self.bitmaps = [] + self.src_img_tags = params.objects("source_images") + self.src_img_sizes = [] + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.bitmaps.append("bitmap_%s" % tag) + + # Extend or shrink image size based on its original size + self.src_img_sizes.append( + int(float( + utils_numeric.normalize_data_size(image_params['image_size'], + order_magnitude="B"))) + ) + + def do_full_backup(self): + extra_options = {"sync": "full", + "persistent": True, + "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.full_backups, + self.bitmaps, + **extra_options) + + def prepare_data_disk(self, tag): + """ + Override this function, only make fs and mount it + :param tag: image tag + """ + self.format_data_disk(tag) + + def gen_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def check_bitmaps(self, node_name, bitmap_name): + bitmap = block_dirty_bitmap.get_bitmap_by_name(self.main_vm, + node_name, + bitmap_name) + # check if bitmap exists + if bitmap is None: + self.test.fail('Failed to get bitmap') + + # check if bitmap is persistent + if not bitmap['persistent']: + self.test.fail('Bitmap should be persistent') + + def check_image_bitmaps_existed(self): + # make sure persistent bitmaps always exist after VM shutdown + for tag in 
self.params.objects("source_images"):
+            disk = self.source_disk_define_by_params(self.params, tag)
+            out = disk.info()
+
+            if out:
+                if self.params['check_bitmaps'] not in out:
+                    self.test.fail('Persistent bitmaps should be in image')
+            else:
+                self.test.error('Error when querying image info with qemu-img')
+
+    def check_image_size(self, node_name, block_size):
+        for d in self.main_vm.monitor.cmd("query-block"):
+            if d['inserted']['node-name'] == node_name:
+                node = d['inserted']['image']
+                break
+        else:
+            self.test.error('Error when querying %s with query-block'
+                            % node_name)
+
+        if int(node['virtual-size']) != block_size:
+            self.test.fail('image size %s != %s after block_resize'
+                           % (node['virtual-size'], block_size))
+
+    def block_resize_data_disks(self):
+        for ratio in self.params.objects('disk_change_ratio'):
+            for idx, tag in enumerate(self.src_img_tags):
+                image_params = self.params.object_params(tag)
+                block_size = int(self.src_img_sizes[idx] * float(ratio))
+                args = (None, block_size, self.source_images[idx])
+                self.main_vm.monitor.block_resize(*args)
+                self.check_image_size(self.source_images[idx], block_size)
+                self.check_bitmaps(self.source_images[idx], self.bitmaps[idx])
+
+    def do_test(self):
+        self.do_full_backup()
+        self.gen_inc_files()
+        self.main_vm.destroy()
+        self.prepare_main_vm()
+        self.block_resize_data_disks()
+        self.main_vm.destroy()
+        self.check_image_bitmaps_existed()
+
+
+def run(test, params, env):
+    """
+    block_resize a qcow2 image with persistent bitmap stored on it
+
+    test steps:
+    1. boot VM with a 2G data disk
+    2. format data disk and mount it
+    3. add target disks for backup to VM via qmp commands
+    4. do full backup and add persistent bitmap
+    5. create another file
+    6. keep count of bitmaps, shutdown VM to store dirty bitmaps
+    7. start VM, record the count of bitmaps
+    8. extend/shrink image size, the count should be the same
+    9.
shutdown VM to check persistent bitmaps always exist in image + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncBackupResizeTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_test.py b/qemu/tests/blockdev_inc_backup_test.py index a00a210a2ccf3b51053ae2d87448ab4430e0196f..092f4f64e11d2f2ea42a767fc23ccbaeac450849 100644 --- a/qemu/tests/blockdev_inc_backup_test.py +++ b/qemu/tests/blockdev_inc_backup_test.py @@ -21,14 +21,16 @@ class BlockdevIncreamentalBackupTest(blockdev_base.BlockdevBaseTest): self.rebase_targets = [] for tag in params.objects('source_images'): image_params = params.object_params(tag) - image_chain = image_params.objects("image_chain") + image_chain = image_params.objects("image_backup_chain") self.source_images.append("drive_%s" % tag) self.full_backups.append("drive_%s" % image_chain[0]) self.inc_backups.append("drive_%s" % image_chain[1]) self.bitmaps.append("bitmap_%s" % tag) inc_img_tag = image_chain[-1] inc_img_params = params.object_params(inc_img_tag) - inc_img_params['image_chain'] = image_params['image_chain'] + + # rebase 'inc' image onto 'base' image, so inc's backing is base + inc_img_params['image_chain'] = image_params['image_backup_chain'] inc_img = self.source_disk_define_by_params( inc_img_params, inc_img_tag) target_func = partial(inc_img.rebase, params=inc_img_params) @@ -94,7 +96,7 @@ class BlockdevIncreamentalBackupTest(blockdev_base.BlockdevBaseTest): clone_params = self.main_vm.params.copy() for tag in self.params.objects("source_images"): img_params = self.params.object_params(tag) - image_chain = img_params.objects('image_chain') + image_chain = img_params.objects('image_backup_chain') images = images.replace(tag, image_chain[-1]) clone_params["images"] = images clone_vm = self.main_vm.clone(params=clone_params) diff --git a/qemu/tests/blockdev_inc_backup_with_guest_agent.py b/qemu/tests/blockdev_inc_backup_with_guest_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..ce4768e7a10f61d7b01741902764e4d5620c752c --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_with_guest_agent.py @@ -0,0 +1,125 @@ +from functools import partial + +from provider import backup_utils +from provider import blockdev_base + +from virttest import utils_misc +from virttest import guest_agent + + +class BlockdevIncbkFSFreezeTest(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncbkFSFreezeTest, self).__init__(test, params, env) + self.source_images = [] + self.full_backups = [] + self.inc_backups = [] + self.inc_backup_tags = [] + self.rebase_funcs = [] + self.bitmaps = [] + self.src_img_tags = params.objects("source_images") + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.inc_backups.append("drive_%s" % image_chain[1]) + self.inc_backup_tags.append(image_chain[1]) + self.bitmaps.append("bitmap_%s" % tag) + + # rebase 'inc' image onto 'base' image, so inc's backing is base + inc_img_params = self.params.object_params(image_chain[1]) + inc_img_params['image_chain'] = image_params['image_backup_chain'] + inc_img = self.source_disk_define_by_params(inc_img_params, + image_chain[1]) + 
self.rebase_funcs.append(partial(inc_img.rebase,
+                                         params=inc_img_params))
+
+    def do_full_backup(self):
+        extra_options = {"sync": "full", "auto_disable_bitmap": False}
+        backup_utils.blockdev_batch_backup(
+            self.main_vm,
+            self.source_images,
+            self.full_backups,
+            self.bitmaps,
+            **extra_options)
+
+    def generate_inc_files(self):
+        return list(map(self.generate_data_file, self.src_img_tags))
+
+    def do_incremental_backup(self):
+        extra_options = {"sync": "incremental", "auto_disable_bitmap": False}
+        backup_utils.blockdev_batch_backup(
+            self.main_vm,
+            self.source_images,
+            self.inc_backups,
+            self.bitmaps,
+            **extra_options)
+
+    def restart_vm_with_inc(self):
+        images = self.params['images']
+        self.params['images'] = ' '.join(
+            [images.split()[0]] + self.inc_backup_tags)
+        self.prepare_main_vm()
+        self.clone_vm = self.main_vm
+        self.params['images'] = images
+
+    def prepare_test(self):
+        super(BlockdevIncbkFSFreezeTest, self).prepare_test()
+        params = self.params.object_params(self.params['agent_name'])
+        params["monitor_filename"] = self.main_vm.get_serial_console_filename(
+            self.params['agent_name'])
+        self.guest_agent = guest_agent.QemuAgent(
+            self.main_vm, self.params['agent_name'],
+            self.params['agent_serial_type'], params
+        )
+
+        # bz1747960: enable virt_qemu_ga_read_nonsecurity_files before freeze;
+        # if the fix is not backported yet, put SELinux in permissive mode.
+        # No need to restore the setting afterwards: a VM reboot restores it.
+        s = self.main_vm.wait_for_login()
+        try:
+            if s.cmd_status(self.params['enable_nonsecurity_files_cmd']) != 0:
+                s.cmd_status(self.params['enable_permissive_cmd'])
+        finally:
+            s.close()
+
+    def rebase_inc_onto_base(self):
+        return utils_misc.parallel(self.rebase_funcs)
+
+    def do_test(self):
+        self.do_full_backup()
+        self.generate_inc_files()
+        self.guest_agent.fsfreeze()
+        self.do_incremental_backup()
+        self.guest_agent.fsthaw()
+        self.main_vm.destroy()
+        self.rebase_inc_onto_base()
+        self.restart_vm_with_inc()
+        self.verify_data_files()
+
+
+def run(test, params, env):
+    """
+    Do incremental backup with guest-fs-freeze
+
+    test steps:
+    1. boot VM with a 2G data disk
+    2. format data disk and mount it, create a file
+    3. add target disks for backup to VM via qmp commands
+    4. do full backup and add non-persistent bitmap
+    5. create another file
+    6. guest-fsfreeze-freeze
+    7. do inc backup(sync: incremental)
+    8. guest-fsfreeze-thaw
+    9. shutdown VM, rebase inc onto base
+    10.
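The freeze/thaw window in steps 6-8 maps onto two guest-agent commands; a minimal sketch that drives qemu-guest-agent directly over its virtio-serial chardev socket (the socket path is an assumption, not a value from this patch):

import json
import socket

QGA_SOCK = "/var/lib/libvirt/qemu/qga.sock"  # placeholder chardev path

def qga(command):
    """Send one guest-agent command and return its 'return' value."""
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(QGA_SOCK)
    s.sendall(json.dumps({"execute": command}).encode() + b"\n")
    reply = json.loads(s.makefile().readline())
    s.close()
    return reply.get("return")

frozen = qga("guest-fsfreeze-freeze")  # returns number of frozen filesystems
try:
    print("%d filesystems frozen, safe to run the incremental backup" % frozen)
    # ... issue blockdev-backup (sync=incremental) on the QMP monitor here ...
finally:
    qga("guest-fsfreeze-thaw")  # always thaw, even if the backup fails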
start VM with inc images, check files' md5 + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncbkFSFreezeTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_with_migration.py b/qemu/tests/blockdev_inc_backup_with_migration.py new file mode 100644 index 0000000000000000000000000000000000000000..96ed0d2e1f9b5dd2cf660ef2e3e4b32e4cc612f9 --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_with_migration.py @@ -0,0 +1,154 @@ +import ast + +from functools import partial + +from provider import backup_utils +from provider import blockdev_base +from provider import block_dirty_bitmap + +from virttest import utils_misc + + +class BlockdevIncbkWithMigration(blockdev_base.BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncbkWithMigration, self).__init__(test, params, env) + self.source_images = [] + self.full_backups = [] + self.inc_backups = [] + self.inc_backup_tags = [] + self.bitmaps = [] + self.bitmap_counts = {} + self.rebase_funcs = [] + self.src_img_tags = params.objects("source_images") + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.inc_backups.append("drive_%s" % image_chain[1]) + self.inc_backup_tags.append(image_chain[1]) + self.bitmaps.append("bitmap_%s" % tag) + self.bitmap_counts["bitmap_%s" % tag] = None + + # rebase 'inc' image onto 'base' image, so inc's backing is base + inc_img_params = self.params.object_params(image_chain[1]) + inc_img_params['image_chain'] = image_params['image_backup_chain'] + inc_img = self.source_disk_define_by_params(inc_img_params, + image_chain[1]) + self.rebase_funcs.append(partial(inc_img.rebase, + params=inc_img_params)) + + # Only hotplug full backup images before full-backup + self.params['image_backup_chain_%s' % tag] = image_chain[0] + + def do_full_backup(self): + extra_options = {"sync": "full", "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.full_backups, + self.bitmaps, + **extra_options) + + def generate_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def do_incremental_backup(self): + extra_options = {"sync": "incremental", "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.inc_backups, + self.bitmaps, + **extra_options) + + def restart_vm_with_inc(self): + images = self.params['images'] + self.params['images'] = ' '.join( + [images.split()[0]] + self.inc_backup_tags) + self.prepare_main_vm() + self.clone_vm = self.main_vm + self.params['images'] = images + + def hotplug_inc_backup_disks(self): + for idx, tag in enumerate(self.src_img_tags): + self.params['image_backup_chain_%s' % + tag] = self.inc_backup_tags[idx] + self.add_target_data_disks() + + def disable_bitmaps(self): + for idx, bitmap in enumerate(self.bitmaps): + # disable function has already checked if the bitmap was disabled + block_dirty_bitmap.block_dirty_bitmap_disable( + self.main_vm, self.source_images[idx], bitmap) + + # record the count of the bitmap + info = block_dirty_bitmap.get_bitmap_by_name( + self.main_vm, self.source_images[idx], bitmap) + self.bitmap_counts[info['name']] = info['count'] + + def 
get_bitmaps_info(self):
+        out = []
+        for idx, bitmap in enumerate(self.bitmaps):
+            info = block_dirty_bitmap.get_bitmap_by_name(
+                self.main_vm, self.source_images[idx], bitmap)
+            out.append(info)
+        return out
+
+    def check_bitmaps(self):
+        for info in self.get_bitmaps_info():
+            if info is None:
+                self.test.fail('Failed to get bitmaps after migration')
+            if info['status'] != 'disabled':
+                self.test.fail('Bitmap was not disabled after migration')
+            if info['count'] != self.bitmap_counts[info['name']]:
+                self.test.fail('Count of bitmap was changed after migration')
+
+    def migrate_vm(self):
+        mig_timeout = float(self.params["mig_timeout"])
+        mig_protocol = self.params["migration_protocol"]
+        capabilities = ast.literal_eval(self.params["migrate_capabilities"])
+        self.main_vm.migrate(mig_timeout, mig_protocol,
+                             migrate_capabilities=capabilities, env=self.env)
+
+    def rebase_inc_onto_base(self):
+        return utils_misc.parallel(self.rebase_funcs)
+
+    def do_test(self):
+        self.do_full_backup()
+        self.generate_inc_files()
+        self.disable_bitmaps()
+        self.migrate_vm()
+        self.check_bitmaps()
+        self.hotplug_inc_backup_disks()
+        self.do_incremental_backup()
+        self.main_vm.destroy()
+        self.rebase_inc_onto_base()
+        self.restart_vm_with_inc()
+        self.verify_data_files()
+
+
+def run(test, params, env):
+    """
+    Do incremental live backup with bitmap after migration on shared storage
+
+    test steps:
+    1. boot VM with a 2G data disk
+    2. format data disk and mount it, create a file
+    3. add target disks for full backup to VM via qmp commands
+    4. do full backup and add non-persistent bitmap
+    5. create another file
+    6. disable bitmaps
+    7. migrate VM from src to dst, wait till it is finished
+    8. add inc backup disks and do inc backup(sync: incremental) on dst
+    9. shutdown VM on dst
+    10. rebase inc images(inc-backup) onto base images(full-backup)
+    11.
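For non-persistent bitmaps to survive step 7, the migration has to carry them explicitly. A sketch of the QMP payloads involved, assuming the migrate_capabilities parameter (whose concrete value lives in the cfg file, not in this diff) enables the dirty-bitmaps capability; the URI is a placeholder:

import json

set_caps = {"execute": "migrate-set-capabilities",
            "arguments": {"capabilities": [
                {"capability": "dirty-bitmaps", "state": True}]}}
migrate = {"execute": "migrate",
           "arguments": {"uri": "tcp:0:4444"}}  # placeholder destination

for payload in (set_caps, migrate):
    print(json.dumps(payload))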
start VM with inc images on dst, check files' md5 + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncbkWithMigration(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_xpt_bitmap.py b/qemu/tests/blockdev_inc_backup_xpt_bitmap.py new file mode 100644 index 0000000000000000000000000000000000000000..4ac5c4694282fba515fe01cd924c87ac38784e79 --- /dev/null +++ b/qemu/tests/blockdev_inc_backup_xpt_bitmap.py @@ -0,0 +1,119 @@ +import json +import socket + +from provider import backup_utils + +from provider.blockdev_base import BlockdevBaseTest +from provider.nbd_image_export import QemuNBDExportImage + +from virttest import utils_misc +from virttest import qemu_storage + +from avocado.utils import process + + +class BlockdevIncBackupXptBitmapTest(BlockdevBaseTest): + + def __init__(self, test, params, env): + super(BlockdevIncBackupXptBitmapTest, self).__init__(test, params, env) + self.source_images = [] + self.full_backups = [] + self.bitmaps = [] + self.nbd_exports = [] + self.nbd_images = [] + self.src_img_tags = params.objects("source_images") + localhost = socket.gethostname() + self.params['nbd_server'] = localhost if localhost else 'localhost' + list(map(self._init_arguments_by_params, self.src_img_tags)) + + def _init_arguments_by_params(self, tag): + image_params = self.params.object_params(tag) + image_chain = image_params.objects("image_backup_chain") + self.source_images.append("drive_%s" % tag) + self.full_backups.append("drive_%s" % image_chain[0]) + self.bitmaps.append("bitmap_%s" % tag) + image_params["nbd_export_bitmap"] = "bitmap_%s" % tag + self.nbd_exports.append(QemuNBDExportImage(image_params, tag)) + self.nbd_images.append( + qemu_storage.QemuImg( + self.params.object_params(image_params['nbd_image_tag']), + None, image_params['nbd_image_tag'] + ) + ) + + def do_full_backup(self): + extra_options = {"sync": "full", + "persistent": True, + "auto_disable_bitmap": False} + backup_utils.blockdev_batch_backup( + self.main_vm, + self.source_images, + self.full_backups, + list(self.bitmaps), + **extra_options) + + def prepare_data_disk(self, tag): + """ + Override this function, only make fs and mount it + :param tag: image tag + """ + self.format_data_disk(tag) + + def gen_inc_files(self): + return list(map(self.generate_data_file, self.src_img_tags)) + + def expose_persistent_bitmaps(self): + for xpt in self.nbd_exports: + xpt.export_image() + + def check_info_from_export_bitmaps(self): + qemu_img = utils_misc.get_qemu_img_binary(self.params) + + for i, nbd_img in enumerate(self.nbd_images): + opts = qemu_storage.filename_to_file_opts(nbd_img.image_filename) + opts[self.params['dirty_bitmap_opt'] + ] = 'qemu:dirty-bitmap:%s' % self.bitmaps[i] + args = "'json:%s'" % json.dumps(opts) + + map_cmd = '{qemu_img} map --output=human {args}'.format( + qemu_img=qemu_img, args=args) + result = process.run(map_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + self.test.fail('Failed to run map command: %s' + % result.stderr.decode()) + if nbd_img.image_filename not in result.stdout_text: + self.test.fail('Failed to get bitmap info.') + + def clean_images(self): + for obj in self.nbd_exports: + obj.stop_export() + super(BlockdevIncBackupXptBitmapTest, self).clean_images() + + def do_test(self): + self.do_full_backup() + self.gen_inc_files() + self.main_vm.destroy() + self.expose_persistent_bitmaps() + self.check_info_from_export_bitmaps() + + +def run(test, params, 
env): + """ + Expose persistent bitmaps via qemu-nbd + + test steps: + 1. boot VM with a 2G data disk + 2. format data disk and mount it + 3. add target disks for backup to VM via qmp commands + 4. do full backup and add persistent bitmap + 5. create another file + 6. shutdown VM + 7. expose persistent bitmaps with qemu-nbd + 8. qemu-img map can show incremental info from bitmaps + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + inc_test = BlockdevIncBackupXptBitmapTest(test, params, env) + inc_test.run_test() diff --git a/qemu/tests/blockdev_mirror_error.py b/qemu/tests/blockdev_mirror_error.py new file mode 100644 index 0000000000000000000000000000000000000000..0c859528e48309b6f9de541939fa25b579966232 --- /dev/null +++ b/qemu/tests/blockdev_mirror_error.py @@ -0,0 +1,37 @@ +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest +from provider.job_utils import wait_until_job_status_match + + +class BlockdevMirrorErrorTest(BlockdevMirrorNowaitTest): + """Block mirror with error source and target""" + + def check_mirror_job_stopped(self): + tmo = int(self.params.get("mirror_error_stop_timeout", "300")) + status = self.params.get("mirror_error_stop_status", "paused") + for job_id in self._jobs: + wait_until_job_status_match(self.main_vm, status, + job_id, timeout=tmo) + + def do_test(self): + self.blockdev_mirror() + self.check_mirror_job_stopped() + + +def run(test, params, env): + """ + Block mirror with '"on-source-error": "stop", "on-target-error": "stop"' + + test steps: + 1. boot VM with a 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. add a target disk(size = 1G) for mirror to VM via qmp commands + 5. do block-mirror with sync mode full + 6. check the mirror job is stopped + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorErrorTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_firewall.py b/qemu/tests/blockdev_mirror_firewall.py new file mode 100644 index 0000000000000000000000000000000000000000..bfb016c1b1e8b20befcabfd59a09df347e88c9f6 --- /dev/null +++ b/qemu/tests/blockdev_mirror_firewall.py @@ -0,0 +1,125 @@ +import socket + +from avocado.utils import process + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest +from provider.nbd_image_export import QemuNBDExportImage + + +class BlockdevMirrorFirewallTest(BlockdevMirrorNowaitTest): + """ + Block mirror with firewall test + """ + + def __init__(self, test, params, env): + localhost = socket.gethostname() + params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ + if localhost else 'localhost' + self._offset = None + self._net_down = False + + super(BlockdevMirrorFirewallTest, self).__init__(test, params, env) + + def _create_local_image(self): + image_params = self.params.object_params( + self.params['local_image_tag']) + local_image = self.source_disk_define_by_params( + image_params, self.params['local_image_tag']) + local_image.create(image_params) + self.trash.append(local_image) + + def _export_local_image_with_nbd(self): + self._nbd_export = QemuNBDExportImage(self.params, + self.params["local_image_tag"]) + self._nbd_export.export_image() + + def prepare_test(self): + try: + self._create_local_image() + self._export_local_image_with_nbd() + super(BlockdevMirrorFirewallTest, self).prepare_test() + except Exception: + self.clean_images() + raise + + def add_target_data_disks(self): + tag = 
self._target_images[0] + devices = self.main_vm.devices.images_define_by_params( + tag, self.params.object_params(tag), 'disk') + devices.pop() # ignore the front end device + + for dev in devices: + ret = self.main_vm.devices.simple_hotplug(dev, + self.main_vm.monitor) + if not ret[1]: + self.test.fail("Failed to hotplug '%s': %s." + % (dev, ret[0])) + + def _run_iptables(self, cmd): + cmd = cmd.format( + s=self.params['nbd_server_%s' % self.params['nbd_image_tag']]) + result = process.run(cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + self.test.error('command error: %s' % result.stderr.decode()) + + def break_net_with_iptables(self): + self._run_iptables(self.params['net_break_cmd']) + self._net_down = True + + def resume_net_with_iptables(self): + self._run_iptables(self.params['net_resume_cmd']) + self._net_down = False + + def clean_images(self): + # recover nbd image access + if self._net_down: + self.resume_net_with_iptables() + + # stop nbd image export + self._nbd_export.stop_export() + + super(BlockdevMirrorFirewallTest, self).clean_images() + + def do_test(self): + self.blockdev_mirror() + self.check_block_jobs_started( + self._jobs, self.params.get_numeric('mirror_started_timeout', 10)) + self.break_net_with_iptables() + self.check_block_jobs_paused( + self._jobs, self.params.get_numeric('mirror_paused_interval', 50)) + self.resume_net_with_iptables() + self.check_block_jobs_running( + self._jobs, self.params.get_numeric('mirror_resmued_timeout', 200)) + self.wait_mirror_jobs_completed() + self.check_mirrored_block_nodes_attached() + self.clone_vm_with_mirrored_images() + self.verify_data_files() + + +def run(test, params, env): + """ + Block mirror with firewall test + + test steps: + 1. boot VM with 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. create a 2G local fs image, exported with qemu-nbd + 5. add the nbd image for mirror to VM via qmp commands + 6. do blockdev-mirror + 7. insert a rule with iptables to drop all packets from + the port to which the nbd image was bound + 8. check mirror paused (offset should not change) + 9. remove the rule with iptables + 10. check mirror job resumed (offset should increase) + 11. wait till mirror job done + 12. check the mirror disk is attached + 13. restart VM with the mirror disk + 14. 
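The net_break_cmd/net_resume_cmd values substituted into _run_iptables come from the cfg file and are not part of this diff; one plausible shape, dropping traffic to qemu-nbd's default port so in-flight mirror I/O stalls (the port number and rule direction are assumptions):

# {s} is substituted with the NBD server address by _run_iptables above
NBD_PORT = 10809  # qemu-nbd's default listening port

net_break_cmd = ("iptables -I OUTPUT -d {s} -p tcp"
                 " --dport %d -j DROP" % NBD_PORT)
net_resume_cmd = ("iptables -D OUTPUT -d {s} -p tcp"
                  " --dport %d -j DROP" % NBD_PORT)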
check the file and its md5sum + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorFirewallTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_hotunplug.py b/qemu/tests/blockdev_mirror_hotunplug.py new file mode 100644 index 0000000000000000000000000000000000000000..a2f28fc677f61f3007ca64a048a284be8ceb78b2 --- /dev/null +++ b/qemu/tests/blockdev_mirror_hotunplug.py @@ -0,0 +1,72 @@ +from virttest.qemu_monitor import QMPCmdError + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest + + +class BlockdevMirrorHotunplugTest(BlockdevMirrorNowaitTest): + """ + Block mirror with hotunplug test + """ + + def hotunplug_frontend_devices(self): + """ + device_del the frontend devices during mirroring, + the devices CAN be removed without any issue + """ + def _device_del(device): + self.main_vm.monitor.cmd('device_del', {'id': device}) + if 'qdev: %s' % device in self.main_vm.monitor.cmd("query-block"): + self.test.fail('Failed to hotunplug the frontend device') + + list(map(_device_del, self._source_images)) + + def hotunplug_format_nodes(self): + """ + blockdev-del the format nodes during mirroring, + the nodes CANNOT be removed for they are busy + """ + def _blockdev_del(node): + try: + self.main_vm.monitor.cmd('blockdev-del', {'node-name': node}) + except QMPCmdError as e: + err = self.params['block_node_busy_error'] % node + if err not in str(e): + self.test.fail('Unexpected error: %s' % str(e)) + else: + self.test.fail('blockdev-del succeeded unexpectedly') + + list(map(_blockdev_del, self._source_nodes)) + + def do_test(self): + self.blockdev_mirror() + self.check_block_jobs_started(self._jobs) + self.hotunplug_frontend_devices() + self.hotunplug_format_nodes() + self.check_block_jobs_running(self._jobs) + self.wait_mirror_jobs_completed() + self.clone_vm_with_mirrored_images() + self.verify_data_files() + + +def run(test, params, env): + """ + Block mirror with hotunplug test + + test steps: + 1. boot VM with a 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. add a target disk for mirror to VM via qmp commands + 5. do block-mirror with sync mode full + 6. hotunplug the frontend device with device_del (OK) + 7. hotunplug the format node with blockdev-del (ERROR) + 8. check mirror continues and wait mirror done + 9. restart VM with the mirror disk + 10. 
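The two unplug paths exercised in steps 6-7 of the hotunplug test boil down to two QMP commands; a payload sketch (the device id and node name are illustrative): device_del removes the guest-visible frontend and succeeds during the mirror, while blockdev-del on a format node still backing a running job is expected to fail with a node-in-use error.

import json

device_del = {"execute": "device_del",
              "arguments": {"id": "stg1"}}                 # frontend: succeeds
blockdev_del = {"execute": "blockdev-del",
                "arguments": {"node-name": "drive_stg1"}}  # busy node: fails

for payload in (device_del, blockdev_del):
    print(json.dumps(payload))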
check the file and its md5 + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorHotunplugTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_install.py b/qemu/tests/blockdev_mirror_install.py new file mode 100644 index 0000000000000000000000000000000000000000..d089868be3d01e5c6c4d661ae2c8f2f70d3b31d5 --- /dev/null +++ b/qemu/tests/blockdev_mirror_install.py @@ -0,0 +1,97 @@ +import logging +import random +import re +import time + +from virttest import utils_test +from virttest import utils_misc +from virttest.tests import unattended_install + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest + + +class BlockdevMirrorVMInstallTest(BlockdevMirrorNowaitTest): + """ + Block mirror test with VM installation + """ + + def _is_install_started(self, start_msg): + # get_output can return None + out = self.main_vm.serial_console.get_output() \ + if self.main_vm.serial_console else None + out = '' if out is None else out + return bool(re.search(start_msg, out, re.M)) + + def _install_vm_in_background(self): + """Install VM in background""" + self.main_vm = self.env.get_vm(self.params["main_vm"]) + args = (self.test, self.params, self.env) + self._bg = utils_test.BackgroundTest(unattended_install.run, args) + self._bg.start() + + logging.info("Wait till '%s'" % self.params["tag_for_install_start"]) + if utils_misc.wait_for( + lambda: self._is_install_started( + self.params["tag_for_install_start"]), + int(self.params.get("timeout_for_install_start", 360)), 10, 5): + logging.info("Sleep some time before block-mirror") + time.sleep(random.randint(10, 120)) + else: + self.test.fail("Failed to start VM installation") + + def _wait_installation_done(self): + # Installation on remote storage may take too much time, + # we keep the same timeout with the default used in VT + self._bg.join( + timeout=int(self.params.get("install_timeout", 4800))) + if self._bg.is_alive(): + self.test.fail("VM installation timed out") + + def _check_clone_vm_login(self): + """Make sure the VM can be well accessed""" + session = self.clone_vm.wait_for_login() + session.close() + + def prepare_test(self): + self._install_vm_in_background() + self.add_target_data_disks() + + def clone_vm_with_mirrored_images(self): + # Disable installation settings + cdrom = self.main_vm.params.objects("cdroms")[0] + self.main_vm.params["cdroms"] = cdrom + self.main_vm.params["boot_once"] = "c" + for opt in ["cdrom_%s" % cdrom, "boot_path", + "kernel_params", "kernel", "initrd"]: + self.main_vm.params[opt] = "" + + super(BlockdevMirrorVMInstallTest, + self).clone_vm_with_mirrored_images() + + def do_test(self): + self.blockdev_mirror() + self._wait_installation_done() + self.wait_mirror_jobs_completed() + self.check_mirrored_block_nodes_attached() + self.clone_vm_with_mirrored_images() + self._check_clone_vm_login() + + +def run(test, params, env): + """ + Block mirror with VM installation + + test steps: + 1. Install VM + 2. add a target disk for mirror to VM via qmp commands + 3. do block-mirror + 4. check the mirror disk is attached + 5. restart VM with the mirror disk + 6. 
log into VM to make sure VM can be accessed + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorVMInstallTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_multiple_blocks.py b/qemu/tests/blockdev_mirror_multiple_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..6e2e1a09e0dc0bd721f26ad88d6c6d3c60d746bc --- /dev/null +++ b/qemu/tests/blockdev_mirror_multiple_blocks.py @@ -0,0 +1,50 @@ +import re + +from virttest import utils_disk +from virttest import utils_misc + +from provider.blockdev_mirror_parallel import BlockdevMirrorParallelTest + + +class BlockdevMirrorMultipleBlocksTest(BlockdevMirrorParallelTest): + """do block-mirror for multiple disks in parallel""" + + def _get_data_disk_info(self, tag, session): + """Get the disk id and size by serial or wwn in linux""" + disk_params = self.params.object_params(tag) + extra_params = disk_params["blk_extra_params"] + drive_id = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) + drive_path = utils_misc.get_linux_drive_path(session, drive_id) + return drive_path[5:], disk_params["image_size"] + + def format_data_disk(self, tag): + session = self.main_vm.wait_for_login() + try: + disk_id, disk_size = self._get_data_disk_info(tag, session) + mnt = utils_disk.configure_empty_linux_disk(session, + disk_id, + disk_size)[0] + self.disks_info[tag] = ["/dev/%s1" % disk_id, mnt] + finally: + session.close() + + +def run(test, params, env): + """ + Multiple block mirror simultaneously + + test steps: + 1. boot VM with two 2G data disks + 2. format data disks and mount it + 3. create a file on both disks + 4. add target disks for mirror to VM via qmp commands + 4. do block-mirror for both disks in parallel + 5. check mirrored disks are attached + 6. restart VM with mirrored disks, check files and md5sum + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorMultipleBlocksTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_readonly.py b/qemu/tests/blockdev_mirror_readonly.py new file mode 100644 index 0000000000000000000000000000000000000000..60891cd08a2fed2bc47c1afe8e27fbc34cf08dee --- /dev/null +++ b/qemu/tests/blockdev_mirror_readonly.py @@ -0,0 +1,50 @@ +import re + +from aexpect import ShellCmdError + +from provider.blockdev_mirror_wait import BlockdevMirrorWaitTest + + +class BlockdevMirrorReadonlyDeviceTest(BlockdevMirrorWaitTest): + """ + Block mirror on readonly device test + """ + + def prepare_test(self): + self.preprocess_data_disks() + self.prepare_main_vm() + self.add_target_data_disks() + + def check_mirrored_block_nodes_readonly(self): + for tag in self.params.objects("source_images"): + try: + self.format_data_disk(tag) + except ShellCmdError as e: + if not re.search(self.params['error_msg'], str(e), re.M): + self.test.fail("Unexpected disk format error: %s" % str(e)) + else: + self.test.fail("Unexpected disk format success") + + def do_test(self): + self.blockdev_mirror() + self.check_mirrored_block_nodes_attached() + self.check_mirrored_block_nodes_readonly() + + +def run(test, params, env): + """ + Block mirror on readonly device test + + test steps: + 1. boot VM with a 2G data disk(readonly=on) + 2. add a target disk for mirror to VM via qmp commands + 3. do full block-mirror + 4. check the mirror disk is attached + 5. 
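The readonly check below reduces to "formatting the mirrored device must fail"; a sketch of the same check run inside a Linux guest (the device path is an assumption):

import subprocess

dev = "/dev/sdb"  # hypothetical read-only data disk
res = subprocess.run(["mkfs.ext4", "-F", dev], capture_output=True, text=True)
assert res.returncode != 0, "format succeeded on a read-only device"
print("format refused as expected: %s" % res.stderr.strip()[:80])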
check the mirror disk is readonly + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorReadonlyDeviceTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_ready_vm_down.py b/qemu/tests/blockdev_mirror_ready_vm_down.py new file mode 100644 index 0000000000000000000000000000000000000000..36f7b312ccff74f5287b11d168da8749f04fa571 --- /dev/null +++ b/qemu/tests/blockdev_mirror_ready_vm_down.py @@ -0,0 +1,67 @@ +import time + +from provider import job_utils + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest + + +class BlockdevMirrorReadyVMDownTest(BlockdevMirrorNowaitTest): + """ + VM poweroff when mirror job is ready + """ + + def poweroff_vm(self): + self.main_vm.monitor.system_powerdown() + + def wait_mirror_jobs_ready(self): + def _wait_mirror_job_ready(jobid): + tmo = self.params.get_numeric('job_ready_timeout', 600) + job_utils.wait_until_job_status_match(self.main_vm, 'ready', + jobid, tmo) + list(map(_wait_mirror_job_ready, self._jobs)) + + def wait_mirror_jobs_auto_completed(self): + """job completed automatically after vm poweroff""" + def _wait_mirror_job_completed(jobid): + tmo = self.params.get_numeric('job_completed_timeout', 200) + for i in range(tmo): + events = self.main_vm.monitor.get_events() + completed_events = [e for e in events if e.get( + 'event') == job_utils.BLOCK_JOB_COMPLETED_EVENT] + job_events = [e for e in completed_events if e.get( + 'data') and jobid in (e['data'].get('id'), e['data'].get('device'))] + if job_events: + break + time.sleep(1) + else: + self.test.fail('job complete event never received in %s' % tmo) + + list(map(_wait_mirror_job_completed, self._jobs)) + + def do_test(self): + self.blockdev_mirror() + self.wait_mirror_jobs_ready() + self.poweroff_vm() + self.wait_mirror_jobs_auto_completed() + + +def run(test, params, env): + """ + VM poweroff when mirror job is ready + + test steps: + 1. boot VM with 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. add a local fs image for mirror to VM via qmp commands + 5. do blockdev-mirror + 6. wait till mirror job is ready + 7. poweroff vm + 8. 
check mirror job completed + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorReadyVMDownTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_simple.py b/qemu/tests/blockdev_mirror_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..113ebcd1a03d82a48074c43c66b362dcc2044752 --- /dev/null +++ b/qemu/tests/blockdev_mirror_simple.py @@ -0,0 +1,57 @@ +import random + +from virttest import utils_numeric + +from provider.blockdev_mirror_wait import BlockdevMirrorWaitTest + + +class BlockdevMirrorSimpleTest(BlockdevMirrorWaitTest): + """ + Block mirror simple test: + granularity, buf-size + """ + + def __init__(self, test, params, env): + self._set_granularity(params) + self._set_bufsize(params) + super(BlockdevMirrorSimpleTest, self).__init__(test, params, env) + + def _set_granularity(self, params): + granularities = params.objects("granularity_list") + granularity = random.choice( + granularities) if granularities else params.get("granularity") + + if granularity: + params["granularity"] = int( + utils_numeric.normalize_data_size(granularity, "B") + ) + + def _set_bufsize(self, params): + factors = params.objects("buf_size_factor_list") + if factors: + params["buf-size"] = int( + random.choice(factors) + ) * params["granularity"] + + +def run(test, params, env): + """ + Block mirror granularity test + + test steps: + 1. boot VM with a 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. add a target disk for mirror to VM via qmp commands + 5. do block-mirror with some options: + granularity/buf-size + 6. check the mirror disk is attached + 7. restart VM with the mirror disk + 8. check the file and its md5sum + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorSimpleTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_speed.py b/qemu/tests/blockdev_mirror_speed.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c788695151125f6b94491a9912e80d1b3b2846 --- /dev/null +++ b/qemu/tests/blockdev_mirror_speed.py @@ -0,0 +1,87 @@ +from functools import partial + +from virttest.qemu_monitor import QMPCmdError + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest + + +class BlockdevMirrorSpeedTest(BlockdevMirrorNowaitTest): + """ + blockdev-mirror speed test + """ + + def test_invalid_speeds(self): + """ + Set an invalid speed, make sure we can get the proper error message + """ + def _set_invalid_speed(jobid, speed, error_msg): + try: + self.main_vm.monitor.cmd( + "block-job-set-speed", {'device': jobid, 'speed': speed}) + except QMPCmdError as e: + if error_msg not in str(e): + self.test.fail('Unexpected error: %s' % str(e)) + else: + self.test.fail('block-job-set-speed %s succeeded unexpectedly' + % speed) + + def _invalid_speed_error_tuple(speed): + if '-' in speed: # a negative int + return int(speed), self.params['error_msg_negative'] + elif '.' 
in speed: # a float number + return float(speed), self.params['error_msg'] + else: # a string + return speed, self.params['error_msg'] + + for speed in self.params.objects('invalid_speeds'): + s, m = _invalid_speed_error_tuple(speed) + func = partial(_set_invalid_speed, speed=s, error_msg=m) + list(map(func, self._jobs)) + + def test_valid_speeds(self): + """ + Set a valid speed, make sure mirror job can go on without any issue + """ + def _set_valid_speed(jobid, speed): + self.main_vm.monitor.cmd( + "block-job-set-speed", {'device': jobid, 'speed': speed}) + + for speed in self.params.objects('valid_speeds'): + func = partial(_set_valid_speed, speed=int(speed)) + list(map(func, self._jobs)) + self.check_block_jobs_running( + self._jobs, + self.params.get_numeric('mirror_running_timeout', 60) + ) + + def do_test(self): + self.blockdev_mirror() + self.check_block_jobs_started( + self._jobs, self.params.get_numeric('mirror_started_timeout', 10)) + self.test_invalid_speeds() + self.test_valid_speeds() + self.wait_mirror_jobs_completed() + self.check_mirrored_block_nodes_attached() + + +def run(test, params, env): + """ + blockdev-mirror speed test + + test steps: + 1. boot VM with 2G data disk + 2. format the data disk and mount it + 3. create a file + 4. add a local fs image for mirror to VM via qmp commands + 5. do blockdev-mirror + 6. set an invalid speed, check error msg + 7. set a valid speed, check mirror job is running + 8. wait till mirror job completed + 9. check mirror nodes attached + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorSpeedTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_stress.py b/qemu/tests/blockdev_mirror_stress.py new file mode 100644 index 0000000000000000000000000000000000000000..0d3d7edf9b465a1902fd634c66ca7d8c22b76d44 --- /dev/null +++ b/qemu/tests/blockdev_mirror_stress.py @@ -0,0 +1,46 @@ +from virttest import utils_test + +from provider.blockdev_mirror_parallel import BlockdevMirrorParallelTest + + +class BlockdevMirrorStressTest(BlockdevMirrorParallelTest): + """Do block-mirror and vm stress test in parallel""" + + def stress_test(self): + """Run stress testing on vm""" + self.stress = utils_test.VMStress(self.main_vm, "stress", self.params) + self.stress.load_stress_tool() + + def check_stress_running(self): + """stress should be running after block-mirror""" + if not self.stress.app_running(): + self.test.fail("stress stopped unexpectedly") + + def do_test(self): + self.blockdev_mirror() + self.check_stress_running() + self.check_mirrored_block_nodes_attached() + self.clone_vm_with_mirrored_images() + self.verify_data_files() + self.remove_files_from_system_image() + + +def run(test, params, env): + """ + Basic block mirror test with stress -- only system disk + + test steps: + 1. boot VM + 2. create a file on system disk + 3. add a target disk for mirror to VM via qmp commands + 4. do block-mirror for system disk and vm stress test in parallel + 5. check the mirrored disk is attached + 6. check stress is still running + 7. 
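For the speed test above, the set-speed calls are plain QMP; a payload sketch (the job id is illustrative): a speed of 0 means unlimited, while negative values and non-integers are rejected with the error messages the test matches.

import json

for speed in (-1, "10.5", 0, 1024 * 1024):
    print(json.dumps({"execute": "block-job-set-speed",
                      "arguments": {"device": "mirror_job0",
                                    "speed": speed}}))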
restart VM with the mirrored disk, check the file and md5sum
+
+    :param test: test object
+    :param params: test configuration dict
+    :param env: env object
+    """
+    mirror_test = BlockdevMirrorStressTest(test, params, env)
+    mirror_test.run_test()
diff --git a/qemu/tests/blockdev_mirror_sync_none.py b/qemu/tests/blockdev_mirror_sync_none.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fcbd988521de46b1139652b93685f7e247f0dd9
--- /dev/null
+++ b/qemu/tests/blockdev_mirror_sync_none.py
@@ -0,0 +1,87 @@
+import time
+
+from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest
+
+
+class BlockdevMirrorSyncModeNoneTest(BlockdevMirrorNowaitTest):
+    """Block mirror with sync mode:none"""
+
+    def _verify_file_not_exist(self, dir_list, nonexistent_files):
+        session = self.clone_vm.wait_for_login()
+        try:
+            for idx, f in enumerate(nonexistent_files):
+                file_path = "%s/%s" % (dir_list[idx], f)
+                ls_cmd = "ls %s" % file_path
+
+                s, o = session.cmd_status_output(ls_cmd)
+                if s == 0:
+                    self.test.fail('File (%s) exists' % f)
+                elif 'No such file' not in o.strip():
+                    self.test.fail('Unknown error: %s' % o)
+        finally:
+            session.close()
+
+    def verify_data_files(self):
+        dir_list = [self.disks_info[t][1] for t in self._source_images]
+        nonexistent_files = [self.files_info[t].pop(0)
+                             for t in self._source_images]
+
+        # the second file should exist
+        super(BlockdevMirrorSyncModeNoneTest, self).verify_data_files()
+
+        # the first file should not exist
+        self._verify_file_not_exist(dir_list, nonexistent_files)
+
+    def generate_inc_files(self):
+        return list(map(self.generate_data_file, self._source_images))
+
+    def wait_mirror_jobs_completed(self):
+        # Sleep some time to wait for block-mirror to finish. Note that
+        # block-mirror with sync mode "none" is quite different from the
+        # other modes: the job status turns into 'READY' very quickly after
+        # a new file is created, and sometimes current-progress equals
+        # total-progress while the mirror is in fact still running. This
+        # is expected.
+        time.sleep(int(self.params.get("sync_none_mirror_timeout", "20")))
+        super(BlockdevMirrorSyncModeNoneTest,
+              self).wait_mirror_jobs_completed()
+
+    def reboot_vm(self):
+        """
+        Reboot VM to make sure the data is flushed to disk, then data
+        generated after block-mirror is copied.
+        Note: 'dd oflag=direct/sync' cannot guarantee data is flushed.
+        """
+        self.main_vm.reboot(method="system_reset")
+
+    def do_test(self):
+        self.reboot_vm()
+        self.blockdev_mirror()
+        self.generate_inc_files()
+        self.wait_mirror_jobs_completed()
+        self.check_mirrored_block_nodes_attached()
+        self.clone_vm_with_mirrored_images()
+        self.verify_data_files()
+
+
+def run(test, params, env):
+    """
+    Block mirror with sync mode:none
+
+    test steps:
+    1. boot VM with a 2G data disk
+    2. format the data disk and mount it
+    3. create a file
+    4. add a target disk for mirror to VM via qmp commands
+    5. do block-mirror with sync mode none
+    6. create another file
+    7. check the mirror disk is attached
+    8. restart VM with the mirror disk
+    9.
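A payload sketch of the sync=none mirror above (node and job names are illustrative); with this mode only writes that land on the source after the job starts are copied, which is why the pre-existing file must not reach the target:

import json

print(json.dumps({"execute": "blockdev-mirror",
                  "arguments": {"job-id": "mirror0",
                                "device": "drive_stg1",
                                "target": "drive_mirror1",
                                "sync": "none"}}))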
the first file doesn't exist while the second one exists + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + mirror_test = BlockdevMirrorSyncModeNoneTest(test, params, env) + mirror_test.run_test() diff --git a/qemu/tests/blockdev_mirror_sync_top.py b/qemu/tests/blockdev_mirror_sync_top.py new file mode 100644 index 0000000000000000000000000000000000000000..17dc3208034aae45aa4c07dd6eb6f89ecb592eab --- /dev/null +++ b/qemu/tests/blockdev_mirror_sync_top.py @@ -0,0 +1,175 @@ +from virttest.qemu_devices.qdevices import QBlockdevFormatNode + +from provider import backup_utils + +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest + + +class BlockdevMirrorSyncTopTest(BlockdevMirrorNowaitTest): + """ + Block mirror test with sync mode top + """ + + def __init__(self, test, params, env): + super(BlockdevMirrorSyncTopTest, self).__init__(test, params, env) + + # convert source images to convert images + self._convert_images = params.objects("convert_images") + self._convert_nodes = ["drive_%s" % + src for src in self._convert_images] + + # mirror snapshot images of source images to target images + self._snap_images = params.objects("snap_images") + self._snap_nodes = ["drive_%s" % src for src in self._snap_images] + + def _create_images(self, images): + for tag in images: + disk = self.source_disk_define_by_params(self.params, tag) + disk.create(self.params) + self.trash.append(disk) + + def create_convert_images(self): + """create convert images used for converting source images""" + self._create_images(self._convert_images) + + def create_snapshot_images(self): + """create snapshot images of data images""" + self._create_images(self._snap_images) + + def _blockdev_add_images(self, images, is_backing_null=False): + for tag in images: + params = self.params.object_params(tag) + devices = self.main_vm.devices.images_define_by_params(tag, + params, + 'disk') + devices.pop() + for dev in devices: + if self.main_vm.devices.get_by_qid(dev.get_qid()): + continue + if isinstance(dev, QBlockdevFormatNode) and is_backing_null: + dev.params["backing"] = None + ret = self.main_vm.devices.simple_hotplug(dev, + self.main_vm.monitor) + if not ret[1]: + self.test.fail("Failed to hotplug '%s': %s." 
+                                   % (dev, ret[0]))
+
+    def add_convert_images(self):
+        """blockdev-add convert images: protocol and format nodes only"""
+        self._blockdev_add_images(self._convert_images)
+
+    def add_snapshot_images(self):
+        """blockdev-add snapshot images: protocol and format nodes only"""
+        self._blockdev_add_images(self._snap_images, True)
+
+    def add_mirror_images(self):
+        """add mirror images where the snapshot images are mirrored"""
+        for tag in self._target_images:
+            disk = self.target_disk_define_by_params(
+                self.params.object_params(tag), tag)
+
+            # overlay must not have a current backing file,
+            # achieved by passing "backing": null to blockdev-add
+            disk.format.params["backing"] = None
+
+            disk.hotplug(self.main_vm)
+            self.trash.append(disk)
+
+    def mirror_data_snapshots_to_mirror_images(self):
+        """mirror snapshot images to the mirror images"""
+        args = {"sync": "top"}
+        for idx, source_node in enumerate(self._snap_nodes):
+            self._jobs.append(
+                backup_utils.blockdev_mirror_nowait(
+                    self.main_vm, source_node,
+                    self._target_nodes[idx],
+                    **args
+                )
+            )
+
+    def _blockdev_snapshot(self, nodes, overlays):
+        snapshot_options = {}
+        for idx, source_node in enumerate(nodes):
+            backup_utils.blockdev_snapshot(self.main_vm, source_node,
+                                           overlays[idx], **snapshot_options)
+
+    def take_snapshot_on_data_images(self):
+        """snapshot, node: data image node, overlay: snapshot nodes"""
+        self._blockdev_snapshot(self._source_nodes, self._snap_nodes)
+
+    def take_snapshot_on_convert_images(self):
+        """snapshot, node: convert image node, overlay: mirror nodes"""
+        self._blockdev_snapshot(self._convert_nodes, self._target_nodes)
+
+    def generate_inc_files(self):
+        return list(map(self.generate_data_file, self._source_images))
+
+    def convert_data_images(self):
+        """convert data images to the convert images"""
+        for idx, tag in enumerate(self._source_images):
+            convert_target = self._convert_images[idx]
+            convert_params = self.params.object_params(convert_target)
+            convert_params["convert_target"] = convert_target
+            img_obj = self.source_disk_define_by_params(self.params, tag)
+            img_obj.convert(convert_params, img_obj.root_dir)
+
+    def prepare_test(self):
+        self.preprocess_data_disks()
+        self.prepare_main_vm()
+        self.prepare_data_disks()
+        self.create_snapshot_images()
+        self.add_snapshot_images()
+
+    def do_test(self):
+        self.take_snapshot_on_data_images()
+        self.generate_inc_files()
+        self.create_convert_images()
+        self.add_mirror_images()
+        self.mirror_data_snapshots_to_mirror_images()
+        self.convert_data_images()
+        self.add_convert_images()
+        self.take_snapshot_on_convert_images()
+        self.wait_mirror_jobs_completed()
+        self.check_mirrored_block_nodes_attached()
+        self.clone_vm_with_mirrored_images()
+        self.verify_data_files()
+
+
+def run(test, params, env):
+    """
+    Block mirror test with sync mode top
+
+    images: data1, data1sn, convert1, convert1sn
+    operations: ---> snapshot; | convert (data1 side), mirror (data1sn side)
+
+        data1    --->    data1sn
+          |                 |
+          |                 |
+        convert1 --->    convert1sn
+
+    test steps:
+    1. boot VM with a 2G data disk
+    2. format the data disk and mount it, create a file
+    3. add a snapshot image(backing-file: data image),
+       whose backing node is null
+    4. take snapshot, node: data disk node, overlay: snapshot image node
+    5. generate a new file on data disk
+    6. create a convert image
+    7. hotplug a mirror image(backing-file: convert image),
+       whose backing node is null
+    8. mirror snapshot image to mirror image
+    9. convert data image to convert image
+    10. blockdev-add the convert image
+    11.
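The "backing node is null" wording in steps 3 and 7 above corresponds to adding the overlay with "backing": null, then wiring it up with blockdev-snapshot; a QMP payload sketch with illustrative node names:

import json

add_overlay = {"execute": "blockdev-add",
               "arguments": {"driver": "qcow2",
                             "node-name": "drive_data1sn",
                             "file": "file_data1sn",  # existing protocol node
                             "backing": None}}        # serialized as null
snapshot = {"execute": "blockdev-snapshot",
            "arguments": {"node": "drive_data1",
                          "overlay": "drive_data1sn"}}

for payload in (add_overlay, snapshot):
    print(json.dumps(payload))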
take snapshot, node: convert image node, overlay: mirror image node
+    12. wait blockdev-mirror done
+    13. restart VM with the mirror image
+    14. check both files and md5sum
+
+    :param test: test object
+    :param params: test configuration dict
+    :param env: env object
+    """
+    mirror_test = BlockdevMirrorSyncTopTest(test, params, env)
+    mirror_test.run_test()
diff --git a/qemu/tests/blockdev_mirror_vm_reboot.py b/qemu/tests/blockdev_mirror_vm_reboot.py
new file mode 100644
index 0000000000000000000000000000000000000000..54a812790ca85970c9a5a0dda18875a880040792
--- /dev/null
+++ b/qemu/tests/blockdev_mirror_vm_reboot.py
@@ -0,0 +1,29 @@
+from provider.blockdev_mirror_parallel import BlockdevMirrorParallelTest
+
+
+class BlockdevMirrorVMRebootTest(BlockdevMirrorParallelTest):
+    """do block-mirror and vm reboot in parallel"""
+
+    def reboot_vm(self):
+        """Reboot VM with qmp command"""
+        self.main_vm.reboot(method="system_reset")
+
+
+def run(test, params, env):
+    """
+    Basic block mirror during vm reboot -- only system disk
+
+    test steps:
+    1. boot VM
+    2. create a file on system disk
+    3. add a target disk for mirror to VM via qmp commands
+    4. do block-mirror for system disk and vm reboot in parallel
+    5. check the mirrored disk is attached
+    6. restart VM with the mirrored disk, check the file and md5sum
+
+    :param test: test object
+    :param params: test configuration dict
+    :param env: env object
+    """
+    mirror_test = BlockdevMirrorVMRebootTest(test, params, env)
+    mirror_test.run_test()
diff --git a/qemu/tests/blockdev_mirror_vm_stop_cont.py b/qemu/tests/blockdev_mirror_vm_stop_cont.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bf93a733f62a285534d0712208ec0d81dc96c12
--- /dev/null
+++ b/qemu/tests/blockdev_mirror_vm_stop_cont.py
@@ -0,0 +1,35 @@
+import time
+import random
+
+from provider.blockdev_mirror_parallel import BlockdevMirrorParallelTest
+
+
+class BlockdevMirrorVMStopContTest(BlockdevMirrorParallelTest):
+    """do block-mirror and vm stop/cont in parallel"""
+
+    def stop_cont_vm(self):
+        """Stop VM for a while, then resume it"""
+        self.main_vm.pause()
+        t = int(random.choice(self.params.objects('vm_stop_time_list')))
+        time.sleep(t)
+        self.main_vm.resume()
+
+
+def run(test, params, env):
+    """
+    Basic block mirror during vm stop_cont -- only system disk
+
+    test steps:
+    1. boot VM
+    2. create a file on system disk
+    3. add a target disk for mirror to VM via qmp commands
+    4. do block-mirror for system disk and vm stop/continue in parallel
+    5. check the mirrored disk is attached
+    6.
restart VM with the mirrored disk, check the file and md5sum
+
+    :param test: test object
+    :param params: test configuration dict
+    :param env: env object
+    """
+    mirror_test = BlockdevMirrorVMStopContTest(test, params, env)
+    mirror_test.run_test()
diff --git a/qemu/tests/blockdev_snapshot_install.py b/qemu/tests/blockdev_snapshot_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6f09a98b9633c663af7b59be693e5349fceea04
--- /dev/null
+++ b/qemu/tests/blockdev_snapshot_install.py
@@ -0,0 +1,59 @@
+import time
+import logging
+import random
+import re
+
+from virttest import utils_test
+from virttest import utils_misc
+from virttest.tests import unattended_install
+
+from provider.blockdev_snapshot_base import BlockDevSnapshotTest
+
+
+def run(test, params, env):
+    """
+    Do live snapshot during guest installation
+
+    1) Install guest
+    2) Do snapshot during guest installation
+    3) Rebase snapshot to base after installation finished
+    4) Start guest with snapshot
+    :param test: test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+    """
+    def tag_for_install(vm, tag):
+        if vm.serial_console:
+            serial_output = vm.serial_console.get_output()
+            if serial_output and re.search(tag, serial_output, re.M):
+                return True
+        logging.info("VM has not started yet")
+        return False
+
+    base_image = params.get("images", "image1").split()[0]
+    params.update(
+        {"image_format_%s" % base_image: params["image_format"]})
+    snapshot_test = BlockDevSnapshotTest(test, params, env)
+    args = (test, params, env)
+    bg = utils_test.BackgroundTest(unattended_install.run, args)
+    bg.start()
+    if bg.is_alive():
+        tag = params["tag_for_install_start"]
+        if utils_misc.wait_for(lambda: tag_for_install(snapshot_test.main_vm, tag), 120, 10, 5):
+            logging.info("Sleep random time before doing snapshot")
+            time.sleep(random.randint(120, 600))
+            snapshot_test.pre_test()
+            try:
+                snapshot_test.create_snapshot()
+                bg.join(timeout=1200)
+                snapshot_test.verify_snapshot()
+                snapshot_test.clone_vm.wait_for_login()
+            finally:
+                snapshot_test.post_test()
+        else:
+            test.fail("Failed to install guest")
+    else:
+        test.fail("Background process: installation not started")
diff --git a/qemu/tests/blockdev_stream_install.py b/qemu/tests/blockdev_stream_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..61011d468e8a532850dda0dafc8f4d8e3074f578
--- /dev/null
+++ b/qemu/tests/blockdev_stream_install.py
@@ -0,0 +1,96 @@
+import logging
+import random
+import re
+import time
+
+from virttest import utils_test
+from virttest import utils_misc
+from virttest.tests import unattended_install
+
+from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest
+
+
+class BlockdevStreamVMInstallTest(BlockdevStreamNowaitTest):
+    """
+    Block stream test with VM installation
+    """
+
+    def _is_install_started(self, start_msg):
+        # get_output can return None
+        out = self.main_vm.serial_console.get_output() \
+            if self.main_vm.serial_console else None
+        out = '' if out is None else out
+        return bool(re.search(start_msg, out, re.M))
+
+    def _install_vm_in_background(self):
+        """Install VM in background"""
+        self.main_vm = self.env.get_vm(self.params["main_vm"])
+        args = (self.test, self.params, self.env)
+        self._bg = utils_test.BackgroundTest(unattended_install.run, args)
+        self._bg.start()
+
+        logging.info("Wait till '%s'" % self.params["tag_for_install_start"])
+        if utils_misc.wait_for(
+                lambda: self._is_install_started(
+                    self.params["tag_for_install_start"]),
+ int(self.params.get("timeout_for_install_start", 360)), 10, 5): + logging.info("Sleep some time before block-stream") + time.sleep(random.randint(10, 120)) + else: + self.test.fail("Failed to start VM installation") + + def _wait_installation_done(self): + # Keep the same timeout with the default used in VT + self._bg.join( + timeout=int(self.params.get("install_timeout", 4800))) + if self._bg.is_alive(): + self.test.fail("VM installation timed out") + + def _check_clone_vm_login(self): + """Make sure the VM can be well accessed""" + session = self.clone_vm.wait_for_login() + session.close() + + def pre_test(self): + self._install_vm_in_background() + self.prepare_snapshot_file() + + def _clone_vm_with_snapshot_image(self): + if self.main_vm.is_alive(): + self.main_vm.destroy() + + # Disable installation settings + cdrom = self.main_vm.params.objects("cdroms")[0] + self.clone_vm.params["cdroms"] = cdrom + self.clone_vm.params["boot_once"] = "c" + for opt in ["cdrom_%s" % cdrom, "boot_path", + "kernel_params", "kernel", "initrd"]: + self.clone_vm.params[opt] = "" + + self.clone_vm.create() + self.clone_vm.verify_alive() + + def do_test(self): + self.snapshot_test() + self.blockdev_stream() + self._wait_installation_done() + self._clone_vm_with_snapshot_image() + self._check_clone_vm_login() + + +def run(test, params, env): + """ + Block stream with VM installation + test steps: + 1. Install VM on system image + 2. add a snapshot image for system image + 3. take snapshot on system image + 4. do block-stream + 5. wait till stream and installation done + 6. restart VM with the snapshot disk + 7. log into the VM + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamVMInstallTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/blockdev_stream_multiple_blocks.py b/qemu/tests/blockdev_stream_multiple_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..db076045d19a925e5091787b1d7600ae2e032c2e --- /dev/null +++ b/qemu/tests/blockdev_stream_multiple_blocks.py @@ -0,0 +1,113 @@ +import re +import logging + +from virttest import utils_disk +from virttest import utils_misc + +from provider import backup_utils + +from provider.blockdev_base import BlockdevBaseTest +from provider.blockdev_stream_parallel import BlockdevStreamParallelTest + + +class BlockdevStreamMultipleBlocksTest(BlockdevStreamParallelTest, + BlockdevBaseTest): + """Do block-stream for multiple disks in parallel""" + + def __init__(self, test, params, env): + super(BlockdevStreamMultipleBlocksTest, self).__init__(test, + params, + env) + self._source_images = self.params.objects("source_images") + self._snapshot_images = self.params.objects("snapshot_images") + self.disks_info = {} # tag, [dev, mount_point] + self.files_info = {} # tag, [file, file...] 
+ self.trash = [] + + def _get_data_disk_info(self, tag, session): + """Get the disk id and size by serial or wwn in linux""" + disk_params = self.params.object_params(tag) + extra_params = disk_params["blk_extra_params"] + drive_id = re.search(r"(serial|wwn)=(\w+)", + extra_params, re.M).group(2) + drive_path = utils_misc.get_linux_drive_path(session, drive_id) + return drive_path[5:], disk_params["image_size"] + + def format_data_disk(self, tag): + session = self.main_vm.wait_for_login() + try: + disk_id, disk_size = self._get_data_disk_info(tag, session) + mnt = utils_disk.configure_empty_linux_disk(session, + disk_id, + disk_size)[0] + self.disks_info[tag] = ["/dev/%s1" % disk_id, mnt] + finally: + session.close() + + def generate_inc_files(self): + """create another file on data disks""" + for tag in self._source_images: + self.generate_data_file(tag) + + def do_block_stream_on_another_image(self): + """block-stream on another image""" + arguments = {} + device = "drive_%s" % self.params.objects("snapshot_images")[-1] + backup_utils.blockdev_stream(self.main_vm, device, **arguments) + + def pre_test(self): + self.prepare_data_disks() + self.add_target_data_disks() + + def clone_vm_with_snapshots(self): + """clone vm with snapshots instead of the original data images""" + if self.main_vm.is_alive(): + self.main_vm.destroy() + + self.clone_vm.params["images"] = " ".join( + [self.clone_vm.params.objects("images")[0]] + self._snapshot_images + ) + self.clone_vm.create() + + def create_snapshots(self): + for idx, source in enumerate(self._source_images): + backup_utils.blockdev_snapshot( + self.main_vm, + "drive_%s" % source, + "drive_%s" % self._snapshot_images[idx] + ) + + def post_test(self): + try: + self.clone_vm.destroy() + self.clean_images() + except Exception as error: + logging.error(str(error)) + + def do_test(self): + self.create_snapshots() + self.generate_inc_files() + self.blockdev_stream() + self.clone_vm_with_snapshots() + self.verify_data_files() + + +def run(test, params, env): + """ + Do block-stream for multiple blocks simultaneously + + test steps: + 1. boot VM with two 2G data disks + 2. format data disks and mount it + 3. create a file on both disks + 4. add snapshot images for both data disks + 5. create another file on both disks + 6. do block-stream for both disks in parallel + 7. 
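Each stream job in the parallel test above is a single QMP call per snapshot node; a payload sketch (node names are illustrative; with no 'base' argument the whole backing chain is flattened into the active layer):

import json

for node in ("drive_sn1", "drive_sn2"):
    print(json.dumps({"execute": "blockdev-stream",
                      "arguments": {"device": node,
                                    "job-id": "stream_%s" % node}}))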
restart VM with snapshot disks, check all files and md5sum + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamMultipleBlocksTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/blockdev_stream_stress.py b/qemu/tests/blockdev_stream_stress.py new file mode 100644 index 0000000000000000000000000000000000000000..a2bd908142866c2c7e44dedb6e2801c8a4028c0d --- /dev/null +++ b/qemu/tests/blockdev_stream_stress.py @@ -0,0 +1,55 @@ +from virttest import utils_test + +from provider.blockdev_stream_base import BlockDevStreamTest + + +class BlockdevStreamStressTest(BlockDevStreamTest): + """Do block-stream with vm stress test""" + + def _run_stress_test(self): + """Run stress test before block-stream""" + self.stress = utils_test.VMStress(self.main_vm, "stress", self.params) + self.stress.load_stress_tool() + + def check_stress_running(self): + """stress should be running after block-stream""" + if not self.stress.app_running(): + self.test.fail("stress stopped unexpectedly") + + def pre_test(self): + super(BlockdevStreamStressTest, self).pre_test() + self._run_stress_test() + + def do_test(self): + self.snapshot_test() + self.blockdev_stream() + self.check_stress_running() + self.check_backing_file() + self.clone_vm.create() + self.verify_data_file() + self.clone_vm.destroy() + self.remove_files_from_system_image() + + +def run(test, params, env): + """ + Basic block stream test with stress + + test steps: + 1. boot VM + 2. add a snapshot image for system image + 3. run stress test on VM + 4. create a file on system image + 5. take snapshot for system image + 6. create another file on system image(the active snapshot image) + 7. do block-stream for system image and wait job done + 8. check stress is still running + 9. restart VM with the snapshot image, check both files and md5sum + 10. 
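The recurring "check both files and md5sum" steps amount to re-reading each test file in the restarted guest and re-verifying the checksum stored beside it. A hedged sketch of that pattern (file_path is hypothetical; the .md5 files are written with "md5sum FILE > FILE.md5" by the backup utilities):

    session = clone_vm.wait_for_login()
    # "md5sum -c" re-reads the file and compares it with the recorded sum
    if "OK" not in session.cmd_output("md5sum -c %s.md5" % file_path):
        test.fail("md5 mismatch after block-stream")
    session.close()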
remove testing files from system image + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamStressTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/blockdev_stream_subchain.py b/qemu/tests/blockdev_stream_subchain.py new file mode 100644 index 0000000000000000000000000000000000000000..678a9889e4f4d96447b570b8913eb27da8311bd7 --- /dev/null +++ b/qemu/tests/blockdev_stream_subchain.py @@ -0,0 +1,150 @@ +import json +import logging + +from virttest import data_dir +from virttest import qemu_storage + +from provider import backup_utils + +from provider.blockdev_stream_base import BlockDevStreamTest +from provider.virt_storage.storage_admin import sp_admin + + +class BlockdevStreamSubChainTest(BlockDevStreamTest): + """Do block-stream based on an existing snapshot in the snapshot chain""" + + def __init__(self, test, params, env): + super(BlockdevStreamSubChainTest, self).__init__(test, params, env) + self._snapshot_images = self.params.objects("snapshot_images") + self._base_node_tag = self.params["base_node_tag"] + self._trash = [] + + def snapshot_test(self): + """Create a file, then take snapshots, adding a new file after each""" + self.generate_tempfile(self.disks_info[0][1], + filename="base", + size=self.params["tempfile_size"]) + + # data->sn1->sn2->sn3 + chain = [self.base_tag] + self._snapshot_images + for idx in range(1, len(chain)): + backup_utils.blockdev_snapshot( + self.main_vm, + "drive_%s" % chain[idx-1], + "drive_%s" % chain[idx] + ) + + self.generate_tempfile(self.disks_info[0][1], + filename=chain[idx], + size=self.params["tempfile_size"]) + + def _disk_define_by_params(self, tag): + params = self.params.copy() + params.setdefault("target_path", data_dir.get_data_dir()) + return sp_admin.volume_define_by_params(tag, params) + + def prepare_snapshot_file(self): + """Hotplug all snapshot images""" + for tag in self._snapshot_images: + disk = self._disk_define_by_params(tag) + disk.hotplug(self.main_vm) + self._trash.append(disk) + + def _remove_images(self): + for img in self._trash: + sp_admin.remove_volume(img) + + def post_test(self): + try: + if self.clone_vm.is_alive(): + self.clone_vm.destroy() + self._remove_images() + except Exception as e: + logging.warning(str(e)) + + def _is_same_file(self, file_params, file_opts): + # FIXME: this should be supported in VT + mapping = {'gluster': 'path', 'iscsi': 'lun', + 'nbd': 'server.port', 'rbd': 'image'} + option = mapping.get(file_opts["driver"], 'filename') + return file_params[option] == file_opts[option] + + def _check_backing(self, backing): + data_image_opts = qemu_storage.filename_to_file_opts( + qemu_storage.QemuImg( + self.params.object_params(self.base_tag), + data_dir.get_data_dir(), self.base_tag + ).image_filename + ) + base_image_opts = qemu_storage.filename_to_file_opts( + qemu_storage.QemuImg( + self.params.object_params(self._base_node_tag), + data_dir.get_data_dir(), self._base_node_tag + ).image_filename + ) + + try: + # datasn1->datasn3: check datasn1 is datasn3's backing file + if not self._is_same_file(backing["file"], base_image_opts): + self.test.fail("Failed to get backing file for %s" + % self.snapshot_tag) + # data->datasn1: check data is datasn1's backing file + if not self._is_same_file(backing["backing"]["file"], + data_image_opts): + self.test.fail("Failed to get backing file for %s" + % self._base_node_tag) + except Exception as e: + self.test.fail("Failed to get backing chain: %s" % str(e)) + + def check_backing_chain(self):
+ """after block-stream, the backing chain: data->datasn1->dtasn3""" + out = self.main_vm.monitor.query("block") + for item in out: + if item["qdev"] == self.base_tag: + backing = item["inserted"].get("backing_file") + if not backing: + self.test.fail("Failed to get backing_file for qdev %s" + % self.base_tag) + backing_dict = json.loads(backing[5:]) + self._check_backing(backing_dict) + break + else: + self.test.fail("Failed to find %s" % self.base_tag) + + def clone_vm_with_snapshot(self): + if self.main_vm.is_alive(): + self.main_vm.destroy() + + # Add image_chain , then VT can add access secret objects + # in qemu-kvm command, qemu-kvm can access the backing files + self.clone_vm.params["image_chain_%s" % self.snapshot_tag] = "%s %s %s" % ( + self.base_tag, self._base_node_tag, self.snapshot_tag) + self.clone_vm.create() + + def do_test(self): + self.snapshot_test() + self.blockdev_stream() + self.check_backing_chain() + self.clone_vm_with_snapshot() + self.mount_data_disks() + self.verify_data_file() + + +def run(test, params, env): + """ + Basic block stream test with stress + test steps: + 1. boot VM with a data image + 2. add snapshot images + 3. create a file(base) on data image + 4. take snapshots(data->sn1->sn2->sn3), + take one snapshot, create one new file(snx, x=1,2,3) + 5. do block-stream (base-node:sn1, device: sn3) + 6. check backing chain(data->sn1->sn3) + 7. restart VM with the sn3, all files should exist + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamSubChainTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/blockdev_stream_vm_reboot.py b/qemu/tests/blockdev_stream_vm_reboot.py new file mode 100644 index 0000000000000000000000000000000000000000..f75fa4e499f3615b7ae0ee05a880dae4872e6724 --- /dev/null +++ b/qemu/tests/blockdev_stream_vm_reboot.py @@ -0,0 +1,35 @@ +from provider.blockdev_stream_parallel import BlockdevStreamParallelTest + + +class BlockdevStreamVMRebootTest(BlockdevStreamParallelTest): + """do block-stream and vm reboot in parallel""" + + def reboot_vm(self): + reboot_method = self.params.get("reboot_method", "system_reset") + self.main_vm.reboot(method=reboot_method) + + def do_test(self): + super(BlockdevStreamVMRebootTest, self).do_test() + self.clone_vm.destroy() + self.remove_files_from_system_image() + + +def run(test, params, env): + """ + Do block stream during vm reboot + + test steps: + 1. boot VM + 2. create a file on system disk + 3. add a snapshot disk, take snashot for system disk + 4. create another file + 5. do block-stream for system disk and vm reboot in parallel + 6. restart VM with the snapshot disk, check both files and md5sum + 7. 
remove testing files from system image + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamVMRebootTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/blockdev_stream_vm_stop_cont.py b/qemu/tests/blockdev_stream_vm_stop_cont.py new file mode 100644 index 0000000000000000000000000000000000000000..0de05d5e96ef1815a8673c75245ea3888ac11a23 --- /dev/null +++ b/qemu/tests/blockdev_stream_vm_stop_cont.py @@ -0,0 +1,42 @@ +import time +import random + +from provider.blockdev_stream_parallel import BlockdevStreamParallelTest + + +class BlockdevStreamVMStopContTest(BlockdevStreamParallelTest): + """do block-stream and vm stop/cont in parallel""" + + def stop_cont_vm(self): + """Stop VM for a while, then resume it""" + self.main_vm.pause() + t = int(random.choice(self.params.objects('vm_stop_time_list'))) + time.sleep(t) + self.main_vm.resume() + + def do_test(self): + super(BlockdevStreamVMStopContTest, self).do_test() + self.clone_vm.destroy() + self.remove_files_from_system_image() + + +def run(test, params, env): + """ + Basic block stream test during vm stop and cont + + test steps: + 1. boot VM + 2. add a snapshot image for the system disk + 3. create a file on system disk + 4. take snapshot for system disk + 5. create another file + 6. do block-stream(system->snapshot) and vm stop/continue in parallel + 7. restart VM with the snapshot disk, check both files and md5sum + 8. remove testing files from system image + + :param test: test object + :param params: test configuration dict + :param env: env object + """ + stream_test = BlockdevStreamVMStopContTest(test, params, env) + stream_test.run_test() diff --git a/qemu/tests/boot_1_N_virtserialports.py b/qemu/tests/boot_N_M_virtserialports.py similarity index 36% rename from qemu/tests/boot_1_N_virtserialports.py rename to qemu/tests/boot_N_M_virtserialports.py index 72948f435e0a14ac0143ed16b3cca3e4bfcbab5a..02a7f2a1e4aadee66bb93817e8e2d567592358aa 100644 --- a/qemu/tests/boot_1_N_virtserialports.py +++ b/qemu/tests/boot_N_M_virtserialports.py @@ -1,34 +1,52 @@ +import logging + from virttest import error_context from virttest import utils_test - +from virttest import env_process from qemu.tests.virtio_serial_file_transfer import transfer_data @error_context.context_aware def run(test, params, env): """ - Boot guest with virtio-serial-device with multiple virtserialport - - 1. Boot a guest with 1 virtio-serial-bus with 3 serial ports - 2. Transfer data from host to guest via port2, port3 - 3. Transfer data from guest to host via port2, port3 + Test guest with virtio-serial-device with multiple virtserialports + Scenario 1: + 1.1. Boot a guest with 1 virtio-serial-bus with 3 serial ports + 1.2. Transfer data via every port + Scenario 2: + 2.1. Start guest with 2 virtio-serial-pci, + 2.2. Each virtio-serial-pci has 3 virtio-serial-ports + 2.3. 
Transfer data via every port :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment + """ + if params.get("start_vm") == 'no': + num_bus = params.get_numeric("numberic_bus") + for i in range(2, num_bus + 1): + serial_name = 'vs%d' % i + params['serials'] = '%s %s' % (params.get('serials', ''), serial_name) + params['serial_type_%s' % serial_name] = "virtserialport" + params['serial_bus_%s' % serial_name] = "" + params['start_vm'] = "yes" + env_process.preprocess(test, params, env) + vm = env.get_vm(params['main_vm']) os_type = params["os_type"] - vm = env.get_vm(params["main_vm"]) - driver_name = params["driver_name"] - session = vm.wait_for_login() if os_type == "windows": + driver_name = params["driver_name"] + session = vm.wait_for_login() session = utils_test.qemu.windrv_check_running_verifier( session, vm, test, driver_name) - for port in params.objects("serials")[2:]: + session.close() + + for port in params.objects("serials"): port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virt'): + if not port_params['serial_type'].startswith('virtserial'): continue params['file_transfer_serial_port'] = port + error_context.context("Transfer data with %s" % port, logging.info) transfer_data(params, vm, sender='both') + vm.verify_alive() vm.verify_kernel_crash() diff --git a/qemu/tests/boot_e1000e_with_cpu_flag.py b/qemu/tests/boot_e1000e_with_cpu_flag.py new file mode 100644 index 0000000000000000000000000000000000000000..5daeec43f7e43644077d8f608f9dc58e47615e79 --- /dev/null +++ b/qemu/tests/boot_e1000e_with_cpu_flag.py @@ -0,0 +1,36 @@ +from virttest import error_context +from virttest import utils_net + + +@error_context.context_aware +def run(test, params, env): + """ + Boot a Windows guest and add a Vendor ID named "KVMKVMKVM" to the cpu model flags + + 1) Boot a vm with 'e1000e + hv_vendor_id=KVMKVMKVM' on q35 machine + 2) Run the bcdedit command as administrator + 3) Reboot guest by shell + 4) Do ping test + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment.
+ """ + login_timeout = params.get_numeric("login_timeout", 360) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session_serial = vm.wait_for_serial_login(timeout=login_timeout) + bcdedit_debug = params["bcdedit_debug"] + bcdedit_cmd = params["bcdedit_cmd"] + ext_host = params.get("ext_host") + + try: + session_serial.cmd(bcdedit_debug) + session_serial.cmd(bcdedit_cmd) + vm.reboot(timeout=login_timeout) + status, output = utils_net.ping(dest=ext_host, count=10, + session=session_serial, timeout=30) + if status: + test.fail("ping is failed, output %s" % output) + finally: + session_serial.close() diff --git a/qemu/tests/boot_from_nbd_image.py b/qemu/tests/boot_from_nbd_image.py new file mode 100644 index 0000000000000000000000000000000000000000..f22197010786280e8caabf6a90c0580a004dbf55 --- /dev/null +++ b/qemu/tests/boot_from_nbd_image.py @@ -0,0 +1,53 @@ +import socket + +from virttest import qemu_storage + +from avocado import fail_on + +from avocado.utils import process + +from provider import qemu_img_utils as img_utils + +from provider.nbd_image_export import QemuNBDExportImage + + +def run(test, params, env): + """ + 1) Create a local raw/qcow2/luks image + 2) Export it with qemu-nbd + 3) Convert remote system image to the exported nbd image + 4) Start VM from the exported image + 5) Log into VM + """ + def _convert_image(): + source = params['images'].split()[0] + target = params['convert_target'] + source_params = params.object_params(source) + target_params = params.object_params(target) + source_image = qemu_storage.QemuImg(source_params, None, source) + + # Convert source to target + fail_on((process.CmdError,))(source_image.convert)( + target_params, None, skip_target_creation=True) + + nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) + nbd_export.create_image() + nbd_export.export_image() + + # we only export image with local nbd server + localhost = socket.gethostname() + params['nbd_server_%s' % params['convert_target'] + ] = localhost if localhost else 'localhost' + + vm = None + try: + _convert_image() + vm = img_utils.boot_vm_with_images(test, params, env, + (params['convert_target'],)) + session = vm.wait_for_login( + timeout=params.get_numeric("login_timeout", 480)) + session.close() + finally: + if vm: + vm.destroy() + nbd_export.stop_export() diff --git a/qemu/tests/boot_from_remote.py b/qemu/tests/boot_from_remote.py new file mode 100644 index 0000000000000000000000000000000000000000..a164fc6a9286ab56c2423cfdee4a4e63a4b67c99 --- /dev/null +++ b/qemu/tests/boot_from_remote.py @@ -0,0 +1,153 @@ +import logging +import random +import re +import os + +from avocado.core import exceptions +from avocado.utils import process + +from virttest import error_context +from virttest import utils_misc +from virttest import env_process +from virttest import utils_numeric + + +@error_context.context_aware +def run(test, params, env): + """ + The following testing scenarios are covered: + 1) boot_with_debug + 2) boot_with_local_image + 3) boot_with_remote_images + Please refer to the specific case for details + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + def _get_data_disk(session): + """ Get the data disk. 
""" + extra_params = params["blk_extra_params_%s" % + params['images'].split()[-1]] + drive_id = re.search(r"(serial|wwn)=(\w+)", + extra_params, re.M).group(2) + return utils_misc.get_linux_drive_path(session, drive_id) + + def _write_disk(session): + disk_op_cmd = params['disk_op_cmd'] + if disk_op_cmd: + disk = _get_data_disk(session) + session.cmd(disk_op_cmd.format(disk=disk)) + + def _get_memory(pid): + cmd = "ps -o vsz,rss -p %s | tail -n1" % pid + out = process.system_output(cmd, shell=True).split() + return [int(i) for i in out] + + def boot_with_debug(): + """ + Boot up a guest with debug level + 1. from 'debug_level_low' to 'debug_level_high' + 2. less than 'debug_level_low' + 3. greater than 'debug_level_high' + VM can start up without any error + """ + # valid debug levels + low = int(params["debug_level_low"]) + high = int(params["debug_level_high"]) + levels = [i for i in range(low, high+1)] + + # invalid debug levels: [low-100, low) and [high+1, high+100) + levels.extend([random.choice(range(low-100, low)), + random.choice(range(high+1, high+100))]) + + for level in levels: + logfile = utils_misc.get_log_filename("debug.level%s" % level) + params["gluster_debug"] = level + params["gluster_logfile"] = logfile + logging.info("debug level: %d, log: %s" % (level, logfile)) + + try: + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + if not os.path.exists(logfile): + raise exceptions.TestFail("Failed to generate log file %s" + % logfile) + os.remove(logfile) + finally: + vm.destroy() + + def boot_with_local_image(): + """ + Boot up a guest with a remote storage system image + as well as a local filesystem image + VM can start up without any error + """ + try: + session = None + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + tm = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=tm) + + _write_disk(session) + finally: + if session: + session.close() + vm.destroy() + + def boot_with_remote_images(): + """ + Boot up a guest with only one remote image, + record memory consumption(vsz, rss) + Boot up a guest with 4 remote images, + record memory consumption(vsz, rss) + The memory increased should not be greater than 'memory_diff' + """ + try: + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + # get vsz, rss when booting with one remote image + single_img_memory = _get_memory(vm.get_pid()) + if not single_img_memory: + raise exceptions.TestError("Failed to get memory when " + "booting with one remote image.") + logging.debug("memory consumption(only one remote image): %s" + % single_img_memory) + + vm.destroy() + + for img in params['images'].split()[1:]: + params['boot_drive_%s' % img] = 'yes' + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + # get vsz, rss when booting with 4 remote image + multi_img_memory = _get_memory(vm.get_pid()) + if not multi_img_memory: + raise exceptions.TestError("Failed to get memory when booting" + " with several remote images.") + logging.debug("memory consumption(total 4 remote images): %s" + % multi_img_memory) + + diff = int(float(utils_numeric.normalize_data_size( + params['memory_diff'], order_magnitude="K"))) + mem_diffs = [i-j for i, j in zip(multi_img_memory, + single_img_memory)] + if mem_diffs[0] > diff: + raise exceptions.TestFail( + "vsz increased '%s', which was more than '%s'" + % (mem_diffs[0], diff)) + if mem_diffs[1] > diff: + raise 
exceptions.TestFail( + "rss increased '%s', which was more than '%s'" + % (mem_diffs[1], diff)) + finally: + vm.destroy() + + tc = params["scenario"] + fun = locals()[tc] + fun() diff --git a/qemu/tests/boot_nic_with_intel_iommu.py b/qemu/tests/boot_nic_with_iommu.py similarity index 84% rename from qemu/tests/boot_nic_with_intel_iommu.py rename to qemu/tests/boot_nic_with_iommu.py index c775a54adf3bf1cadc23eba56204f99565e829f8..696c5117bfe0b3177bf6152bc75ab3b336f4856e 100644 --- a/qemu/tests/boot_nic_with_intel_iommu.py +++ b/qemu/tests/boot_nic_with_iommu.py @@ -1,4 +1,3 @@ -from avocado.utils import cpu from virttest import error_context from virttest import utils_test @@ -18,9 +17,6 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - if cpu.get_cpu_vendor_name() != 'intel': - test.cancel("This case only support Intel platform") - login_timeout = int(params.get("login_timeout", 360)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -35,7 +31,7 @@ def run(test, params, env): test.fail("Ping returns non-zero value %s" % output) package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("%s packeage lost when ping guest ip %s " % + test.fail("%s package lost when ping guest ip %s " % (package_lost, guest_ip)) finally: session.close() diff --git a/qemu/tests/boot_order_check.py b/qemu/tests/boot_order_check.py index 56ff728e2a0fb6e8b94689fc20524d4c6952b684..4f5233f9a9e6871a1c7b916dfbbcd71b3558f68c 100644 --- a/qemu/tests/boot_order_check.py +++ b/qemu/tests/boot_order_check.py @@ -5,6 +5,7 @@ import time from avocado.utils import process from virttest import error_context +from virttest import utils_misc @error_context.context_aware @@ -29,13 +30,16 @@ def run(test, params, env): device_found = dev break elif dev['class_info']['desc'] == 'PCI bridge': - device_found = _get_device(dev['pci_bridge']['devices'], + pci_bridge_devices = dev['pci_bridge'].get('devices') + if not pci_bridge_devices: + continue + device_found = _get_device(pci_bridge_devices, dev_id) if device_found: break return device_found - def _get_pci_addr_by_devid(vm, dev_id): + def _get_pci_addr_by_devid(dev_id): dev_addr = '' dev_addr_fmt = '%02d:%02d.%d' pci_info = vm.monitor.info('pci', debug=False) @@ -71,6 +75,7 @@ def run(test, params, env): process.system("ifconfig %s down" % nic.ifname) vm.resume() + devices_load_timeout = int(params.get("devices_load_timeout", 10)) timeout = int(params.get("login_timeout", 240)) bootorder_type = params.get("bootorder_type") @@ -83,7 +88,8 @@ def run(test, params, env): for nic in vm.virtnet: boot_index = params['bootindex_%s' % nic.nic_name] - pci_addr = _get_pci_addr_by_devid(vm, nic.device_id) + pci_addr = utils_misc.wait_for(lambda: _get_pci_addr_by_devid(nic.device_id), + timeout=devices_load_timeout) if not pci_addr: test.fail("Cannot get the pci address of %s." 
% nic.nic_name) list_nic_addr.append((pci_addr, boot_index)) diff --git a/qemu/tests/boot_with_remote_readonly_image.py b/qemu/tests/boot_with_remote_readonly_image.py new file mode 100644 index 0000000000000000000000000000000000000000..1e12c5ef81512ceba0a55426b867d10170402f3f --- /dev/null +++ b/qemu/tests/boot_with_remote_readonly_image.py @@ -0,0 +1,43 @@ +from virttest import qemu_storage +from virttest import data_dir +from virttest import utils_misc +from virttest import utils_test + +from avocado import fail_on + +from avocado.utils import process + +from provider import qemu_img_utils as img_utils + + +def run(test, params, env): + """ + 1) Convert a remote readonly system image to a local image + 2) Start VM from the local image, + with the remote iso image as its cdrom + 3) Log into VM + 4) Check readable cdrom + """ + def _convert_image(): + source = params['images'].split()[0] + target = params['convert_target'] + source_params = params.object_params(source) + target_params = params.object_params(target) + source_image = qemu_storage.QemuImg(source_params, None, source) + + # Convert source to target + fail_on((process.CmdError,))(source_image.convert)( + target_params, data_dir.get_data_dir()) + + _convert_image() + vm = img_utils.boot_vm_with_images(test, params, env, + (params['convert_target'],)) + session = vm.wait_for_login( + timeout=params.get_numeric("login_timeout", 360)) + cdroms = utils_misc.wait_for( + lambda: (utils_test.get_readable_cdroms(params, session)), + timeout=params.get_numeric("timeout", 10) + ) + session.close() + if not cdroms: + test.fail("No readable cdrom found in the VM.") diff --git a/qemu/tests/cache_sizes_test.py b/qemu/tests/cache_sizes_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1133d0762bf49db6eb923e8b63e2e0dcbfbb064d --- /dev/null +++ b/qemu/tests/cache_sizes_test.py @@ -0,0 +1,36 @@ +import logging + +from provider import qemu_img_utils as img_utils + + +def run(test, params, env): + """ + Cache sizes test for a guest. + + 1. Boot a guest up with different cache sizes. + 2. Check writing data to the guest works fine. + 3. Shut the guest down.
+ + :param test: Qemu test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment + """ + file = params["guest_file_name"] + initial_tag = params["images"] + cache_sizes = params["cache_sizes"].split() + + logging.info("Boot a guest up from initial image: %s, and create a" + " file %s on the disk.", initial_tag, file) + for cache_size in cache_sizes: + params["drv_extra_params_image1"] = "cache-size=%s" % cache_size + vm = img_utils.boot_vm_with_images(test, params, env) + session = vm.wait_for_login() + guest_temp_file = params["guest_file_name"] + sync_bin = params.get("sync_bin", "sync") + + logging.debug("Create temporary file on guest: %s", guest_temp_file) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, + sync_bin) + + session.close() + vm.destroy() diff --git a/qemu/tests/cdrom_block_size_check.py b/qemu/tests/cdrom_block_size_check.py index e20a12ee11f75aa521d316023e7f363a1c5f1d1b..a7db136d08ba4a2e35dd5a59d9cd475bd26c6b00 100644 --- a/qemu/tests/cdrom_block_size_check.py +++ b/qemu/tests/cdrom_block_size_check.py @@ -2,6 +2,7 @@ import re import logging import os import six +import time from avocado.utils import process @@ -10,6 +11,9 @@ from virttest import error_context from virttest import utils_misc from virttest import data_dir from virttest.qemu_capabilities import Flags +from virttest.qemu_monitor import QMPCmdError + +from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange # This decorator makes the test function aware of context strings @@ -141,12 +145,41 @@ def run(test, params, env): msg += " Output: %s" % output test.error(msg) + def is_tray_open(qdev): + for block in vm.monitor.info("block"): + if qdev == block.get('qdev'): + return block.get('tray_open') + + def wait_for_tray_open(qdev): + if not utils_misc.wait_for(lambda: is_tray_open(qdev), 30, 1, 3): + test.error("The cdrom's tray did not open.") + + def change_media(device, target): + try: + with change_check: + vm.change_media(device, target) + except QMPCmdError as e: + if excepted_qmp_err not in str(e): + test.error(str(e)) + logging.warn(str(e)) + wait_for_tray_open(cdroms) + with change_check: + vm.change_media(device, target) + # FIXME: sleep to wait to sync the status of CD-ROM to VM. 
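+ # (A sturdier alternative, sketched here as an assumption, would be to
+ # wait for the QMP DEVICE_TRAY_MOVED event instead of a fixed sleep, e.g.
+ # utils_misc.wait_for(lambda: vm.monitor.get_event("DEVICE_TRAY_MOVED"),
+ # sleep_time_after_change) before touching the medium again.)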
+ time.sleep(sleep_time_after_change) + + def eject_cdrom(device): + with eject_check: + vm.eject_cdrom(device, True) + cdroms = params["test_cdroms"] params["cdroms"] = cdroms params["start_vm"] = "yes" show_mount_cmd = params.get("show_mount_cmd") mount_cmd = params.get("mount_cdrom_cmd") umount_cmd = params.get("umount_cdrom_cmd") + excepted_qmp_err = params.get('excepted_qmp_err') + sleep_time_after_change = params.get_numeric('sleep_time_after_change', 30) os_type = params["os_type"] error_context.context("Get the main VM", logging.info) main_vm = params["main_vm"] @@ -166,9 +199,12 @@ def run(test, params, env): file_size=file_size) cdrom_device = get_cdrom_device(vm) + eject_check = QMPEventCheckCDEject(vm, cdrom_device) + change_check = QMPEventCheckCDChange(vm, cdrom_device) + error_context.context("Attach a small cd iso file to the cdrom.", logging.info) - vm.change_media(cdrom_device, orig_cdrom) + change_media(cdrom_device, orig_cdrom) if mount_cmd: mount_cdrom(session, guest_cdrom, mount_point, show_mount_cmd, mount_cmd) @@ -188,7 +224,7 @@ def run(test, params, env): test.error(msg) error_context.context("eject the cdrom from monitor.", logging.info) - vm.eject_cdrom(cdrom_device) + eject_cdrom(cdrom_device) cdrom_name = params.get("final_cdrom", "images/final.iso") file_size = params.get("final_cdrom_size", 1000) @@ -196,7 +232,7 @@ def run(test, params, env): file_size=file_size) error_context.context("Attach a bigger cd iso file to the cdrom.", logging.info) - vm.change_media(cdrom_device, final_cdrom) + change_media(cdrom_device, final_cdrom) if mount_cmd: mount_cdrom(session, guest_cdrom, mount_point, show_mount_cmd, mount_cmd) diff --git a/qemu/tests/cfg/ansible_test.cfg b/qemu/tests/cfg/ansible_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d33c452e5c7950cfec157e6d1d32952459adfa52 --- /dev/null +++ b/qemu/tests/cfg/ansible_test.cfg @@ -0,0 +1,11 @@ +- ansible_test: + type = ansible_test + virt_test_type = qemu + playbook_repo = "https://github.com/ansible/test-playbooks.git" + # Top level playbook file + toplevel_playbook = "site.yml" + playbook_timeout = 600 + ansible_callback_plugin = debug + ansible_ssh_extra_args = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + # Here we can define an extra set of variables for the playbook with json format + #ansible_extra_vars = '{"debug_msg": "Hello Ansible!", "force_handlers": true}' diff --git a/qemu/tests/cfg/apicv_test.cfg b/qemu/tests/cfg/apicv_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..69d6bc7abf45859f5bdd4732ef6d775cd90d3571 --- /dev/null +++ b/qemu/tests/cfg/apicv_test.cfg @@ -0,0 +1,32 @@ +- apicv_test: + virt_test_type = qemu + type = apicv_test + start_vm = no + kill_vm_on_error = yes + only HostCpuVendor.intel + only virtio_net + netperf_server_link = netperf-2.7.1.tar.bz2 + server_path = /var/tmp/ + test_option = "-n 4 -l 60 -t TCP_STREAM -- -m 580" + repeat_times = 20 + module_name = kvm_intel + mod_param = enable_apicv + Linux: + netperf_client_link = ${netperf_server_link} + client_path = ${server_path} + Windows: + netperf_client_link = "netperf.exe" + client_path = "c:\\" + variants: + - vhost_on: + vhost_nic1 = on + - vhost_off: + vhost_nic1 = off + variants: + - ioeventfd_on: + no vhost_on + nic_extra_params += ",ioeventfd=on" + threshold = 0.85 + - ioeventfd_off: + nic_extra_params += ",ioeventfd=off" + threshold = 0.75 diff --git a/qemu/tests/cfg/balloon_check.cfg b/qemu/tests/cfg/balloon_check.cfg index 
7822bdd647822561e2c04d2a3b13f745fae97fd1..08cf23020b11d74869a5d31006c5516c49e6fdd4 100644 --- a/qemu/tests/cfg/balloon_check.cfg +++ b/qemu/tests/cfg/balloon_check.cfg @@ -60,13 +60,16 @@ - iommu_enabled: only q35 only balloon_evict_and_enlarge + #RHEL guest doesn't support, refer to bug 1791593 + only Windows no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 no Win2008 Win2008..r2 Win2012 Win2012..r2 + intel_iommu = yes virtio_dev_iommu_platform = on + iommu_eim = on enable_guest_iommu = yes virtio_dev_ats = on machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on,device-iotlb=on" variants: - balloon_evict: # Disable balloon_base case as it not run any sub test diff --git a/qemu/tests/cfg/balloon_memhp.cfg b/qemu/tests/cfg/balloon_memhp.cfg index 831711b404f9dfb62dcd1d36d0ba0ef4db3f3e63..4afeef496df831ae25ac3c2a8b80e114edc78ddd 100644 --- a/qemu/tests/cfg/balloon_memhp.cfg +++ b/qemu/tests/cfg/balloon_memhp.cfg @@ -2,12 +2,22 @@ type = balloon_memhp slots_mem = 4 size_mem = 1G + mem_fixed = 4096 maxmem_mem = 64G login_timeout = 600 no Host_RHEL.m6 no RHEL.5 only Linux Win2008.x86_64 Win2012.x86_64 Win2016.x86_64 Win2019.x86_64 guest_numa_nodes = "node0 node1" + mem_devs = "mem0 mem1" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + use_mem_mem0 = "no" + use_mem_mem1 = "no" + size_mem_mem0 = 2048M + size_mem_mem1 = 2048M + backend_mem_mem0 = memory-backend-ram + backend_mem_mem1 = memory-backend-ram target_mem = "plug" balloon = balloon0 balloon_dev_devid = balloon0 diff --git a/qemu/tests/cfg/block_discard_hotplug.cfg b/qemu/tests/cfg/block_discard_hotplug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..76ffc959f2706a1743fff5b97473bcfb3846c259 --- /dev/null +++ b/qemu/tests/cfg/block_discard_hotplug.cfg @@ -0,0 +1,35 @@ +- block_discard_hotplug: + no Windows + type = block_discard_hotplug + data_tag = "stg1" + images += " stg0 ${data_tag}" + force_create_image_stg0 = yes + image_size_stg0 = 1G + image_name_stg0 = images/stg0 + drv_extra_params_stg0 += "discard=unmap" + boot_drive_stg1 = no + drv_extra_params_stg1 += "discard=unmap" + guest_mount_point = "/home/test" + guest_test_file = "${guest_mount_point}/test.img" + guest_format_command = "mkdir -p ${guest_mount_point};mkfs.ext4 {0} && mount {0} ${guest_mount_point}" + guest_dd_command = "dd if=/dev/zero of=${guest_test_file}" + guest_rm_command = "rm -rf ${guest_test_file};sync" + guest_fstrim_command = "fstrim ${guest_mount_point}" + variants: + - local_file: + force_create_image_stg1 = yes + image_size_stg1 = 1G + image_name_stg1 = images/stg1 + # It need set discard=on the virtio-blk device under pc mode + virtio_blk..i440fx: + blk_extra_params_stg1 += ",discard=on" + - scsi_debug: + only virtio_scsi + scsi_debug = yes + force_create_image_stg1 = no + pre_command = "modprobe -r scsi_debug; modprobe scsi_debug dev_size_mb=1024 lbpu=1 lbpws=1 lbprz=0" + post_command = "modprobe -r scsi_debug" + drive_format_stg1 = scsi-block + image_format_stg1 = raw + image_raw_device_stg1 = yes + remove_image_stg1 = no diff --git a/qemu/tests/cfg/block_hotplug.cfg b/qemu/tests/cfg/block_hotplug.cfg index 33a7fe77f44e08d648768f0f19e928c9b430e8de..42717526039f3099c7809c8cf1793bbf14948258 100644 --- a/qemu/tests/cfg/block_hotplug.cfg +++ b/qemu/tests/cfg/block_hotplug.cfg @@ -1,6 +1,7 @@ - block_hotplug: install setup image_copy unattended_install.cdrom no RHEL.3.9 no ide + no spapr_vscsi virt_test_type = qemu libvirt type = block_hotplug bootindex_image1 = 
0 @@ -128,4 +129,10 @@ - block_scsi: drive_format_stg0 = scsi-hd drive_format_stg1 = scsi-hd + q35, arm64-pci: + drive_bus_stg0 = 0 + drive_bus_stg1 = 1 + virtio_scsi: + drive_bus_stg0 = 1 + drive_bus_stg1 = 2 get_disk_pattern = "^/dev/sd[a-z]*$" diff --git a/qemu/tests/cfg/block_hotplug_in_pause.cfg b/qemu/tests/cfg/block_hotplug_in_pause.cfg index e3c894f5dcb4c1e41e2d15870b116eff937c5d7c..c7ca37fe92b93ec1f5d224caf22017f74c6021d6 100644 --- a/qemu/tests/cfg/block_hotplug_in_pause.cfg +++ b/qemu/tests/cfg/block_hotplug_in_pause.cfg @@ -1,5 +1,6 @@ - block_hotplug_in_pause: install setup image_copy unattended_install.cdrom no ide + no spapr_vscsi virt_test_type = qemu libvirt type = block_hotplug_in_pause bootindex_image1 = 0 @@ -55,3 +56,5 @@ stop_vm_before_hotplug = yes resume_vm_after_hotplug = no stop_vm_before_unplug = no + Windows: + resume_vm_after_hotplug = yes diff --git a/qemu/tests/cfg/block_hotplug_negative.cfg b/qemu/tests/cfg/block_hotplug_negative.cfg index 81cd84de0defb81e9fef065bbb01901682b8d7f7..890392aeba1b2adf583db3406b7e27f0ad58752c 100644 --- a/qemu/tests/cfg/block_hotplug_negative.cfg +++ b/qemu/tests/cfg/block_hotplug_negative.cfg @@ -4,6 +4,7 @@ - block_hotplug_negative: no RHEL.3.9 no ide + no spapr_vscsi virt_test_type = qemu type = block_hotplug_negative bootindex_image1 = 0 diff --git a/qemu/tests/cfg/block_hotplug_passthrough.cfg b/qemu/tests/cfg/block_hotplug_passthrough.cfg new file mode 100644 index 0000000000000000000000000000000000000000..2c9967fa3cbb0f5199c0346a8066035afbe89ff1 --- /dev/null +++ b/qemu/tests/cfg/block_hotplug_passthrough.cfg @@ -0,0 +1,18 @@ +- block_hotplug_passthrough: + type = block_hotplug_passthrough + only virtio_scsi + pre_command = "modprobe -r scsi_debug; modprobe sg" + post_command = "modprobe -r sg" + create_command = "modprobe scsi_debug add_host=1 dev_size_mb=2048" + images += " stg0" + boot_drive_stg0 = no + drive_bus_stg0 = 1 + drive_format_stg0 = scsi-block + image_raw_device_stg0 = yes + image_format_stg0 = raw + stg_image_size = 2G + Linux: + dd_test = "dd if=/dev/zero of=/dev/{0} bs=1M count=40 oflag=direct&& dd if=/dev/{0} of=/dev/null bs=1M count=40 iflag=direct" + Windows: + driver_name = vioscsi + iozone_options = '-az -b c:\{0}_iozone.xls -g 1g -y 32k -i 0 -i 1 -I -f {0}:\test-data' diff --git a/qemu/tests/cfg/block_iscsi_lvm.cfg b/qemu/tests/cfg/block_iscsi_lvm.cfg index 234e838e5c25d12301d72197e91ae963aba5e953..0df73e371e3277868b3dee483993befe0867b1fb 100644 --- a/qemu/tests/cfg/block_iscsi_lvm.cfg +++ b/qemu/tests/cfg/block_iscsi_lvm.cfg @@ -56,6 +56,11 @@ post_commands_iscsi += "${cmd_iscsi_delete_node},${cmd_iscsi_logout}" post_commands_lvm = "${cmd_remove_dm},${cmd_remove_pv},${cmd_remove_vg},${cmd_remove_lv}" post_command_noncritical = yes + ppc64le, ppc64: + # explicitly disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= variants: - with_scsi_hd: drive_format_image0 = scsi-hd @@ -69,13 +74,13 @@ ppc64le, ppc64: cd_format_cd1 = scsi-cd i440fx: - Windows: - cd_format_unattended = ide + cd_format_unattended = ide q35: cd_format_unattended = ahci variants: - extra_cdrom_ks: no WinXP Win2000 Win2003 WinVista + unattended_delivery_method = cdrom cdroms += " unattended" drive_index_unattended = 3 drive_index_cd1 = 1 diff --git a/qemu/tests/cfg/block_multifunction.cfg b/qemu/tests/cfg/block_multifunction.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a1df29d2c2b380a6a11f03078ebbc5d6d8060c5f --- /dev/null +++ b/qemu/tests/cfg/block_multifunction.cfg @@ -0,0 +1,20 @@ +- 
block_multifunction: + type = block_multifunction + only x86_64,i386,ppc64,ppc64le + only virtio_blk, virtio_scsi + start_vm = no + kill_vm = yes + disk_op_cmd = "dd if=/dev/%s of=/dev/null bs=1k count=1000 iflag=direct &&" + disk_op_cmd += " dd if=/dev/zero of=/dev/%s bs=1k count=1000 oflag=direct" + unplug_timeout = 60 + Windows: + disk_op_cmd = "WIN_UTILS:\Iozone\iozone.exe -azR -r 64k -n 125M -g 512M -M -i 0" + disk_op_cmd += " -i 1 -b %s:\iozone_test -f %s:\testfile" + unplug_timeout = 180 + variants: + - @default: + - multi_disks: + type = block_multifunction_scale + only virtio_blk + start_vm = yes + pcie_extra_root_port = 3 diff --git a/qemu/tests/cfg/block_scsi_generic_inquiry.cfg b/qemu/tests/cfg/block_scsi_generic_inquiry.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4ea6317cf7ab317bd99449cb2e7b3ca2d4dd2d8b --- /dev/null +++ b/qemu/tests/cfg/block_scsi_generic_inquiry.cfg @@ -0,0 +1,28 @@ +- block_scsi_generic_inquiry: + only virtio_blk + only Linux + only RHEL.7 + no q35 + type = block_scsi_generic_inquiry + virt_test_type = qemu + kill_vm = yes + start_vm = no + not_preprocess = yes + target = iqn.2020-04.com.redhat:kvm-qe + image_data_tag = stg + images += ' ${image_data_tag}' + drive_rerror_stg = stop + drive_werror_stg = stop + blk_extra_params_stg = "scsi=on,disable-legacy=off,disable-modern=on" + block_raw_name = ${image_data_tag} + emulated_image = images/${block_raw_name} + emulated_image_size = 1G + force_create_stg = no + image_format_stg = raw + image_raw_device_stg = yes + remove_image_stg = no + cmd_dd = "dd if=/dev/zero of=%s bs=10M count=1000 oflag=direct &" + cmd_get_disk_path = "ls /dev/disk/by-path/* | grep "${target}"" + cmd_sg_inq = "sg_inq %s" + vm_status_paused = paused + sg_fail_info = 'Both SCSI INQUIRY and fetching ATA information failed' diff --git a/qemu/tests/cfg/block_with_iommu.cfg b/qemu/tests/cfg/block_with_iommu.cfg index 9c1d1664a9d5b54234bd6a968daac2912b079d92..87bd2f5acdab26800450aa603def15c44eb64092 100644 --- a/qemu/tests/cfg/block_with_iommu.cfg +++ b/qemu/tests/cfg/block_with_iommu.cfg @@ -3,6 +3,7 @@ only q35 only x86_64, i386 start_vm = yes + intel_iommu = yes virtio_dev_iommu_platform = on enable_guest_iommu = yes virtio_dev_ats = on @@ -13,11 +14,22 @@ only RHEL.8 only Host_RHEL.m8 only virtio_scsi - extra_params = "-device intel-iommu,device-iotlb=on,intremap" virtio_dev_disable_legacy = on virtio_dev_disable_modern = off check_key_words = "DMAR: IOMMU enabled;" check_key_words += "DMAR: Intel(R) Virtualization Technology for Directed I/O" + variants: + - @default: + - reload_kernel: + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + force_reset_go_down_check = shell + reload_kernel_cmd = 'kexec -l /boot/%s --initrd=/boot/%s --command-line="%s"' + cmd_get_kernel_ver = uname -r + cmd_get_boot_cmdline = cat /proc/cmdline + cmd_get_boot_vmlinuz = ls /boot/ | grep vmlinuz | grep -vE rescue + cmd_get_boot_initramfs = ls /boot/ | grep initramfs | grep -vE rescue - with_installation: type = unattended_install only Windows @@ -35,7 +47,9 @@ remove_image_image_stg_iommu = yes guest_port_unattended_install = 12323 inactivity_watcher = error - extra_params = "-device intel-iommu,device-iotlb=on,intremap" + ovmf: + restore_ovmf_vars = yes + send_key_at_install = ret variants: - extra_cdrom_ks: unattended_delivery_method = cdrom diff --git a/qemu/tests/cfg/block_with_share_rw.cfg b/qemu/tests/cfg/block_with_share_rw.cfg index 
8f88accd4a128ec4ec742472223038c23be09433..7d6ab97a40010d0d1ea07968904750bf32a0227a 100644 --- a/qemu/tests/cfg/block_with_share_rw.cfg +++ b/qemu/tests/cfg/block_with_share_rw.cfg @@ -19,6 +19,8 @@ only virtio_blk drive_format_stg = virtio - with_usb_storage: + aarch64: + no Host_RHEL usbs = " usbtest" usb_bus = "usbtest.0" variants: diff --git a/qemu/tests/cfg/blockdev_commit.cfg b/qemu/tests/cfg/blockdev_commit.cfg index 4c7c9ff20d40645f31a4fe4e03edfcb678b160ca..b9ccf5c69d77be23bf24e17aae87f9b046a9cb04 100644 --- a/qemu/tests/cfg/blockdev_commit.cfg +++ b/qemu/tests/cfg/blockdev_commit.cfg @@ -1,10 +1,14 @@ - blockdev_commit: type = blockdev_commit virt_test_type = qemu + only Linux images += " data" - force_create_image_data = yes - force_remove_image_data = yes + force_create_image = yes + force_remove_image = yes + force_create_image_image1 = no + force_remove_image_image1 = no start_vm = yes + kill_vm = yes storage_pools = default storage_type_default = "directory" storage_pool = default @@ -30,9 +34,32 @@ device_tag = "data" format = qcow2 - #mode = "absolute-paths" - base_tag = sn1 - top_tag = sn3 + rebase_mode = unsafe Host_RHEL.m8: node = ${device} qemu_force_use_drive_expression = no + variants: + - @one_data_disk: + - multi_data_disks: + image_size_data1 = 300M + image_name_data1 = data1 + snapshot_tags_data1 = sn11 sn21 sn31 sn41 + + image_size_sn11 = 300M + image_name_sn11 = sn11 + image_format_sn11 = qcow2 + + image_name_sn21 = sn21 + image_size_sn21 = 300M + image_format_sn21 = qcow2 + + image_name_sn31 = sn31 + image_size_sn31 = 300M + image_format_sn31 = qcow2 + + image_name_sn41 = sn41 + image_size_sn41 = 300M + image_format_sn41 = qcow2 + + device_tag += " data1" + images += " data1" diff --git a/qemu/tests/cfg/blockdev_commit_install.cfg b/qemu/tests/cfg/blockdev_commit_install.cfg new file mode 100644 index 0000000000000000000000000000000000000000..2c71e00e95a42a8a030d1afa7841bdc170db3637 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_install.cfg @@ -0,0 +1,43 @@ +- blockdev_commit_install: + type = blockdev_commit_install + virt_test_type = qemu + only Linux + need_install = yes + start_vm = no + image_name_image1 = "images/base_install" + boot_drive_stg = yes + medium = cdrom + installation = cdrom + kernel = vmlinuz + initrd = initrd.img + force_create_image_image1 = yes + image_aio = threads + unattended_delivery_method = cdrom + cdroms += " unattended" + index_enable = no + kill_vm = yes + shutdown_cleanly = no + storage_pools = default + storage_type_default = "directory" + storage_pool = default + snapshot_tags = sn1 sn2 sn3 sn4 + aarch64: + restore_aavmf_vars = yes + + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_format_sn4 = qcow2 + + device_tag = "image1" + rebase_mode = unsafe + qemu_force_use_drive_expression = no + no RHEL.5 RHEL.6 RHEL.7 RHEL.8.1 + tag_for_install_start = "Starting Login Service|Starting Update is Completed" diff --git a/qemu/tests/cfg/blockdev_commit_query_named_block_nodes.cfg b/qemu/tests/cfg/blockdev_commit_query_named_block_nodes.cfg new file mode 100644 index 0000000000000000000000000000000000000000..683dd97f4c3e9cdf1194d89b5d2040a109b32485 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_query_named_block_nodes.cfg @@ -0,0 +1,35 @@ +- blockdev_commit_query_named_block_nodes: + type = blockdev_commit_query_named_block_nodes + virt_test_type = qemu + only Linux + images += " data" + 
force_create_image_data = yes + force_remove_image_data = yes + start_vm = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + image_size_data = 500M + image_name_data = data + snapshot_tags_data = sn1 sn2 sn3 sn4 + + image_size_sn1 = 500M + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_size_sn2 = 500M + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_size_sn3 = 500M + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_size_sn4 = 500M + image_format_sn4 = qcow2 + + device_tag = "data" + rebase_mode = unsafe + qemu_force_use_drive_expression = no diff --git a/qemu/tests/cfg/blockdev_commit_reboot.cfg b/qemu/tests/cfg/blockdev_commit_reboot.cfg new file mode 100644 index 0000000000000000000000000000000000000000..913cf704c3155e0e9777413db734e99f020d3db9 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_reboot.cfg @@ -0,0 +1,29 @@ +- blockdev_commit_reboot: + type = blockdev_commit_reboot + virt_test_type = qemu + only Linux + start_vm = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + snapshot_tags = sn1 sn2 sn3 sn4 + + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_format_sn4 = qcow2 + + device_tag = "image1" + rebase_mode = unsafe + mount_point = "/tmp" + qemu_force_use_drive_expression = no + Windows: + mount_point = "C:\\" diff --git a/qemu/tests/cfg/blockdev_commit_server_down.cfg b/qemu/tests/cfg/blockdev_commit_server_down.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ff071225b2de2c214dbd670e1c8e6ae13736f365 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_server_down.cfg @@ -0,0 +1,40 @@ +- blockdev_commit_server_down: + only Linux + only filesystem + virt_test_type = qemu + type = blockdev_commit_server_down + qemu_force_use_drive_expression = no + start_vm = no + not_preprocess = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + + # The following is specified for a local data image 'data', + # which will be exported by qemu-nbd + local_image_tag = data + image_size_data = 2G + image_name_data = data + nbd_port_data = 10810 + + # The following is specified for nbd image 'nbddata', + # i.e. 
the exported 'data', used as a data disk in VM + nbd_image_tag = nbddata + enable_nbd_nbddata = yes + storage_type_nbddata = nbd + nbd_reconnect_delay_nbddata = 30 + nbd_port_nbddata = ${nbd_port_data} + image_format_nbddata = raw + image_size_nbddata = ${image_size_data} + + # snapshot of the nbd image 'nbddata' + snapshot_tags_nbddata = sn1 + image_size_sn1 = 2G + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + device_tag = "${nbd_image_tag}" + rebase_mode = unsafe + server_down_elapsed_time = 5 + speed = 81920 diff --git a/qemu/tests/cfg/blockdev_commit_stop_cont.cfg b/qemu/tests/cfg/blockdev_commit_stop_cont.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfb9b2896b913e054246915011d5c04910833989 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_stop_cont.cfg @@ -0,0 +1,27 @@ +- blockdev_commit_stop_cont: + type = blockdev_commit_stop_cont + virt_test_type = qemu + only Linux + start_vm = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + snapshot_tags = sn1 sn2 sn3 sn4 + + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_format_sn4 = qcow2 + + device_tag = "image1" + rebase_mode = unsafe + mount_point = "/tmp" + qemu_force_use_drive_expression = no diff --git a/qemu/tests/cfg/blockdev_commit_stress.cfg b/qemu/tests/cfg/blockdev_commit_stress.cfg new file mode 100644 index 0000000000000000000000000000000000000000..db9870cdfbb45f77cadace4b3002b53c20064170 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_stress.cfg @@ -0,0 +1,27 @@ +- blockdev_commit_stress: + type = blockdev_commit_stress + virt_test_type = qemu + only Linux + start_vm = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + snapshot_tags = sn1 sn2 sn3 sn4 + + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_format_sn4 = qcow2 + + device_tag = "image1" + rebase_mode = unsafe + mount_point = "/tmp" + qemu_force_use_drive_expression = no diff --git a/qemu/tests/cfg/blockdev_commit_top.cfg b/qemu/tests/cfg/blockdev_commit_top.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6b17f23cab1ba1e617d2896d9b199414fddaf6e0 --- /dev/null +++ b/qemu/tests/cfg/blockdev_commit_top.cfg @@ -0,0 +1,35 @@ +- blockdev_commit_top: + type = blockdev_commit_top + virt_test_type = qemu + only Linux + images += " data" + force_create_image_data = yes + force_remove_image_data = yes + start_vm = yes + kill_vm = yes + storage_pools = default + storage_type_default = "directory" + storage_pool = default + image_size_data = 500M + image_name_data = data + snapshot_tags_data = sn1 sn2 sn3 sn4 + + image_size_sn1 = 500M + image_name_sn1 = sn1 + image_format_sn1 = qcow2 + + image_name_sn2 = sn2 + image_size_sn2 = 500M + image_format_sn2 = qcow2 + + image_name_sn3 = sn3 + image_size_sn3 = 500M + image_format_sn3 = qcow2 + + image_name_sn4 = sn4 + image_size_sn4 = 500M + image_format_sn4 = qcow2 + + device_tag = "data" + rebase_mode = unsafe + qemu_force_use_drive_expression = no diff --git a/qemu/tests/cfg/blockdev_full_backup.cfg b/qemu/tests/cfg/blockdev_full_backup.cfg index 17ba134cdda6abc97f6614d5507abd6008dfb004..366605c326b2a443d262e505a389f85853777326 100644 --- a/qemu/tests/cfg/blockdev_full_backup.cfg +++ 
b/qemu/tests/cfg/blockdev_full_backup.cfg @@ -20,6 +20,7 @@ sync = full source_images = src1 target_images = dst1 + rebase_mode = unsafe variants: - with_data_plane: only Host_RHEL diff --git a/qemu/tests/cfg/blockdev_full_backup_multi_disks.cfg b/qemu/tests/cfg/blockdev_full_backup_multi_disks.cfg index 4c4fed7818c8b507c1a602d8d1b3b8dbec783a8f..92daa662d91a9445d4f518cb22fdd6e1ddafafbb 100644 --- a/qemu/tests/cfg/blockdev_full_backup_multi_disks.cfg +++ b/qemu/tests/cfg/blockdev_full_backup_multi_disks.cfg @@ -29,6 +29,7 @@ target_images = dst1 dst2 type = blockdev_full_backup_simple backup_options = "sync" + rebase_mode = unsafe variants: - with_data_plane: only Host_RHEL diff --git a/qemu/tests/cfg/blockdev_full_mirror.cfg b/qemu/tests/cfg/blockdev_full_mirror.cfg index 996c02b8e23bd6b089280a3a2854d8c75fc93274..2c359334665ab7fb5a02bd1a8363bc4f74773b92 100644 --- a/qemu/tests/cfg/blockdev_full_mirror.cfg +++ b/qemu/tests/cfg/blockdev_full_mirror.cfg @@ -22,6 +22,7 @@ sync = full auto-dismiss = true auto-finalize = true + rebase_mode = unsafe variants: - with_data_plane: only Host_RHEL diff --git a/qemu/tests/cfg/blockdev_inc_backup_after_commit.cfg b/qemu/tests/cfg/blockdev_inc_backup_after_commit.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e4c133e28ba905aca838bf4c8e750407e78f2fe6 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_after_commit.cfg @@ -0,0 +1,74 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Do incremental backup after block commit +# The backup images are local images(filesystem) + +- blockdev_inc_backup_after_commit: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_after_commit + virt_test_type = qemu + images += " data" + source_images = data + image_backup_chain_data = "base inc" + remove_image_data = yes + force_create_image_data = yes + snap_image_data = datasn + image_chain_datasn = data datasn + backing_inc = datasn + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_datasn = 2G + image_size_base = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_datasn = qcow2 + image_format_base = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_datasn = datasn + image_name_base = base + image_name_inc = inc + + gluster_direct: + enable_gluster_data = yes + ceph: + enable_ceph_data = yes + nbd: + nbd_port_data = 10822 + force_create_image_data = no + image_create_support_data = no + enable_nbd_data = yes + iscsi_direct: + lun_data = 1 + enable_iscsi_data = yes + image_raw_device_data = yes + storage_type_data = iscsi-direct + storage_type_datasn = "local fs" + + rebase_mode = unsafe + + # For local backup images + storage_type_default = directory + enable_iscsi_inc = no + enable_iscsi_base = no + enable_iscsi_datasn = no + enable_ceph_inc = no + enable_ceph_base = no + enable_ceph_datasn = no + enable_gluster_inc = no + enable_gluster_base = no + enable_gluster_datasn = no + enable_nbd_inc = no + enable_nbd_base = no + enable_nbd_datasn = no + image_raw_device_inc = no + image_raw_device_base = no + image_raw_device_datasn = no diff --git a/qemu/tests/cfg/blockdev_inc_backup_bitmap_mode_test.cfg b/qemu/tests/cfg/blockdev_inc_backup_bitmap_mode_test.cfg index 3df93c95dd3b48b4903d003d544c4be27f8933a3..e79b3d1f1a63f4c796b877eb01f56d6dc3ba216e 100644 --- 
a/qemu/tests/cfg/blockdev_inc_backup_bitmap_mode_test.cfg +++ b/qemu/tests/cfg/blockdev_inc_backup_bitmap_mode_test.cfg @@ -2,7 +2,7 @@ type = blockdev_inc_backup_bitmap_mode_test virt_test_type = qemu images += " data" - image_chain_data = "base inc" + image_backup_chain_data = "base inc" backing_inc = base force_remove_image_image1 = no force_create_image_image1 = no diff --git a/qemu/tests/cfg/blockdev_inc_backup_cluster_test.cfg b/qemu/tests/cfg/blockdev_inc_backup_cluster_test.cfg index b6cf8ebc4a2ae7b33b67c65e207474270d982502..d1581365ebae70cba0486ac670e09846bec3d8d0 100644 --- a/qemu/tests/cfg/blockdev_inc_backup_cluster_test.cfg +++ b/qemu/tests/cfg/blockdev_inc_backup_cluster_test.cfg @@ -2,7 +2,7 @@ type = blockdev_inc_backup_test virt_test_type = qemu images += " data" - image_chain_data = "base inc" + image_backup_chain_data = "base inc" force_remove_image_image1 = no force_create_image_image1 = no force_create_image_data = yes diff --git a/qemu/tests/cfg/blockdev_inc_backup_inc_success.cfg b/qemu/tests/cfg/blockdev_inc_backup_inc_success.cfg new file mode 100644 index 0000000000000000000000000000000000000000..dec9ded322a846371df47d9425090d66787b3ee2 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_inc_success.cfg @@ -0,0 +1,51 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Do incremental backup with sync:incremental and bitmap:on-success +# The backup images are local images(filesystem) + +- blockdev_inc_backup_inc_success: + only Linux + no libcurl, libssh + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_inc_success + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base inc" + backing_inc = base + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_base = base + image_name_inc = inc + + inc_sync_mode = incremental + inc_bitmap_mode = on-success + rebase_mode = unsafe + + # For local target inc image + enable_iscsi_inc = no + enable_ceph_inc = no + enable_gluster_inc = no + enable_nbd_inc = no + image_raw_device_inc = no + storage_type_default = "directory" + nbd: + force_create_image_data = no + nbd_port_data = 10822 + image_create_support_data = no + iscsi_direct: + lun_data = 1 diff --git a/qemu/tests/cfg/blockdev_inc_backup_non_persistent_bitmap.cfg b/qemu/tests/cfg/blockdev_inc_backup_non_persistent_bitmap.cfg new file mode 100644 index 0000000000000000000000000000000000000000..96bd7346ee568e8b383888961baf1f552a09f26a --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_non_persistent_bitmap.cfg @@ -0,0 +1,39 @@ +# Storage backends: +# filesystem, iscsi_direct, gluster_direct, ceph, nbd +# The following testing scenario is covered: +# Add non-persistent bitmap on data disk +# The backup image is the local image(filesystem) + +- blockdev_inc_backup_non_persistent_bitmap: + only Linux + no libcurl, libssh + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_non_persistent_bitmap + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base" + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base 
= 2G + + image_format_data = qcow2 + image_format_base = qcow2 + + image_name_data = data + image_name_base = base + rebase_mode = unsafe + check_bitmaps = 'bitmaps:' + + storage_type_default = "directory" + nbd: + force_create_image_data = no + nbd_port_data = 10822 + image_create_support_data = no + iscsi_direct: + lun_data = 1 diff --git a/qemu/tests/cfg/blockdev_inc_backup_pull_mode_diff.cfg b/qemu/tests/cfg/blockdev_inc_backup_pull_mode_diff.cfg new file mode 100644 index 0000000000000000000000000000000000000000..8ac1780cef7b9e275832188ea4fda69f016951e1 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_pull_mode_diff.cfg @@ -0,0 +1,71 @@ +# Storage backends: +# filesystem +# The following testing scenario is covered: +# Do differential live backup via pull mode +# The fleecing images must be local fs images + + +- blockdev_inc_backup_pull_mode_diff: + only Linux + only filesystem + virt_test_type = qemu + type = blockdev_inc_backup_pull_mode_diff + qemu_force_use_drive_expression = no + start_vm = no + images += " data" + rebase_mode = unsafe + dirty_bitmap_opt = x-dirty-bitmap + storage_pools = default + storage_pool = default + storage_type_default = "directory" + + # data image, used as the backup source + source_images = "data" + image_size_data = 2G + image_name_data = data + image_format_data = qcow2 + remove_image_data = yes + force_create_image_data = yes + + # local backup images, used as the backup targets + client_image_full = fullbk + client_image_inc = incbk + image_size_fullbk = ${image_size_data} + image_size_incbk = ${image_size_data} + image_format_fullbk = qcow2 + image_format_incbk = qcow2 + image_name_fullbk = fullbk + image_name_incbk = incbk + + # fleecing images, used as data porting 'interfaces' + image_backup_chain_data = "full inc" + backing_full = data + backing_inc = data + image_size_full = ${image_size_data} + image_size_inc = ${image_size_data} + image_format_full = qcow2 + image_format_inc = qcow2 + image_name_full = full + image_name_inc = inc + + # For internal nbd server + nbd_port_full = 10810 + nbd_port_inc = 10811 + nbd_export_name_full = nbd_full_image + nbd_export_name_inc = nbd_inc_image + nbd_export_writable_full = no + nbd_export_writable_inc = no + + # For the exported nbd images + nbd_image_full = nbdfull + nbd_image_inc = nbdinc + nbd_port_nbdfull = ${nbd_port_full} + nbd_port_nbdinc = ${nbd_port_inc} + nbd_export_name_nbdfull = ${nbd_export_name_full} + nbd_export_name_nbdinc = ${nbd_export_name_inc} + enable_nbd_nbdfull = yes + enable_nbd_nbdinc = yes + storage_type_nbdfull = nbd + storage_type_nbdinc = nbd + image_format_nbdfull = raw + image_format_nbdinc = raw diff --git a/qemu/tests/cfg/blockdev_inc_backup_pull_mode_test.cfg b/qemu/tests/cfg/blockdev_inc_backup_pull_mode_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9f14cb094ab592335e323db23dbea03616998598 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_pull_mode_test.cfg @@ -0,0 +1,67 @@ +- blockdev_inc_backup_pull_mode: + only Linux + only filesystem + virt_test_type = qemu + type = blockdev_inc_backup_pull_mode_test + qemu_force_use_drive_expression = no + images += " data" + + # fleecing images full and inc + full_backup_image_data = full + inc_backup_image_data = inc + backup_images_data = "${full_backup_image_data} ${inc_backup_image_data}" + backing_full = data + backing_inc = data + force_create_image_data = yes + force_remove_image_data = yes + start_vm = no + storage_pools = default + storage_pool = default + storage_type_default 
= "directory" + + image_size_data = 2G + image_size_full = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_full = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_full = full + image_name_inc = inc + source_images = "data" + rebase_mode = unsafe + dirty_bitmap_opt = x-dirty-bitmap + + # conf of fleecing images exported, + # used for internal nbd server + nbd_export_writable = no + nbd_port_full = 10810 + nbd_port_inc = 10811 + nbd_export_name_full = nbd_full_image + nbd_export_name_inc = nbd_inc_image + + # conf of nbd images, when full and inc are exported, + # use the conf here to access them + nbd_image_full = nbdfull + nbd_image_inc = nbdinc + nbd_port_nbdfull = ${nbd_port_full} + nbd_port_nbdinc = ${nbd_port_inc} + nbd_export_name_nbdfull = ${nbd_export_name_full} + nbd_export_name_nbdinc = ${nbd_export_name_inc} + enable_nbd_nbdfull = yes + enable_nbd_nbdinc = yes + image_format_nbdfull = raw + image_format_nbdinc = raw + + # conf of local backup images, copy data from + # nbd images into these local images by rebase + client_image_full = back1 + client_image_inc = back2 + image_size_back1 = 2G + image_size_back2 = 2G + image_format_back1 = qcow2 + image_format_back2 = qcow2 + image_name_back1 = back1 + image_name_back2 = back2 diff --git a/qemu/tests/cfg/blockdev_inc_backup_resize.cfg b/qemu/tests/cfg/blockdev_inc_backup_resize.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4de20279d7d73a8e20bf557e0c3dec9ba6cf7c11 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_resize.cfg @@ -0,0 +1,41 @@ +# Storage backends: +# filesystem, gluster_direct, ceph +# The following testing scenario is covered: +# resize a qcow2 image with persistent bitmap stored on it + + +- blockdev_inc_backup_resize: + only Linux + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_resize + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base" + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + + image_name_data = data + image_name_base = base + rebase_mode = unsafe + check_bitmaps = 'bitmaps:' + storage_type_default = "directory" + + # settings for block_resize + no RHEL.4 + only filesystem ceph gluster_direct + extend_ratio = 1.5 + shrink_ratio = 0.75 + disk_change_ratio = "${extend_ratio} ${shrink_ratio}" + Host_RHEL.m6, Host_RHEL.m7.u0, Host_RHEL.m7.u1, Host_RHEL.m7.u2, Host_RHEL.m7.u3, Host_RHEL.m7.u4, Host_RHEL.m7.u5: + # shrinking is not supported + disk_change_ratio = ${extend_ratio} diff --git a/qemu/tests/cfg/blockdev_inc_backup_test.cfg b/qemu/tests/cfg/blockdev_inc_backup_test.cfg index f6d6755eadfc6698405d1fc67d9a559f09249b12..57c33badcae429d235ed75d92118114f799c6c52 100644 --- a/qemu/tests/cfg/blockdev_inc_backup_test.cfg +++ b/qemu/tests/cfg/blockdev_inc_backup_test.cfg @@ -2,7 +2,7 @@ type = blockdev_inc_backup_test virt_test_type = qemu images += " data" - image_chain_data = "base inc" + image_backup_chain_data = "base inc" force_remove_image_image1 = no force_create_image_image1 = no force_create_image_data = yes @@ -46,7 +46,7 @@ - @granularity_default: - multi_data_disks: images += " data2" - image_chain_data2 = "base2 inc2" + image_backup_chain_data2 = "base2 inc2" force_create_image_data2 = yes force_remove_image_data2 = yes image_size_data2 = 3G diff --git 
a/qemu/tests/cfg/blockdev_inc_backup_with_guest_agent.cfg b/qemu/tests/cfg/blockdev_inc_backup_with_guest_agent.cfg new file mode 100644 index 0000000000000000000000000000000000000000..39e588fb78bd61cb3e82c7a6972a437c5500da4f --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_with_guest_agent.cfg @@ -0,0 +1,62 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Do incremental backup with guest-agent +# The backup images are local images(filesystem) + +- blockdev_inc_backup_with_guest_agent: + no libcurl, libssh + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_with_guest_agent + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base inc" + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_base = base + image_name_inc = inc + rebase_mode = unsafe + + # For guest agent + only Fedora, RHEL + no Fedora.8, Fedora.9, Fedora.10, Fedora.11, Fedora.12, Fedora.13, Fedora.14, Fedora.15 + agent_name = "org.qemu.guest_agent.0" + agent_serial_type = virtio + serials += " org.qemu.guest_agent.0" + serial_type_org.qemu.guest_agent.0 = "virtserialport" + enable_nonsecurity_files_cmd = "setsebool virt_qemu_ga_read_nonsecurity_files=on" + enable_permissive_cmd = "setenforce 0" + + # For local backup images + enable_iscsi_inc = no + enable_iscsi_base = no + enable_ceph_inc = no + enable_ceph_base = no + enable_gluster_inc = no + enable_gluster_base = no + enable_nbd_inc = no + enable_nbd_base = no + image_raw_device_inc = no + image_raw_device_base = no + storage_type_default = "directory" + + nbd: + force_create_image_data = no + nbd_port_data = 10822 + image_create_support_data = no + iscsi_direct: + lun_data = 1 diff --git a/qemu/tests/cfg/blockdev_inc_backup_with_migration.cfg b/qemu/tests/cfg/blockdev_inc_backup_with_migration.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6c767c55ec943b27ceb844c0f09a067046ec6ba1 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_with_migration.cfg @@ -0,0 +1,60 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Do incremental live backup with bitmap after migrated on shared storage +# The backup images are local images(filesystem) + +- blockdev_inc_backup_with_migration: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_with_migration + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base inc" + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_base = base + image_name_inc = inc + rebase_mode = unsafe + + # For local backup images + enable_iscsi_inc = no + enable_iscsi_base = no + enable_ceph_inc = no + enable_ceph_base = no + enable_gluster_inc = no + enable_gluster_base = no + enable_nbd_inc = no + enable_nbd_base = no + image_raw_device_inc = no + 
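(Aside, not part of the patch: the blockdev_inc_backup_* scenarios above all revolve around the same QMP flow — add a dirty bitmap, take a full backup, then back up only the dirty clusters. A minimal sketch of that flow, assuming `vm` is an avocado-vt VM with a connected QMP monitor; the node and job names are illustrative, not taken from this patch, and waiting for each job to finish is elided.)

```python
def incremental_backup(vm):
    # Start tracking guest writes on the data node with a dirty bitmap
    vm.monitor.cmd("block-dirty-bitmap-add",
                   {"node": "drive_data", "name": "bitmap0"})
    # Full (base) backup; the bitmap records writes made from now on
    vm.monitor.cmd("blockdev-backup",
                   {"device": "drive_data", "target": "drive_base",
                    "sync": "full", "job-id": "full_backup"})
    # Incremental backup copies only the dirty clusters; with
    # bitmap-mode=on-success (inc_bitmap_mode above) the bitmap is
    # cleared only if the job completes successfully
    vm.monitor.cmd("blockdev-backup",
                   {"device": "drive_data", "target": "drive_inc",
                    "sync": "incremental", "bitmap": "bitmap0",
                    "bitmap-mode": "on-success", "job-id": "inc_backup"})
```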
image_raw_device_base = no + storage_type_default = "directory" + + # For migration + mig_timeout = 1800 + pre_command = "sync && echo 3 > /proc/sys/vm/drop_caches" + migration_protocol = "tcp" + migrate_capabilities = "{'events': 'on', 'dirty-bitmaps': 'on'}" + + nbd: + force_create_image_data = no + nbd_port_data = 10822 + image_create_support_data = no + iscsi_direct: + lun_data = 1 diff --git a/qemu/tests/cfg/blockdev_inc_backup_with_throttling.cfg b/qemu/tests/cfg/blockdev_inc_backup_with_throttling.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9ad6858fbe65b38e473a6e3c9958a7b276a32c24 --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_with_throttling.cfg @@ -0,0 +1,60 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Do incremental backup with throttling +# The backup images are local images(filesystem) + + +- blockdev_inc_backup_with_throttling: + only Linux + only filesystem, iscsi_direct, ceph, nbd, gluster_direct + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_test + virt_test_type = qemu + start_vm = no + images += " data" + source_images = "data" + remove_image_data = yes + force_create_image_data = yes + image_backup_chain_data = "base inc" + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + image_size_inc = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + image_format_inc = qcow2 + + image_name_data = data + image_name_base = base + image_name_inc = inc + + nbd: + nbd_port_data = 10822 + force_create_image_data = no + image_create_support_data = no + iscsi_direct: + lun_data = 1 + + rebase_mode = unsafe + + # For local backup images + storage_type_default = "directory" + enable_iscsi_inc = no + enable_iscsi_base = no + enable_ceph_inc = no + enable_ceph_base = no + enable_gluster_inc = no + enable_gluster_base = no + enable_nbd_inc = no + enable_nbd_base = no + image_raw_device_inc = no + image_raw_device_base = no + + # For throttling + # Use the default throttling settings for the basic test + throttle_groups = group1 + image_throttle_group_data = group1 diff --git a/qemu/tests/cfg/blockdev_inc_backup_xpt_bitmap.cfg b/qemu/tests/cfg/blockdev_inc_backup_xpt_bitmap.cfg new file mode 100644 index 0000000000000000000000000000000000000000..72e9d6badc52d1b65317e17bcf2078ca1657702e --- /dev/null +++ b/qemu/tests/cfg/blockdev_inc_backup_xpt_bitmap.cfg @@ -0,0 +1,41 @@ +# Storage backends: +# filesystem +# The following testing scenario is covered: +# Expose persistent bitmap via qemu-nbd +# The data image must be a local fs image + + +- blockdev_inc_backup_xpt_bitmap: + only Linux + only filesystem + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_inc_backup_xpt_bitmap + virt_test_type = qemu + images += " data" + source_images = "data" + image_backup_chain_data = "base" + remove_image_data = yes + force_create_image_data = yes + storage_pools = default + storage_pool = default + + image_size_data = 2G + image_size_base = 2G + + image_format_data = qcow2 + image_format_base = qcow2 + + image_name_data = data + image_name_base = base + rebase_mode = unsafe + storage_type_default = "directory" + dirty_bitmap_opt = x-dirty-bitmap + + nbd_export_format_data = qcow2 + nbd_port_data = 10850 + + nbd_image_tag_data = nbddata + nbd_port_nbddata = ${nbd_port_data} + enable_nbd_nbddata = yes + storage_type_nbddata = nbd diff --git 
a/qemu/tests/cfg/blockdev_mirror_error.cfg b/qemu/tests/cfg/blockdev_mirror_error.cfg new file mode 100644 index 0000000000000000000000000000000000000000..21d1a8c6e70c6b055bec150f53cd8eba66db58c7 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_error.cfg @@ -0,0 +1,43 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with '"on-source-error": "stop", "on-target-error": "stop"' +# The mirror image is a local image(filesystem) + +- blockdev_mirror_error: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_error + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync on-source-error on-target-error + sync = full + tempfile_size = 1024M + on-source-error = stop + on-target-error = stop + storage_pools = default + storage_pool = default + storage_type_default = directory + + image_size_data1 = 2G + image_size_mirror1 = 1G + + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 diff --git a/qemu/tests/cfg/blockdev_mirror_firewall.cfg b/qemu/tests/cfg/blockdev_mirror_firewall.cfg new file mode 100644 index 0000000000000000000000000000000000000000..80829d0bc88d4976d97f33f8babee1dd9d102e59 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_firewall.cfg @@ -0,0 +1,75 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with firewall test +# The mirror image is a nbd image + + +- blockdev_mirror_firewall: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + kill_vm = yes + qemu_force_use_drive_expression = no + type = blockdev_mirror_firewall + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync speed + sync = full + speed_data1 = 90000000 + tempfile_size = 1024M + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + remove_image_data1 = no + iscsi_direct: + lun_data1 = 1 + + # Settings for a local fs image 'data', + # which will be exported by qemu-nbd + local_image_tag = data + storage_type_default = directory + image_size_data = ${image_size_data1} + image_name_data = data + image_format_data = qcow2 + preallocated_data = falloc + nbd_port_data = 10810 + nbd_export_format_data = raw + nbd_server_tls_creds_data = '' + enable_iscsi_data = no + enable_ceph_data = no + enable_gluster_data = no + enable_nbd_data = no + image_raw_device_data = no + + # Settings for nbd image 'mirror1', + # i.e. 
the exported 'data' + nbd_image_tag = mirror1 + enable_nbd_mirror1 = yes + storage_type_mirror1 = nbd + nbd_port_mirror1 = ${nbd_port_data} + force_create_image_mirror1 = no + image_create_support_mirror1 = no + image_format_mirror1 = ${image_format_data} + + # commands used to break the connection to the nbd image + net_break_cmd = iptables -I INPUT -s {s} -p tcp --dport ${nbd_port_mirror1} -j REJECT + net_resume_cmd = iptables -D INPUT -s {s} -p tcp --dport ${nbd_port_mirror1} -j REJECT diff --git a/qemu/tests/cfg/blockdev_mirror_hotunplug.cfg b/qemu/tests/cfg/blockdev_mirror_hotunplug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..aadca2c7cb64e8e0f2626f10757892ebf6eeb412 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_hotunplug.cfg @@ -0,0 +1,50 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with hotunplug test +# The mirror image is a local fs image + + +- blockdev_mirror_hotunplug: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + kill_vm = yes + qemu_force_use_drive_expression = no + type = blockdev_mirror_hotunplug + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync speed + sync = full + speed_data1 = 90000000 + tempfile_size = 1024M + storage_pools = default + storage_pool = default + rebase_mode = unsafe + block_node_busy_error = "Node '%s' is busy: block device is in use by block job: mirror" + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + image_name_data1 = data1 + image_name_mirror1 = mirror1 + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_install.cfg b/qemu/tests/cfg/blockdev_mirror_install.cfg new file mode 100644 index 0000000000000000000000000000000000000000..49c5620c67fecbeef5462991236c1c8ddbf4496f --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_install.cfg @@ -0,0 +1,80 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with vm installation test +# The mirror image is a local filesystem image + +- blockdev_mirror_install: + only Linux + no RHEL.5 RHEL.6 RHEL.7 RHEL.8.1 + only filesystem iscsi_direct ceph nbd gluster_direct + type = blockdev_mirror_install + qemu_force_use_drive_expression = no + need_install = yes + start_vm = no + kill_vm = yes + virt_test_type = qemu + source_images = image1 + target_images = mirror1 + force_create_image_image1 = yes + medium = cdrom + installation = cdrom + kernel = vmlinuz + initrd = initrd.img + image_aio = threads + unattended_delivery_method = cdrom + cdroms += " unattended" + index_enable = no + shutdown_cleanly = no + backup_options_image1 = sync + sync = full + storage_pools = default + storage_pool = default + aarch64: + restore_aavmf_vars = yes + + image_format_mirror1 = qcow2 + image_name_mirror1 = mirror1 + + tag_for_install_start = "Starting Login Service|Starting Update is Completed" + rebase_mode = unsafe + + # Always access cd1 and 
unattended as local storage + enable_nbd_cd1 = no + enable_nbd_unattended = no + enable_gluster_cd1 = no + enable_gluster_unattended = no + enable_ceph_cd1 = no + enable_ceph_unattended = no + enable_iscsi_cd1 = no + enable_iscsi_unattended = no + + # Install VM on a new image, which can avoid the + # original system image crash when hitting a failure + image_name_image1 = "images/system_image" + iscsi_direct: + lun_image1 = 1 + image_size_mirror1 = 21G + nbd: + force_create_image_image1 = no + qcow2: + nbd_port_image1 = 10820 + raw: + nbd_port_image1 = 10819 + luks: + nbd_port_image1 = 10821 + image_secret_image1 = redhat + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no + + variants: + # Share the defined installation configurations + - @with_installation: + variants: + - @extra_cdrom_ks: diff --git a/qemu/tests/cfg/blockdev_mirror_multiple_blocks.cfg b/qemu/tests/cfg/blockdev_mirror_multiple_blocks.cfg new file mode 100644 index 0000000000000000000000000000000000000000..803cbcf91fa2823f50fee1bc3db25ac93d020ccf --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_multiple_blocks.cfg @@ -0,0 +1,73 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Multiple block mirror simultaneously +# The mirror images are local images(filesystem) + +- blockdev_mirror_multiple_blocks: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_multiple_blocks + virt_test_type = qemu + images += " data1 data2" + source_images = "data1 data2" + target_images = "mirror1 mirror2" + remove_image_data1 = yes + remove_image_data2 = yes + force_create_image_data1 = yes + force_create_image_data2 = yes + backup_options_data1 = sync speed + backup_options_data2 = sync + sync = full + speed_data1 = 20000000 + tempfile_size = 1000M + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_data2 = 2G + image_size_mirror1 = ${image_size_data1} + image_size_mirror2 = ${image_size_data2} + + image_format_data1 = qcow2 + image_format_data2 = qcow2 + image_format_mirror1 = qcow2 + image_format_mirror2 = qcow2 + + image_name_data1 = data1 + image_name_data2 = data2 + image_name_mirror1 = mirror1 + image_name_mirror2 = mirror2 + rebase_mode = unsafe + + nbd: + force_create_image_data1 = no + force_create_image_data2 = no + nbd_port_data1 = 10822 + nbd_port_data2 = 10823 + image_create_support_data1 = no + image_create_support_data2 = no + iscsi_direct: + lun_data1 = 1 + lun_data2 = 2 + + blk_extra_params_data1 = "serial=DATA_DISK1" + blk_extra_params_data2 = "serial=DATA_DISK2" + Host_RHEL.m6..ide: + blk_extra_params_data1 = "wwn=0x5000123456789abc" + blk_extra_params_data2 = "wwn=0x5000123456789cba" + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_iscsi_mirror2 = no + enable_ceph_mirror1 = no + enable_ceph_mirror2 = no + enable_gluster_mirror1 = no + enable_gluster_mirror2 = no + enable_nbd_mirror1 = no + enable_nbd_mirror2 = no + image_raw_device_mirror1 = no + image_raw_device_mirror2 = no diff --git a/qemu/tests/cfg/blockdev_mirror_readonly.cfg b/qemu/tests/cfg/blockdev_mirror_readonly.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1f93a678c8e63622e4f6706e237a6ef9555ada17 --- /dev/null +++ 
b/qemu/tests/cfg/blockdev_mirror_readonly.cfg @@ -0,0 +1,49 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror on readonly device test +# The mirror image is a local image(filesystem) + +- blockdev_mirror_readonly: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_readonly + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + image_readonly_data1 = yes + backup_options_data1 = sync + sync = full + error_msg = 'because it is opened read-only' + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_ready_vm_down.cfg b/qemu/tests/cfg/blockdev_mirror_ready_vm_down.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0bb34bd77b5911e83d655e4760dda9edecb82554 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_ready_vm_down.cfg @@ -0,0 +1,46 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# vm poweroff when mirror job is ready +# The mirror image is a local image(filesystem) + + +- blockdev_mirror_ready_vm_down: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_ready_vm_down + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync + sync = full + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_simple.cfg b/qemu/tests/cfg/blockdev_mirror_simple.cfg new file mode 100644 index 0000000000000000000000000000000000000000..db2f6e684d29d37fc845d4f1c111b9cb1db24840 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_simple.cfg @@ -0,0 +1,70 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror granularity test +# The mirror image is a local image(filesystem) +# granularity is 512, 64M and a random one from (512, 64M) +# Block mirror buf-size test +# The mirror image is a local image(filesystem) +# buf-size = count * granularity, when +# count is a random 
value from [1, 10], while +# granularity is a random value from (512, 64M) + +- blockdev_mirror_simple: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_simple + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync granularity + sync = full + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no + + variants: + - granularity_512: + granularity = 512 + tempfile_size = 10M + - granularity_64M: + granularity = 64M + tempfile_size = 1000M + - granularity_between_512_64M: + # Get a random one from (512, 64M) + granularity_list = 1K 2K 4K 8K 16K 32K 64K 128K 256K 512K 1M 2M 4M 8M 16M 32M + + variants: + - buf_size_default: + - buf_size_random: + buf_size_factor_list = 2 3 4 5 6 7 8 9 10 + backup_options_data1 += " buf-size" diff --git a/qemu/tests/cfg/blockdev_mirror_speed.cfg b/qemu/tests/cfg/blockdev_mirror_speed.cfg new file mode 100644 index 0000000000000000000000000000000000000000..42c4d49c8ad6f8daf16ceff29a549cd2eff25e59 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_speed.cfg @@ -0,0 +1,52 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# blockdev-mirror speed test +# The mirror image is a local image(filesystem) + +- blockdev_mirror_speed: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_speed + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync speed buf-size + sync = full + speed = 10240 + # set a small buf-size to get a frequent offset update in job status + buf-size = 1024 + valid_speeds = 1 12345 0 + invalid_speeds = -1 8.0 abc + error_msg = "Invalid parameter type for 'speed', expected: integer" + error_msg_negative = "Parameter 'speed' expects a non-negative value" + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_stress.cfg b/qemu/tests/cfg/blockdev_mirror_stress.cfg new file mode 100644 index 0000000000000000000000000000000000000000..2071e0af492c468ee78ed48f7c29f750fc7d9e33 --- /dev/null +++ 
b/qemu/tests/cfg/blockdev_mirror_stress.cfg @@ -0,0 +1,34 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block mirror test with stress -- only system disk +# The mirror image is a local image(filesystem) + +- blockdev_mirror_stress: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_stress + virt_test_type = qemu + source_images = image1 + target_images = mirror1 + image_create_support_image1 = no + backup_options_image1 = sync + parallel_tests = stress_test + stress_args = --cpu 2 --vm 2 --io 2 --vm-bytes 1024 + sync = full + storage_pools = default + storage_pool = default + + image_format_mirror1 = qcow2 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_sync_none.cfg b/qemu/tests/cfg/blockdev_mirror_sync_none.cfg new file mode 100644 index 0000000000000000000000000000000000000000..3f8469b5e5d80301f97a857c40540df73cfe764c --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_sync_none.cfg @@ -0,0 +1,48 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with sync mode:none +# The mirror image is a local image(filesystem) + +- blockdev_mirror_sync_none: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_sync_none + virt_test_type = qemu + images += " data1" + source_images = data1 + target_images = mirror1 + remove_image_data1 = yes + force_create_image_data1 = yes + backup_options_data1 = sync + sync = none + tempfile_size = 600M + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_mirror1 = ${image_size_data1} + + image_format_data1 = qcow2 + image_format_mirror1 = qcow2 + + image_name_data1 = data1 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + iscsi_direct: + lun_data1 = 1 + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_sync_top.cfg b/qemu/tests/cfg/blockdev_mirror_sync_top.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b1ae98192e6142363e704af9263a8f65567383f3 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_sync_top.cfg @@ -0,0 +1,76 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block mirror with sync mode top test +# The mirror image is a local image(filesystem) + + +- blockdev_mirror_sync_top: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + kill_vm = yes + qemu_force_use_drive_expression = no + type = blockdev_mirror_sync_top + virt_test_type = qemu + images += " data1" + source_images = data1 + convert_images = convert1 + snap_images = data1sn + target_images = convert1sn + backing_convert1sn = convert1 + image_chain_data1sn = data1 data1sn + remove_image_data1 = yes + 
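(Aside, not part of the patch: the blockdev_mirror_* variants above and below differ mainly in the arguments passed to one QMP command. A hedged sketch of that call, with argument values echoing the cfg parameters (`sync`, `speed_data1`, the `on-source-error`/`on-target-error` policies from blockdev_mirror_error); node and job names are illustrative.)

```python
def mirror_data_disk(vm):
    # One blockdev-mirror call from the source node to a target node
    # that has already been attached via blockdev-add
    vm.monitor.cmd("blockdev-mirror", {
        "job-id": "mirror_data1",
        "device": "drive_data1",      # source node (source_images)
        "target": "drive_mirror1",    # target node (target_images)
        "sync": "full",               # "none"/"top" in the sync_* variants
        "speed": 90000000,            # speed_data1 in several variants
        "on-source-error": "stop",    # policies from blockdev_mirror_error
        "on-target-error": "stop",
    })
    # QEMU emits BLOCK_JOB_READY once source and target converge;
    # completing the job then pivots the device to the mirror image
    vm.monitor.cmd("job-complete", {"id": "mirror_data1"})
```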
force_create_image_data1 = yes + backup_options_data1sn = sync + sync = top + storage_pools = default + storage_pool = default + + image_size_data1 = 2G + image_size_data1sn = ${image_size_data1} + image_size_convert1 = ${image_size_data1} + image_size_convert1sn = ${image_size_data1} + + image_format_data1 = qcow2 + image_format_data1sn = qcow2 + image_format_convert1 = qcow2 + image_format_convert1sn = qcow2 + + image_name_data1 = data1 + image_name_data1sn = data1sn + image_name_convert1 = convert1 + image_name_convert1sn = convert1sn + rebase_mode = unsafe + + gluster_direct: + enable_gluster_data1 = yes + ceph: + enable_ceph_data1 = yes + nbd: + nbd_port_data1 = 10822 + force_create_image_data1 = no + image_create_support_data1 = no + enable_nbd_data1 = yes + iscsi_direct: + lun_data1 = 1 + enable_iscsi_data1 = yes + image_raw_device_data1 = yes + + # For local mirror images + storage_type_default = directory + enable_iscsi_convert1 = no + enable_iscsi_convert1sn = no + enable_iscsi_data1sn = no + enable_ceph_convert1 = no + enable_ceph_convert1sn = no + enable_ceph_data1sn = no + enable_gluster_convert1 = no + enable_gluster_convert1sn = no + enable_gluster_data1sn = no + enable_nbd_convert1 = no + enable_nbd_convert1sn = no + enable_nbd_data1sn = no + image_raw_device_convert1 = no + image_raw_device_convert1sn = no + image_raw_device_data1sn = no diff --git a/qemu/tests/cfg/blockdev_mirror_vm_reboot.cfg b/qemu/tests/cfg/blockdev_mirror_vm_reboot.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f5e018ca9b86f223d9d32b54aabe9071d42f47cb --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_vm_reboot.cfg @@ -0,0 +1,33 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block mirror during vm reboot -- only system disk +# The mirror image is a local image(filesystem) + +- blockdev_mirror_vm_reboot: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_vm_reboot + virt_test_type = qemu + source_images = image1 + target_images = mirror1 + image_create_support_image1 = no + backup_options_image1 = sync + parallel_tests = reboot_vm + sync = full + storage_pools = default + storage_pool = default + + image_format_mirror1 = qcow2 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_mirror_vm_stop_cont.cfg b/qemu/tests/cfg/blockdev_mirror_vm_stop_cont.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e9c3564723e78ace1ab87b99e22e28768967d6f2 --- /dev/null +++ b/qemu/tests/cfg/blockdev_mirror_vm_stop_cont.cfg @@ -0,0 +1,35 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block mirror during vm stop_cont -- only system disk +# The mirror image is a local image(filesystem) + +- blockdev_mirror_vm_stop_cont: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = no + qemu_force_use_drive_expression = no + type = blockdev_mirror_vm_stop_cont + virt_test_type = qemu + source_images = image1 + target_images = mirror1 + image_create_support_image1 = no + backup_options_image1 = sync speed + parallel_tests = stop_cont_vm + sync = full + 
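(Aside: the *_vm_stop_cont variants run a block job and a guest pause/resume loop in parallel, via parallel_tests = stop_cont_vm with pause lengths from vm_stop_time_list. A rough sketch of such a parallel helper; the function name matches the cfg value, but the body is an assumption, not the provider's implementation.)

```python
import time

def stop_cont_vm(vm, stop_times=(1, 5, 10)):
    """Pause and resume the guest while the block job keeps running."""
    for seconds in stop_times:
        vm.monitor.cmd("stop")   # freeze vCPUs; the block job is unaffected
        time.sleep(seconds)      # vm_stop_time_list = 1 5 10 in the cfg
        vm.monitor.cmd("cont")   # resume the guest
```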
speed_image1 = 90000000 + vm_stop_time_list = 1 5 10 + storage_pools = default + storage_pool = default + + image_format_mirror1 = qcow2 + image_name_mirror1 = mirror1 + rebase_mode = unsafe + + # For local mirror images + storage_type_default = directory + enable_iscsi_mirror1 = no + enable_ceph_mirror1 = no + enable_gluster_mirror1 = no + enable_nbd_mirror1 = no + image_raw_device_mirror1 = no diff --git a/qemu/tests/cfg/blockdev_snapshot.cfg b/qemu/tests/cfg/blockdev_snapshot.cfg index e0f808422db9d164cf2a12f236b0321d6a0c749d..e7f8bf4cc4cddae46cf411ea2074b61bb4c45f37 100644 --- a/qemu/tests/cfg/blockdev_snapshot.cfg +++ b/qemu/tests/cfg/blockdev_snapshot.cfg @@ -17,6 +17,8 @@ image_format_sn1 = qcow2 device = "drive_data" format = qcow2 + rebase_mode = unsafe + only Linux #mode = "absolute-paths" Host_RHEL.m8: node = "drive_data" diff --git a/qemu/tests/cfg/blockdev_snapshot_install.cfg b/qemu/tests/cfg/blockdev_snapshot_install.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c541268d8f7cf05de8b7a6cdfa578a9c1a27f19d --- /dev/null +++ b/qemu/tests/cfg/blockdev_snapshot_install.cfg @@ -0,0 +1,32 @@ +- blockdev_snapshot_install: + type = blockdev_snapshot_install + virt_test_type = qemu + need_install = yes + start_vm = no + image_name_image1 = "images/snapshot_install" + medium = cdrom + installation = cdrom + kernel = vmlinuz + initrd = initrd.img + force_create_image_image1 = yes + image_aio = threads + unattended_delivery_method = cdrom + cdroms += " unattended" + index_enable = no + kill_vm = yes + tag_for_install_start = "Starting Login Service|Starting Update is Completed" + storage_type_default = "directory" + storage_pool = default + snapshot_tag = sn1 + image_format_sn1 = qcow2 + image_name_sn1 = images/sn1 + device = "drive_image1" + base_tag = "image1" + rebase_mode = unsafe + only Linux + node = "drive_image1" + overlay = "drive_sn1" + qemu_force_use_drive_expression = no + no RHEL.5 RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + aarch64: + restore_aavmf_vars = yes diff --git a/qemu/tests/cfg/blockdev_snapshot_multi_disks.cfg b/qemu/tests/cfg/blockdev_snapshot_multi_disks.cfg index 58cd11089a6f72f677a43cc2a3b9ec881d565a5d..ca4a0315fbe5df45a9c5493dc731f2d82f0721f0 100644 --- a/qemu/tests/cfg/blockdev_snapshot_multi_disks.cfg +++ b/qemu/tests/cfg/blockdev_snapshot_multi_disks.cfg @@ -27,3 +27,4 @@ image_format_sn1 = qcow2 image_name_sn1 = images/sn1 image_name_sn2 = images/sn2 + only Linux diff --git a/qemu/tests/cfg/blockdev_snapshot_reboot.cfg b/qemu/tests/cfg/blockdev_snapshot_reboot.cfg index f50483787dc3bc9232cd6d40adceefc3ccd930bb..7b57ca1a104c5b7e4ac0e111581c393fa4ac7ff8 100644 --- a/qemu/tests/cfg/blockdev_snapshot_reboot.cfg +++ b/qemu/tests/cfg/blockdev_snapshot_reboot.cfg @@ -10,6 +10,7 @@ device = "drive_image1" base_tag = "image1" rebase_mode = unsafe + only Linux Host_RHEL.m8: node = "drive_image1" overlay = "drive_sn1" diff --git a/qemu/tests/cfg/blockdev_snapshot_stop_cont.cfg b/qemu/tests/cfg/blockdev_snapshot_stop_cont.cfg index c9179aeaa86116aa9bb9a5ef21ad5344c13a624d..c4f77ad3ee659e090a4c91fa8490c2284d4bde46 100644 --- a/qemu/tests/cfg/blockdev_snapshot_stop_cont.cfg +++ b/qemu/tests/cfg/blockdev_snapshot_stop_cont.cfg @@ -10,6 +10,7 @@ device = "drive_image1" base_tag = "image1" rebase_mode = unsafe + only Linux Host_RHEL.m8: node = "drive_image1" overlay = "drive_sn1" diff --git a/qemu/tests/cfg/blockdev_snapshot_stress.cfg b/qemu/tests/cfg/blockdev_snapshot_stress.cfg index 
2d77781b1807c75d97d48b81dcd52c9ea0342446..7938ce8df30c0ffd60f42d465850441f54ee5d73 100644 --- a/qemu/tests/cfg/blockdev_snapshot_stress.cfg +++ b/qemu/tests/cfg/blockdev_snapshot_stress.cfg @@ -10,6 +10,7 @@ device = "drive_image1" base_tag = "image1" rebase_mode = unsafe + only Linux Host_RHEL.m8: node = "drive_image1" overlay = "drive_sn1" diff --git a/qemu/tests/cfg/blockdev_stream.cfg b/qemu/tests/cfg/blockdev_stream.cfg index 895d67c89cc80d09844a8e5efa1113f040769c65..45a528f57f35ac2d60fd64c057e6f66283ad6f33 100644 --- a/qemu/tests/cfg/blockdev_stream.cfg +++ b/qemu/tests/cfg/blockdev_stream.cfg @@ -17,6 +17,8 @@ image_format_sn1 = qcow2 device = "drive_data" format = qcow2 + rebase_mode = unsafe + only Linux #mode = "absolute-paths" Host_RHEL.m8: node = "drive_data" diff --git a/qemu/tests/cfg/blockdev_stream_install.cfg b/qemu/tests/cfg/blockdev_stream_install.cfg new file mode 100644 index 0000000000000000000000000000000000000000..277332d9b4ff2fa24d55fef7b33870e34d98284d --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_install.cfg @@ -0,0 +1,79 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Block stream test with vm installation +# The snapshot image is a local filesystem image + +- blockdev_stream_install: + only Linux + no RHEL.5 RHEL.6 RHEL.7 RHEL.8.1 + only filesystem iscsi_direct ceph nbd gluster_direct + type = blockdev_stream_install + qemu_force_use_drive_expression = no + need_install = yes + start_vm = no + kill_vm = yes + virt_test_type = qemu + force_create_image_image1 = yes + medium = cdrom + installation = cdrom + kernel = vmlinuz + initrd = initrd.img + image_aio = threads + unattended_delivery_method = cdrom + cdroms += " unattended" + index_enable = no + shutdown_cleanly = no + storage_pools = default + storage_pool = default + aarch64: + restore_aavmf_vars = yes + + base_tag = image1 + node = "drive_image1" + snapshot_tag = datasn + image_format_datasn = qcow2 + image_name_datasn = datasn + + tag_for_install_start = "Starting Login Service|Starting Update is Completed" + rebase_mode = unsafe + + # Always access cd1 and unattended as local storage + enable_nbd_cd1 = no + enable_nbd_unattended = no + enable_gluster_cd1 = no + enable_gluster_unattended = no + enable_ceph_cd1 = no + enable_ceph_unattended = no + enable_iscsi_cd1 = no + enable_iscsi_unattended = no + + # Install VM on a new image, which can avoid the + # original system image crash when hitting a failure + image_name_image1 = "images/system_image" + iscsi_direct: + lun_image1 = 1 + image_size_datasn = 21G + nbd: + force_create_image_image1 = no + qcow2: + nbd_port_image1 = 10820 + raw: + nbd_port_image1 = 10819 + luks: + nbd_port_image1 = 10821 + image_secret_image1 = redhat + + # For local mirror images + storage_type_default = directory + enable_iscsi_datasn = no + enable_ceph_datasn = no + enable_gluster_datasn = no + enable_nbd_datasn = no + image_raw_device_datasn = no + + variants: + # Share the defined installation configurations + - @with_installation: + variants: + - @extra_cdrom_ks: diff --git a/qemu/tests/cfg/blockdev_stream_multiple_blocks.cfg b/qemu/tests/cfg/blockdev_stream_multiple_blocks.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e69da78101aa3769c36506734e0a4353d95729a3 --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_multiple_blocks.cfg @@ -0,0 +1,77 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: 
+# Multiple block stream simultaneously +# The snapshot images are local fs images + + +- blockdev_stream_multiple_blocks: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + qemu_force_use_drive_expression = no + type = blockdev_stream_multiple_blocks + virt_test_type = qemu + start_vm = yes + kill_vm = yes + images += " data1 data2" + source_images = "data1 data2" + snapshot_images = "data1sn data2sn" + image_backup_chain_data1 = data1sn + image_backup_chain_data2 = data2sn + node = drive_data1 + base_tag = data1 + snapshot_tag = data1sn + remove_image_data1 = yes + remove_image_data2 = yes + force_create_image_data1 = yes + force_create_image_data2 = yes + tempfile_size = 500M + parallel_tests = do_block_stream_on_another_image + storage_pools = default + storage_pool = default + rebase_mode = unsafe + + image_size_data1 = 2G + image_size_data2 = 2G + image_size_data1sn = ${image_size_data1} + image_size_data2sn = ${image_size_data2} + + image_format_data1 = qcow2 + image_format_data2 = qcow2 + image_format_data1sn = qcow2 + image_format_data2sn = qcow2 + + image_name_data1 = data1 + image_name_data2 = data2 + image_name_data1sn = data1sn + image_name_data2sn = data2sn + + nbd: + force_create_image_data1 = no + force_create_image_data2 = no + nbd_port_data1 = 10822 + nbd_port_data2 = 10823 + image_create_support_data1 = no + image_create_support_data2 = no + iscsi_direct: + lun_data1 = 1 + lun_data2 = 2 + + blk_extra_params_data1 = "serial=DATA_DISK1" + blk_extra_params_data2 = "serial=DATA_DISK2" + Host_RHEL.m6..ide: + blk_extra_params_data1 = "wwn=0x5000123456789abc" + blk_extra_params_data2 = "wwn=0x5000123456789cba" + + # For local snapshot images + storage_type_default = directory + enable_iscsi_data1sn = no + enable_iscsi_data2sn = no + enable_ceph_data1sn = no + enable_ceph_data2sn = no + enable_gluster_data1sn = no + enable_gluster_data2sn = no + enable_nbd_data1sn = no + enable_nbd_data2sn = no + image_raw_device_data1sn = no + image_raw_device_data2sn = no diff --git a/qemu/tests/cfg/blockdev_stream_stress.cfg b/qemu/tests/cfg/blockdev_stream_stress.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7d27d99892edf80cafe3dfec4e0e7ed3babefe92 --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_stress.cfg @@ -0,0 +1,33 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block stream test with stress +# The snapshot image is a local image(filesystem) + +- blockdev_stream_stress: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + start_vm = yes + kill_vm = yes + qemu_force_use_drive_expression = no + type = blockdev_stream_stress + virt_test_type = qemu + stress_args = --cpu 2 --vm 2 --io 2 --vm-bytes 50M + base_tag = image1 + node = drive_image1 + snapshot_tag = image1sn + block_stream_timeout = 1200 + storage_pools = default + storage_pool = default + rebase_mode = unsafe + + image_name_image1sn = image1sn + image_format_image1sn = qcow2 + + # For the local snapshot image + storage_type_default = directory + enable_iscsi_image1sn = no + enable_ceph_image1sn = no + enable_gluster_image1sn = no + enable_nbd_image1sn = no + image_raw_device_image1sn = no diff --git a/qemu/tests/cfg/blockdev_stream_subchain.cfg b/qemu/tests/cfg/blockdev_stream_subchain.cfg new file mode 100644 index 0000000000000000000000000000000000000000..865fe75fb5fc5700edd662e32504662bf0a7249b --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_subchain.cfg @@ -0,0 +1,68 @@ +# 
Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block stream test based on an existing snapshot +# data -> datasn1 -> datasn2 -> datasn3 +# The snapshot images are local fs images +# block-stream: {device: datasn3, base-node: datasn1} + + +- blockdev_stream_subchain: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + qemu_force_use_drive_expression = no + type = blockdev_stream_subchain + virt_test_type = qemu + start_vm = yes + kill_vm = yes + images += " data" + base_tag = data + node = drive_data + remove_image_data = yes + force_create_image_data = yes + snapshot_images = "datasn1 datasn2 datasn3" + snapshot_tag = datasn3 + base_node_tag = datasn1 + base_node = drive_datasn1 + tempfile_size = 200M + storage_pools = default + storage_pool = default + rebase_mode = unsafe + + image_size_data = 2G + image_size_datasn1 = ${image_size_data} + image_size_datasn2 = ${image_size_data} + image_size_datasn3 = ${image_size_data} + image_name_data = data + image_name_datasn1 = datasn1 + image_name_datasn2 = datasn2 + image_name_datasn3 = datasn3 + image_format_data = qcow2 + image_format_datasn1 = qcow2 + image_format_datasn2 = qcow2 + image_format_datasn3 = qcow2 + + gluster_direct: + enable_gluster_data = yes + ceph: + enable_ceph_data = yes + nbd: + enable_nbd_data = yes + nbd_port_data = 10822 + iscsi_direct: + lun_data = 1 + enable_iscsi_data = yes + image_raw_device_data = yes + + # For the local snapshot images + storage_type_default = directory + enable_iscsi_datasn3 = no + enable_iscsi_datasn1 = no + enable_ceph_datasn3 = no + enable_ceph_datasn1 = no + enable_gluster_datasn3 = no + enable_gluster_datasn1 = no + enable_nbd_datasn3 = no + enable_nbd_datasn1 = no + image_raw_device_datasn3 = no + image_raw_device_datasn1 = no diff --git a/qemu/tests/cfg/blockdev_stream_vm_reboot.cfg b/qemu/tests/cfg/blockdev_stream_vm_reboot.cfg new file mode 100644 index 0000000000000000000000000000000000000000..05f12a60efb8ee0345e3a0418b61f9f0356a3da0 --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_vm_reboot.cfg @@ -0,0 +1,33 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# block stream during vm reboot +# The snapshot image is a local image(filesystem) + +- blockdev_stream_vm_reboot: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + qemu_force_use_drive_expression = no + virt_test_type = qemu + type = blockdev_stream_vm_reboot + start_vm = yes + kill_vm = yes + base_tag = image1 + node = drive_image1 + snapshot_tag = image1sn + parallel_tests = reboot_vm + storage_pools = default + storage_pool = default + block_stream_timeout = 1200 + rebase_mode = unsafe + + image_name_image1sn = image1sn + image_format_image1sn = qcow2 + + # For the local snapshot image + storage_type_default = directory + enable_iscsi_image1sn = no + enable_ceph_image1sn = no + enable_gluster_image1sn = no + enable_nbd_image1sn = no + image_raw_device_image1sn = no diff --git a/qemu/tests/cfg/blockdev_stream_vm_stop_cont.cfg b/qemu/tests/cfg/blockdev_stream_vm_stop_cont.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c0bd74b73986087adbb0f957af2b4602054d29d9 --- /dev/null +++ b/qemu/tests/cfg/blockdev_stream_vm_stop_cont.cfg @@ -0,0 +1,34 @@ +# Storage backends: +# filesystem, iscsi_direct, ceph, nbd, gluster_direct +# The following testing scenario is covered: +# Basic block stream test during vm stop and cont +# 
The snapshot image is a local image(filesystem) + +- blockdev_stream_vm_stop_cont: + only Linux + only filesystem iscsi_direct ceph nbd gluster_direct + qemu_force_use_drive_expression = no + type = blockdev_stream_vm_stop_cont + virt_test_type = qemu + start_vm = yes + kill_vm = yes + base_tag = image1 + node = drive_image1 + snapshot_tag = image1sn + parallel_tests = stop_cont_vm + vm_stop_time_list = 1 5 10 + block_stream_timeout = 1200 + storage_pools = default + storage_pool = default + rebase_mode = unsafe + + image_name_image1sn = image1sn + image_format_image1sn = qcow2 + + # For the local snapshot image + storage_type_default = directory + enable_iscsi_image1sn = no + enable_ceph_image1sn = no + enable_gluster_image1sn = no + enable_nbd_image1sn = no + image_raw_device_image1sn = no diff --git a/qemu/tests/cfg/boot_e1000e_with_cpu_flag.cfg b/qemu/tests/cfg/boot_e1000e_with_cpu_flag.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6c135872293be237d297dbbc3b231a4517031792 --- /dev/null +++ b/qemu/tests/cfg/boot_e1000e_with_cpu_flag.cfg @@ -0,0 +1,9 @@ +- boot_e1000e_with_cpu_flag: + only q35 + only e1000e + only Windows + type = boot_e1000e_with_cpu_flag + cpu_model_flags += ",hv_vendor_id=KVMKVMKVM" + bcdedit_debug = "bcdedit /debug on" + bcdedit_cmd = "bcdedit /dbgsettings net hostip:192.168.0.12 port:50000 key:this.key.isnt.secure" + ext_host = "www.redhat.com" diff --git a/qemu/tests/cfg/boot_nic_with_intel_iommu.cfg b/qemu/tests/cfg/boot_nic_with_iommu.cfg similarity index 32% rename from qemu/tests/cfg/boot_nic_with_intel_iommu.cfg rename to qemu/tests/cfg/boot_nic_with_iommu.cfg index 1765be17905d912e39f079b8f9b5254d7d0acc34..43df26d8a47774faa772abc8d09dde2117fcfc76 100644 --- a/qemu/tests/cfg/boot_nic_with_intel_iommu.cfg +++ b/qemu/tests/cfg/boot_nic_with_iommu.cfg @@ -1,15 +1,17 @@ -- boot_nic_with_intel_iommu: - only q35 +- boot_nic_with_iommu: + x86_64, i386: + only q35 + no WinXP WinVista Win7 Win8 Win8.1 Win2003 + no Win2008 Win2008..r2 Win2012 Win2012..r2 + machine_type_extra_params = "kernel-irqchip=split" + HostCpuVendor.intel: + intel_iommu = yes + Linux: + enable_guest_iommu = yes only virtio_net - no WinXP WinVista Win7 Win8 Win8.1 Win2003 - no Win2008 Win2008..r2 Win2012 Win2012..r2 - type = boot_nic_with_intel_iommu - extra_params = "-device intel-iommu,intremap=on,device-iotlb=on" - machine_type_extra_params = "kernel-irqchip=split" + type = boot_nic_with_iommu virtio_dev_disable_legacy = on virtio_dev_disable_modern = off virtio_dev_iommu_platform = on virtio_dev_ats = on vhostforce = on - Linux: - enable_guest_iommu = yes diff --git a/qemu/tests/cfg/boot_order_check.cfg b/qemu/tests/cfg/boot_order_check.cfg index bee4b6b0365679fd1aa9d8d944b50c03ddb43bfc..49caf092640c6ef7a3845623a6475ec0af8fee25 100644 --- a/qemu/tests/cfg/boot_order_check.cfg +++ b/qemu/tests/cfg/boot_order_check.cfg @@ -6,6 +6,7 @@ kill_vm = yes boot_menu = on enable_sga = yes + devices_load_timeout = 10 # we have QEMU machine with three NICs (virtio, e1000, rtl8139) # and two disks (default, IDE). firmware should try to boot from the bootindex=1 # first. If this fails, it should try the bootindex=2 next, and so on. @@ -26,6 +27,10 @@ force_create_image_stg2 = yes remove_image_stg2 = yes nic_addr_filter = "%s.*?Bus\s+(\d+),\s+device\s+(\d+),\s+function\s+(\d+)" + # The default cdroms, which obtain bootindex automatically, would cause + # bootindex conflict in this case, so cleanup the default cdroms that are + # redundant for this case. 
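(Aside: the blockdev_stream_subchain chain above, data -> datasn1 -> datasn2 -> datasn3, boils down to a single QMP call. A sketch assuming the same hypothetical `vm` monitor handle as in the earlier asides; the job-id is illustrative.)

```python
def stream_subchain(vm):
    # Copy the data held by the intermediate snapshots above the base
    # node into the active layer; drive_datasn1 stays as the backing node
    vm.monitor.cmd("block-stream",
                   {"job-id": "stream_datasn3",
                    "device": "drive_datasn3",      # active layer (snapshot_tag)
                    "base-node": "drive_datasn1"})  # base_node in the cfg
```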
+ cdroms = '' variants: - bootorder0: # Some firmware has limitations on which devices can be considered for diff --git a/qemu/tests/cfg/cache_sizes_test.cfg b/qemu/tests/cfg/cache_sizes_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9a2abae04f8c24bfd20728be96fc84c7515010e8 --- /dev/null +++ b/qemu/tests/cfg/cache_sizes_test.cfg @@ -0,0 +1,17 @@ +- cache_sizes_test: + only qcow2 + virt_test_type = qemu + type = cache_sizes_test + start_vm = no + kill_vm = yes + force_create_image = no + tmp_dir = /var/tmp + tmp_file_name = ${tmp_dir}/testfile + guest_file_name = ${tmp_file_name} + cache_sizes = "0 2 1024 65525 65527" + Windows: + guest_file_name = C:\testfile + x86_64: + sync_bin = WIN_UTILS:\Sync\sync64.exe /accepteula + i386, i686: + sync_bin = WIN_UTILS:\Sync\sync.exe /accepteula diff --git a/qemu/tests/cfg/cdrom_block_size_check.cfg b/qemu/tests/cfg/cdrom_block_size_check.cfg index 617c6e541b78e464dded95a177702390918682de..7022bd4c4f83313c950888d8fe46d7b130e96aca 100644 --- a/qemu/tests/cfg/cdrom_block_size_check.cfg +++ b/qemu/tests/cfg/cdrom_block_size_check.cfg @@ -3,6 +3,13 @@ start_vm = no test_cdroms = none cdrom_without_file = yes + excepted_qmp_err = "Device '${test_cdroms}' is locked and force was not specified, " + excepted_qmp_err += "wait for tray to open and try again" + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= Linux: check_cdrom_size_cmd = cat /sys/block/sr0/size mount_cdrom_cmd = "mount %s %s" diff --git a/qemu/tests/cfg/cdrom_test.cfg b/qemu/tests/cfg/cdrom_test.cfg index 429a655c898ce8783315ae6e2e9218b4f9e5f8c5..41f453b0cc96128bd1aadfce7a3d6c250ec76712 100644 --- a/qemu/tests/cfg/cdrom_test.cfg +++ b/qemu/tests/cfg/cdrom_test.cfg @@ -9,6 +9,11 @@ tray_check_src = tray_open.py # wait before eject $cdrom (let OS initialize cdrom ...) workaround_eject_time = 5 + virtio_scsi: + # disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= variants: - cdrom_default: cdrom_test_eject = yes diff --git a/qemu/tests/cfg/cgroup.cfg b/qemu/tests/cfg/cgroup.cfg index 78c36e0957663e241ab1b577d6cab6e0d50fe31b..d66ce8a87b1169230f366db608d04801b0aff259 100644 --- a/qemu/tests/cfg/cgroup.cfg +++ b/qemu/tests/cfg/cgroup.cfg @@ -64,6 +64,10 @@ # cgroup_use_half_smp, cgroup_test_time, cgroup_limit, cgroup_cpuset, cgroup_verify cgroup_use_half_smp = 'yes' cgroup_test_time = 10 + ## 8.3 and later will use cpu system time; 
prior to 8.3 will use cpu user time + cpu_time_type = 2 + Host_RHEL.m6,Host_RHEL.m7,Host_RHEL.m8.u0,Host_RHEL.m8.u1,Host_RHEL.m8.u2: + cpu_time_type = 0 # cgroup_cpuset = [[None, '0,3', '1', '2', '1-2'], # cgroup_cpuset += [None, '0', '1', '0-1', '0-1']] # cgroup_verify = [[50, 100, 100, 50], [100, 100, 5, 5]] diff --git a/qemu/tests/cfg/change_media.cfg b/qemu/tests/cfg/change_media.cfg index b4b75319abd833965bfe43ca9905259b90521376..994856d9a5d07c8b74198791477d1f953b802467 100644 --- a/qemu/tests/cfg/change_media.cfg +++ b/qemu/tests/cfg/change_media.cfg @@ -8,6 +8,11 @@ orig_img_name = /tmp/orig.iso new_img_name = /tmp/new.iso cdrom_cd1 = /tmp/orig.iso + virtio_scsi: + # disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= Linux: cd_mount_cmd = mount %s /mnt cd_umount_cmd = umount /mnt diff --git a/qemu/tests/cfg/chardev_acpi.cfg b/qemu/tests/cfg/chardev_acpi.cfg new file mode 100644 index 0000000000000000000000000000000000000000..09d57d15069f9d70d1537b7355627fe9e4ea8e6b --- /dev/null +++ b/qemu/tests/cfg/chardev_acpi.cfg @@ -0,0 +1,8 @@ +- chardev_acpi: + only x86_64, i386 + only RHEL + type = chardev_acpi + serials += ' vs1 ' + chardev_backend_vs1 = tty + chardev_path_vs1 = /dev/ttyS0 + check_cmd = 'cat /sys/class/tty/ttyS0/device/resources' diff --git a/qemu/tests/cfg/chardev_free_port.cfg b/qemu/tests/cfg/chardev_free_port.cfg new file mode 100644 index 0000000000000000000000000000000000000000..160c791d3c9c1d83d58a6a3c71126c3d6723321b --- /dev/null +++ b/qemu/tests/cfg/chardev_free_port.cfg @@ -0,0 +1,8 @@ +- chardev_free_port: + only Linux + type = chardev_free_port + vms = 'vm0 vm1' + start_vm = no + image_snapshot = yes + extra_chardevs = "channel1" + chardev_backend_channel1 = "tcp_socket" diff --git a/qemu/tests/cfg/chardev_legacy_unplug.cfg b/qemu/tests/cfg/chardev_legacy_unplug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..44ca23ad6792291e8eab362752caadd9d073fb10 --- /dev/null +++ b/qemu/tests/cfg/chardev_legacy_unplug.cfg @@ -0,0 +1,7 @@ +- chardev_legacy_unplug: + only RHEL + only x86_64 + type = chardev_legacy_unplug + serials += ' vs1 ' + serial_type_vs1 = isa-serial + start_vm = no diff --git a/qemu/tests/cfg/chardev_pci_serial_login.cfg b/qemu/tests/cfg/chardev_pci_serial_login.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9d77978e8081796930f3ddc71052cfa8d4cc71df --- /dev/null +++ b/qemu/tests/cfg/chardev_pci_serial_login.cfg @@ -0,0 +1,6 @@ +- chardev_pci_serial_login: + only RHEL + only x86_64 + type = chardev_serial_login + serial_type = pci-serial + start_vm = no diff --git a/qemu/tests/cfg/check_block_size.cfg b/qemu/tests/cfg/check_block_size.cfg index 8895abe3128c84720688b6651dbc950b8724b3d1..c2d82acab623e133f6ecf3d334db591735369c2f 100644 --- a/qemu/tests/cfg/check_block_size.cfg +++ b/qemu/tests/cfg/check_block_size.cfg @@ -17,6 +17,11 @@ chk_phy_blk_cmd = "cat /sys/block/%s/queue/physical_block_size" chk_log_blk_cmd = "cat /sys/block/%s/queue/logical_block_size" chk_blks_cmd_windows = "powershell "get-disk|format-list"" + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= variants: - extra_cdrom_ks: cdroms += " unattended" @@ -36,6 +41,8 @@ restore_ovmf_vars = yes Windows: send_key_at_install = ret + aarch64: + restore_aavmf_vars = yes - base: only 4096_4096 variants: diff --git a/qemu/tests/cfg/cpu_device_hotplug_during_boot.cfg b/qemu/tests/cfg/cpu_device_hotplug_during_boot.cfg index 
0a1a7c818e97a63e494dc3f603a05d4057c7f6ad..2a6445ba82694daf9acb1507f345886940119ad2 100644 --- a/qemu/tests/cfg/cpu_device_hotplug_during_boot.cfg +++ b/qemu/tests/cfg/cpu_device_hotplug_during_boot.cfg @@ -1,13 +1,17 @@ - cpu_device_hotplug_during_boot: cpu_device_hotpluggable required_qemu = [2.6.0, ) + ppc64, ppc64le: + required_qemu = [2.12.0, ) virt_test_type = qemu type = cpu_device_hotplug_during_boot qemu_sandbox = on vcpu_devices = vcpu1 only Linux + no aarch64 no ovmf no RHEL.6 variants: - @only_plug: - with_unplug: + no s390x unplug_during_boot = yes diff --git a/qemu/tests/cfg/cpu_device_hotplug_maximum.cfg b/qemu/tests/cfg/cpu_device_hotplug_maximum.cfg index 6910a5f222b0b4de6aea4f3a034785271fb8a126..b918e468eb251e9fb0689f7f46cd733fbab18aec 100644 --- a/qemu/tests/cfg/cpu_device_hotplug_maximum.cfg +++ b/qemu/tests/cfg/cpu_device_hotplug_maximum.cfg @@ -5,18 +5,22 @@ no RHEL.6 only x86_64 ppc64 ppc64le required_qemu = [2.6.0, ) + ppc64, ppc64le: + required_qemu = [2.12.0, ) start_vm = no qemu_sandbox = on - allow_pcpu_overcommit = yes + allow_pcpu_overcommit = no # Require long time to reboot if CPU overcommit reboot_timeout = 360 vcpu_maxcpus = 0 smp = 1 q35: machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on" + intel_iommu = yes + virtio_dev_iommu_platform = on variants: - max_socket: + only Linux vcpu_sockets = 0 vcpu_cores = 1 vcpu_threads = 1 @@ -25,15 +29,20 @@ vcpu_cores = 0 vcpu_threads = 1 - max_thread: + only ppc64 ppc64le + smp = 8 vcpu_sockets = 1 - vcpu_cores = 1 - vcpu_threads = 0 - ppc64, ppc64le: - smp = 8 - vcpu_threads = 8 - vcpu_cores = 0 + vcpu_cores = 0 + vcpu_threads = 8 variants: - @default: - with_hugepages: hugepage = yes extra_params += " -mem-path /mnt/kvm_hugepage" + - offline_vcpu: + only Linux + ppc64, ppc64le: + only max_thread + ! ppc64, ppc64le: + only max_core + offline_vcpu_after_hotplug = yes diff --git a/qemu/tests/cfg/cpu_device_hotpluggable.cfg b/qemu/tests/cfg/cpu_device_hotpluggable.cfg index f38ae6c36102d37e7164766501337e3afc6049c0..fb398f311f549e5c5c4ae3d032c5e24accd709fa 100644 --- a/qemu/tests/cfg/cpu_device_hotpluggable.cfg +++ b/qemu/tests/cfg/cpu_device_hotpluggable.cfg @@ -1,13 +1,18 @@ # Notes: # For ppc64/ppc64le, please manually specify cpu_model in your environment - cpu_device_hotpluggable: install setup image_copy unattended_install.cdrom - only ppc64 ppc64le x86_64 + no aarch64 Windows: - no WinXP, WinVista, Win7, Win8, Win10, Win2000, Win2003 + # TBD: Please update the list when new supported Win OS comes out + only Win2008, Win2012, Win2016, Win2019 + Win2008, Win2012, s390x: + check_cpu_topology = no # ovmf does not support hotpluggable vCPU yet, this line will be removed # when it is fully supported. 
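In the cpu_device_hotplug_maximum hunk above, each max_socket/max_core/max_thread variant leaves exactly one topology dimension at 0 so it can be derived from the CPU count. A simplified sketch of that derivation (assumed behavior, not the framework's actual code; the function name is hypothetical):

    def solve_topology(maxcpus, sockets, cores, threads):
        # Exactly one of the three dimensions is 0; derive it from maxcpus
        # divided by the product of the two fixed dimensions.
        dims = {"sockets": sockets, "cores": cores, "threads": threads}
        unknown = [name for name, value in dims.items() if value == 0]
        assert len(unknown) == 1, "exactly one dimension may be 0"
        fixed = 1
        for value in dims.values():
            fixed *= value or 1
        dims[unknown[0]] = maxcpus // fixed
        return dims

    # e.g. the patched ppc64 max_thread variant: smp=8, sockets=1, threads=8
    print(solve_topology(maxcpus=8, sockets=1, cores=0, threads=8))  # cores -> 1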
no ovmf required_qemu = [2.6.0, ) + ppc64, ppc64le: + required_qemu = [2.12.0, ) virt_test_type = qemu type = cpu_device_hotpluggable # Sleep for a while after vCPUs change, make guest stable @@ -35,6 +40,8 @@ hotplug: Linux: sub_test_after_migrate = reboot hotunplug + s390x: + sub_test_after_migrate = reboot Windows: sub_test_after_migrate = reboot hotunplug: @@ -44,6 +51,8 @@ pause_vm_before_hotplug = yes Linux: sub_test_type = pause_resume + s390x: + del sub_test_type - with_online_offline: only hotplug only Linux @@ -59,13 +68,24 @@ install_path = "C:\Program Files\JAM Software\HeavyLoad" install_cmd = "start /wait %s:\HeavyLoadSetup.exe /verysilent" - with_numa: + no s390x only hotplug only multi_vcpu type = cpu_device_hotpluggable_with_numa start_vm = no + mem_fixed = 4096 guest_numa_nodes = node0 node1 numa_nodeid_node0 = 0 numa_nodeid_node1 = 1 + mem_devs = "mem0 mem1" + size_mem0 = 2048M + size_mem1 = 2048M + use_mem_mem0 = "no" + use_mem_mem1 = "no" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + backend_mem_mem0 = memory-backend-ram + backend_mem_mem1 = memory-backend-ram variants: - single_vcpu: vcpu_devices = vcpu1 @@ -77,5 +97,6 @@ vcpu_enable = no - hotunplug: only Linux + no s390x hotpluggable_test = hotunplug vcpu_enable = yes diff --git a/qemu/tests/cfg/cpu_info_check.cfg b/qemu/tests/cfg/cpu_info_check.cfg new file mode 100644 index 0000000000000000000000000000000000000000..408d73d21188904344ff87e0d9650b0007e2bf2a --- /dev/null +++ b/qemu/tests/cfg/cpu_info_check.cfg @@ -0,0 +1,14 @@ +- cpu_info_check: + type = cpu_info_check + start_vm = no + remove_list = '' + cpu_model_2_12_0 = 'Skylake-Client Skylake-Server' + cpu_model_3_1_0 = 'Cascadelake-Server Icelake-Client Icelake-Server KnightsMill' + cpu_model_8_2 = 'Cascadelake-Server-noTSX Icelake-Client-noTSX Icelake-Server-noTSX' + cpu_model_8_2 += ' Skylake-Client-noTSX-IBRS Skylake-Server-noTSX-IBRS' + cpu_model_8_3 = 'Cooperlake EPYC-Rome' + cpu_model_8 = '${cpu_model_8_2} ${cpu_model_8_3}' + Host_RHEL.m8.u2, Host_RHEL.m8.u1, Host_RHEL.m8.u0: + remove_list = ${cpu_model_8_3} + Host_RHEL.m7, Host_RHEL.m6: + remove_list = ${cpu_model_8} diff --git a/qemu/tests/cfg/cpu_model_inter_generation.cfg b/qemu/tests/cfg/cpu_model_inter_generation.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7d6df85aefc6aedea9c8db2cc6cb620bfbe6f468 --- /dev/null +++ b/qemu/tests/cfg/cpu_model_inter_generation.cfg @@ -0,0 +1,11 @@ +- cpu_model_inter_generation: + type = cpu_model_inter_generation + start_vm = no + variants: + - @default: + cpu_model_flags = '' + warning_text = "qemu-kvm: warning: host doesn't support requested feature" + - enforce: + boot_expected = no + cpu_model_flags = ',enforce' + warning_text = "qemu-kvm: Host doesn't support requested features" diff --git a/qemu/tests/cfg/cpu_model_negative.cfg b/qemu/tests/cfg/cpu_model_negative.cfg new file mode 100644 index 0000000000000000000000000000000000000000..abf322b75d070bbc2cc01df5e4ffd7d3d34257d8 --- /dev/null +++ b/qemu/tests/cfg/cpu_model_negative.cfg @@ -0,0 +1,15 @@ +- cpu_model_negative: + type = cpu_model_negative + start_vm = no + warning_msg = '' + variants: + - enforce: + enforce_flag = 'avx512_bf16' + wrong_cmd = "-cpu 'CPU_MODEL',+${enforce_flag},check,enforce" + warning_msg = "Property '.${enforce_flag}' not found" + - smp_lt_maxcpu: + wrong_cmd = "-smp 8,maxcpus=4,cores=2,threads=2,dies=1,sockets=2" + warning_msg = 'maxcpus must be equal to or greater than smp' + - smp_out_of_range: + wrong_cmd = "-machine MACHINE_TYPE -smp 
OUT_OF_RANGE" + warning_msg = "Invalid SMP CPUs SMP_VALUE. The max CPUs supported by machine 'MACHINE_TYPE' is MAX_VALUE" diff --git a/qemu/tests/cfg/cpu_offline_online.cfg b/qemu/tests/cfg/cpu_offline_online.cfg new file mode 100644 index 0000000000000000000000000000000000000000..aa5457c7f2a054d1bb6134202f65086820f27531 --- /dev/null +++ b/qemu/tests/cfg/cpu_offline_online.cfg @@ -0,0 +1,5 @@ +- cpu_offline_online: + virt_test_type = qemu + type = cpu_offline_online + start_vm = no + only Linux diff --git a/qemu/tests/cfg/cpu_rdrand.cfg b/qemu/tests/cfg/cpu_rdrand.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f8f6b68ac9cebe02a01668a0a4dcc296eea3f45e --- /dev/null +++ b/qemu/tests/cfg/cpu_rdrand.cfg @@ -0,0 +1,12 @@ +- cpu_rdrand: + type = cpu_rdrand + Windows: + test_bin = RdRand.exe + source_file = ${test_bin} + guest_path = C:\ + delete_cmd = "del /f ${test_bin}" + Linux: + test_bin = rdrand + source_file = ${test_bin}.c + guest_path = /var/tmp/ + delete_cmd = "rm -f ${test_bin} ${source_file}" diff --git a/qemu/tests/cfg/cpu_topology_test.cfg b/qemu/tests/cfg/cpu_topology_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0feafae178462cb7f87948b188673da66e5fa4f5 --- /dev/null +++ b/qemu/tests/cfg/cpu_topology_test.cfg @@ -0,0 +1,9 @@ +- cpu_topology_test: + no smp2 + no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 + no Win2008 Win2008..r2 Win2012 Win2012..r2 + type = cpu_topology_test + start_vm = no + Linux: + x86_64: + check_siblings_cmd = 'cat /proc/cpuinfo |grep siblings |uniq |cut -d ":" -f 2' diff --git a/qemu/tests/cfg/create_large_raw_img.cfg b/qemu/tests/cfg/create_large_raw_img.cfg index 95a8387837c3567796f7b9f1c298209c7550a297..84a2b0fb5bf205dd92cf749d7ae9c073d7fb99c4 100755 --- a/qemu/tests/cfg/create_large_raw_img.cfg +++ b/qemu/tests/cfg/create_large_raw_img.cfg @@ -11,7 +11,8 @@ - over_xfs: file_sys = "xfs" image_size_large = 10240000T - err_info = "Image size must be less than 8 EiB!" + err_info = "Image size must be less than 8 EiB!;" + err_info = '${err_info}Invalid image size specified. Must be between 0 and 9223372036854775807.' 
- over_ext4: file_sys = "ext4" image_size_large = 16T diff --git a/qemu/tests/cfg/device_option_check.cfg b/qemu/tests/cfg/device_option_check.cfg old mode 100644 new mode 100755 index 879de870d7a388d6aaea5a1890834f1eace24daf..ecf453023952f16310f6868332aac38318a62587 --- a/qemu/tests/cfg/device_option_check.cfg +++ b/qemu/tests/cfg/device_option_check.cfg @@ -48,6 +48,11 @@ blk_extra_params_image1 = wwn=0x5000c50015ea71ad qtree_check_value = image1 - wwn_cdrom: + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= params_name = blk_extra_params_cd1 blk_extra_params_cd1 = wwn=0x5000c50015ea71ad qtree_check_value = cd1 @@ -74,6 +79,7 @@ Windows: cmd = WIN_UTILS:\hddsn.exe C: pattern = %s + sg_vpd_cmd = WIN_UTILS:\sg_vpd.exe -v --hex --page=0x80 C: check_in_qtree = yes qtree_check_keyword = id qtree_check_value = image1 diff --git a/qemu/tests/cfg/disk_extension.cfg b/qemu/tests/cfg/disk_extension.cfg index 46d1bf1fa1d208c8cf45a8cf3c411dbf75f9ab5a..dc4a8c264c8c46fe011c8ebe97a10f95c3a7abc5 100644 --- a/qemu/tests/cfg/disk_extension.cfg +++ b/qemu/tests/cfg/disk_extension.cfg @@ -7,7 +7,6 @@ drive_werror = stop drive_rerror = stop tmpfs_folder = "/tmp/xtmpfs" - loop_device = /dev/loop6 # Please keep consistent unit begin_size = 50M max_size = 500M @@ -29,7 +28,6 @@ remove_image_stg1 = no disk_serial = TARGET_DISK0 blk_extra_params_stg1 += "serial=${disk_serial}" - image_name_stg1 = ${loop_device} image_format_stg1 = qcow2 image_size_stg1 = ${max_size} Linux: diff --git a/qemu/tests/cfg/driver_load.cfg b/qemu/tests/cfg/driver_load.cfg index 809c6e91b55378e8abf1df996886797f8fa2f86b..4949681b1767fd4da4ef74a36a8f7685c341df86 100644 --- a/qemu/tests/cfg/driver_load.cfg +++ b/qemu/tests/cfg/driver_load.cfg @@ -3,6 +3,9 @@ kill_vm_on_error = yes login_timeout = 240 repeats = 1 + Windows: + backup_image_before_testing = yes + restore_image_after_testing = yes variants: - with_nic: no e1000 @@ -39,7 +42,7 @@ - with_block: drive_format_image1 = ide - pseries: + pseries, aarch64: drive_format_image1 = scsi-hd q35: drive_format_image1 = ahci @@ -56,7 +59,7 @@ - with_vioscsi: cd_format_fixed = ide drive_format_image1 = ide - pseries: + pseries, aarch64: cd_format_fixed = scsi-cd drive_format_image1 = virtio q35: diff --git a/qemu/tests/cfg/dump_guest_memory.cfg b/qemu/tests/cfg/dump_guest_memory.cfg new file mode 100644 index 0000000000000000000000000000000000000000..80a2741697894582d6ebc6c58f812cf1eb60b953 --- /dev/null +++ b/qemu/tests/cfg/dump_guest_memory.cfg @@ -0,0 +1,66 @@ +- dump_guest_memory: + type = dump_guest_memory + virt_test_type = qemu + no Windows + monitors = 'qmp1' + monitor_type_qmp1 = qmp + dump_file_timeout = 30 + dump_file = "/home/dump" + crash_script = "/home/crash.cmd" + x86_64: + extra_params = "-device vmcoreinfo" + variants: + - with_detach_params: + cmd_result_check = contain + check_dump = True + query_qmp_cmd = "query-dump" + query_cmd_return_value = "{'status': 'completed'}" + qmp_cmd = "dump-guest-memory detach=true, paging=false, protocol=file:${dump_file}" + cmd_return_value = "{}" + - verify_diff_format_dump_file: + cmd_result_check = contain + paging_false_dump_cmd = "dump-guest-memory paging=false, protocol=file:${dump_file}" + paging_true_dump_cmd = "dump-guest-memory paging=true, protocol=file:${dump_file}" + query_qmp_cmd = "query-dump" + query_cmd_return_value = "{'status': 'completed'}" + variants: + - default: + check_dump = True + qmp_cmd = ${paging_false_dump_cmd} + cmd_return_value = "{}" + - 
query_dump_guest_memory_capability: + check_dump = False + qmp_cmd = "query-dump-guest-memory-capability" + cmd_return_value = "{'formats': ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy", "win-dmp"]}" + - quux_format_dump: + check_dump = False + qmp_cmd = "${paging_false_dump_cmd}, format=quux" + cmd_return_value = "{'desc': "Invalid parameter 'quux'"}" + - elf_format_dump: + check_dump = True + qmp_cmd = "${paging_false_dump_cmd}, format=elf" + cmd_return_value = "{}" + - zlib_format_dump: + check_dump = True + qmp_cmd = "${paging_false_dump_cmd}, format=kdump-zlib" + cmd_return_value = "{}" + - lzo_format_dump: + check_dump = True + qmp_cmd = "${paging_false_dump_cmd}, format=kdump-lzo" + cmd_return_value = "{}" + - snappy_format_dump: + check_dump = True + qmp_cmd = "${paging_false_dump_cmd}, format=kdump-snappy" + cmd_return_value = "{}" + - paging_true_snappy_format_dump: + check_dump = False + qmp_cmd = "${paging_true_dump_cmd}, format=kdump-snappy" + cmd_return_value = "{"desc": "kdump-compressed format doesn't support paging or filter"}" + - paging_true_zlib_format_dump: + check_dump = False + qmp_cmd = "${paging_true_dump_cmd}, format=kdump-zlib" + cmd_return_value = "{"desc": "kdump-compressed format doesn't support paging or filter"}" + - paging_true_lzo_format_dump: + check_dump = False + qmp_cmd = "${paging_true_dump_cmd}, format=kdump-lzo" + cmd_return_value = "{"desc": "kdump-compressed format doesn't support paging or filter"}" diff --git a/qemu/tests/cfg/eject_media.cfg b/qemu/tests/cfg/eject_media.cfg index a60f8ab9edaeb8223eed34fbc771d175e67ed067..a7aa58e7a9fb6465f9e2bb0d66a10446c22ae4b1 100644 --- a/qemu/tests/cfg/eject_media.cfg +++ b/qemu/tests/cfg/eject_media.cfg @@ -9,6 +9,11 @@ post_command += "rm -rf /tmp/orig.iso /tmp/new.iso /tmp/orig /tmp/new;" new_img_name = /tmp/new.iso cdrom_cd1 = /tmp/orig.iso + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= variants: - force_eject: force_eject = yes diff --git a/qemu/tests/cfg/ept_test.cfg b/qemu/tests/cfg/ept_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..eec73924a159d18604dcdfd8bb90a94ebaf603a5 --- /dev/null +++ b/qemu/tests/cfg/ept_test.cfg @@ -0,0 +1,9 @@ +- ept_test: + virt_test_type = qemu + type = ept_test + start_vm = no + kill_vm_on_error = yes + login_timeout = 240 + unload_cmd = "modprobe -r kvm_intel" + load_cmd = "modprobe kvm_intel ept=%s" + read_cmd = "cat /sys/module/kvm_intel/parameters/%s" diff --git a/qemu/tests/cfg/fio_linux.cfg b/qemu/tests/cfg/fio_linux.cfg index 3a098752f5e3bec766695733203ee61f2be667bb..7b2e7964c8f723bc006c5ef1d78803dd61fef570 100644 --- a/qemu/tests/cfg/fio_linux.cfg +++ b/qemu/tests/cfg/fio_linux.cfg @@ -17,6 +17,10 @@ fio_options += '${fio_default_options} --rw=randrw' s390x: fio_install_timeout = 600 + nvme_direct: + fio_filename = '/home/test' + boot_drive_stg0 = no + force_create_image_stg0 = no variants: - aio_native: image_aio_stg0 = native diff --git a/qemu/tests/cfg/fio_windows.cfg b/qemu/tests/cfg/fio_windows.cfg index 4deaec5609068b89335b226a7de6e41dfa885520..b5b4d3aa2c46c88f6812ab25c402caf60e6584dd 100644 --- a/qemu/tests/cfg/fio_windows.cfg +++ b/qemu/tests/cfg/fio_windows.cfg @@ -9,6 +9,10 @@ disk_index = 1 disk_letter = I fio_file_name = "C\:\\fio_system_disk_test:I\:\\fio_data_disk_test" + nvme_direct: + fio_file_name = "C\:\\fio_system_disk_test" + boot_drive_disk1 = no + force_create_image_disk1 = no fio_log_file = "C:\fio_log.txt" cmd_timeout = 300 variants: diff --git
a/qemu/tests/cfg/guest_iommu_test.cfg b/qemu/tests/cfg/guest_iommu_test.cfg index b35594ccdb8bcc8d48027ce70eba974083d49e7c..5dcccb6e9d7ad5353ff6bb9a428ca308f890f83b 100644 --- a/qemu/tests/cfg/guest_iommu_test.cfg +++ b/qemu/tests/cfg/guest_iommu_test.cfg @@ -6,8 +6,8 @@ no WinXP WinVista Win7 Win8 Win8.1 Win2003 no Win2008 Win2008..r2 Win2012 Win2012..r2 no Host_RHEL.m7 - extra_params = "-device intel-iommu,device-iotlb=on,intremap=on" machine_type_extra_params = "kernel-irqchip=split" + intel_iommu = yes virtio_dev_iommu_platform = on virtio_dev_ats = on images += " stg0 stg1" diff --git a/qemu/tests/cfg/guest_memory_dump_analysis.cfg b/qemu/tests/cfg/guest_memory_dump_analysis.cfg deleted file mode 100644 index c9572d0c830557e26694424224c8a60c55132928..0000000000000000000000000000000000000000 --- a/qemu/tests/cfg/guest_memory_dump_analysis.cfg +++ /dev/null @@ -1,7 +0,0 @@ -- guest_memory_dump_analysis: install setup image_copy unattended_install.cdrom - only x86_64 - only JeOS, Fedora, RHEL - type = guest_memory_dump_analysis - mem = 4096 - monitors = qmp1 - monitor_type = qmp diff --git a/qemu/tests/cfg/hotplug_mem.cfg b/qemu/tests/cfg/hotplug_mem.cfg index 04c2b038ab485513bf37bc60e43ad6ea16101ffd..58396dd9fd7941ae70d5b370e3130e7897fbd38b 100644 --- a/qemu/tests/cfg/hotplug_mem.cfg +++ b/qemu/tests/cfg/hotplug_mem.cfg @@ -30,19 +30,26 @@ prealloc_mem = yes share_mem = yes discard-data = yes - only one + only two only no_policy + only pluged_memory - op_off: dump_mem = no merge_mem = no prealloc_mem = no share_mem = no discard-data = no - only one + only two only no_policy + only pluged_memory variants numa_nodes: - one: guest_numa_nodes = "node0" + mem_devs += " memN0" + use_mem_memN0 = "no" + size_mem_memN0 = 4096M + backend_mem_memN0 = memory-backend-ram + numa_memdev_node0 = mem-memN0 del numa_mem del numa_cpus del numa_nodeid @@ -53,7 +60,15 @@ del numa_mem del numa_cpus numa_nodeid = 0 - mem_devs += " mem2" + mem_devs += " mem2 memN0 memN1" + numa_memdev_node0 = mem-memN0 + numa_memdev_node1 = mem-memN1 + use_mem_memN0 = "no" + use_mem_memN1 = "no" + size_mem_memN0 = 2048M + size_mem_memN1 = 2048M + backend_mem_memN0 = memory-backend-ram + backend_mem_memN1 = memory-backend-ram node_dimm_mem2 = 0 node_dimm_mem1 = 1 numa_nodeid_node0 = 0 @@ -105,13 +120,18 @@ - buildin_memory: mem_devs += " buildin" target_mems = "buildin" + only pause_vm + only during - pluged_memory: plug_mem = "plug" target_mems = "plug" + no during.pause_vm - unused_memory: target_mems = "unused" mem_devs += " ${target_mems}" use_mem_unused = no + only guest_reboot + only after - hotplug: target_mems = "plug1 plug2" variants sub_test: @@ -119,11 +139,14 @@ sub_type = boot reboot_method = system_reset sleep_before_reset = 0 + no during + only backend_ram.policy_default backend_file.policy_bind - guest_reboot: sub_type = boot reboot_method = shell kill_vm_on_error = yes reboot_count = 1 + no during - guest_migration: sub_type = migration max_vms = 2 @@ -146,9 +169,10 @@ only after - pause_vm: sub_type = stop_continue - pause_time = 300 + pause_time = 10 wait_resume_timeout = "${pause_time}" - sub_test_wait_time = 60 + sub_test_wait_time = 10 + only backend_ram.policy_bind backend_file.policy_default - stress: sub_test_wait_time = 60 Windows: @@ -167,6 +191,7 @@ Linux: sub_type = linux_stress test_timeout = 1800 + only backend_ram.policy_default backend_file.policy_bind variants stage: - before: - after: diff --git a/qemu/tests/cfg/hotplug_mem_negative.cfg b/qemu/tests/cfg/hotplug_mem_negative.cfg new file mode 
100644 index 0000000000000000000000000000000000000000..28f8bb677f457b73d3e44947779040d36937862b --- /dev/null +++ b/qemu/tests/cfg/hotplug_mem_negative.cfg @@ -0,0 +1,48 @@ +- hotplug_mem_negative: + type = hotplug_mem_negative + start_vm = no + mem_fixed = 4096 + slots_mem = 4 + size_mem = 1G + maxmem_mem = 32G + guest_numa_nodes = "node0" + mem_devs = "mem0" + numa_memdev_node0 = mem-mem0 + use_mem_mem0 = "no" + size_mem_mem0 = 4096M + backend_mem_mem0 = memory-backend-ram + ppc64,ppc64le: + threshold = 0.15 + policy_mem = default + target_mems = "plug1" + set_addr = no + variants: + - overcommit_host_mem: + size_mem = + maxmem_mem = 8T + keywords = "Cannot allocate memory" + - min_mem_size: + size_mem = 0G + keywords = "doesn\'t take value \'0\'" + - invalid_backend_ram_option: + policy_mem_plug1 = bind + keywords = "cannot bind memory to host NUMA nodes|host-nodes must be set for policy bind" + - invalid_numa_node: + node_dimm_plug1 = 2 + keywords = "DIMM property node has value 2" + - wrong_size: + size_mem = 3 + keywords = "memory size must be" + - invalid_device_addr: + set_addr = yes + target_mems = "plug0 plug1 plug2 plug3 plug4" + addr_dimm_plug0 = 0x2 + keywords_plug0 = "address must be aligned" + addr_dimm_plug1 = 0x200000 + keywords_plug1 = "can\'t add memory" + addr_dimm_plug2 = 0x150000000 + keywords_plug2 = "Hotplug memory successful|can\'t add memory" + addr_dimm_plug3 = 0x15000000000000 + keywords_plug3 = "can\'t add memory" + addr_dimm_plug4 = 0x150800000 + keywords_plug4 = "address range conflicts" diff --git a/qemu/tests/cfg/hotplug_unplug_during_io_repeat.cfg b/qemu/tests/cfg/hotplug_unplug_during_io_repeat.cfg index 99e95fd2ef5d2587234d88466b3c54e051cdd361..32de43b78671c17846769df555eca9a439954566 100644 --- a/qemu/tests/cfg/hotplug_unplug_during_io_repeat.cfg +++ b/qemu/tests/cfg/hotplug_unplug_during_io_repeat.cfg @@ -1,6 +1,7 @@ - hotplug_unplug_during_io_repeat: no RHEL.3.9 no ide + no spapr_vscsi virt_test_type = qemu type = hotplug_unplug_during_io_repeat images += " stg0" diff --git a/qemu/tests/cfg/hpt.cfg b/qemu/tests/cfg/hpt.cfg index 8626e3b9d9c2db47aa94987f4b3a057b562c4e73..0621652b27843265247f2b09722344fc7dd3eab8 100644 --- a/qemu/tests/cfg/hpt.cfg +++ b/qemu/tests/cfg/hpt.cfg @@ -6,6 +6,7 @@ no Host_RHEL.6 no RHEL.5 RHEL.6 only pseries + machine_type_extra_params = "max-cpu-compat=power8" variants: - increase_reboot: sub_type = "increase_reboot" @@ -25,3 +26,13 @@ - negative: sub_type = "negative" increment_sequence = "-20 -25 -30 -100 20 30 100" + - with_huge_page: + setup_hugepages = yes + extra_params += " -mem-path /mnt/kvm_hugepage" + variants: + - increase: + sub_type = "huge_page_increase" + increment_sequence = "1" + - reduce: + sub_type = "huge_page_reduce" + increment_sequence = "1 -1 1 -1" diff --git a/qemu/tests/cfg/hpt_max_page_size.cfg b/qemu/tests/cfg/hpt_max_page_size.cfg index a43a1ce1080d48f065b46244cae0c91eea04f487..7e0941e60a87faf5dc142cc1c481f7413dbee6e2 100644 --- a/qemu/tests/cfg/hpt_max_page_size.cfg +++ b/qemu/tests/cfg/hpt_max_page_size.cfg @@ -25,3 +25,4 @@ machine_type_extra_params += ",accel=tcg" disable_kvm = yes auto_cpu_model = yes + login_timeout = 1800 diff --git a/qemu/tests/cfg/hugepage_mem_stress.cfg b/qemu/tests/cfg/hugepage_mem_stress.cfg index 8985605516792a8a4da113f48e7d55ec7b70b36c..73ea5e7e19edcda2370532b8dd6ff14da71f7a97 100644 --- a/qemu/tests/cfg/hugepage_mem_stress.cfg +++ b/qemu/tests/cfg/hugepage_mem_stress.cfg @@ -3,10 +3,19 @@ virt_test_type = qemu kill_vm = yes setup_hugepages = yes - extra_params += " 
-mem-path /mnt/kvm_hugepage" Linux: del stress_args stress_custom_args = "--vm %d --vm-bytes 256M --timeout 30s" Windows: - install_path = "C:\Program Files\JAM Software\HeavyLoad" + x86_64: + install_path = "C:\Program Files (x86)\JAM Software\HeavyLoad" + i386,i686: + install_path = "C:\Program Files\JAM Software\HeavyLoad" install_cmd = "start /wait %s:\HeavyLoadSetup.exe /verysilent" + variants: + - @default: + extra_params += " -mem-path /mnt/kvm_hugepage" + - non_existent_mem_path: + start_vm = no + not_preprocess = yes + non_existent_point = yes diff --git a/qemu/tests/cfg/hugepage_specify_node.cfg b/qemu/tests/cfg/hugepage_specify_node.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f904ad48959a92dd4fd4e53e964b468a72ed6dc2 --- /dev/null +++ b/qemu/tests/cfg/hugepage_specify_node.cfg @@ -0,0 +1,9 @@ +- hugepage_specify_node: + type = hugepage_specify_node + virt_test_type = qemu + kill_vm = yes + start_vm = no + not_preprocess = yes + mem = 4096 + idle_node_mem = 1024 + extra_params += " -mem-path /mnt/kvm_hugepage" diff --git a/qemu/tests/cfg/hv_check_cpu_utilization.cfg b/qemu/tests/cfg/hv_check_cpu_utilization.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b724b2f9a27cc7bc0db9656ec2a3be90f4782906 --- /dev/null +++ b/qemu/tests/cfg/hv_check_cpu_utilization.cfg @@ -0,0 +1,24 @@ +- hv_check_cpu_utilization: + only Windows + type = hv_check_cpu_utilization + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes + nics = "" + cdroms = "" + cpu_model_flags = "hv_crash" + host_check_times = 900 + host_check_interval = 2 + serives_to_stop = wuauserv bits dosvc SysMain + service_check_cmd = sc query %s + service_stop_cmd = sc stop {0} & sc config {0} start=disabled + reg_cmd = reg add "HKLM\SYSTEM\CurrentControlSet\Services\SecurityHealthService" /v Start /d 4 /t REG_DWORD /f + host_check_cmd = top -H -p %s -n ${host_check_times} -d ${host_check_interval} -b > fixed-top-pc-result + vcpn_thread_pattern = r'thread_id.?[:|=]\s*(\d+)' + thread_process_cmd = "cat fixed-top-pc-result |grep %s|awk -F ' ' ' {print $9;}'|awk '{sum+=$1} END {print sum/NR}'" + thread_cpu_level = 5 + post_command = rm -f fixed-top-pc-result + variants: + - @default: + - with_migration: + do_migration = yes diff --git a/qemu/tests/cfg/hv_cpu_hotplug.cfg b/qemu/tests/cfg/hv_cpu_hotplug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0c1ba9b4cc870430a9998cf5a623cac40fdfaf5f --- /dev/null +++ b/qemu/tests/cfg/hv_cpu_hotplug.cfg @@ -0,0 +1,13 @@ +- hv_cpu_hotplug: + type = cpu_device_hotpluggable + only Win2008, Win2012, Win2016, Win2019 + Win2008, Win2012: + check_cpu_topology = no + no ovmf + cpu_model_flags += hv_crash + login_timeout = 360 + sub_test_type = reboot + reboot_method = shell + vcpu_devices = vcpu1 vcpu2 vcpu3 vcpu4 + hotpluggable_test = hotplug + vcpu_enable = no diff --git a/qemu/tests/cfg/hv_crash.cfg b/qemu/tests/cfg/hv_crash.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d29a5e1e59b5e908dde95873c7ebe0788fac112b --- /dev/null +++ b/qemu/tests/cfg/hv_crash.cfg @@ -0,0 +1,11 @@ +- hv_crash: + only Windows + type = hv_crash + not_preprocess = yes + start_vm = no + enable_pvpanic = no + + hv_crash_flag = hv_crash + set_nmi_cmd = 'wmic class stdregprov call SetDwordValue hDefKey="&h80000002" ' + set_nmi_cmd += 'sSubKeyName="SYSTEM\CurrentControlSet\Control\CrashControl" ' + set_nmi_cmd += 'sValueName="NMICrashDump" uValue=1' diff --git a/qemu/tests/cfg/hv_kvm_unit_test.cfg 
b/qemu/tests/cfg/hv_kvm_unit_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..fffe74628aadacd6ab3fc8f57dca1eec77656647 --- /dev/null +++ b/qemu/tests/cfg/hv_kvm_unit_test.cfg @@ -0,0 +1,14 @@ +- hv_kvm_unit_test: + type = hv_kvm_unit_test + only Windows + only q35 + start_vm = no + cpu_model_flags += ",hv_crash" + compile_cmd = "cd %s && cp %s/kvm-unit-tests.tar.gz . && tar -xvzf kvm-unit-tests.tar.gz" + compile_cmd += " && cd kvm-unit-tests && ./configure && make" + test_cmd = "cd %s/kvm-unit-tests" + test_cmd += " && ./x86/run x86/%s -cpu %s -device hyperv-testdev -M q35" + + unit_tests_mapping = '{"hyperv_synic.flat": ["PASS", 1], "hyperv_stimer.flat": ["PASS", 6], "hyperv_connections.flat": ["PASS", 7]}' + Host_RHEL.m7, Host_RHEL.m8.u0, Host_RHEL.m8.u1, Host_RHEL.m8.u2: + skip_tests = "hyperv_connections.flat" diff --git a/qemu/tests/cfg/hv_reset.cfg b/qemu/tests/cfg/hv_reset.cfg new file mode 100644 index 0000000000000000000000000000000000000000..596e43d9fba46dbcabb2c936489b67c7133d7475 --- /dev/null +++ b/qemu/tests/cfg/hv_reset.cfg @@ -0,0 +1,4 @@ +- hv_reset: + only Windows + type = hv_reset + cpu_model_flags += hv_reset,hv_crash diff --git a/qemu/tests/cfg/hv_time.cfg b/qemu/tests/cfg/hv_time.cfg new file mode 100644 index 0000000000000000000000000000000000000000..33ab671619e156d390977972433d891eb482136b --- /dev/null +++ b/qemu/tests/cfg/hv_time.cfg @@ -0,0 +1,22 @@ +- hv_time: + only Windows + type = hv_time + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + close_pltclk_cmd = bcdedit /set useplatformclock no + check_pltclk_cmd = bcdedit /enum all + copy_cmd = "copy /y WIN_UTILS:\hv_tools\%%PROCESSOR_ARCHITECTURE%%\%s c:\" + delete_cmd = "del /f c:\%s" + hv_time_flags = hv_time hv_stimer hv_stimer_direct + extra_params += " -no-hpet" + + i386: + executable_name = gettime_cycles-x86.exe + gettime_filenames = cyggcc_s-1.dll cygwin1.dll ${executable_name} + + x86_64: + executable_name = gettime_cycles.exe + gettime_filenames = cygwin1.dll ${executable_name} + + run_gettime_cmd = "c:\${executable_name}" diff --git a/qemu/tests/cfg/hv_tlbflush.cfg b/qemu/tests/cfg/hv_tlbflush.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a21d2c6798ef693c59c55c1d7870901a33761875 --- /dev/null +++ b/qemu/tests/cfg/hv_tlbflush.cfg @@ -0,0 +1,28 @@ +- hv_tlbflush: + type = hv_tlbflush + only Windows + no Host_RHEL.m7 + Host_RHEL.m8.u0, Host_RHEL.m8.u1, Host_RHEL.m8.u2: + only product_av + + cpu_model_flags += hv_crash,hv_reset + hv_flags_to_ignore = hv_tlbflush hv_vpindex hv_reset hv_ipi hv_synic hv_stimer hv_stimer_direct + + copy_tlbflush_cmd = "copy /y WIN_UTILS:\hv_tools\%%PROCESSOR_ARCHITECTURE%%\%s c:\" + delete_tlbflush_cmd = "del /f c:\%s" + i386: + executable_name = hv_tlbflush-x86.exe + tlbflush_filenames = cyggcc_s-1.dll cygwin1.dll ${executable_name} + x86_64: + executable_name = hv_tlbflush.exe + tlbflush_filenames = cygwin1.dll ${executable_name} + + test_file = "c:\tlb_flush_test.txt" + test_file_size = 1G + delete_test_file_cmd = "del /f ${test_file}" + create_test_file_cmd = "${delete_test_file_cmd} && fsutil file createnew ${test_file} %s" + + run_tlbflush_cmd = 'powershell -command "$m=0; for ($i=0; $i -lt 30; $i++) ' + run_tlbflush_cmd += '{ $s = Get-Date; c:\${executable_name} ${test_file}; $e=Get-Date; ' + run_tlbflush_cmd += '$t = $e - $s; $m=$m+$t; Write-Host $t }; Write-Host $m"' + run_tlbflush_timeout = 3600 diff --git a/qemu/tests/cfg/hv_vapic_test.cfg b/qemu/tests/cfg/hv_vapic_test.cfg new 
file mode 100644 index 0000000000000000000000000000000000000000..41c7ff7927110856ffb3fdace96da2340be59855 --- /dev/null +++ b/qemu/tests/cfg/hv_vapic_test.cfg @@ -0,0 +1,25 @@ +- hv_vapic_test: + type = hv_vapic_test + only Windows + start_vm = no + not_preprocess = yes + cpu_model_flags += hv_crash + hv_vapic_flag = hv_vapic + + timeout = 360 + tmpfs_image_name = disk1 + images += " ${tmpfs_image_name}" + image_size_disk1 = 2G + image_format_disk1 = raw + drive_format_disk1 = ide + q35: + drive_format_disk1 = ahci + drive_cache_disk1 = writethrough + image_boot_disk1 = no + image_boot_image1 = yes + + disk_name_key = image_name_disk1 + + fio_options = " --name=fio-rand-RW --filename=fio-rand-RW --directory=%s\:\ --rw=randwrite" + fio_options += " --bs=512B --direct=1 --numjobs=1 --time_based=1 --runtime=300 --size=1G --iodepth=1" + bw_search_reg = 'WRITE:\s+bw=(\d+)' diff --git a/qemu/tests/cfg/image_create_with_preallocation.cfg b/qemu/tests/cfg/image_create_with_preallocation.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6f9bfb19f78a8a04cf8738b4979f5c25c6fca3e4 --- /dev/null +++ b/qemu/tests/cfg/image_create_with_preallocation.cfg @@ -0,0 +1,23 @@ +- image_create_with_preallocation: + virt_test_type = qemu + type = image_create_with_preallocation + luks: + required_qemu = [4.1.0-18, ) + start_vm = no + create_image_stg = no + images = stg + image_size_stg = 2G + image_name_stg = "images/stg" + remove_image_stg = yes + actual_size = 2147483648 + variants: + - off: + preallocated_stg = off + - full: + preallocated_stg = full + - falloc: + preallocated_stg = falloc + trace_event = fallocate + - metadata: + only qcow2 + preallocated_stg = metadata diff --git a/qemu/tests/cfg/insert_media.cfg b/qemu/tests/cfg/insert_media.cfg new file mode 100644 index 0000000000000000000000000000000000000000..078eb32ec5c76d6f03ad92477c8c17e3ca0adba0 --- /dev/null +++ b/qemu/tests/cfg/insert_media.cfg @@ -0,0 +1,20 @@ +- insert_media: + only Linux + only virtio_scsi + type = insert_media + virt_test_type = qemu + kill_vm = yes + start_vm = no + not_preprocess = yes + monitor_type = qmp + pre_command = "dd if=/dev/zero of=/tmp/new bs=10M count=1 && " + pre_command += "mkisofs -o /tmp/new.iso /tmp/new" + post_command = "rm -rf /tmp/new.iso /tmp/new" + cdrom_cd1 = /tmp/new.iso + tray_move_event = DEVICE_TRAY_MOVED + paused_after_start_vm = yes + force_drive_format_cd1 = scsi-cd + # disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= diff --git a/qemu/tests/cfg/interrupt_check.cfg b/qemu/tests/cfg/interrupt_check.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a33511f13e54bb7ade33bed210700d381368d022 --- /dev/null +++ b/qemu/tests/cfg/interrupt_check.cfg @@ -0,0 +1,46 @@ +- interrupt_check: + virt_test_type = qemu + type = interrupt_check + only Linux + variants: + - disk_interrupt: + increase_test = dd + images += " stg" + image_name_stg = images/stg + image_size_stg = 1G + force_create_image_stg = yes + remove_image_stg = yes + dd_write = "dd if=/dev/zero of=/dev/%s bs=${image_size_stg} count=1" + dd_read = "dd if=/dev/%s of=/dev/null bs=${image_size_stg} count=1" + variants: + - scsi_device: + irq_pattern = "virtio.*-request" + drive_format_stg = scsi-hd + - usb_device: + irq_pattern = "xhci_hcd" + usbs = usb_xhci + usb_bus = "usb_xhci.0" + usb_type_usb_xhci = qemu-xhci + usb_controller = xhci + drive_format_stg = usb3 + - net_interrupt: + increase_test = ping + variants: + - virtio_device: + nic_model_nic1 = virtio + irq_pattern = 
"virtio[0-9]*-request" + - spapr_device: + only ppc64, ppc64le + nic_model_nic1 = spapr-vlan + irq_pattern = "{ifname}" + - event_interrupt: + only ppc64, ppc64le + variants: + - IPI: + increase_test = standby + irq_pattern = "IPI" + standby_time = 5 + - RAS_HOTPLUG: + increase_test = hotplug + vcpu_devices = vcpu1 + irq_pattern = "RAS_HOTPLUG" diff --git a/qemu/tests/cfg/invalid_cpu_device_hotplug.cfg b/qemu/tests/cfg/invalid_cpu_device_hotplug.cfg index 2c28223738577980946a3fae04916ed3e50e80ff..06d02dab7344c21425ea05d23b6d46fa66616900 100644 --- a/qemu/tests/cfg/invalid_cpu_device_hotplug.cfg +++ b/qemu/tests/cfg/invalid_cpu_device_hotplug.cfg @@ -1,8 +1,10 @@ - invalid_cpu_device_hotplug: - only x86_64 ppc64 ppc64le + no aarch64 virt_test_type = qemu type = invalid_cpu_device_hotplug required_qemu = [2.6.0, ) + ppc64, ppc64le: + required_qemu = [2.12.0, ) # ovmf does not support hotpluggable vCPU yet no ovmf no RHEL.6 @@ -16,8 +18,11 @@ error_desc = "core {0} already populated" x86_64: Windows: - no WinXP, WinVista, Win7, Win8, Win10, Win2000, Win2003 + # TBD: Please update the list when new supported Win OS comes out + only Win2008, Win2012, Win2016, Win2019 error_desc = "CPU\[{0}\] with APIC ID \d+ exists" + s390x: + error_desc = "Unable to add CPU with core-id: {0}, it already exists" - invalid_id: execute_test = invalid_vcpu ppc64, ppc64le: @@ -33,6 +38,9 @@ ppc64, ppc64le: error_desc = "invalid core id {0}" invalid_ids = 1 -1 -2 + s390x: + error_desc = "Parameter 'core-id' expects uint32_t" + invalid_ids = -1 - nr_threads: invalid_property = nr-threads only ppc64 ppc64le @@ -44,3 +52,5 @@ error_desc = "core id {0} out of range" x86_64: error_desc = "Invalid CPU {1}: {0} must be in range 0:{2}" + s390x: + error_desc = "Unable to add CPU with core-id: {0}, maximum core-id: {2}" diff --git a/qemu/tests/cfg/ioeventfd.cfg b/qemu/tests/cfg/ioeventfd.cfg index 76b89cd466f268bb4a2acc795dcc0dde05401615..0f30e6408bfdca18eac112a18e97d05ede23c1c1 100644 --- a/qemu/tests/cfg/ioeventfd.cfg +++ b/qemu/tests/cfg/ioeventfd.cfg @@ -5,6 +5,11 @@ start_vm = no orig_ioeventfd = "ioeventfd=off" new_ioeventfd = "ioeventfd=on" + virtio_scsi: + # explicitly disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= variants dev_type: - @block: only virtio_blk virtio_scsi diff --git a/qemu/tests/cfg/kvm_unit_test_nested.cfg b/qemu/tests/cfg/kvm_unit_test_nested.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a53235b59d753b9a2b4ac7112a9b40cf3cf07df9 --- /dev/null +++ b/qemu/tests/cfg/kvm_unit_test_nested.cfg @@ -0,0 +1,18 @@ +- kvm_unit_test_nested: + only x86_64 + type = kvm_unit_test_nested + start_vm = yes + vms = "vm1 vm2 vm3 vm4" + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + kill_vm = yes + clone_cmd = "git clone --depth 1 -q https://gitlab.com/kvm-unit-tests/kvm-unit-tests.git %s" + compile_cmd = "cd %s && ./configure && make" + HostCpuVendor.amd: + test_cmd = "cd %s && ./x86/run ./x86/svm.flat -cpu host,+svm" + HostCpuVendor.intel: + test_cmd = "cd %s && ./x86/run ./x86/vmx.flat -cpu host,+vmx" + test_cmd += " -append '-exit_monitor_from_l2_test -ept_access* -vmx_smp* -vmx_vmcs_shadow_test -atomic_switch_overflow_msrs_test -vmx_init_signal_test -vmx_apic_passthrough_tpr_threshold_test'" + test_cmd += "|grep FAIL|grep -v XFAIL" + kvm_unit_test_timeout = 60 diff --git a/qemu/tests/cfg/live_backup_add_bitmap.cfg b/qemu/tests/cfg/live_backup_add_bitmap.cfg index 
1ce5b310877b78bd087fda803bd2805308f327a8..71e4263f6263440b1abd65f0800445ad7fa78fdb 100644 --- a/qemu/tests/cfg/live_backup_add_bitmap.cfg +++ b/qemu/tests/cfg/live_backup_add_bitmap.cfg @@ -11,6 +11,7 @@ bitmaps = bitmap0 target_image_bitmap0 = stg shutdown_timeout = 360 + only Linux variants: - with_qcow2: image_format_stg = qcow2 diff --git a/qemu/tests/cfg/macvtap_guest_communicate.cfg b/qemu/tests/cfg/macvtap_guest_communicate.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c5d9f337591c95b1b99956f83cdcfb82f81e095f --- /dev/null +++ b/qemu/tests/cfg/macvtap_guest_communicate.cfg @@ -0,0 +1,23 @@ +- macvtap_guest_communicate: + virt_test_type = qemu + type = macvtap_guest_communicate + only macvtap + macvtap_mode = vepa + vms += " vm2" + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes + netperf_server_link = netperf-2.7.1.tar.bz2 + netperf_client_link = ${netperf_server_link} + server_path = /var/tmp/ + client_path = ${server_path} + netperf_test_duration = 120 + netperf_para_sessions = 1 + test_protocols = TCP_STREAM + netperf_output_unit = m + netperf_sizes = 1024 + Windows: + netperf_server_link = "netserver-2.6.0.exe" + netperf_client_link = "netperf.exe" + server_path = "c:\\" + client_path = "c:\\" diff --git a/qemu/tests/cfg/memhp_threads.cfg b/qemu/tests/cfg/memhp_threads.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1361d7c295bbdf787a63ad921ccc2523304ccaa9 --- /dev/null +++ b/qemu/tests/cfg/memhp_threads.cfg @@ -0,0 +1,29 @@ +- memhp_threads: + type = memhp_threads + required_qemu = [5.0, ) + paused_after_start_vm = yes + mem_fixed = 4096 + slots_mem = 4 + maxmem_mem = 32G + smp = 4 + guest_numa_nodes = "node0" + mem_devs = mem0 + use_mem_mem0 = "no" + size_mem_mem0 = 4096M + backend_mem_mem0 = memory-backend-ram + numa_memdev_node0 = mem-mem0 + no Host_RHEL.6 + no RHEL.5 + no Windows..i386 + no WinXP Win2000 Win2003 WinVista + ppc64,ppc64le: + threshold = 0.15 + target_mems = "plug" + size_mem_plug = 10G + mem-path_plug = /mnt/test.img + pre_command = "truncate -s ${size_mem_plug} ${mem-path_plug}" + post_command = "rm -rf ${mem-path_plug}" + backend_mem_plug = memory-backend-file + prealloc_mem_plug = yes + prealloc-threads_mem_plug = 4 + get_threads_cmd = "pstree -p %s | wc -l" diff --git a/qemu/tests/cfg/microcode_test.cfg b/qemu/tests/cfg/microcode_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7c4369325af4eafa670c0954f76e902bae204c01 --- /dev/null +++ b/qemu/tests/cfg/microcode_test.cfg @@ -0,0 +1,9 @@ +- microcode_test: + virt_test_type = qemu + type = microcode_test + kill_vm_on_error = yes + only Linux + only x86_64, i386 + auto_cpu_model = no + cpu_model = host + get_microcode_cmd = "grep microcode /proc/cpuinfo | uniq" diff --git a/qemu/tests/cfg/migrate.cfg b/qemu/tests/cfg/migrate.cfg index 2d2f48d09d10f9d866b74af458fbafd1bbabafef..724d0e372868cffe14caefdf062954dfadced085 100644 --- a/qemu/tests/cfg/migrate.cfg +++ b/qemu/tests/cfg/migrate.cfg @@ -95,6 +95,8 @@ - with_reboot: iterations = 1 type = migration_with_reboot + # Disable force-go-down-check as it's not reliable with reboot + force_reset_go_down_check = none - after_vm_paused: only tcp only Linux @@ -206,9 +208,9 @@ # when using remote URL, pkg_md5sum must be setted. for example: # when using local file, support both relative path and absolute path # when using the relative path, the base dir is "shared/deps/". 
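As the comment block above says, a remote netperf_download_link must come with pkg_md5sum so the downloaded tarball can be verified before use. A minimal sketch of that checksum gate (the helper name and call site are illustrative, not the framework's actual API):

    import hashlib

    def md5_matches(path, expected_md5):
        # Stream the tarball in 1 MiB chunks so large downloads need not
        # fit in memory at once.
        digest = hashlib.md5()
        with open(path, "rb") as tarball:
            for chunk in iter(lambda: tarball.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == expected_md5

    # e.g. md5_matches("/var/tmp/netperf-2.7.1.tar.bz2", params.get("pkg_md5sum"))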
- #netperf_download_link = ftp://ftp.netperf.org/netperf/netperf-2.6.0.tar.bz2 + #netperf_download_link = ftp://ftp.netperf.org/netperf/netperf-2.7.1.tar.bz2 #pkg_md5sum = 9654ffdfd4c4f2c93ce3733cd9ed9236 - netperf_link = netperf-2.6.0.tar.bz2 + netperf_link = netperf-2.7.1.tar.bz2 RHEL.4: netperf_link = netperf-2.4.5.tar.bz2 server_path = /var/tmp @@ -219,8 +221,8 @@ #server_md5sum = 8f107e9df4f501d20cf46cccc426f4a9 netperf_server_link_win = "netserver-2.6.0.exe" netperf_client_link_win = "netperf.exe" - server_path_win = "c:\" - client_path_win = "c:\" + server_path_win = "c:\\" + client_path_win = "c:\\" shell_client = nc - with_pio_op: only RHEL diff --git a/qemu/tests/cfg/migration_with_block.cfg b/qemu/tests/cfg/migration_with_block.cfg index 992df31cf5175fc03ed24448df0c21ab27446895..8782eb6a88857082d2e1f35b0b275099b48e7af8 100644 --- a/qemu/tests/cfg/migration_with_block.cfg +++ b/qemu/tests/cfg/migration_with_block.cfg @@ -42,6 +42,10 @@ blk_extra_params_stg0 = "scsi=on,disable-legacy=off,disable-modern=on" set_dst_params = "{'blk_extra_params_stg0': 'scsi=off,disable-legacy=off,disable-modern=on'}" - with_change_cdrom: + # explicitly disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= src_addition_desc = 'with cdrom' dst_addition_desc = 'with new cdrom' only virtio_scsi diff --git a/qemu/tests/cfg/migration_with_numa.cfg b/qemu/tests/cfg/migration_with_numa.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d89b2a62d1dca6dc01e8f977df40f1db02165661 --- /dev/null +++ b/qemu/tests/cfg/migration_with_numa.cfg @@ -0,0 +1,18 @@ +- migration_with_numa: + type = migration_with_numa + kill_vm = yes + start_vm = no + pre_command = "sync && echo 3 >/proc/sys/vm/drop_caches;" + smp = 4 + mem = 4096 + vcpu_maxcpus = ${smp} + use_mem = no + backend_mem = memory-backend-ram + mem_devs = "mem0 mem1" + size_mem_mem0 = 3072M + size_mem_mem1 = 1024M + prealloc_mem = yes + policy_mem = bind + guest_numa_nodes = "node0 node1" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 diff --git a/qemu/tests/cfg/migration_with_remote.cfg b/qemu/tests/cfg/migration_with_remote.cfg index 255cbdeff28ddf160ebc3ec92942531774428fad..9c14328dc5669d684f430f732c84b7e59b68f235 100644 --- a/qemu/tests/cfg/migration_with_remote.cfg +++ b/qemu/tests/cfg/migration_with_remote.cfg @@ -1,12 +1,14 @@ # Network storage backends: # iscsi_direct # ceph +# gluster_direct +# nbd # The following testing scenarios are covered: # migrate VM on the same host # migrate VM with postcopy on the same host - migration_with_remote_storage: install setup image_copy unattended_install.cdrom - only iscsi_direct ceph + only iscsi_direct ceph gluster_direct nbd virt_test_type = qemu type = migration migration_test_command = help diff --git a/qemu/tests/cfg/mmu_basic.cfg b/qemu/tests/cfg/mmu_basic.cfg new file mode 100644 index 0000000000000000000000000000000000000000..33bc6edf285a917f0bddb87761420337fc913097 --- /dev/null +++ b/qemu/tests/cfg/mmu_basic.cfg @@ -0,0 +1,17 @@ +- mmu_basic: + type = mmu_basic + only pseries + only Host_RHEL.m8 + no RHEL.6 + virt_test_type = qemu + kill_vm = yes + kernel_extra_params_remove = disable_radix + image_snapshot = yes + mmu_option = yes + variants: + - @default: + only RHEL.8 + - hpt: + machine_type_extra_params = "max-cpu-compat=power8" + RHEL.7: + mmu_option = no diff --git a/qemu/tests/cfg/mq_change_qnum.cfg b/qemu/tests/cfg/mq_change_qnum.cfg index cdac103629488996fb83a5d1eb7d27390c6803b0..6049f1a18ce4fa77d53f0ab0dbf54a0963e341ca 100644 --- 
a/qemu/tests/cfg/mq_change_qnum.cfg +++ b/qemu/tests/cfg/mq_change_qnum.cfg @@ -7,8 +7,10 @@ vectors = 10 virt_test_type = qemu type = mq_change_qnum - #In this test need set snapshot for our test will chang guest msi support - image_snapshot = yes + # In this test we need to clone the guest to change guest MSI support. + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes #set repeat_counts for chang queues number repeat_counts = 100 ext_host_get_cmd = "ip route | awk '/default/ { print $3 }'" @@ -20,6 +22,8 @@ kill_vm = yes variants: - under_pktgen: + only Host_RHEL.m7 + only RHEL.6 RHEL.7 ping_after_changing_queues = yes final_ping_time = 30 wait_bg_time = 60 @@ -32,7 +36,6 @@ - guest_guest: pktgen_server = vm2 vms += " vm2" - image_snapshot = yes - host_guest: #host as server password_pktgen_server = redhat @@ -99,4 +102,4 @@ change_list = 1,2,3,4,5 - disable_enable_queues: queues = 4 - change_list = 1,2,1,3,1,4,1,4 + change_list = 1,2,1,3,1,4,1 diff --git a/qemu/tests/cfg/msi_change_flag.cfg b/qemu/tests/cfg/msi_change_flag.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c8d233208cfc999aa2f8713c768b58ac62fc366b --- /dev/null +++ b/qemu/tests/cfg/msi_change_flag.cfg @@ -0,0 +1,35 @@ +- msi_change_flag: + only virtio_net + type = msi_change_flag + # In this test we need to clone the guest to change guest MSI support. + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes + filesize = 4000 + file_md5_check_timeout = 600 + dd_cmd = "dd if=/dev/zero of=%s oflag=direct bs=1M count=${filesize}" + Linux: + tmp_dir = "/var/tmp/" + delete_cmd = "rm -rf %s" + Windows: + get_irq_cmd = '%sdevcon.exe resources @"%s" | find "IRQ"' + tmp_dir = "C:\\" + delete_cmd = "del /f %s" + i386: + devcon_folder = "WIN_UTILS:\devcon\x86\" + x86_64: + devcon_folder = "WIN_UTILS:\devcon\amd64\" + variants: + - disable_pci_msi: + only Linux + disable_pci_msi = no + - by_registry: + only Windows + msi_cmd = "reg add "HKLM\System\CurrentControlSet\Enum\%s\Device Parameters\Interrupt Management\MessageSignaledInterruptProperties" /v MSISupported /d %d /t REG_DWORD /f" + driver_name = netkvm + device_name = "Red Hat VirtIO Ethernet Adapter" + variants: + - vhostforce_on: + netdev_extra_params += ",vhostforce=on" + - vhostforce_off: + netdev_extra_params += ",vhostforce=off" diff --git a/qemu/tests/cfg/multi_disk.cfg b/qemu/tests/cfg/multi_disk.cfg index 28ebce9535511b7cb9610f91cc8d90657f23d8d5..b170ed368d6d4e4c177495e4e9f0640eeae2e4e3 100644 --- a/qemu/tests/cfg/multi_disk.cfg +++ b/qemu/tests/cfg/multi_disk.cfg @@ -5,6 +5,7 @@ remove_image = yes remove_image_image1 = no cmd_timeout = 1000 + create_timeout = 1800 black_list = C: vt_ulimit_nofile = 8192 labeltype = mbr @@ -54,7 +55,7 @@ Windows: # The CD-ROM has occupied a driver letter.
stg_image_num = 23 - stg_image_size = 5G + stg_image_size = 2G stg_image_boot = no Linux: image_size_image1 = 20G @@ -124,13 +125,13 @@ stg_params += "drive_cache:writeback " - multi_lun: Linux: - stg_params += "drive_port:range(0,16383,63) " + stg_params += "drive_port:range(0,16383,127) " Windows: - stg_params += "drive_port:range(0,255,1) " + stg_params += "drive_port:range(0,255,4) " need_reboot = yes need_shutdown = yes - multi_scsiid_lun: - stg_params += "drive_unit:range(0,255,1,3) " + stg_params += "drive_unit:range(0,255,16,3) " Linux: stg_params += "drive_port:range(0,16383,8191) " Windows: @@ -139,9 +140,9 @@ need_shutdown = yes - multi_bus_scsiid_lun: ide, virtio_blk: - stg_params += "drive_bus:range(0,15,1,9) " + stg_params += "drive_bus:range(0,15,2,9) " virtio_scsi: - stg_params += "drive_bus:range(1,15,1,9) " + stg_params += "drive_bus:range(1,15,2,9) " stg_params += "drive_unit:range(0,255,127,3) " Linux: stg_params += "drive_port:range(0,16383,8191) " diff --git a/qemu/tests/cfg/multi_disk_random_hotplug.cfg b/qemu/tests/cfg/multi_disk_random_hotplug.cfg index a22958e8ab5d07eb0aa1cdfcf561f066d86be036..bcebd8cfecc2090751549b995f800a2fea82d751 100644 --- a/qemu/tests/cfg/multi_disk_random_hotplug.cfg +++ b/qemu/tests/cfg/multi_disk_random_hotplug.cfg @@ -1,19 +1,39 @@ - multi_disk_random_hotplug: install setup image_copy unattended_install.cdrom type = multi_disk_random_hotplug + start_vm = no + not_preprocess = yes force_create_image = yes force_create_image_image1 = no remove_image = yes remove_image_image1 = no stg_image_name = "images/stg%s" stg_image_num = 20 + stg_image_size = 128M repeat_times = 3 wait_between_hotplugs = 2 wait_after_hotplug = 10 wait_between_unplugs = 2 + vt_ulimit_nofile = 8192 + no spapr_vscsi ppc64le,ppc64: - wait_between_unplugs = 20 + wait_between_unplugs = 20 + q35: + pcie_extra_root_port = ${stg_image_num} # since image check is executed after unplug wait can be 0 wait_after_unplug = 10 + Windows: + virtio_blk: + driver_name = viostor + virtio_scsi: + driver_name = vioscsi + # explicitly disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= + iozone_cmd_option = '-azR -r 64k -n 100M -g 100M -M -i 0 -i 1 -I -b iozone_{0}.xls -f {0}:\testfile' + iozone_timeout = 1800 + luks: + connect_timeout = 1800 Linux: # We have multiple disks so just ignor first one of each type no_stress_cmds = 100 @@ -26,6 +46,8 @@ stress_stop_cmd = kill -19 `cat /tmp/disk_stress` stress_cont_cmd = kill -18 `cat /tmp/disk_stress` stress_kill_cmd = "rm -f /tmp/disk_stress" + dd_cmd = 'dd if={0} of=/dev/null bs=1M count=100 iflag=direct ' + dd_cmd += '&& dd if=/dev/zero of={0} bs=1M count=100 oflag=direct' variants: - all_types: stg_params = "fmt:virtio,virtio_scsi,lsi_scsi,usb2" @@ -36,8 +58,26 @@ Host_RHEL.m6: usbs= "ehci" usb_type_ehci = ich9-usb-ehci1 + Linux: + dd_timeout = 1800 - single_type: no ide, ahci, scsi + virtio_scsi: + vt_ulimit_nofile = 8192 + stg_params = "fmt:virtio_scsi" + set_drive_bus = no + Linux: + stg_image_num = 254 + plug_timeout = 1800 + Windows: + # The CD-ROM has occupied a driver letter. 
+ stg_image_num = 23 + q35: + Linux: + stg_image_num = 200 + plug_timeout = 1800 + set_drive_bus = yes + pcie_extra_root_port = ${stg_image_num} variants: - @serial: - parallel: @@ -47,3 +87,30 @@ monitor_type_TestQMP2 = qmp monitor_type_TestQMP3 = qmp monitor_type_TestQMP4 = qmp + luks: + acquire_lock_timeout = 1800 + ppc64le, ppc64: + acquire_lock_timeout = 7200 + verify_unplug_timeout = 300 + parallel.single_type: + virtio_scsi: + no Windows + monitors += " TestQMP5 TestQMP6 TestQMP7 TestQMP8 TestQMP9 TestQMP10" + monitors += " TestQMP11 TestQMP12 TestQMP13 TestQMP14 TestQMP15 TestQMP16" + monitors += " TestQMP17 TestQMP18 TestQMP19 TestQMP20" + monitor_type_TestQMP5 = qmp + monitor_type_TestQMP6 = qmp + monitor_type_TestQMP7 = qmp + monitor_type_TestQMP8 = qmp + monitor_type_TestQMP9 = qmp + monitor_type_TestQMP10 = qmp + monitor_type_TestQMP11 = qmp + monitor_type_TestQMP12 = qmp + monitor_type_TestQMP13 = qmp + monitor_type_TestQMP14 = qmp + monitor_type_TestQMP15 = qmp + monitor_type_TestQMP16 = qmp + monitor_type_TestQMP17 = qmp + monitor_type_TestQMP18 = qmp + monitor_type_TestQMP19 = qmp + monitor_type_TestQMP20 = qmp diff --git a/qemu/tests/cfg/multi_nics_hotplug.cfg b/qemu/tests/cfg/multi_nics_hotplug.cfg index 23ac0fd2c3d9e88912498337cc36824e31092bd0..39d90cde057e176b1323d7cdcc95ae23c160a297 100644 --- a/qemu/tests/cfg/multi_nics_hotplug.cfg +++ b/qemu/tests/cfg/multi_nics_hotplug.cfg @@ -12,7 +12,7 @@ additional_operation = yes variants: - nic_8139: - no ppc64 ppc64le + only i386, x86_64 pci_model = rtl8139 nics = "" extra_params += "-net none" @@ -23,19 +23,19 @@ nics = "" extra_params += "-net none" - nic_e1000: - no ppc64 ppc64le + only i386, x86_64 no RHEL.8 pci_model = e1000 nics = "" extra_params += "-net none" - nic_e1000e: - no ppc64 ppc64le + only i386, x86_64 required_qemu = [2.6.0, ) pci_model = e1000e nics = "" extra_params += "-net none" - combination: - no ppc64 ppc64le + only i386, x86_64 pci_model = virtio-net-pci pci_model_hotplug_nic1 = rtl8139 pci_model_hotplug_nic3 = e1000 diff --git a/qemu/tests/cfg/nested_libguestfs_unittest.cfg b/qemu/tests/cfg/nested_libguestfs_unittest.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9084b46b2411ecb414b4548e5c68fc1045f4831e --- /dev/null +++ b/qemu/tests/cfg/nested_libguestfs_unittest.cfg @@ -0,0 +1,26 @@ +- nested_libguestfs_unittest: + type = nested_libguestfs_unittest + virt_test_type = qemu + no RHEL.6, RHEL.7 + no Host_RHEL.m6, Host_RHEL.m7 + required_qemu = [3.0, ) + kvm_probe_module_parameters = "nested=1" + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes + start_vm = no + unittest_timeout = 600 + # TODO: Add support for s390 + only ppc64le, x86_64 + variants nested_flag: + - nested_flag_default: + - nested_flag_on: + ppc64le: + machine_type_extra_params += "cap-nested-hv=on" + x86_64: + cpu_model_flags += ",+{}" + - nested_flag_off: + ppc64le: + machine_type_extra_params += "cap-nested-hv=off" + x86_64: + cpu_model_flags += ",-{}" diff --git a/qemu/tests/cfg/nested_test.cfg b/qemu/tests/cfg/nested_test.cfg index 2bc60598ff0916a936f640a40ee79a7045940e3d..15fe6a8d134cb426849c1b50389050c2c06c1d9b 100644 --- a/qemu/tests/cfg/nested_test.cfg +++ b/qemu/tests/cfg/nested_test.cfg @@ -2,16 +2,21 @@ kar_repo = cert_url = nested_bs_options = "" + accept_cancel = no + l2_kar_options = "" + auto_cpu_model = yes + cpu_model_flags = ",+vmx" no Host_RHEL.m7 - Host_RHEL.m8.u0, Host_RHEL.m8.u1: - auto_cpu_model = no - cpu_model = host only RHEL.8 type = nested_test test_type
= testcase install_node = no - test_timeout = 3600 - variants: + login_timeout = 360 + variants nested_test: - boot_l2: case_name = boot install_node = yes + test_timeout = 5400 + - check_cpu_model_l2: + case_name = x86_cpu_model + test_timeout = 1800 diff --git a/qemu/tests/cfg/nic_hotplug.cfg b/qemu/tests/cfg/nic_hotplug.cfg index a8e137a62a5e261f8a32e4d2828790e276e52667..63bd5549cee2d418d6f1d6ed5d64fe57a28389cc 100644 --- a/qemu/tests/cfg/nic_hotplug.cfg +++ b/qemu/tests/cfg/nic_hotplug.cfg @@ -34,7 +34,8 @@ reboot_method = system_reset variants: - nic_8139: - no ppc64, ppc64le, s390x, q35 + only i386, x86_64 + no q35 pci_model = rtl8139 - nic_virtio: #TODO: Confirm this works with libvirt @@ -44,12 +45,13 @@ s390x: pci_model = virtio-net-ccw - nic_e1000: - no ppc64, ppc64le, s390x, q35 + only i386, x86_64 + no q35 RHEL: only RHEL.6 RHEL.7 pci_model = e1000 - nic_e1000e: - no ppc64, ppc64le, s390x + only i386, x86_64 pci_model = e1000e variants: - one_pci: diff --git a/qemu/tests/cfg/numa.cfg b/qemu/tests/cfg/numa.cfg index 9720cf79e6920c9073a1a6724bcd6884af8040d2..b14abe535b100562528d6ef0097e6d682f1d3b8e 100644 --- a/qemu/tests/cfg/numa.cfg +++ b/qemu/tests/cfg/numa.cfg @@ -11,8 +11,13 @@ only Linux type = numa_consistency start_vm = no + threshold = 0.05 + ppc64,ppc64le: + threshold = 0.15 - numa_stress: only Linux type = numa_stress del stress_args mem_ratio = 0.8 + # Replace "count" in "dd commands" to avoid running out of disk space + #tmpfs_size = 1024 diff --git a/qemu/tests/cfg/numa_cpu.cfg b/qemu/tests/cfg/numa_cpu.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b7da4e33141361622d9d1603197ede07df011714 --- /dev/null +++ b/qemu/tests/cfg/numa_cpu.cfg @@ -0,0 +1,122 @@ +- numa_cpu: + type = numa_cpu + kill_vm = yes + no Host_RHEL.m6, Host_RHEL.m7 + # '-numa cpu' was added in QEMU 3.0 and 'dies' was added in QEMU 4.1; + # considering that libvirt 6.6.0, which consumes QEMU 5.1, still does not + # support '-numa cpu', testing from QEMU 4.1 is not risky + required_qemu = [4.1.0, ) + mem = 4096 + ppc64, ppc64le: + # ppc uses vcpu_dies = INVALID, which means 1 when computing cpu topology + smp = 8 + vcpu_sockets = 2 + vcpu_cores = 2 + vcpu_threads = 2 + x86_64, i386: + smp = 16 + vcpu_sockets = 2 + vcpu_dies = 2 + vcpu_cores = 2 + vcpu_threads = 2 + HostCpuVendor.amd: + smp = 8 + # To adapt to all AMD systems, test single thread only + vcpu_threads = 1 + vcpu_maxcpus = ${smp} + backend_mem = memory-backend-ram + use_mem = no + guest_numa_nodes = "node0 node1 node2 node3" + mem_devs = "mem0 mem1 mem2 mem3" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + numa_memdev_node2 = mem-mem2 + numa_memdev_node3 = mem-mem3 + numa_nodeid_node0 = 0 + numa_nodeid_node1 = 1 + numa_nodeid_node2 = 2 + numa_nodeid_node3 = 3 + size_mem = 1024M + guest_numa_cpus = "cpu0 cpu1 cpu2 cpu3" + variants: + - @default: + # For ppc, coreid must be a multiple of 'vcpu_threads' + ppc64, ppc64le: + numa_cpu_nodeid_cpu0 = 0 + numa_cpu_coreid_cpu0 = 0 + + numa_cpu_nodeid_cpu1 = 1 + numa_cpu_coreid_cpu1 = 2 + + numa_cpu_nodeid_cpu2 = 2 + numa_cpu_coreid_cpu2 = 4 + + numa_cpu_nodeid_cpu3 = 3 + numa_cpu_coreid_cpu3 = 6 + x86_64, i386: + numa_cpu_nodeid_cpu0 = 0 + numa_cpu_socketid_cpu0 = 0 + + numa_cpu_nodeid_cpu1 = 1 + numa_cpu_socketid_cpu1 = 1 + numa_cpu_dieid_cpu1 = 0 + + numa_cpu_nodeid_cpu2 = 2 + numa_cpu_socketid_cpu2 = 1 + numa_cpu_dieid_cpu2 = 1 + numa_cpu_coreid_cpu2 = 0 + + numa_cpu_nodeid_cpu3 = 3 + numa_cpu_socketid_cpu3 = 1 + numa_cpu_dieid_cpu3 = 1 + numa_cpu_coreid_cpu3 = 1 +
HostCpuVendor.intel: + numa_cpu_threadid_cpu3 = 0 + + guest_numa_cpus += " cpu4" + numa_cpu_nodeid_cpu4 = 3 + numa_cpu_socketid_cpu4 = 1 + numa_cpu_dieid_cpu4 = 1 + numa_cpu_coreid_cpu4 = 1 + numa_cpu_threadid_cpu4 = 1 + - with_preconfig: + qemu_preconfig = on + # Ensure all node have corresponding cpu in qemu cli + ppc64le, ppc64: + vcpu_sockets = 4 + vcpu_threads = 1 + numa_cpu_nodeid_cpu0 = 0 + numa_cpu_coreid_cpu0 = 0 + + numa_cpu_nodeid_cpu1 = 1 + numa_cpu_coreid_cpu1 = 2 + + numa_cpu_nodeid_cpu2 = 2 + numa_cpu_coreid_cpu2 = 4 + + numa_cpu_nodeid_cpu3 = 3 + numa_cpu_coreid_cpu3 = 6 + x86_64, i386: + numa_cpu_nodeid_cpu0 = 0 + numa_cpu_socketid_cpu0 = 0 + numa_cpu_dieid_cpu0 = 0 + numa_cpu_coreid_cpu0 = 0 + numa_cpu_threadid_cpu0 = 0 + + numa_cpu_nodeid_cpu1 = 1 + numa_cpu_socketid_cpu1 = 0 + numa_cpu_dieid_cpu1 = 1 + numa_cpu_coreid_cpu1 = 0 + numa_cpu_threadid_cpu1 = 0 + + numa_cpu_nodeid_cpu2 = 2 + numa_cpu_socketid_cpu2 = 1 + numa_cpu_dieid_cpu2 = 0 + numa_cpu_coreid_cpu2 = 0 + numa_cpu_threadid_cpu2 = 0 + + numa_cpu_nodeid_cpu3 = 3 + numa_cpu_socketid_cpu3 = 1 + numa_cpu_dieid_cpu3 = 1 + numa_cpu_coreid_cpu3 = 0 + numa_cpu_threadid_cpu3 = 0 diff --git a/qemu/tests/cfg/numa_dist.cfg b/qemu/tests/cfg/numa_dist.cfg new file mode 100644 index 0000000000000000000000000000000000000000..300d81ec94e0979bc95e9d62476f146423f6b561 --- /dev/null +++ b/qemu/tests/cfg/numa_dist.cfg @@ -0,0 +1,28 @@ +- numa_dist: + type = numa_dist + kill_vm = yes + kill_vm_gracefully = no + mem = 4096 + backend_mem = memory-backend-ram + use_mem = no + guest_numa_nodes = "node0 node1 node2 node3" + numa_nodeid_node0 = 0 + numa_nodeid_node1 = 1 + numa_nodeid_node2 = 2 + numa_nodeid_node3 = 3 + mem_devs = "mem0 mem1 mem2 mem3" + size_mem = 1024M + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + numa_memdev_node2 = mem-mem2 + numa_memdev_node3 = mem-mem3 + variants: + - symmetric: + numa_dist_node0 = [[1, 20], [2, 30], [3, 40]] + numa_dist_node1 = [[2, 20], [3, 30]] + numa_dist_node2 = [[3, 20]] + - asymmetric: + numa_dist_node0 = [[1, 20], [2, 30], [3, 40]] + numa_dist_node1 = [[0, 50], [2, 60], [3, 70]] + numa_dist_node2 = [[0, 80], [1, 90], [3, 100]] + numa_dist_node3 = [[0, 110], [1, 120], [2, 130]] diff --git a/qemu/tests/cfg/numa_negative.cfg b/qemu/tests/cfg/numa_negative.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7db3611058dbd7e4f4a516a0d9cf0eb053f10278 --- /dev/null +++ b/qemu/tests/cfg/numa_negative.cfg @@ -0,0 +1,45 @@ +- numa_negative: + type = numa_negative + kill_vm = yes + kill_vm_gracefully = no + backend_mem = memory-backend-ram + use_mem = no + start_vm = no + smp = 2 + vcpu_maxcpus = ${smp} + mem = 2048M + mem_devs = 'mem0 mem1' + size_mem = 1024M + guest_numa_nodes = 'node0 node1' + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + numa_cpus_node0 = 0 + numa_cpus_node1 = 1 + negative_type = fatal + variants: + - threads_test: + vcpu_sockets = 1 + vcpu_dies = 1 + vcpu_cores = 1 + vcpu_threads = 2 + ppc64, ppc64le: + error_msg = "cpus=1.*CPU is already assigned to node-id: 0" + i386, x86_64: + negative_type = non-fatal + - mem_mismatch: + mem = 3072 + error_msg = "qemu-kvm: total memory for NUMA nodes \(0x[0-9A-Fa-f]+\)" + error_msg += " should equal RAM size" + - cpu_mismatch: + smp = 4 + vcpu_maxcpus = ${smp} + error_msg = "qemu-kvm: warning: CPU\(s\) not present in any NUMA nodes" + negative_type = non-fatal + - nodeid_mismatch: + numa_nodeid_node0 = 0 + numa_nodeid_node1 = 0 + error_msg = "Duplicate NUMA nodeid: 0" + - mem_zero: + 
size_mem_mem0 = 0M + size_mem_mem1 = 2048M + error_msg = "property 'size' of memory-backend-ram doesn't take value '0'" diff --git a/qemu/tests/cfg/numa_opts.cfg b/qemu/tests/cfg/numa_opts.cfg index f19ee16a6540aefffc8177baf292197b8c4bccbf..b845dc327047ad12dd7a95aa6a9bc1620aef1e4a 100644 --- a/qemu/tests/cfg/numa_opts.cfg +++ b/qemu/tests/cfg/numa_opts.cfg @@ -1,108 +1,75 @@ - numa_opts: - type = numa_opts - # paused VMs are enough for our purposes, no need to boot them: - paused_after_start_vm = yes - kill_vm = yes - kill_vm_gracefully = no - encode_video_files = no - pre_command = "sync && echo 3 >/proc/sys/vm/drop_caches;" - smp = 6 - mem = 4096 - vcpu_maxcpus = ${smp} - variants: - - nodes.0: - # no extra parameters => zero nodes - numa_nodes = 0 - # on ppc,no extra parameters => one node - ppc64le, ppc64: - numa_nodes = 1 - - nodes.1: - numa_nodes = 1 - extra_params += " -numa node" - numa_node0_cpus = "0 1 2 3 4 5" - numa_node0_size = 4096 - - nodes.2: - numa_nodes = 2 - variants: - # default (interleaved) mode: - - defaults: - extra_params += " -numa node -numa node" - numa_node0_cpus = "0 1 2" - numa_node1_cpus = "3 4 5" - numa_node0_size = 2048 - numa_node1_size = 2048 - Host_RHEL.m6, Host_RHEL.m7.u0, Host_RHEL.m7.u1: - numa_node0_cpus = "0 2 4" - numa_node1_cpus = "1 3 5" - ppc64,ppc64le: - Host_RHEL.m7.u2: - numa_node0_cpus = "0 2 4" - numa_node1_cpus = "1 3 5" - # custom memory and CPU values: - - custom_values: - numa_node0_cpus = "4 5" - numa_node0_size = 1024 - numa_node1_cpus = "0 1 2 3" - numa_node1_size = 3072 - variants: - - implicit_ids: - extra_params += " -numa node,mem=1024,cpus=4-5" - extra_params += " -numa node,mem=3072,cpus=0-3" - - unordered_ids: - extra_params += " -numa node,mem=3072,cpus=0-3,nodeid=1" - extra_params += " -numa node,mem=1024,cpus=4-5,nodeid=0" - - nodes.3: - numa_nodes = 3 - variants: - # default (interleaved) mode: - - defaults: - extra_params += " -numa node -numa node -numa node" - # nodes are 8MB-aligned: - numa_node0_cpus = "0 1 2" - numa_node1_cpus = "3 4 5" - numa_node2_cpus = "" - numa_node0_size = 1360 - numa_node1_size = 1368 - numa_node2_size = 1368 - ppc64,ppc64le: - numa_node0_size = 1280 - numa_node1_size = 1280 - numa_node2_size = 1536 - Host_RHEL.m6, Host_RHEL.m7.u0, Host_RHEL.m7.u1: - numa_node0_cpus = "0 3" - numa_node1_cpus = "1 4" - numa_node2_cpus = "2 5" - # custom memory and CPU values: - - custom_values: - numa_node0_cpus = "3 4" - numa_node0_size = 512 - numa_node1_cpus = "0 1 2" - numa_node1_size = 1024 - numa_node2_cpus = "5" - numa_node2_size = 2560 - variants: - - implicit_ids: - extra_params += " -numa node,mem=512,cpus=3-4" - extra_params += " -numa node,mem=1024,cpus=0-2" - extra_params += " -numa node,mem=2560,cpus=5" - - unordered_ids: - extra_params += " -numa node,mem=2560,cpus=5,nodeid=2" - extra_params += " -numa node,mem=512,cpus=3-4,nodeid=0" - extra_params += " -numa node,mem=1024,cpus=0-2,nodeid=1" - - nodes.128: - type = numa_maxnodes - numa_nodes = 128 - mem_fixed = 4G - vm_mem_minimum = 4G - use_mem = no - node_size = 32M - ppc64,ppc64le: - mem_fixed = 32G - node_size = 256M - vm_mem_minimum = 32G - paused_after_start_vm = no - prealloc_mem = yes - policy_mem = default - Linux: - numa_cmd = "cat /sys/devices/system/node/possible" - numa_expected = "0-127" + type = numa_opts + kill_vm = yes + kill_vm_gracefully = no + encode_video_files = no + pre_command = "sync && echo 3 >/proc/sys/vm/drop_caches;" + smp = 6 + mem = 4096 + vcpu_maxcpus = ${smp} + backend_mem = memory-backend-ram + use_mem = no + 
variants: + - nodes.0: + # no extra parameters => one node in guest os + guest_expect_nodes = 1 + # no extra parameters => zero nodes with qemu monitor + monitor_expect_nodes = 0 + # on ppc,no extra parameters => one node and size = mem with qemu monitor + ppc64le, ppc64: + monitor_expect_nodes = 1 + variants: + - @default: + - with_maxmem: + only q35 + required_qemu = [5.1.0,) + # no extra parameters, with maxmem and without slots => + # one node in monitor, guest os, and size = mem + maxmem_mem = 32G + monitor_expect_nodes = 1 + - nodes.1: + monitor_expect_nodes = 1 + mem_devs = "mem0" + size_mem0 = 4096M + guest_numa_nodes = "node0" + numa_memdev_node0 = mem-mem0 + numa_cpus_node0 = "0,1,2,3,4,5" + - nodes.2: + monitor_expect_nodes = 2 + mem_devs = "mem0 mem1" + size_mem0 = 1024M + size_mem1 = 3072M + guest_numa_nodes = "node0 node1" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + numa_cpus_node0 = "4,5" + numa_cpus_node1 = "0,1,2,3" + - nodes.3: + monitor_expect_nodes = 3 + mem_devs = "mem0 mem1 mem2" + size_mem0 = 512M + size_mem1 = 1024M + size_mem2 = 2560M + guest_numa_nodes = "node0 node1 node2" + numa_memdev_node0 = mem-mem0 + numa_memdev_node1 = mem-mem1 + numa_memdev_node2 = mem-mem2 + numa_cpus_node0 = "0,1" + numa_cpus_node1 = "2,3" + numa_cpus_node2 = "4,5" + - nodes.128: + type = numa_maxnodes + numa_nodes = 128 + mem_fixed = 4G + vm_mem_minimum = 4G + node_size = 32M + start_vm = no + ppc64,ppc64le: + mem_fixed = 32G + node_size = 256M + vm_mem_minimum = 32G + prealloc_mem = yes + policy_mem = default + Linux: + numa_cmd = "cat /sys/devices/system/node/possible" + numa_expected = "0-127" diff --git a/qemu/tests/cfg/nvdimm.cfg b/qemu/tests/cfg/nvdimm.cfg index 5a4a2c3797ff569298b5bb7d738ee06f49fe371c..644b32dc4d4940715dc84582f1cda23f45527d82 100644 --- a/qemu/tests/cfg/nvdimm.cfg +++ b/qemu/tests/cfg/nvdimm.cfg @@ -5,7 +5,13 @@ vm_mem_limit = 30G kill_vm_on_error = yes login_timeout = 240 - only x86_64 + only x86_64, ppc64le + ppc64le: + required_qemu = [5, ) + only nvdimm_basic + # PowerPC guests need to create the persistent memory device manually so far + nvdimm_ns_create_cmd = "ndctl create-namespace" + dimm_extra_params = "label-size=256m" no Windows no RHEL.6 RHEL.5 RHEL.4 RHEL.3 no RHEL.7.2 RHEL.7.1 RHEL.7.0 @@ -18,15 +24,15 @@ mem-path = ${nv_backend} mount_dir = "/mnt" pre_command = "dd if=/dev/zero of=${nv_backend} bs=1M count=1024" - pmem = /dev/pmem0 - format_command = "mkfs.xfs -f ${pmem}" + dev_path = /dev/pmem0 + format_command = "mkfs.xfs -f ${dev_path}" RHEL.8: format_command += " -m reflink=0" - mount_command = "mount -o dax ${pmem} ${mount_dir}" + mount_command = "mount -o dax ${dev_path} ${mount_dir}" post_command = "rm -rf ${nv_backend}" mem_devs = mem1 size_mem = 1G - share_mem = on + share_mem = yes nv_file = "${mount_dir}/nv0" variants: @@ -39,6 +45,10 @@ mem-path = ${nv_backend} del pre_command del post_command + variants: + - @pmem_default: + - pmem_on: + pmem_mem = on - nvdimm_dax: start_vm = "no" nvdimm_dax = "yes" @@ -49,3 +59,36 @@ ndctl_install_cmd = "rpm -q ndctl || yum install -y ndctl" create_dax_cmd = "ndctl create-namespace -m dax -e namespace0.0 -f -v -a 4096" del_dax_cmd = "ndctl create-namespace -m memory -e namespace0.0 -f -v -a 4096" + - nvdimm_mapsync: + type = nvdimm_mapsync + no Host_RHEL.m7 + pmem_mem = on + nv_backend = nvdimm0 + mount_dir = "/mnt/pmem" + mem-path = "${mount_dir}/${nv_backend}" + format_command = "mkfs.xfs -f ${dev_path} -m reflink=0" + mount_command = "mkdir -p ${mount_dir} && mount -o dax 
${dev_path} ${mount_dir}" + truncate_command = "truncate -s ${size_mem} ${mount_dir}/${nv_backend}" + check_command = "grep -A21 '${nv_backend}' /proc/%s/smaps" + clean_command = "rm -rf ${mount_dir}/${nv_backend} && umount ${mount_dir} && rm -rf ${mount_dir}" + start_vm = no + - nvdimm_mode: + type = nvdimm_mode + mem_devs = mem1 mem2 + mem-path_mem1 = /tmp/test1.img + mem-path_mem2 = /tmp/test2.img + align_mem1 = 128M + align_mem2 = 128M + dimm_extra_params = "label-size=2M" + pre_command = "dd if=/dev/zero of=${mem-path_mem1} bs=1M count=1024" + pre_command += "; dd if=/dev/zero of=${mem-path_mem2} bs=1M count=1024" + post_command = "rm -rf ${mem-path_mem1} ${mem-path_mem2}" + create_dax_cmd = "ndctl create-namespace -m dax -e namespace%s.0 -f -v -a 4096" + ndctl_check_cmd = "ndctl list" + del mount_dir + del dev_path + del format_command + del mount_command + - nvdimm_hotplug: + del mem_devs + target_mems = mem1 diff --git a/qemu/tests/cfg/nvme_plug.cfg b/qemu/tests/cfg/nvme_plug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e40e480b2da56a8c4a855f97b1f6c61fecf83d3b --- /dev/null +++ b/qemu/tests/cfg/nvme_plug.cfg @@ -0,0 +1,71 @@ +- nvme_plug: + only nvme_direct + type = nvme_plug + start_vm = no + kill_vm = yes + kill_vm_gracefully = yes + shutdown_cleanly = yes + shutdown_cleanly_timeout = 120 + guest_port_unattended_install = 12323 + kernel = vmlinuz + initrd = initrd.img + inactivity_watcher = error + inactivity_treshold = 1800 + image_verify_bootable = no + image_copy_on_error = no + lowest_mem = 512 + install_trigger_time = 0 + install_timeout = 7200 + random_trigger = "no" + unattended_delivery_method = cdrom + cdroms += " unattended" + enable_nvme_cd1 = no + enable_nvme_unattended = no + enable_nvme_winutils = no + drive_index_unattended = 1 + drive_index_cd1 = 2 + boot_once = d + medium = cdrom + virtio_scsi: + iothread_scheme ?= + iothreads ?= + image_iothread ?= + images = 'sys stg' + data_img_tag = stg + enable_nvme_sys = no + image_backend_sys = filesystem + image_name_sys = images/sys + image_format_sys = qcow2 + image_size_sys = 30G + force_create_image_sys = yes + remove_image_sys = yes + default_cdroms = winutils + image_size_stg = 5G + force_create_image_stg = yes + boot_drive_stg = no + remove_image_stg = no + fio_default_options = '--direct=1 --bs=64K --size=2G --name=test --iodepth=4' + Windows: + blk_extra_params_sys = "serial=SYSTEM_DISK0" + fio_filename = fio_data_disk_test + i440fx: + cd_format_unattended = ide + cd_format_cd1 = ide + cd_format_winutils = ide + q35: + cd_format_unattended = ahci + cd_format_cd1 = ahci + cd_format_winutils = ahci + Linux: + fio_default_options += ' --ioengine=libaio ' + remove_options = kernel initrd kernel_params + fio_options = '${fio_default_options} --rw=read;' + fio_options += '${fio_default_options} --rw=write;' + fio_options += '${fio_default_options} --rw=randread;' + fio_options += '${fio_default_options} --rw=randwrite;' + fio_options += '${fio_default_options} --rw=randrw' + variants: + # The variants below share configurations related to installation.
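For reference, a minimal Python sketch of how a test body can consume the semicolon-separated fio_options string defined above; the run_fio_jobs helper and its /dev/sdb fallback target are illustrative assumptions, not the provider's actual nvme_plug implementation.

```python
import logging

from virttest import error_context


@error_context.context_aware
def run_fio_jobs(test, params, env):
    """Run every fio job listed in the 'fio_options' parameter (sketch)."""
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()
    # 'fio_filename' is only set for Windows in the cfg above; the Linux
    # fallback used here is a hypothetical data-disk path.
    target = params.get("fio_filename", "/dev/sdb")
    for opts in params.get("fio_options", "").split(";"):
        opts = opts.strip()
        if not opts:
            continue  # skip empty trailing entries
        error_context.context("Running fio: %s" % opts, logging.info)
        session.cmd("fio %s --filename=%s" % (opts, target), timeout=1800)
    session.close()
```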
+ - @with_installation: + variants: + - @extra_cdrom_ks: diff --git a/qemu/tests/cfg/pcie_hotplug_opt.cfg b/qemu/tests/cfg/pcie_hotplug_opt.cfg new file mode 100644 index 0000000000000000000000000000000000000000..cd1943563ecbd02a655d6faf808be70026d52036 --- /dev/null +++ b/qemu/tests/cfg/pcie_hotplug_opt.cfg @@ -0,0 +1,31 @@ +- pcie_hotplug_opt: + only q35 + virt_test_type = qemu + type = pcie_hotplug_opt + kill_vm_on_error = yes + images += " stg0 stg1 plug0" + drive_format_stg0 = virtio + drive_format_stg1 = scsi-hd + scsi_hba_stg1 = virtio-scsi-pci + boot_drive_stg0 = yes + boot_drive_stg1 = yes + force_create_image_stg0 = yes + force_create_image_stg1 = yes + image_name_stg0 = images/stg0_blk + image_name_stg1 = images/stg1_scsi + image_size_stg0 = 1G + image_size_stg1 = 1G + remove_image_stg0 = yes + remove_image_stg1 = yes + nics = 'nic1' + nic_model_nic1 = virtio + pcie_root_port_params = 'hotplug=off' + # used for hotplug virtio-blk-pci + drive_format_plug0 = virtio + boot_drive_plug0 = no + force_create_image_plug0 = yes + image_name_plug0 = images/plug0 + image_size_plug0 = 1G + remove_image_plug0 = yes + hotplug_error = "Hot-plug failed: unsupported by the port device '%s'" + unplug_error = "Hot-unplug failed: unsupported by the port device '%s'" diff --git a/qemu/tests/cfg/pktgen.cfg b/qemu/tests/cfg/pktgen.cfg index e6e514848a574f73fd9e3f42a5772f83d70eed23..7aec2ae8789cec394eec1442d91f4fab8b5bc121 100644 --- a/qemu/tests/cfg/pktgen.cfg +++ b/qemu/tests/cfg/pktgen.cfg @@ -1,7 +1,8 @@ - pktgen: install setup image_copy unattended_install.cdrom no JeOS no Windows - no Host_RHEL.m5, Host_RHEL.m6 + only Host_RHEL.m7 + only RHEL.6 RHEL.7 virt_test_type = qemu type = pktgen kill_vm = yes diff --git a/qemu/tests/cfg/ple_test.cfg b/qemu/tests/cfg/ple_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1b942d4d3a834d808898c43f9384b06f66b47664 --- /dev/null +++ b/qemu/tests/cfg/ple_test.cfg @@ -0,0 +1,18 @@ +- ple_test: + virt_test_type = qemu + type = ple_test + start_vm = no + kill_vm_on_error = yes + only Linux + only x86_64, i386 + unixbench_dir = "/tmp/byte-unixbench" + get_unixbench = "git clone --depth=1 https://github.com/kdlucas/byte-unixbench.git ${unixbench_dir}" + run_unixbench = "cd ${unixbench_dir}/UnixBench && ./Run" + HostCpuVendor.intel: + ple_value = 128 + mod_param = "ple_gap" + module_name = "kvm_intel" + HostCpuVendor.amd: + ple_value = 300 + mod_param = "pause_filter_count" + module_name = "kvm_amd" diff --git a/qemu/tests/cfg/plug_cdrom.cfg b/qemu/tests/cfg/plug_cdrom.cfg index eb91ba4f88ba7b375484a8d0d68c684ff7e70ec0..145a6af691d6a5f5c5a2ef91a5a6f292efed981e 100644 --- a/qemu/tests/cfg/plug_cdrom.cfg +++ b/qemu/tests/cfg/plug_cdrom.cfg @@ -2,6 +2,10 @@ only virtio_scsi virt_test_type = qemu type = plug_cdrom + # explicitly disable iothread + iothread_scheme ?= + image_iothread ?= + iothreads ?= cdroms = 'cd2' iso_name_cd2 = new cdrom_cd2 = /tmp/${iso_name_cd2}.iso diff --git a/qemu/tests/cfg/pmu_nmi_watchdog.cfg b/qemu/tests/cfg/pmu_nmi_watchdog.cfg new file mode 100644 index 0000000000000000000000000000000000000000..3d882be27fba1ff866f8b30cd44f71780914fbfe --- /dev/null +++ b/qemu/tests/cfg/pmu_nmi_watchdog.cfg @@ -0,0 +1,58 @@ +- pmu_nmi_watchdog: install setup image_copy unattended_install.cdrom + type = pmu_nmi_watchdog + start_vm = no + deadlock_test_link = "watchdog/deadlock_test" + deadlock_test_path = "/root" + pre_cmd = "cd /root/deadlock_test && make" + deadlock_test_cmd = "nohup insmod /root/deadlock_test/deadlock_test.ko 
>/dev/null 2>&1 &" + rmmod_deadlock_cmd = "rmmod deadlock_test.ko" + monitors = qmp1 + monitor_type = qmp + mem = 2048 + only aarch64 + variants: + - nmi_watchdog_test: + test_type = nmi_watchdog_test + variants: + - 64u16g: + custom_smp = 64 + vcpu_maxcpus = 64 + mem = 16384 + - 1u2g: + custom_smp = 1 + vcpu_maxcpus = 1 + mem = 2048 + - 8u8g_hyper_threads: + custom_smp = 8 + vcpu_maxcpus = 8 + used_cpus = 8 + vcpu_cores = 4 + vcpu_threads = 2 + mem = 8192 + - nmi_watchdog_switch: + test_type = nmi_watchdog_edit + switch_cmd0 = "echo 0 > /proc/sys/kernel/nmi_watchdog" + switch_cmd1 = "echo 1 > /proc/sys/kernel/nmi_watchdog" + - cmdline_test: + test_type = cmdline_test + variants: + - pesudo_nmi_0: + boot_option_removed = "irqchip.gicv3_pseudo_nmi=1" + boot_option_added = "irqchip.gicv3_pseudo_nmi=0" + - del_pesudo_nmi: + boot_option_removed = "irqchip.gicv3_pseudo_nmi=1" + boot_option_added = "" + - del_pmu_nmi_enable: + boot_option_removed = "pmu_nmi_enable" + boot_option_added = "" + - del_hardlockup_cpu_freq: + boot_option_removed = "hardlockup_cpu_freq=auto" + boot_option_added = "" + - nmi_watchdog_0: + boot_option_removed = "nmi_watchdog=1" + boot_option_added = "nmi_watchdog=0" + - del_disable_sdei_nmi_watchdog: + boot_option_removed = "disable_sdei_nmi_watchdog" + boot_option_added = "" + - workwith_i6300esb: + test_type = workwith_i6300esb diff --git a/qemu/tests/cfg/ppc_check_cpu_and_mmu.cfg b/qemu/tests/cfg/ppc_check_cpu_and_mmu.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bb5a537570298bb2941b4437a00f385c665a185e --- /dev/null +++ b/qemu/tests/cfg/ppc_check_cpu_and_mmu.cfg @@ -0,0 +1,10 @@ +- ppc_check_cpu_and_mmu: + type = ppc_check_cpu_and_mmu + only ppc64le + # Older kernel versions do not support get MMU mode from cpuinfo + no RHEL.6, RHEL.7 + no Host_RHEL.7 + auto_cpu_model = no + cpu_model = host + kill_vm_on_error = yes + kernel_extra_params_remove = disable_radix diff --git a/qemu/tests/cfg/ppc_ic_mode_check.cfg b/qemu/tests/cfg/ppc_ic_mode_check.cfg new file mode 100644 index 0000000000000000000000000000000000000000..8f1824a93fd5c3249ee46f620b99321123a026f3 --- /dev/null +++ b/qemu/tests/cfg/ppc_ic_mode_check.cfg @@ -0,0 +1,16 @@ +- ppc_ic_mode_check: + type = ppc_ic_mode_check + virt_test_type = qemu + only ppc64 ppc64le + required_qemu = [4, ) + start_vm = no + variants ic_mode: + - xics: + - xive: + only RHEL.8 + variants kernel_irqchip: + - in-kernel: + irqchip = on + - emulated: + irqchip = off + machine_type_extra_params += ,ic-mode=${ic_mode},kernel-irqchip=${irqchip} diff --git a/qemu/tests/cfg/qemu_guest_agent.cfg b/qemu/tests/cfg/qemu_guest_agent.cfg index 647c6ad8e29f82ad85e5eafd354dd49587e0b34f..b2b1c3cadae30601c1eb605f4024b7dd3c72696d 100644 --- a/qemu/tests/cfg/qemu_guest_agent.cfg +++ b/qemu/tests/cfg/qemu_guest_agent.cfg @@ -4,6 +4,7 @@ type = qemu_guest_agent gagent_name = "org.qemu.guest_agent.0" gagent_install_cmd = "yum install -y qemu-guest-agent" + gagent_uninstall_cmd = "rpm -e --nodeps qemu-guest-agent" gagent_start_cmd = "systemctl start qemu-guest-agent.service" gagent_restart_cmd = "systemctl restart qemu-guest-agent.service" gagent_stop_cmd = "systemctl stop qemu-guest-agent.service" @@ -11,6 +12,11 @@ gagent_pkg_check_cmd = "rpm -q qemu-guest-agent" black_list_change_cmd = "sed -i 's/%s//g' /etc/sysconfig/qemu-ga" black_list_check_cmd = "grep -nr 'BLACKLIST_RPC=.*%s' /etc/sysconfig/qemu-ga" + setsebool_cmd = "setsebool virt_qemu_ga_read_nonsecurity_files %s" + getsebool_cmd = "getsebool -a | grep 
virt_qemu_ga_read_nonsecurity_files |awk '{print$3}'" + backup_file = /etc/sysconfig/qemu-ga-bk + black_list_backup = /bin/cp -f /etc/sysconfig/qemu-ga ${backup_file} + recovery_black_list = mv -f ${backup_file} /etc/sysconfig/qemu-ga #only for rhel8,virt module stream is rhel or 8.1,8.2,8.3 and so on virt_module_stream = rhel # Please update your file share web server url before test @@ -124,6 +130,10 @@ Windows: get_guest_time_cmd = python2.7 -c "import time; print(int(time.time()))" move_time_cmd = "date --rfc-3339=seconds --utc; date --set='now - 1 week' > /dev/null; date --rfc-3339=seconds --utc" + variants: + - @default: + - invalid_time: + invalid_time_test = yes - check_time_sync: only Windows gagent_check_type = time_sync @@ -163,9 +173,11 @@ gagent_check_type = get_interfaces image_snapshot = yes - check_fsfreeze: - gagent_fs_test_cmd = "rm -f /tmp/foo; echo foo > /tmp/foo" + gagent_fs_test_cmd = "echo foo > %s/foo" + mountpoint_def = "/tmp" Windows: - gagent_fs_test_cmd = "echo 'fsfreeze test' > C:\test_file.txt" + gagent_fs_test_cmd = "echo 'fsfreeze test' > %s\test_file.txt" + mountpoint_def = "C:" write_cmd_timeout = 8 variants: - @default: @@ -175,11 +187,22 @@ gagent_check_type = fsfreeze_vss_test gagent_fs_test_cmd = "start /b WIN_UTILS:\fsfreeze-write.py" freeze_timeout = 10 + - fsfreeze_list: + gagent_check_type = fsfreeze_list + images += " stg0" + image_name_stg0 = images/storage0 + image_size_stg0 = 1G + force_create_image_stg0 = yes - check_reboot_shutdown_fsfreeze: gagent_check_type = reboot_shutdown - check_snapshot: type = qemu_guest_agent_snapshot gagent_check_type = fsfreeze + gagent_fs_test_cmd = "echo foo > %s/foo" + mountpoint_def = "/tmp" + Windows: + gagent_fs_test_cmd = "echo 'fsfreeze test' > %s\test_file.txt" + mountpoint_def = "C:" - check_suspend: type = qemu_guest_agent_suspend services_up_timeout = 30 @@ -254,10 +277,12 @@ image_snapshot = yes - check_fsinfo: gagent_check_type = fsinfo - blk_extra_params = "serial=GAGENT_TEST" - cmd_get_disk = cat /proc/mounts |grep -v rootfs |awk '$2~/^\%s$/{print $1,$3}' + blk_extra_params_image1 = "serial=GAGENT_TEST" + cmd_get_disk = cat /proc/mounts |grep -v rootfs |awk '$2~/^%s$/{print $1,$3}' + cmd_get_disk_usage = df -hlk | egrep "%s$" | awk '{print$2*1024,$3*1024}' Windows: - cmd_get_disk = wmic volume where "DriveLetter='%s'" get FileSystem,DeviceID |findstr /v /i FileSystem + cmd_get_disk = wmic volume where "DeviceID='%s'" get FileSystem,DeviceID |findstr /v /i FileSystem + cmd_get_disk_usage = wmic volume where "DriveLetter='%s'" get Capacity,FreeSpace |findstr /v /i FreeSpace - check_nonexistent_cmd: gagent_check_type = nonexistent_cmd wrong_cmd = "system_reset" @@ -267,6 +292,86 @@ image_name_stg0 = images/storage0 image_size_stg0 = 1G force_create_image_stg0 = yes + - check_virtio_device: + only Windows + gagent_check_type = virtio_device + check_driver_powershell_cmd = 'powershell -command "Get-WmiObject Win32_PnPSignedDriver -Filter \"%s = '%s'\"' + check_driver_powershell_cmd += ' | select devicename, driverversion, driverdate, deviceid, friendlyname"' + # with balloon device + balloon = balloon0 + balloon_dev_devid = balloon0 + balloon_dev_add_bus = yes + # with viostor/scsi + bootindex_image1 = 0 + images += " stg0 stg1" + image_name_stg0 = "images/stg0" + image_name_stg1 = "images/stg1" + image_size_stg0 = 5G + image_size_stg1 = 5G + remove_image_stg0 = yes + remove_image_stg1 = yes + force_create_image_stg0 = yes + force_create_image_stg1 = yes + boot_drive_stg0 = yes + boot_drive_stg1 = yes + 
drive_format_stg0 = virtio + drive_format_stg1 = scsi-hd + # with netkvm + nics += " nic2" + nic_model_nic2 = virtio + # with rng + virtio_rngs = rng0 + backend_rng0 = rng-random + backend_type = passthrough + filename_passthrough = /dev/urandom + # with input + variants: + - default: + - with_input_device: + required_qemu = [2.4.0, ) + no Win2008..sp2 + inputs = input1 input2 input3 + input_dev_bus_type_input1 = virtio + input_dev_bus_type_input2 = virtio + input_dev_bus_type_input3 = virtio + input_dev_type_input1 = keyboard + input_dev_type_input2 = mouse + input_dev_type_input3 = tablet + - check_os_basic_info: + no RHEL.6, RHEL.7 + gagent_check_type = os_basic_info + cmd_get_host_name = hostname + cmd_set_host_name = hostnamectl set-hostname newhostname && cat /etc/hostname + cmd_get_timezone_name = date +%Z + cmd_get_timezone_offset = date +%:z + cmd_get_users = who + cmd_get_users_name = who | awk -F ' ' '{print $1}' + cmd_get_user = who |grep %s + time_pattern = "\S+\s+\S+\s+(.*)\s+\(" + cmd_time_trans = date -d "%s" +%%s + Windows: + cmd_get_timezone_name = wmic timezone get StandardName |findstr /vi StandardName + cmd_get_timezone_dlight_name = wmic timezone get DaylightName |findstr /vi DaylightName + cmd_get_timezone_offset = wmic timezone get Caption |findstr /vi Caption + cmd_get_users = query user |findstr /v USERNAME + cmd_get_user = query user |findstr /i %s + cmd_get_user_domain = wmic useraccount where name='%s' get domain |findstr /vi domain + # 3/3/2020 9:35 AM + time_pattern = " (\d+/\d+/\d+ \d+:\d+ [APap][Mm])" + cmd_time_trans = python2.7 -c "import time; dt='%s'; t=time.mktime(time.strptime(dt, '%%m/%%d/%%Y %%I:%%M %%p')); print(t)" + - check_os_info: + no RHEL.6 + gagent_check_type = os_info + os_id = rhel + cmd_get_full_name = cat /etc/redhat-release + cmd_get_kernel_ver = uname -v + cmd_get_kernel_rel = uname -r + cmd_get_machine_type = uname -m + Windows: + os_id = mswindows + cmd_get_full_name = wmic os get caption |findstr /vi caption + cmd_get_kernel_ver = wmic os get version |findstr /vi version + cmd_get_machine_type = wmic os get osarchitecture |findstr /vi osarchitecture - gagent_check_blacklist: only Linux gagent_check_type = blacklist @@ -302,6 +407,8 @@ - gagent_user_logoff: only Windows gagent_check_type = user_logoff + - gagent_qgastatus_after_remove_qga: + gagent_check_type = qgastatus_after_remove_qga variants: - virtio_serial: gagent_serial_type = virtio diff --git a/qemu/tests/cfg/qemu_img_check_fragmentation.cfg b/qemu/tests/cfg/qemu_img_check_fragmentation.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c2b21c952462365fbf4eb138a713c3b416fdc99d --- /dev/null +++ b/qemu/tests/cfg/qemu_img_check_fragmentation.cfg @@ -0,0 +1,18 @@ +- qemu_img_check_fragmentation: + only raw + virt_test_type = qemu + type = qemu_img_check_fragmentation + required_qemu = [5.1.0-2, ) + start_vm = no + force_create_image_stg = yes + images = stg + image_size_stg = 10G + image_format_stg = raw + image_name_stg = "images/stg" + offsets = "0 4096" + timeout = 900 + fragmentation_maximum = 10000 + check_fragmentation_cmd = "filefrag %s" + extents_number_pattern = ":\s+(\d+)\s+extents found" + qemu_img_bench_cmd = "qemu-img bench -f ${image_format_stg} -t none -n -w %s -c 1000000 -S 8192 -o %s" + remove_image_stg = yes diff --git a/qemu/tests/cfg/luks_compare.cfg b/qemu/tests/cfg/qemu_img_compare.cfg similarity index 96% rename from qemu/tests/cfg/luks_compare.cfg rename to qemu/tests/cfg/qemu_img_compare.cfg index 
2f7c0d51d29a1bac1d6cc96984f59ad5a38eec88..87148a1a7e64b4f7372f56567a310f13834ee331 100644 --- a/qemu/tests/cfg/luks_compare.cfg +++ b/qemu/tests/cfg/qemu_img_compare.cfg @@ -1,5 +1,4 @@ -- luks_compare: - only luks +- qemu_img_compare: virt_test_type = qemu type = luks_convert start_vm = no diff --git a/qemu/tests/cfg/qemu_img_measure.cfg b/qemu/tests/cfg/qemu_img_measure.cfg index de13a07500ebe396f9ae283304a14c0e9eb72766..f9ea3fd40d4fdb8ff04a3b4888235e2057802cb9 100644 --- a/qemu/tests/cfg/qemu_img_measure.cfg +++ b/qemu/tests/cfg/qemu_img_measure.cfg @@ -3,6 +3,8 @@ kill_vm = yes start_vm = no required_qemu = [2.10.0, ) + luks: + required_qemu = [5.0.0-2, ) images = "test1" image_name_test1 = "images/test1" variants: @@ -28,3 +30,9 @@ target_format = raw image_format_tgt1 = ${target_format} image_format_tgt2 = ${target_format} + - convert_to_luks: + target_format = luks + image_format_tgt1 = ${target_format} + image_format_tgt2 = ${target_format} + image_secret_tgt1 = convert + image_secret_tgt2 = convert diff --git a/qemu/tests/cfg/qemu_img_negative.cfg b/qemu/tests/cfg/qemu_img_negative.cfg index c6a450a427ab49d8424529d5942bf26667245a52..49ea90c76438b5df43fc3e53bb837ab719d85e13 100644 --- a/qemu/tests/cfg/qemu_img_negative.cfg +++ b/qemu/tests/cfg/qemu_img_negative.cfg @@ -1,7 +1,7 @@ - qemu_img_negative: - only qcow2 variants: - rebase: + only qcow2 type = rebase_negative_test images = "base sn" image_name_base = "images/rebase_negative_base" @@ -26,3 +26,14 @@ image_name_new = "images/rebase_negative_new" create_image_new = no rebase_list = "sn > new" + - luks_with_non_utf8_secret: + only luks + type = image_creation_luks_with_non_utf8_secret + start_vm = no + create_image = no + images = stg + err_info = "Data from secret\s+\w+\s+is not valid UTF-8" + echo_non_utf8_secret_cmd = "echo -n -e '\x3a\x3c\x3b\xff' > %s" + qemu_img_create_cmd = "qemu-img create -f luks --object secret,id=sec0,file=%s -o key-secret=sec0 %s 10M" + image_name_stg = "images/stg" + remove_image_stg = yes diff --git a/qemu/tests/cfg/qemu_option_check.cfg b/qemu/tests/cfg/qemu_option_check.cfg index 30a0e95e5bb01bc449af68566b9c83800530b5c0..ec4a9e7f298311c7bb114de2cc33c64b34c7efe0 100644 --- a/qemu/tests/cfg/qemu_option_check.cfg +++ b/qemu/tests/cfg/qemu_option_check.cfg @@ -10,13 +10,13 @@ - e1000: RHEL: only RHEL.6 RHEL.7 - no ppc64,ppc64le,s390x + only i386, x86_64 device_name = e1000 - e1000e: - no ppc64,ppc64le,s390x + only i386, x86_64 device_name = e1000e - rtl8139: - no ppc64,ppc64le,s390x + only i386, x86_64 device_name = rtl8139 - spapr-vlan: only ppc64,ppc64le diff --git a/qemu/tests/cfg/qmp_command.cfg b/qemu/tests/cfg/qmp_command.cfg index 6d134d3ca47cb89a509e7c17ad0fdb26151f8191..96cd7fb3d96ad429ab666d915d0ab336d4d60aec 100644 --- a/qemu/tests/cfg/qmp_command.cfg +++ b/qemu/tests/cfg/qmp_command.cfg @@ -175,11 +175,21 @@ - qmp_device-list-properties: qmp_cmd = "device-list-properties typename=virtio-balloon-pci" cmd_result_check = contain - cmd_return_value = "['command_serr_enable', 'multifunction']" + cmd_return_value = ['multifunction'] + Host_RHEL.m7, Host_RHEL.m8.u0, Host_RHEL.m8.u1, Host_RHEL.m8.u2: + cmd_return_value = ['command_serr_enable', 'multifunction'] s390x: qmp_cmd = "device-list-properties typename=virtio-balloon-ccw" cmd_return_value = "['notify_on_empty', 'ioeventfd', 'any_layout', 'devno', 'indirect_desc', 'guest-stats', 'guest-stats-polling-interval', 'event_idx', 'virtio-backend', 'iommu_platform', 'deflate-on-oom', 'max_revision']" + aarch64: + cmd_return_value = 
"['multifunction', 'ats', 'notify_on_empty']" - qmp_query-command-line-options: qmp_cmd = "query-command-line-options" cmd_result_check = contain cmd_return_value = "[{'option': 'option-rom'}, {'name': 'bootindex'}, {'name': 'romfile'}]" + - qmp_query-target: + qmp_cmd = "query-target" + cmd_result_check = equal + - qmp_query-machines: + qmp_cmd = "query-machines" + cmd_result_check = "contain" diff --git a/qemu/tests/cfg/qmp_event_notification.cfg b/qemu/tests/cfg/qmp_event_notification.cfg index 960a6e371e1222af72d332f6145f74895e2ef2ec..cefeb143c1a0d26fd4f9eb9c437999da7aabb4bb 100644 --- a/qemu/tests/cfg/qmp_event_notification.cfg +++ b/qemu/tests/cfg/qmp_event_notification.cfg @@ -11,7 +11,7 @@ monitor_type_qmp2 = qmp variants: - from_guest: - only qmp_system_reset qmp_system_powerdown qmp_rtc_change qmp_watchdog qmp_suspend qmp_disk_suspend qmp_guest_panicked + only qmp_system_reset qmp_system_powerdown qmp_rtc_change qmp_watchdog qmp_guest_panicked event_cmd_type = guest_cmd - from_qmp: only qmp_quit qmp_system_reset qmp_system_powerdown @@ -52,29 +52,9 @@ no Windows, aarch64 event_cmd = hwclock --systohc event_check = "RTC_CHANGE" - - qmp_suspend: - no Windows - usbs = "" - usb_devices = "" - event_cmd = pm-suspend - event_check = "SUSPEND" - event_cmd_options = "ignore_all_errors=True" - extra_params += " -global PIIX4_PM.disable_s3=0" - q35: - extra_params += " -global ICH9-LPC.disable_s3=0" - post_event_cmd = system_wakeup - post_event_cmd_type = qmp_cmd - - qmp_disk_suspend: - no Windows - usbs = "" - usb_devices = "" - event_cmd = pm-hibernate - event_check = "SUSPEND_DISK" - event_cmd_options = "ignore_all_errors=True" - extra_params += " -global PIIX4_PM.disable_s4=0" - q35: - extra_params += " -global ICH9-LPC.disable_s4=0" - qmp_watchdog: + aarch64: + no Host_RHEL no Windows event_cmd = echo 0 > /dev/watchdog event_check = "WATCHDOG" diff --git a/qemu/tests/cfg/queues_number_test.cfg b/qemu/tests/cfg/queues_number_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6ea0f92b97d338c15fa0c5459fca4770d5d75a40 --- /dev/null +++ b/qemu/tests/cfg/queues_number_test.cfg @@ -0,0 +1,22 @@ +- queues_number_test: + only Linux + only virtio_net + queues = 4 + virt_test_type = qemu + type = queues_number_test + # netperf server is host, the netperf client is the main vm + wait_bg_time = 45 + run_bgstress = netperf_stress + repeat_counts = 90 + background_ping_time = 180 + netperf_test_duration = 180 + # netperf_para_sessions should bigger than queues parameter. 
+ netperf_para_sessions = 6 + test_protocol = TCP_STREAM + netperf_output_unit = m + netperf_sizes = 1024 + netperf_server_link = netperf-2.7.1.tar.bz2 + netperf_client_link = ${netperf_server_link} + server_path = /var/tmp/ + client_path = ${server_path} + change_list = 1,2,1,3,1,4,1 diff --git a/qemu/tests/cfg/rebase_second_snapshot_to_base.cfg b/qemu/tests/cfg/rebase_second_snapshot_to_base.cfg index cc5c66c93f8559ac8800e2749c3cf2c55bff3b38..e15fdb388031fadfee987c1d46e18915bd481d58 100644 --- a/qemu/tests/cfg/rebase_second_snapshot_to_base.cfg +++ b/qemu/tests/cfg/rebase_second_snapshot_to_base.cfg @@ -1,5 +1,4 @@ - rebase_second_snapshot_to_base: - only raw, luks virt_test_type = qemu type = rebase_second_snapshot_to_base kill_vm = yes diff --git a/qemu/tests/cfg/remote_block_hotplug.cfg b/qemu/tests/cfg/remote_block_hotplug.cfg index ce6e03ac5a422b1c9bfda07bb7ee8f78a371259e..12df57feecfb956a54ae9bba63e1ccca8d61ce4a 100644 --- a/qemu/tests/cfg/remote_block_hotplug.cfg +++ b/qemu/tests/cfg/remote_block_hotplug.cfg @@ -1,13 +1,15 @@ # Network storage backends: # iscsi_direct # ceph +# gluster_direct +# nbd # The following testing scenarios are covered: # hotplug/hotunplug a remote virtio-scsi image # hotplug/hotunplug a remote virtio-blk image - block_hotplug_with_remote_storage: install setup image_copy unattended_install.cdrom no RHEL.3.9 - only iscsi_direct ceph + only iscsi_direct ceph gluster_direct nbd virt_test_type = qemu type = block_hotplug bootindex_image1 = 0 @@ -26,14 +28,24 @@ iscsi_direct: check_disk_size = no lun_stg0 = 1 + nbd: + remove_image_stg0 = no + force_create_image_stg0 = no variants: - fmt_qcow2: image_format_stg0 = qcow2 + nbd: + nbd_port_stg0 = 10820 - fmt_raw: image_format_stg0 = raw + nbd: + nbd_port_stg0 = 10819 - fmt_luks: image_format_stg0 = luks image_secret_stg0 = hotplug + nbd: + nbd_port_stg0 = 10821 + image_secret_stg0 = redhat variants: - with_block: drive_format_stg0 = virtio diff --git a/qemu/tests/cfg/remote_block_resize.cfg b/qemu/tests/cfg/remote_block_resize.cfg index 4a6aa1030dea41d3193f75ffd197a72c4760288a..c4b25fd6d8926d92e1a3dfc428b3530eda4e2527 100644 --- a/qemu/tests/cfg/remote_block_resize.cfg +++ b/qemu/tests/cfg/remote_block_resize.cfg @@ -1,5 +1,6 @@ # Network storage backends: # ceph +# gluster_direct # The following testing scenarios are covered: # extend remote image # shrink remote image @@ -8,7 +9,7 @@ - block_resize_with_remote_storage: no RHEL.4 - only ceph + only ceph gluster_direct type = remote_block_resize extend_ratio = 1.5 shrink_ratio = 0.9 diff --git a/qemu/tests/cfg/remote_boot.cfg b/qemu/tests/cfg/remote_boot.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9a8a97119660a193bb752ae776d5057ffbd72b2a --- /dev/null +++ b/qemu/tests/cfg/remote_boot.cfg @@ -0,0 +1,141 @@ +# Network storage backends: +# gluster_direct +# nbd +# iscsi_direct +# ceph +# The following testing scenarios are covered: +# - with_local_image +# boot from remote storage with a local image attached +# - with_remote_images +# boot VM with another 3 remote images, check if there +# is memory leak for qemu-kvm +# - with_debug +# boot VM with valid and invalid debug levels, make sure +# the VM can start up without any error +# - with_nbd_boot_image +# - xpt_raw_as_raw_format_raw +# create a raw image +# export the raw image in raw format +# convert system image to the exported image +# boot from the exported image in raw format +# - xpt_qcow2_as_raw_format_qcow2 +# create a qcow2(full/falloc) image +# export the qcow2 image in raw 
format +# convert system image to the exported image +# boot from the exported image in qcow2 format +# - xpt_luks_as_raw_format_luks +# create a luks image +# export the luks image in raw format +# convert system image to the exported image +# boot from the exported image in luks format +# - xpt_qcow2_as_qcow2_format_raw +# create a qcow2(no preallocation) image +# export the qcow2 image in qcow2 format +# convert system image to the exported image +# boot from the exported image in raw format +# - xpt_luks_as_luks_format_raw +# create a luks image +# export the luks image in luks format +# convert system image to the exported image +# boot from the exported image in raw format +# - repeated_reboot +# repeated reboot testing + +- boot_from_remote_storage: + type = boot_from_remote + virt_test_type = qemu + bootindex_image1 = 0 + variants: + - repeated_reboot: + only iscsi_direct ceph gluster_direct nbd + type = boot + reboot_count = 5 + reboot_method = shell + login_timeout = 480 + - with_nbd_boot_image: + only nbd + type = boot_from_nbd_image + start_vm = 'no' + + # The following is specified for a local image 'stg0', + # used for qemu-nbd export with ip access + local_image_tag = 'stg0' + image_name_stg0 = images/stg0 + images += ' ${local_image_tag}' + enable_nbd_stg0 = 'no' + storage_type_stg0 ='filesystem' + remove_image_stg0 = 'yes' + nbd_port_stg0 = 10850 + + # The following is specified for nbd image 'stg1', + # i.e. the exported 'stg0', used for nbd image accessing + convert_target = 'stg1' + nbd_port_stg1 = ${nbd_port_stg0} + variants: + - xpt_raw_as_raw_format_raw: + image_format_stg1 = 'raw' + image_format_stg0 = 'raw' + nbd_export_format_stg0 = 'raw' + - xpt_qcow2_as_raw_format_qcow2: + only Host_RHEL.m8 Host_RHEL.m7.u2 Host_RHEL.m7.u3 + image_format_stg1 = 'qcow2' + image_format_stg0 = 'qcow2' + nbd_export_format_stg0 = 'raw' + variants: + - falloc: + preallocated = falloc + - full: + preallocated = full + - xpt_luks_as_raw_format_luks: + image_format_stg1 = 'luks' + image_format_stg0 = 'luks' + nbd_export_format_stg0 = 'raw' + image_secret = redhat + - xpt_qcow2_as_qcow2_format_raw: + image_format_stg1 = 'raw' + image_format_stg0 = 'qcow2' + nbd_export_format_stg0 = 'qcow2' + - xpt_luks_as_luks_format_raw: + image_format_stg1 = 'raw' + image_format_stg0 = 'luks' + nbd_export_format_stg0 = 'luks' + image_secret = redhat + - with_debug: + only gluster_direct + scenario = boot_with_debug + debug_level_low = 0 + debug_level_high = 9 + - with_local_image: + only gluster_direct + scenario = boot_with_local_image + images += " stg" + image_size_stg = 1G + image_name_stg = images/stg + enable_gluster_stg = no + remove_image_stg = yes + force_create_image_stg = yes + blk_extra_params_stg = "serial=TARGET_DISK0" + Host_RHEL.m6..ide: + blk_extra_params_stg = "wwn=0x5000123456789abc" + disk_op_cmd = 'dd if=/dev/urandom of={disk} bs=1M count=200 oflag=direct' + - with_remote_images: + only gluster_direct + only virtio_scsi virtio_blk + scenario = boot_with_remote_images + images += " stg1 stg2 stg3" + boot_drive_stg1 = no + boot_drive_stg2 = no + boot_drive_stg3 = no + image_name_stg1 = "images/stg1" + image_name_stg2 = "images/stg2" + image_name_stg3 = "images/stg3" + image_size_stg1 = 1G + image_size_stg2 = 1G + image_size_stg3 = 1G + remove_image_stg1 = yes + remove_image_stg2 = yes + remove_image_stg3 = yes + force_create_image_stg1 = yes + force_create_image_stg2 = yes + force_create_image_stg3 = yes + memory_diff = 100M diff --git a/qemu/tests/cfg/remote_image_access.cfg 
b/qemu/tests/cfg/remote_image_access.cfg new file mode 100644 index 0000000000000000000000000000000000000000..31356642da7820858d7fff5e3ca06d8516fc255d --- /dev/null +++ b/qemu/tests/cfg/remote_image_access.cfg @@ -0,0 +1,119 @@ +# Network storage backends: +# nbd +# +# The following testing scenarios are covered: +# - with_nmap +# Create a local image with qemu-img +# Export it with qemu-nbd +# Scan the port +# - with_guestfish +# Access system image with guestfish without booting up vm +# - with_qemu_io.drop_connection +# Create a local image with qemu-img +# Export it with qemu-nbd +# Access nbd image with qemu-io -c 'quit' +# - with_qemu_io.drop_sync +# Create a text file with echo +# Export it with qemu-nbd +# Access nbd image with qemu-io -c 'read' +# - with_unix_socket +# Clone system image with qemu-img +# Export it with qemu-nbd(unix socket access) +# Boot VM from the exported image +# - with_ncat +# Clone system image with qemu-img +# Add it into VM(without device driver) +# Export it with internal NBD server +# Access it with ncat +# Boot from the exported nbd image + +- access_with_remote_storage: + only nbd + virt_test_type = qemu + start_vm = 'no' + + # The following is specified for a local image 'stg0', + # used for qemu-nbd export with ip access + local_image_tag = 'stg0' + image_name_stg0 = images/stg0 + enable_nbd_stg0 = 'no' + storage_type_stg0 ='filesystem' + image_size_stg0 = '1G' + remove_image_stg0 = 'yes' + nbd_export_format_stg0 = 'raw' + nbd_port_stg0 = 10850 + + # The following is specified for nbd image 'stg1', + # i.e. the exported 'stg0', used for accessing the nbd image + nbd_image_tag = 'stg1' + nbd_port_stg1 = ${nbd_port_stg0} + variants: + - with_ncat: + type = remote_image_ncat_access + kill_vm = 'yes' + cmds_installed_host += ' ncat' + + # ncat quits only if timeout + errmsg_check = 'Idle timeout expired' + nbd_export_writable = 'yes' + image_format_stg1 = 'raw' + variants: + - with_inet: + ncat_cmd = 'ncat {localhost} ${nbd_port_stg1} -i 2' + + # for tcp(type=inet) only + nbd_unix_socket = '' + - with_socket: + # for socket(type=unix) only + nbd_server_stg0 = '' + nbd_server_stg1 = '' + nbd_unix_socket_stg0 = /var/run/nbd_${nbd_image_tag}.sock + nbd_unix_socket_stg1 = ${nbd_unix_socket_stg0} + ncat_cmd = 'ncat -U ${nbd_unix_socket_stg1} -i 2' + - with_unix_socket: + type = remote_image_unix_socket_access + images += ' ${local_image_tag}' + + # for socket(type=unix) only + nbd_server_stg0 = '' + nbd_server_stg1 = '' + nbd_unix_socket_stg0 = /var/run/nbd_${nbd_image_tag}.sock + nbd_unix_socket_stg1 = ${nbd_unix_socket_stg0} + - with_nmap: + type = remote_image_nmap_access + cmds_installed_host += ' nmap' + images = ${local_image_tag} + force_create_image_stg0 = 'yes' + msg_check = '${nbd_port_stg0}/tcp open' + nmap_cmd = 'nmap {localhost} -p ${nbd_port_stg1}' + + # for tcp(type=inet) only + nbd_unix_socket = '' + - with_qemu_io: + type = remote_image_qemu_io_access + images = ${local_image_tag} + qemu_io_cmd = '{qemu_io} {tls_creds} {secret} {fmt} -c "{subcmd}" {filename}' + variants: + - drop_connection: + qemu_io_subcmd = 'quit' + - drop_sync: + image_format_stg0 = 'raw' + image_format_stg1 = ${nbd_export_format_stg0} + nbd_export_name_stg0 = nbd_image_stg + nbd_export_name_stg1 = ${nbd_export_name_stg0} + create_description_cmd_stg0 = 'printf %02048d' + create_image_cmd_stg0 = 'echo "{desc}" > {filename}' + count_bytes = '100' + qemu_io_subcmd = 'read 0 ${count_bytes}' + msg_check = 'read ${count_bytes}/${count_bytes} bytes' + - with_guestfish: + only 
raw qcow2 + only Linux + type = remote_image_guestfish_access + start_vm = 'yes' + kill_vm = 'yes' + cmds_installed_host += ' guestfish' + msg_check = 'Access image without booting up vm' + guest_file_name = /tmp/guestfish.testfile + write_cmd = 'export LIBGUESTFS_BACKEND=direct; guestfish --format={fmt} -a {uri} -i sh "echo ${msg_check} > ${guest_file_name}"' + read_cmd = 'export LIBGUESTFS_BACKEND=direct; guestfish --format={fmt} -a {uri} -i sh "cat ${guest_file_name}"' diff --git a/qemu/tests/cfg/remote_image_convert.cfg b/qemu/tests/cfg/remote_image_convert.cfg index 388f2e5f5281e320c5764172ffb7f54a092af671..8d9ee5bad76690995a928a91a647f39b349fc8b7 100644 --- a/qemu/tests/cfg/remote_image_convert.cfg +++ b/qemu/tests/cfg/remote_image_convert.cfg @@ -1,15 +1,20 @@ # Network storage backends: # iscsi_direct # ceph +# gluster_direct +# nbd # Cache mode: # remote image1 -> remote raw image2 # remote image1 -> local raw image2 -> remote raw image3 +# Compression: +# remote image1 -> local qcow2 image -> remote qcow2 image2 +# remote image1 -> remote qcow2 image2 # The following testing scenarios are covered: # remote image1 -> local image -> remote image2 # remote image1 -> remote image2 - remote_image_convert: - only iscsi_direct ceph + only iscsi_direct ceph gluster_direct nbd virt_test_type = qemu type = remote_convert start_vm = no @@ -38,20 +43,36 @@ cache_mode = directsync - writethrough: cache_mode = writethrough + - with_compression: + # only qcow2 img can be the target converted img + convert_compressed = yes + only fmt_qcow2_convert + no fmt_raw_remote fmt_luks_remote variants: # target image is 'convert' - fmt_raw_convert: image_format_convert = raw + remote_to_remote: + nbd: + nbd_port_convert = 10819 - fmt_qcow2_convert: image_format_convert = qcow2 + remote_to_remote: + nbd: + nbd_port_convert = 10820 - fmt_luks_convert: image_format_convert = luks - image_secret_convert = convert + image_secret_convert = redhat + remote_to_remote: + nbd: + nbd_port_convert = 10821 variants: - remote_to_local_to_remote: # target image is 'convert', converted from remote enable_iscsi_convert = no enable_ceph_convert = no + enable_gluster_convert = no + enable_nbd_convert = no image_raw_device_convert = no storage_type_convert = filesystem @@ -60,16 +81,21 @@ image_name_remote = "images/image_convert_remote" iscsi_direct: lun_remote = 1 - variants: # target image is 'remote' - fmt_raw_remote: image_format_remote = raw + nbd: + nbd_port_remote = 10819 - fmt_qcow2_remote: image_format_remote = qcow2 + nbd: + nbd_port_remote = 10820 - fmt_luks_remote: image_format_remote = luks - image_secret_remote = convert_remote + image_secret_remote = redhat + nbd: + nbd_port_remote = 10821 - remote_to_remote: # target image is 'convert', converted from remote iscsi_direct: diff --git a/qemu/tests/cfg/remote_image_multiwrite.cfg b/qemu/tests/cfg/remote_image_multiwrite.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b25ec1b880dd689a5098c2039b629df1b83d2970 --- /dev/null +++ b/qemu/tests/cfg/remote_image_multiwrite.cfg @@ -0,0 +1,33 @@ +# Network storage backends: +# iscsi_direct +# ceph +# gluster_direct +# The following testing scenario is covered: +# Format a data disk in VM: +# rhel7+: xfs and ext4 covered +# rhel6: ext4 covered + +- remote_image_multiwrite: + only iscsi_direct ceph gluster_direct + only RHEL + virt_test_type = qemu + type = remote_image_multiwrite + start_vm = yes + kill_vm = yes + images += " stg" + image_name_stg = images/stg + image_size_stg = 1G + 
remove_image_stg = yes + force_create_image_stg = yes + blk_extra_params_stg = "serial=TARGET_DISK0" + Host_RHEL.m6..ide: + blk_extra_params_stg = "wwn=0x5000123456789abc" + iscsi_direct: + lun_stg = 1 + variants: + - xfs: + no RHEL.3 RHEL.4 RHEL.5 RHEL.6 + disk_format_stg = xfs + - ext4: + no RHEL.3 RHEL.4 RHEL.5 + disk_format_stg = ext4 diff --git a/qemu/tests/cfg/remote_image_snapshot.cfg b/qemu/tests/cfg/remote_image_snapshot.cfg index c7d32a233ae384d44f498a91b13eb3408942b329..47a7f63904dd5f68691196fe0336bb90c84f5885 100644 --- a/qemu/tests/cfg/remote_image_snapshot.cfg +++ b/qemu/tests/cfg/remote_image_snapshot.cfg @@ -1,9 +1,16 @@ +# Network storage backends: +# iscsi_direct +# ceph +# gluster_direct +# nbd +# libcurl (read-only) +# libssh (read-only) # These tcs only focus on the following scenarios: # remote image -> local snapshot # remote image -> remote snapshot - remote_image_snapshot: - only iscsi_direct ceph + only iscsi_direct ceph gluster_direct nbd libcurl libssh virt_test_type = qemu type = qemu_disk_img_info image_chain= "image1 snA" @@ -29,9 +36,16 @@ - to_local: enable_iscsi_snA = no enable_ceph_snA = no + enable_gluster_snA = no + enable_nbd_snA = no + enable_curl_snA = no + enable_ssh_snA = no image_raw_device_snA = no storage_type_snA = filesystem + libcurl, libssh: + image_readonly_snA = no - to_remote: + no nbd libcurl libssh iscsi_direct: # make sure size of lun_snA equals to size of lun_image1 # hard code here for avocado-vt cannot select luns by now diff --git a/qemu/tests/cfg/remote_negative.cfg b/qemu/tests/cfg/remote_negative.cfg index 7b720d1bd6ac05bb294dd04ded601ddf910a8f62..25167759bc98b5e3e140b7f1b0b672d3eb17b40a 100644 --- a/qemu/tests/cfg/remote_negative.cfg +++ b/qemu/tests/cfg/remote_negative.cfg @@ -1,14 +1,64 @@ # Network storage backends: -# iscsi_direct ceph +# iscsi_direct +# gluster_direct +# nbd # The following testing scenarios are covered: -# Failed to boot VM from a readonly image +# - readonly_image +# Failed to boot VM from a readonly image +# - remote_server_disconnected +# Make remote server inaccessible one by one +# (make sure that at least one server is accessible), +# qemu can access VM without any issue +# - long_export_name +# Export an image with long export name(4096 bytes) +# Accessing it with qemu-img can get proper error msg - remote_storage_negative: - only iscsi_direct ceph virt_test_type = qemu + kill_vm = yes variants: + - long_export_name: + only nbd + type = nbd_long_export_name + start_vm = no + + # local image to be exported + images = 'stg0' + image_name_stg0 = images/stg0 + enable_nbd_stg0 = 'no' + storage_type_stg0 ='filesystem' + image_size_stg0 = '1G' + image_format_stg0 = 'raw' + force_create_image_stg0 = 'yes' + remove_image_stg0 = 'yes' + nbd_export_format_stg0 = 'raw' + nbd_port_stg0 = 10850 + + # nbd image to be accessed, i.e. 
+ # the exported 'stg0' + nbd_image_tag = 'stg1' + nbd_port_stg1 = ${nbd_port_stg0} + image_format_stg1 = 'raw' + nbd_unix_socket = '' + + create_export_name_cmd = printf %{length}d 1|tr ' ' a + max_export_name_len = 4096 + access_export_name_lens = 4097 4000 4095 + errmsg_check_4097 = 'export name too long to send to server' + errmsg_check_4000 = 'Requested export not available' + errmsg_check_4095 = 'Requested export not available' - readonly_image: only iscsi_direct type = negative_create start_vm = no error_msg = "(LUN is write protected|Block node is read-only)" + - remote_server_disconnected: + only gluster_direct + type = remote_server_disconnected + qemu_force_use_drive_expression = no + cmds_installed_host += " ping iptables" + disconn_cmd = "iptables -I INPUT -s {source} -j DROP" + recover_cmd = "iptables -D INPUT -s {source} -j DROP" + conn_check_cmd = "ping -c 2 {source} -w 5" + disk_op_cmd = "dd if=/dev/urandom of=/tmp/remote_down.tmp oflag=direct bs=4k count=1024" + disk_op_timeout = 300 diff --git a/qemu/tests/cfg/remote_readonly_storage.cfg b/qemu/tests/cfg/remote_readonly_storage.cfg new file mode 100644 index 0000000000000000000000000000000000000000..3c296efaa95307e001fdb8870bbe09282088a4d1 --- /dev/null +++ b/qemu/tests/cfg/remote_readonly_storage.cfg @@ -0,0 +1,53 @@ +# Network storage backends: +# libcurl +# The following testing scenarios are covered: +# - boot_with_readonly_image +# boot vm with an iso image from https server +# - access_with_qemu_info +# access image with qemu-img info +# - cookie_with_secret_object +# check cookie data in tcpdump output + +- remote_readonly_storage: + only libcurl + virt_test_type = qemu + start_vm = 'no' + + # cd1 is the libcurl iso image + remote_image_tag = 'cd1' + cdroms = "${remote_image_tag}" + cdrom_cd1 = Fedora-Server-dvd-x86_64-30-1.2.iso + image_format_cd1 = 'raw' + curl_server_cd1 = archives.fedoraproject.org + curl_protocol_cd1 = https + curl_path_cd1 = pub/archive/fedora/linux/releases/30/Server/x86_64/iso/Fedora-Server-dvd-x86_64-30-1.2.iso + curl_sslverify_cd1 = '' + curl_username_cd1 = '' + curl_password_cd1 = '' + curl_cookie_secret_cd1 = '' + curl_timeout_cd1 = 30 + variants: + - cookie_with_secret_object: + only Linux + type = curl_cookie_with_secret + tcpdump_cmd = 'tcpdump -U -c 5 -A host {server} -w {dump_file} &' + curl_protocol_cd1 = http + curl_cookie_secret_cd1 = 'curl_cookie_data=redhat' + - boot_with_readonly_image: + only Linux + type = boot_with_remote_readonly_image + kill_vm = 'yes' + + # stg0 is the local image, where VM starts + convert_target = 'stg0' + bootindex_stg0 = 0 + image_name_stg0 = images/stg0 + images += ' ${convert_target}' + enable_curl_stg0 = 'no' + storage_type_stg0 ='filesystem' + remove_image_stg0 = 'yes' + image_readonly_stg0 = 'no' + - access_with_qemu_info: + type = remote_image_qemu_info_access + ascii_symbol = '%5f' + replace_symbol = '_' diff --git a/qemu/tests/cfg/resize_qemu_img.cfg b/qemu/tests/cfg/resize_qemu_img.cfg index 8ae2638e1b4b49aaf0845f8b13fb4e51c842b0fa..ba0ea251275daeac1103aa0aab05c1ea850a3731 100644 --- a/qemu/tests/cfg/resize_qemu_img.cfg +++ b/qemu/tests/cfg/resize_qemu_img.cfg @@ -6,6 +6,16 @@ type = resize_qemu_img images = "test" image_name_test = images/test + variants: + - @default: + - with_preallocation: + variants: + - off: + preallocation = off + - full: + preallocation = full + - falloc: + preallocation = falloc variants: - increase_size: # These two params must be provided to calculate size changes. 
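For reference, the arithmetic behind image_size_test and size_changes can be sketched as below; resize_image is a hypothetical helper, not the provider's resize_qemu_img test, and the --preallocation and --shrink flags are the qemu-img resize options for growing and shrinking respectively (which is why the decrease variant excludes full and falloc).

```python
from avocado.utils import process
from virttest import utils_numeric


def resize_image(image_path, base_size, change, prealloc=None):
    """Apply one size_changes entry such as '+1G' or '-2G' (sketch)."""
    def to_bytes(size):
        return int(float(utils_numeric.normalize_data_size(
            size, order_magnitude="B", factor=1024)))

    sign = -1 if change.startswith("-") else 1
    expected = to_bytes(base_size) + sign * to_bytes(change.lstrip("+-"))
    cmd = "qemu-img resize"
    if prealloc:
        # preallocation only applies when growing the image
        cmd += " --preallocation=%s" % prealloc
    if sign < 0:
        cmd += " --shrink"  # shrinking must be acknowledged explicitly
    process.system("%s %s %s" % (cmd, image_path, change), shell=True)
    return expected  # compare with 'virtual size' from qemu-img info
```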
@@ -16,3 +26,4 @@ required_qemu = [2.12.0, ) image_size_test = 10G size_changes = "-2G" + no full falloc diff --git a/qemu/tests/cfg/rng_bat.cfg b/qemu/tests/cfg/rng_bat.cfg index d0af4ad9e04460ad479a57f2aa98d0b3a4c94eed..14e299340f66b9175aab7fa4f2110c5e9d9fae78 100644 --- a/qemu/tests/cfg/rng_bat.cfg +++ b/qemu/tests/cfg/rng_bat.cfg @@ -30,8 +30,20 @@ only q35 no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 no Win2008 Win2008..r2 Win2012 Win2012..r2 + no Host_RHEL.m7 + intel_iommu = yes virtio_dev_iommu_platform = on - enable_guest_iommu = yes + iommu_eim = on virtio_dev_ats = on machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on,device-iotlb=on" + variants: + - @default: + Linux: + enable_guest_iommu = no + - scenario1: + only Linux + enable_guest_iommu = yes + - scenario2: + only Linux + enable_guest_iommu = yes + guest_iommu_option = pt diff --git a/qemu/tests/cfg/seabios_order_once.cfg b/qemu/tests/cfg/seabios_order_once.cfg index 936e9529c0744f902b2d65b888216ae4c58d7b2e..636830a2abe236b86d417cfecb789be86aced524 100644 --- a/qemu/tests/cfg/seabios_order_once.cfg +++ b/qemu/tests/cfg/seabios_order_once.cfg @@ -4,6 +4,7 @@ boot_menu = on start_vm =no enable_sga = yes + image_boot = no images = "stg" image_name_stg = "images/stg" image_size_stg = 100M diff --git a/qemu/tests/cfg/single_driver_install.cfg b/qemu/tests/cfg/single_driver_install.cfg index 34509edc8db2a84722bd77393800041115a90f61..e961426cc32d89dcfbd1f2af965199428d97d699 100644 --- a/qemu/tests/cfg/single_driver_install.cfg +++ b/qemu/tests/cfg/single_driver_install.cfg @@ -102,3 +102,29 @@ input_dev_type_input1 = keyboard - device_tablet: input_dev_type_input1 = tablet + - with_viofs: + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + no Win2008 Win7 + virt_test_type = qemu + required_qemu = [4.2.0,) + filesystems = fs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir = virtio_fs_test/ + force_create_fs_source = yes + remove_fs_source = yes + fs_target = 'myfs' + fs_driver_props = {"queue-size": 1024} + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + mem-path_mem1 = /dev/shm + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + driver_name = viofs + device_name = "VirtIO FS Device" + device_hwid = '"PCI\VEN_1AF4&DEV_105A"' diff --git a/qemu/tests/cfg/slof_hugepage.cfg b/qemu/tests/cfg/slof_hugepage.cfg index 193f1a47c8ff2c5dc4458a6f42859c75ef1c09b2..f3dec2c29e41463bab97b913c6c2a6429ac4b228 100644 --- a/qemu/tests/cfg/slof_hugepage.cfg +++ b/qemu/tests/cfg/slof_hugepage.cfg @@ -12,9 +12,9 @@ pre_command_noncritical = yes free_mem_cmd = free -b | grep -E 'Mem' | awk '{print $2}' mem_devs = mem1 - backend_mem1 = memory-backend-file - policy_mem1 = default - mem-path_mem1 = /mnt/kvm_hugepage + backend_mem_mem1 = memory-backend-file + policy_mem_mem1 = default + mem-path_mem_mem1 = /mnt/kvm_hugepage plug_mem_name = plug policy_plug = bind mem-path_plug = /mnt/kvm_hugepage @@ -30,9 +30,9 @@ # The unit of total_hugepage_size is 'MB' total_hugepage_size = 4096 extra_params += " -mem-path /mnt/kvm_hugepage" - size_mem1 = 1G + size_mem_mem1 = 1G size_plug = 1G - without_backing_file_hotplug: total_hugepage_size = 8192 - size_mem1 = 2G + size_mem_mem1 = 2G size_plug = 2G diff --git a/qemu/tests/cfg/slof_user_interface.cfg b/qemu/tests/cfg/slof_user_interface.cfg index 
ce9ef292250ea796f956e143358af4f7a6cfa534..4445220485ac31bc5652f139befbc945ab8da82f 100644 --- a/qemu/tests/cfg/slof_user_interface.cfg +++ b/qemu/tests/cfg/slof_user_interface.cfg @@ -18,59 +18,87 @@ only ppc64le ppc64 only Linux # Select one drive format since just test slof user interface. - only virtio_scsi - no spapr_vscsi variants: - send_f12: - virtio_scsi_pci_addr_image1 = 0xe - bus_extra_params_image1 = 'addr=${virtio_scsi_pci_addr_image1}' - images += " stg0 stg1" - drive_pci_addr_stg0 = 8 - drive_format_stg0 = virtio - image_name_stg0 = images/stg0 - image_size_stg0 = 25G - remove_image_stg0 = yes - force_create_image_stg0 = yes - spapr_vscsi_reg = 0x71000001 - scsi_hba_stg1 = spapr-vscsi - bus_extra_params_stg1 = 'reg=${spapr_vscsi_reg}' - image_name_stg1 = images/stg1 - image_size_stg1 = 25G - remove_image_stg1 = yes - force_create_image_stg1 = yes + send_keys = 'f12' + start_f12 = 'Select boot device' + end_f12 = ' net : ' + start_select = 'Trying to load' + end_select = 'Successfully loaded' cdroms = 'cd1' cdrom_cd1 = /tmp/new.iso pre_command_noncritical = no pre_command = 'dd if=/dev/urandom of=/tmp/new bs=10M count=1 &&' pre_command += ' mkisofs -o /tmp/new.iso /tmp/new' post_command = "rm -rf /tmp/new.iso /tmp/new" - virtio_scsi_pci_addr_cd1 = 0xb - drive_bus_cd1 = 1 - bus_extra_params_cd1 = 'addr=${virtio_scsi_pci_addr_cd1}' - virtio_net: - nic_pci_addr_nic1 = 0xa - menu_option5 = "net pci ethernet ${nic_pci_addr_nic1}" - spapr-vlan: - spapr_vlan_reg = 0x71000003 - nic_extra_params_nic1 = 'reg=${spapr_vlan_reg}' - menu_option5 = "net vdevice l-lan ${spapr_vlan_reg}" - send_keys = 'f12' - start_f12 = 'Select boot device' - end_f12 = ' net : ' - start_select = 'Trying to load' - end_select = 'Successfully loaded' - boot_dev_num = 5 - menu_option1 = "cdrom pci scsi ${virtio_scsi_pci_addr_cd1}" - menu_option2 = "disk pci scsi ${drive_pci_addr_stg0}" - menu_option3 = "disk pci scsi ${virtio_scsi_pci_addr_image1}" - menu_option4 = "disk vdevice v-scsi ${spapr_vscsi_reg}" - bootable_index = 3 + nic_pci_addr_nic1 = 0xa + variants: + - @default: + only virtio_scsi + virtio_scsi_pci_addr_image1 = 0xe + bus_extra_params_image1 = 'addr=${virtio_scsi_pci_addr_image1}' + images += " stg0 stg1" + drive_pci_addr_stg0 = 8 + drive_format_stg0 = virtio + image_name_stg0 = images/stg0 + image_size_stg0 = 25G + remove_image_stg0 = yes + force_create_image_stg0 = yes + spapr_vscsi_reg = 0x71000001 + scsi_hba_stg1 = spapr-vscsi + bus_extra_params_stg1 = 'reg=${spapr_vscsi_reg}' + image_name_stg1 = images/stg1 + image_size_stg1 = 25G + remove_image_stg1 = yes + force_create_image_stg1 = yes + virtio_scsi_pci_addr_cd1 = 0xb + drive_bus_cd1 = 1 + bus_extra_params_cd1 = 'addr=${virtio_scsi_pci_addr_cd1}' + virtio_net: + menu_option5 = "net pci ethernet ${nic_pci_addr_nic1}" + spapr-vlan: + spapr_vlan_reg = 0x71000003 + nic_extra_params_nic1 = 'reg=${spapr_vlan_reg}' + menu_option5 = "net vdevice l-lan ${spapr_vlan_reg}" + boot_dev_num = 5 + menu_option1 = "cdrom pci scsi ${virtio_scsi_pci_addr_cd1}" + menu_option2 = "disk pci scsi ${drive_pci_addr_stg0}" + menu_option3 = "disk pci scsi ${virtio_scsi_pci_addr_image1}" + menu_option4 = "disk vdevice v-scsi ${spapr_vscsi_reg}" + bootable_index = 3 + - with_vscsi_cdrom: + only spapr_vscsi + only virtio_net + spapr_vscsi_reg = 0x71000001 + bus_extra_params_image1 = 'reg=${spapr_vscsi_reg}' + drive_bus_cd1 = 0 + drive_port_cd1 = 1 + boot_dev_num = 3 + menu_option1 = "cdrom vdevice v-scsi ${spapr_vscsi_reg}" + menu_option2 = "disk vdevice v-scsi 
${spapr_vscsi_reg}" + menu_option3 = "net pci ethernet ${nic_pci_addr_nic1}" + bootable_index = 2 + variants: + - with_scsi_id: + variants: + - with_id_0: + drive_unit_cd1 = 0 + - with_id_63: + drive_unit_cd1 = 63 + - with_channel_id: + variants: + - with_id_0: + blk_extra_params_cd1 = channel=0 + - with_id_7: + blk_extra_params_cd1 = channel=7 - send_boot: + only virtio_scsi send_keys = 'boot' bootindex_image1 = 0 start_boot = 'Trying to load' end_boot = 'Successfully loaded' - send_reset_all: + only virtio_scsi send_keys = 'reset-all' start_reset_all = '0 >' end_reset_all = 'Successfully loaded' diff --git a/qemu/tests/cfg/smbios_default_check.cfg b/qemu/tests/cfg/smbios_default_check.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c56b70afcbea18933b9a13125d2d93127d06f8b4 --- /dev/null +++ b/qemu/tests/cfg/smbios_default_check.cfg @@ -0,0 +1,25 @@ +- smbios_default_check: install setup image_copy unattended_install.cdrom + only q35 + no Host_RHEL.m6,Host_RHEL.m7,Host_RHEL.m8.u0,Host_RHEL.m8.u1 + # dmidecode not available (and no plans for it) on s390 + no s390x + type = smbios_default_check + kill_vm = yes + required_qemu= [4.2.0,) + # Expected output + Host_RHEL: + System_Manufacturer = "^Red Hat$" + System_SKU_Number = "^8.[23456789].0|9.\d.0$" + Baseboard_Manufacturer = "^Red Hat$" + Baseboard_Product_Name = "^RHEL-AV$" + # Command to get BIOS info + RHEL: + get_sys_manufacturer = dmidecode -s system-manufacturer + get_sys_SKUNumber = dmidecode -t 1 | grep SKU | awk '{print $3}' + get_baseboard_manufacturer = dmidecode -s baseboard-manufacturer + get_baseboard_product_name = dmidecode -s baseboard-product-name + Windows: + get_sys_manufacturer = wmic computersystem get Manufacturer + get_sys_SKUNumber = wmic computersystem get SystemSKUNumber + get_baseboard_manufacturer = wmic Baseboard get Manufacturer + get_baseboard_product_name = wmic Baseboard get Product \ No newline at end of file diff --git a/qemu/tests/cfg/smt_test.cfg b/qemu/tests/cfg/smt_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..269a4e007dd6119b1602c6411317534581bba346 --- /dev/null +++ b/qemu/tests/cfg/smt_test.cfg @@ -0,0 +1,44 @@ +- smt_test: + virt_test_type = qemu + type = smt_test + start_vm = no + kill_vm_on_error = yes + only x86_64, i386 + only HostCpuVendor.amd + cpu_model_flags += ",enforce" + auto_cpu_model = no + smp = 2 + vcpu_maxcpus = ${smp} + vcpu_threads = 2 + vcpu_cores = 1 + vcpu_sockets = 1 + test_negative_thread = yes + Windows: + get_cores_cmd = wmic cpu get NumberOfCores /value + get_sockets_cmd = wmic cpu get SocketDesignation /value + x86_64: + install_path = "C:\Program Files (x86)\JAM Software\HeavyLoad" + i386,i686: + install_path = "C:\Program Files\JAM Software\HeavyLoad" + install_cmd = "start /wait %s:\HeavyLoadSetup.exe /verysilent" + Linux: + get_threads_cmd = lscpu | grep "Thread(s) per core" + stress_args = --cpu 2 --vm 2 --io 4 --vm-bytes 1024 + get_cpu_usage_cmd = top -1 -n 1 | grep Cpu + variants: + - with_EPYC: + cpu_model = EPYC + variants: + - @default: + expected_threads = 2 + - no_topoext: + expected_threads = 1 + cpu_model_flags += ",-topoext" + - with_Opteron: + cpu_model = Opteron_G3 + variants: + - @default: + expected_threads = 1 + - with_topoext: + expected_threads = 2 + cpu_model_flags += ",+topoext,xlevel=0x8000001e" diff --git a/qemu/tests/cfg/spapr_vty_console_login.cfg b/qemu/tests/cfg/spapr_vty_console_login.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..54a7af5489e8ee036804c3947a226eaa63d4fc5e --- /dev/null +++ b/qemu/tests/cfg/spapr_vty_console_login.cfg @@ -0,0 +1,10 @@ +- spapr_vty_console_login: + only ppc64 ppc64le + type = chardev_serial_login + serial_type = spapr-vty + start_vm = no + variants: + - @default: + - with_vga_none: + inactivity_watcher = none + take_regular_screendumps = no diff --git a/qemu/tests/cfg/spapr_vty_multi_backends.cfg b/qemu/tests/cfg/spapr_vty_multi_backends.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b60f29e963712ae2d3441fb9ba9503d88cce38ac --- /dev/null +++ b/qemu/tests/cfg/spapr_vty_multi_backends.cfg @@ -0,0 +1,25 @@ +- spapr_vty_multi_backends: + only ppc64 ppc64le + type = spapr_vty_multi_backends + serial_type = spapr-vty + image_snapshot = yes + serials = "vs1" + create_delete_file = "touch file.txt && rm -f file.txt" + variants: + - all_current_backend: + serials += " vs2 vs3 vs4 vs5" + chardev_backend_vs1 = unix_socket + chardev_backend_vs2 = tcp_socket + chardev_backend_vs3 = pty + chardev_backend_vs4 = file + chardev_backend_vs5 = null + - max_backend: + serials += " vs2 vs3 vs4 vs5 vs6 vs7 vs8" + chardev_backend_vs1 = unix_socket + chardev_backend_vs2 = tcp_socket + chardev_backend_vs3 = pty + chardev_backend_vs4 = file + chardev_backend_vs5 = unix_socket + chardev_backend_vs6 = unix_socket + chardev_backend_vs7 = tcp_socket + chardev_backend_vs8 = pty diff --git a/qemu/tests/cfg/steal_time.cfg b/qemu/tests/cfg/steal_time.cfg new file mode 100644 index 0000000000000000000000000000000000000000..35a19b24736d7bbea7758a6836c6188579be3d99 --- /dev/null +++ b/qemu/tests/cfg/steal_time.cfg @@ -0,0 +1,16 @@ +- steal_time: + type = steal_time + virt_test_type = qemu + only Linux + only x86_64, i386 + kill_vm = yes + vms += " vm0" + image_snapshot = yes + qemu_command_prefix = "taskset -c 1 " + smp = 1 + vcpu_maxcpus = 1 + vcpu_sockets = 1 + vcpu_threads = 1 + stress_args = --cpu 1 --vm 2 --vm-bytes 1024 + top_cmd = top -n 1 | grep Cpu + stat_cmd = cat /proc/stat | head -1 diff --git a/qemu/tests/cfg/thin_provision_check_mode.cfg b/qemu/tests/cfg/thin_provision_check_mode.cfg new file mode 100644 index 0000000000000000000000000000000000000000..85b26cbcc8a1110f7c2aa274699ebb9a4952302a --- /dev/null +++ b/qemu/tests/cfg/thin_provision_check_mode.cfg @@ -0,0 +1,24 @@ +- thin_provision_check_mode: + only Linux + only virtio_scsi + type = thin_provision_check_mode + start_vm = no + kill_vm = yes + disk_size = 1024 + post_command = "modprobe -r scsi_debug" + target_mode = "unmap" + disk_serial = TARGET_DISK1 + data_tag = "stg1" + images += " ${data_tag}" + drv_extra_params_stg1 += "discard=unmap" + blk_extra_params_stg1 += "serial=${disk_serial}" + image_format_stg1 = raw + image_raw_device_stg1 = yes + force_create_image_stg1 = no + remove_image_stg1 = no + not_preprocess = yes + variants: + - unmap: + pre_command = "modprobe -r scsi_debug; modprobe scsi_debug dev_size_mb=${disk_size} lbpu=1 lbpws=1 lbprz=0" + - writesame: + pre_command = "modprobe -r scsi_debug; modprobe scsi_debug dev_size_mb=${disk_size} lbpu=0 lbpws=1 lbprz=0" diff --git a/qemu/tests/cfg/thin_provision_guest_fstrim.cfg b/qemu/tests/cfg/thin_provision_guest_fstrim.cfg new file mode 100644 index 0000000000000000000000000000000000000000..8dcb29b594f40c87667edb5763e57ceea50d4004 --- /dev/null +++ b/qemu/tests/cfg/thin_provision_guest_fstrim.cfg @@ -0,0 +1,41 @@ +- thin_provision_guest_fstrim: + only Linux + only virtio_scsi + type = thin_provision_guest_fstrim + start_vm 
= no + not_preprocess = yes + kill_vm = yes + data_tag = "stg1" + images += " ${data_tag}" + drv_extra_params_stg1 += "discard=unmap" + image_format_stg1 = raw + image_raw_device_stg1 = yes + force_create_image_stg1 = no + drive_format_stg1 = scsi-block + remove_image_stg1 = no + disk_size = 256 + pre_command = "modprobe -r scsi_debug; modprobe scsi_debug dev_size_mb=${disk_size} lbpu=1 lbpws=1 lbprz=0" + post_command = "modprobe -r scsi_debug" +# serial port setting + serials += " org.qemu.guest_agent.0" + serial_type_org.qemu.guest_agent.0 = virtserialport + guest_agent_name = "org.qemu.guest_agent.0" + guest_agent_serial_type = virtio + variants: + - ext4: + format = "ext4" + - xfs: + format = "xfs" + variants: + - scsi_block: + drive_format_stg1 = scsi-block + - scsi_hd: + drive_format_stg1 = scsi-hd + disk_serial = TARGET_DISK1 + blk_extra_params_stg1 += "serial=${disk_serial}" +# test command in guest + guest_mount_point = "/home/test" + guest_test_file = "${guest_mount_point}/test.img" + guest_format_command = "mkdir -p ${guest_mount_point};mkfs.${format} {0} && mount {0} ${guest_mount_point}" + guest_dd_command = "dd if=/dev/zero of=${guest_test_file}" + guest_rm_command = "rm -rf ${guest_test_file};sync" diff --git a/qemu/tests/cfg/throttle_parameter_test.cfg b/qemu/tests/cfg/throttle_parameter_test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..773cf742bfdbf608bf20a30dbf777ad21eb60d6c --- /dev/null +++ b/qemu/tests/cfg/throttle_parameter_test.cfg @@ -0,0 +1,119 @@ +- throttle_parameter_test: + type = throttle_parameter_test + qemu_force_use_drive_expression = no + images += " stg1 stg2 stg3 stg4" + remove_image = yes + force_create_image_image1 = no + force_create_image = yes + kill_vm = yes + + image_name_stg1 = images/stg1 + image_name_stg2 = images/stg2 + image_name_stg3 = images/stg3 + image_name_stg4 = images/stg4 + image_size_stg1 = 11G + image_size_stg2 = 12G + image_size_stg3 = 13G + image_size_stg4 = 14G + + serial_stg1 = "TARGET_DISK1" + serial_stg2 = "TARGET_DISK2" + serial_stg3 = "TARGET_DISK3" + serial_stg4 = "TARGET_DISK4" + blk_extra_params_stg4 = "serial=${serial_stg4}" + blk_extra_params_stg1 = "serial=${serial_stg1}" + blk_extra_params_stg2 = "serial=${serial_stg2}" + blk_extra_params_stg3 = "serial=${serial_stg3}" + + # throttle property + throttle_groups = "group1 group2" + image_throttle_group_stg1 = "group1" + image_throttle_group_stg2 = "group1" + image_throttle_group_stg3 = "group2" + image_throttle_group_stg4 = "group2" + + throttle_group_member_group1 = "stg1 stg2" + throttle_group_member_group2 = "stg3 stg4" + variants: + - normal_read: + variants: + - with_iops: + group1 = {"iops-read":40} + group2 = {"iops-read":50} + - with_bps: + group1 = {"bps-read":163840} + group2 = {"bps-read":204800} + - with_iops_bps: + group1 = {"bps-read":163840,"iops-read":40} + group2 = {"bps-read":204800,"iops-read":50} + - normal_write: + variants: + - with_iops: + group1 = {"iops-write":40} + group2 = {"iops-write":50} + - with_bps: + group1 = {"bps-write":163840} + group2 = {"bps-write":204800} + - with_iops_bps: + group1 = {"bps-write":163840,"iops-write":40} + group2 = {"bps-write":204800,"iops-write":50} + - normal_rw: + variants: + - with_iops: + group1 = {"iops-total":40} + group2 = {"iops-total":50} + - with_bps: + group1 = {"bps-total":163840} + group2 = {"bps-total":204800} + - with_iops_bps: + group1 = {"bps-total":163840,"iops-total":40} + group2 = {"bps-total":204800,"iops-total":50} + - with_iops_bps_rw: + group1 = 
{"bps-read":163840,"iops-read":20,"bps-write":163840,"iops-write":20} + group2 = {"bps-read":204800,"iops-read":25,"bps-write":204800,"iops-write":25} + - burst_read: + variants: + - with_iops: + group1 = {"iops-read":40,"iops-read-max":50,"iops-read-max-length":10} + group2 = {"iops-read":50,"iops-read-max":60,"iops-read-max-length":10} + - with_bps: + group1 = {"bps-read":163840,"bps-read-max":204800,"bps-read-max-length":10} + group2 = {"bps-read":204800,"bps-read-max":245760,"bps-read-max-length":10} + - with_iops_bps: + group1 = {"bps-read":163840,"iops-read":40,"iops-read-max":50,"iops-read-max-length":10,"bps-read-max":204800,"bps-read-max-length":10} + group2 = {"bps-read":204800,"iops-read":50,"iops-read-max":60,"iops-read-max-length":10,"bps-read-max":245760,"bps-read-max-length":10} + - burst_write: + variants: + - with_iops: + group1 = {"iops-write":40,"iops-write-max":50,"iops-write-max-length":10} + group2 = {"iops-write":50,"iops-write-max":60,"iops-write-max-length":10} + - with_bps: + group1 = {"bps-write":163840,"bps-write-max":204800,"bps-write-max-length":10} + group2 = {"bps-write":204800,"bps-write-max":245760,"bps-write-max-length":10} + - with_iops_bps: + group1 = {"bps-write":163840,"iops-write":40,"iops-write-max":50,"iops-write-max-length":10,"bps-write-max":204800,"bps-write-max-length":10} + group2 = {"bps-write":204800,"iops-write":50,"iops-write-max":60,"iops-write-max-length":10,"bps-write-max":245760,"bps-write-max-length":10} + - burst_rw: + variants: + - with_iops: + group1 = {"iops-total":40,"iops-total-max":50,"iops-total-max-length":10} + group2 = {"iops-total":50,"iops-total-max":60,"iops-total-max-length":10} + - with_bps: + group1 = {"bps-total":163840,"bps-total-max":204800,"bps-total-max-length":10} + group2 = {"bps-total":204800,"bps-total-max":245760,"bps-total-max-length":10} + - with_iops_bps: + group1 = {"bps-total":163840,"iops-total":40,"iops-total-max":50,"iops-total-max-length":10,"bps-total-max":204800,"bps-total-max-length":10} + group2 = {"bps-total":204800,"iops-total":50,"iops-total-max":60,"iops-total-max-length":10,"bps-total-max":245760,"bps-total-max-length":10} + - mix: + throttle_groups = "group1" + images = "image1 stg1 stg2" + variants: + - with_iops_size: + group1 = {"iops-total":60,"iops-size":8192} + - with_iops_size_burst: + group1 = {"iops-total":40,"iops-total-max":50,"iops-total-max-length":10,"iops-size":8192} + - with_iops_bps_all: + group1 = {"iops-read":20,"iops-read-max":30,"iops-read-max-length":20,"iops-write":20,"iops-write-max":30,"iops-write-max-length":20,"bps-read":163840,"bps-read-max":204800,"bps-read-max-length":20,"bps-write":163840,"bps-write-max":204800,"bps-write-max-length":20} + + throttle_group_parameters_group1 = ${group1} + throttle_group_parameters_group2 = ${group2} diff --git a/qemu/tests/cfg/timedrift_adjust_time.cfg b/qemu/tests/cfg/timedrift_adjust_time.cfg index 51a02090dbfc4af57bb922bc0243fbff1bbf7fd1..9dc9494ec18e8fbad6f8f289043b78a6e50da007 100644 --- a/qemu/tests/cfg/timedrift_adjust_time.cfg +++ b/qemu/tests/cfg/timedrift_adjust_time.cfg @@ -14,7 +14,14 @@ host_epoch_time_cmd = 'epoch=$(date +%s); datetime=$(date);' host_epoch_time_cmd += 'echo "datetime: $datetime epoch: $epoch"' time_difference = 0 + hwclock_time_command = "hwclock -u" + hwclock_time_filter_re = "(\d+-\d+-\d+ \d+:\d+:\d+).*" + hwclock_time_format = "%Y-%m-%d %H:%M:%S" + RHEL.6, RHEL.7: + hwclock_time_filter_re = "(\S+ \S+\s+\d+ \d+:\d+:\d+ \d+).*" + hwclock_time_format = "%a %b %d %H:%M:%S %Y" variants: + - 
no_adjust_clock: - adjust_host_clock: seconds_to_forward = 1800 set_host_time_cmd = 'date -s "${seconds_to_forward} seconds"; hwclock -w' @@ -41,13 +48,16 @@ rtc_base = localtime variants: - guest_pause_resume: - only adjust_host_clock + only no_adjust_clock adjust_host_clock vm_action = pause_resume sleep_seconds = 1800 - time_difference = 3600 + time_difference = 1800 + time_difference_hwclock = 0 - guest_reboot: + no no_adjust_clock vm_action = reboot time_difference = 0 + time_difference_hwclock = 0 - guest_s3: only adjust_host_clock vm_action = suspend_resume @@ -61,10 +71,17 @@ read_clock_source_cmd += "/clocksource0/current_clocksource" timedrift_adjust_time.guest_reboot.clock_vm.adjust_host_clock: time_difference = 1800 + time_difference_hwclock = 1800 timedrift_adjust_time.guest_s3.clock_vm.adjust_host_clock: time_difference = 1800 timedrift_adjust_time.guest_s3.clock_host.adjust_host_clock: time_difference = 1800 + timedrift_adjust_time.guest_pause_resume.clock_vm.no_adjust_clock: + time_difference = 1800 + time_difference_hwclock = 1800 + timedrift_adjust_time.guest_pause_resume.clock_vm.adjust_host_clock: + time_difference = 1800 + time_difference_hwclock = 3600 RHEL.6: timedrift_adjust_time.guest_reboot..adjust_guest_clock: time_difference = 1800 @@ -75,7 +92,7 @@ guest_epoch_time_cmd = 'powershell -command "& {$datetime=get-date -uformat "%c";' guest_epoch_time_cmd += '$epoch=[int](get-date (get-date).touniversaltime() -uformat "%s");' guest_epoch_time_cmd += 'write-host "datetime: ${datetime} epoch: ${epoch}"}"' - timedrift_adjust_time.guest_pause_resume.clock_host.adjust_host_clock: + timedrift_adjust_time.guest_pause_resume..adjust_host_clock: time_difference = 3600 timedrift_adjust_time.guest_reboot.clock_host.adjust_guest_clock: time_difference = 1800 @@ -85,6 +102,8 @@ time_difference = 1800 timedrift_adjust_time.guest_s3.clock_vm.adjust_host_clock: time_difference = 1800 + timedrift_adjust_time.guest_pause_resume..no_adjust_clock: + time_difference = 1800 Win7, Win2008: timedrift_adjust_time.guest_pause_resume.clock_host.adjust_host_clock: time_difference = 0 diff --git a/qemu/tests/cfg/timedrift_check_clock_offset.cfg b/qemu/tests/cfg/timedrift_check_clock_offset.cfg index e0b59f6ebe73ea2f6a7daf77fcc73143065d6a63..5f036c0a82cc1b727e86e7c17ea615ad6ce35d3d 100644 --- a/qemu/tests/cfg/timedrift_check_clock_offset.cfg +++ b/qemu/tests/cfg/timedrift_check_clock_offset.cfg @@ -76,12 +76,18 @@ clocksource = "kvm-clock" ppc64le,ppc64: clocksource = "timebase" + #only for rhel8,virt module stream is rhel or 8.1,8.2,8.3 and so on + virt_module_stream = rhel + # Please update your file share web server url before test + download_root_url = gagent_name = "org.qemu.guest_agent.0" gagent_install_cmd = "yum install -y qemu-guest-agent" + gagent_restart_cmd = "systemctl restart qemu-guest-agent.service" gagent_start_cmd = "systemctl start qemu-guest-agent" gagent_stop_cmd = "systemctl stop qemu-guest-agent" gagent_status_cmd = "systemctl status qemu-guest-agent" RHEL.6: + gagent_restart_cmd = "service qemu-ga restart" gagent_start_cmd = "service qemu-ga start" gagent_stop_cmd = "service qemu-ga stop" gagent_status_cmd = "service qemu-ga status" diff --git a/qemu/tests/cfg/timerdevice.cfg b/qemu/tests/cfg/timerdevice.cfg index f9a6053130a2ba4cd0d53a61c9b014abf44092a7..b0a4d439b78755c30f1d3820b4a946048dacf6f9 100644 --- a/qemu/tests/cfg/timerdevice.cfg +++ b/qemu/tests/cfg/timerdevice.cfg @@ -50,6 +50,7 @@ - boot_test: no RHEL.3 RHEL.4 RHEL.5 RHEL.6 type = timerdevice_boot + start_vm = 
no i386, x86_64: rtc_drift = slew timerdevice_drift_threshold = 3 @@ -86,22 +87,35 @@ only RHEL only ppc64 ppc64le timerdevice_clksource = "timebase" + variants: + - without_host_load: + - with_host_load: + login_timeout = 600 + only clock_host + no clksource_tsc + reboot_immediately: + only Linux + with_boot: + repeat_nums = 6 + sleep_time = 600 + Linux: + timerdevice_host_load_cmd = "for (( I=0; I<%s; I++ ));" + timerdevice_host_load_cmd += " do taskset -c $I /bin/bash -c" + timerdevice_host_load_cmd += " 'for ((;;)); do X=1; done &'; done" + timerdevice_host_load_stop_cmd = "pkill -f 'do X=1'" + Windows: + stress_install_from_repo = "no" + download_url_stress = 'stress/stress-1.0.4.tar.gz' + timerdevice_host_load_cmd = "--cpu %s --io 4 --vm 2 --vm-bytes 256M" + timerdevice_host_load_stop_cmd = "pkill -9 stress" variants: - with_boot: - with_reboot: timerdevice_reboot_test = yes - variants: - - without_host_load: - - with_host_load: - only Linux - login_timeout = 600 - timerdevice_host_load_cmd = "for (( I=0; I<`grep processor /proc/cpuinfo" - timerdevice_host_load_cmd += " | wc -l`; I++ )); do taskset -c $I /bin/bash -c" - timerdevice_host_load_cmd += " 'for ((;;)); do X=1; done &'; done" - timerdevice_host_load_stop_cmd = "pkill -f 'do X=1'" variants: - reboot_immediately: - reboot_after_sleep: + only with_host_load timerdevice_sleep_time = 3600 - tscsync: only Linux diff --git a/qemu/tests/cfg/timerdevice_check_ntp_offset.cfg b/qemu/tests/cfg/timerdevice_check_ntp_offset.cfg new file mode 100644 index 0000000000000000000000000000000000000000..486c60bedfb0daf03180bb6f338eafce6195641b --- /dev/null +++ b/qemu/tests/cfg/timerdevice_check_ntp_offset.cfg @@ -0,0 +1,30 @@ +- timerdevice_check_ntp_offset: + only i386, x86_64 + only Windows + type = timerdevice_check_ntp_offset + start_vm = no + qemu_stop = off + ntp_cmd = "(systemctl stop chronyd || service ntpdate stop)" + ntp_cmd += " && (chronyd -q 'server clock.redhat.com iburst'" + ntp_cmd += " || ntpdate clock.redhat.com)" + diskspd_dir = "diskspd" + diskspd_name = "diskspd.exe" + dst_path = "C:\" + ntp_dir = "Meinberg_NTP" + ntp_name = "ntp-4.2.8p14-win32-setup.exe" + ntp_dst_path = "C:\Meinberg_NTP" + ntp_unattend_file = "install_ntp.ini" + install_ntp_cmd = "%s /USE_FILE=${ntp_dst_path}\%s" + ntp_install_path = "C:\NTP" + ntp_uninstall_cmd = "taskkill /f /t /im ntpd.exe && rd /s /q C:\NTP" + diskspd_run_cmd = "start /b diskspd -c1G -b4K -t2 -d60000 testfile.dat" + diskspd_check_cmd = "tasklist | findstr /I diskspd" + diskspd_end_cmd = "taskkill /f /t /im diskspd.exe" + sub_test = win_video_play + mplayer_path = "WIN_UTILS:\mplayer.exe" + play_video_cmd = "start /MIN %s %s -loop 0 -fs" + video_url = http://FILESHARE.COM/pub/section2/kvmauto/video/big_buck_bunny_480p_stereo.avi + check_offset_cmd = "C:\NTP\bin\ntpq -p" + sleep_time = 360 + nums = 10 + tolerance = 100
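timerdevice_check_ntp_offset drives `ntpq -p` (check_offset_cmd) and compares peer offsets, in milliseconds, against `tolerance`, repeating `nums` times with `sleep_time` pauses. A sketch of how the peer table might be evaluated (an illustration of the cfg contract, not the test's actual helper):

    def offsets_within_tolerance(ntpq_output, tolerance=100.0):
        # `ntpq -p` prints two header lines, then one line per peer;
        # the offset column (in milliseconds) is the ninth field.
        for line in ntpq_output.splitlines()[2:]:
            fields = line.split()
            if len(fields) >= 10 and abs(float(fields[8])) > tolerance:
                return False
        return True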
kvm-clock acpi_pm" + expect_tsc_flag = "nonstop_tsc" + check_tsc_flag_cmd = "lscpu | grep -o ${expect_tsc_flag}" diff --git a/qemu/tests/cfg/tpm_unattended_install.cfg b/qemu/tests/cfg/tpm_unattended_install.cfg new file mode 100644 index 0000000000000000000000000000000000000000..32ccf1d3098f89ef363ea06730bdcdf9866a1bc4 --- /dev/null +++ b/qemu/tests/cfg/tpm_unattended_install.cfg @@ -0,0 +1,82 @@ +- tpm_unattended_install: + virt_test_type = qemu + type = tpm_unattended_install + start_vm = no + kill_vm = yes + kill_vm_gracefully = yes + shutdown_cleanly = yes + shutdown_cleanly_timeout = 120 + guest_port_unattended_install = 12323 + kernel = vmlinuz + initrd = initrd.img + inactivity_watcher = error + inactivity_treshold = 1800 + image_verify_bootable = no + image_copy_on_error = no + lowest_mem = 512 + install_trigger_time = 0 + install_timeout = 7200 + random_trigger = "no" + unattended_delivery_method = cdrom + cdroms += " unattended" + drive_index_unattended = 1 + drive_index_cd1 = 2 + boot_once = d + medium = cdrom + virtio_scsi: + iothread_scheme ?= + iothreads ?= + image_iothread ?= + images = 'sys' + image_backend_sys = filesystem + image_name_sys = images/sys + image_format_sys = qcow2 + image_size_sys = 30G + force_create_image_sys = yes + remove_image_sys = yes + tpms = tpm0 + tpm_model_tpm0 = tpm-crb + x86_64: + only q35 + only ovmf + required_qemu= [4.2.0,) + restore_ovmf_vars = yes + ovmf_vars_secboot_fd = 'OVMF_VARS.secboot.fd' + ppc64le, ppc64: + required_qemu= [5.0.0,) + tpm_model_tpm0 = tpm-spapr + aarch64: + required_qemu= [5.1.0,) + tpm_model_tpm0 = tpm-tis-device + restore_aavmf_vars = yes + tpm_type_tpm0 = emulator + tpm_version_tpm0 = 2.0 + Windows: + blk_extra_params_sys = "serial=SYSTEM_DISK0" + cd_format_unattended = ahci + cd_format_cd1 = ahci + cd_format_winutils = ahci + send_key_at_install = ret + default_cdrom = winutils + Linux: + check_cmd_names = dmesg ls + cmd_dmesg = dmesg | grep -i tpm2 + pattern_output_dmesg = ACPI:\s+TPM2 + cmd_ls = ls -l /dev/tpm* + pattern_output_ls = /dev/tpm0; /dev/tpmrm0 + ppc64le, ppc64: + cmd_dmesg = dmesg | grep -i tpm_ibmvtpm + pattern_output_dmesg = tpm_ibmvtpm\s+\d+\:\s+CRQ\s+initialized; + pattern_output_dmesg += tpm_ibmvtpm\s+\d+\:\s+CRQ\s+initialization\s+completed + Windows: + check_cmd_names = wmic get_tpm + cmd_wmic = wmic /namespace:\\root\cimv2\security\microsofttpm path win32_tpm get * /format:textvaluelist.xsl + pattern_output_wmic = IsActivated_InitialValue=TRUE;IsEnabled_InitialValue=TRUE + cmd_get_tpm = powershell -command "get-tpm" + pattern_output_get_tpm = TpmPresent\s+:\s+True;TpmReady\s+:\s+True + cmd_check_secure_boot_enabled = 'powershell -command "Confirm-SecureBootUEFI"' + variants: + # Below variants is to share configurations related to installation defined. 
+ - @with_installation: + variants: + - @extra_cdrom_ks: diff --git a/qemu/tests/cfg/tpm_verify_device.cfg b/qemu/tests/cfg/tpm_verify_device.cfg new file mode 100644 index 0000000000000000000000000000000000000000..35cddecbaf89c41259931fe26b6f9efdf0927d37 --- /dev/null +++ b/qemu/tests/cfg/tpm_verify_device.cfg @@ -0,0 +1,79 @@ +- tpm_verify_device: + virt_test_type = qemu + type = tpm_verify_device + start_vm = yes + kill_vm = yes + tpms = tpm0 + tpm_version_tpm0 = 2.0 + x86_64: + only q35 + only ovmf + required_qemu= [4.2.0,) + ppc64le, ppc64: + required_qemu= [5.0.0,) + aarch64: + required_qemu = [5.1.0,) + Linux: + check_cmd_names = dmesg ls + cmd_dmesg = dmesg | grep -i tpm + pattern_output_dmesg = ACPI:\s+TPM2 + cmd_ls = ls -l /dev/tpm* + pattern_output_ls = /dev/tpm0;/dev/tpmrm0 + ppc64le, ppc64: + cmd_dmesg = dmesg | grep -i tpm_ibmvtpm + pattern_output_dmesg = tpm_ibmvtpm\s+\d+\:\s+CRQ\s+initialized; + pattern_output_dmesg += tpm_ibmvtpm\s+\d+\:\s+CRQ\s+initialization\s+completed + Windows: + check_cmd_names = wmic get_tpm + cmd_wmic = wmic /namespace:\\root\cimv2\security\microsofttpm path win32_tpm get * /format:textvaluelist.xsl + pattern_output_wmic = IsActivated_InitialValue=TRUE;IsEnabled_InitialValue=TRUE + cmd_get_tpm = 'powershell -command "get-tpm"' + pattern_output_get_tpm = TpmPresent\s+:\s+True;TpmReady\s+:\s+True + variants: + - with_emulator: + tpm_type_tpm0 = emulator + tpm_model_tpm0 = tpm-crb + ppc64le, ppc64: + tpm_model_tpm0 = tpm-spapr + aarch64: + tpm_model_tpm0 = tpm-tis-device + tpm_version_tpm0 = 2.0 + variants: + - @default: + variants: + - @default: + - with_check_ovmf: + only x86_64 + ovmf_log = "/tmp/ovmf.log" + extra_params += "-global isa-debugcon.iobase=0x402 -debugcon file:${ovmf_log}" + cmd_check_log = grep TPM2 ${ovmf_log} + pattern_check_log = Tcg2ConfigPeimEntryPoint: TPM2 detected;TPM2Startup: TPM_RC_SUCCESS; + pattern_check_log += WARNING: TPM2 Event log has HashAlg unsupported by PCR bank; + pattern_check_log += Process2ndPassCmdAddPointer: checking for ACPI header in "etc/acpi/tables"; + pattern_check_log += \[TPM2PP\] mPpi=\w+ version=2; + pattern_check_log += \[TPM2PP\] PPRequest=\w+ \(PPRequestParameter=\w+\); + Windows: + pattern_check_log += TPM2 Tcg2Dxe Measure Data when ReadyToBoot; + - with_reboot: + reboot_method = system_reset + repeat_times = 20 + - with_multi_vms: + vms = vm1 vm2 vm3 + mem = 4096 + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + - with_passthrough: + no aarch64 + start_vm = no + not_preprocess = yes + cmd_check_tpm_device = ls /dev/tpm0 + cmd_get_tpm_version = "dmesg | grep ' TPM ' | awk '{print $5}'" + required_tmp_version = 1.2 + tpm_type_tpm0 = passthrough + tpm_path_tpm0 = /dev/tpm0 + Linux: + pattern_output_dmesg = tpm_tis\s+.*\s+${required_tmp_version}\s+TPM + pattern_output_ls = /dev/tpm0 + x86_64: + tpm_model_tpm0 = tpm-tis diff --git a/qemu/tests/cfg/tpm_with_bitlocker.cfg b/qemu/tests/cfg/tpm_with_bitlocker.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c4e02c5ff8d5586834746cb15da55abb3236349a --- /dev/null +++ b/qemu/tests/cfg/tpm_with_bitlocker.cfg @@ -0,0 +1,22 @@ +- tpm_with_bitlocker: + only Windows + only q35 + only ovmf + virt_test_type = qemu + type = tpm_with_bitlocker + required_qemu= [4.2.0,) + start_vm = yes + kill_vm = yes + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + tpms = tpm0 + tpm_type_tpm0 = emulator + tpm_model_tpm0 = tpm-crb + tpm_version_tpm0 = 2.0 + Win2019, Win2008, Win2016, Win2012: + 
cmd_install_bitlocker = 'powershell -command "Install-WindowsFeature -Name BitLocker"' + cmd_bdehdcfg = 'bdehdcfg -target c: shrink -newdriveletter s: -size 606 -quiet' + cmd_manage_bde_on = 'manage-bde -on c:' + cmd_manage_bde_status = 'manage-bde -status' + finished_keywords = 'Percentage Encrypted: 100.0%' diff --git a/qemu/tests/cfg/uefi_check_debugcon.cfg b/qemu/tests/cfg/uefi_check_debugcon.cfg index 540623543d5e9ed5b2f5257368daa8bb75ca1d1c..7a455b6c790ddc8e5dbbc30a0475cc1d30d31b62 100644 --- a/qemu/tests/cfg/uefi_check_debugcon.cfg +++ b/qemu/tests/cfg/uefi_check_debugcon.cfg @@ -1,6 +1,7 @@ - uefi_check_debugcon: only q35 only ovmf + only Linux type = uefi_check_debugcon timeout = 120 trace_output = "trace.dat" diff --git a/qemu/tests/cfg/uefi_check_log_info.cfg b/qemu/tests/cfg/uefi_check_log_info.cfg new file mode 100644 index 0000000000000000000000000000000000000000..118c550ecc8550a4fcb5d680e28480063df06311 --- /dev/null +++ b/qemu/tests/cfg/uefi_check_log_info.cfg @@ -0,0 +1,43 @@ +- uefi_check_log_info: + only q35 + only ovmf + type = uefi_check_log_info + start_vm = no + variants: + - boot_option: + boot_menu = on + del boot_once + del boot_order + del boot_strict + splash_time_pattern = "SetVariable\(Timeout,\s%d\)" + variants: + - splash_time_10: + boot_splash_time = 10000 + - splash_time_12: + boot_splash_time = 12000 + - bootindex: + cdroms = "test" + cdrom_test = "/tmp/test.iso" + cd_format_test = ahci + images = "stg" + image_name_stg = "images/stg" + image_size_stg = 100M + force_create_image_stg = yes + remove_image_stg = yes + drive_format_stg = scsi-hd + image_boot = no + variants: + - hard_disk: + bootindex_stg = 0 + bootindex_test = 1 + check_info_pattern = "Booting UEFI QEMU QEMU HARDDISK" + - cd_rom: + bootindex_stg = 1 + bootindex_test = 0 + check_info_pattern = "Booting UEFI QEMU DVD-ROM" + - rng_device: + virtio_rngs += " rng0" + backend_rng0 = rng-random + backend_type = passthrough + filename_passthrough = /dev/urandom + check_info_pattern = "InstallProtocolInterface\:\s3152BCA5-EADE-433D-862E-C01CDC291F44" diff --git a/qemu/tests/cfg/uefi_check_resolution.cfg b/qemu/tests/cfg/uefi_check_resolution.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b10dff581d39ac28e785fe596a2cec949ea0850d --- /dev/null +++ b/qemu/tests/cfg/uefi_check_resolution.cfg @@ -0,0 +1,17 @@ +- uefi_check_resolution: + only q35 + only ovmf + type = uefi_check_resolution + timeout = 360 + boot_menu = on + enable_sga = yes + boot_menu_hint = "Boot Options" + enter_change_preferred = "esc;down;kp_enter;down;down;down;down;kp_enter" + default_resolution_key = "f9;y" + variants: + - save_with_f10: + esc_boot_menu_key = "esc;esc;down;down;down;kp_enter" + save_change = "f10;y" + - save_with_commit: + esc_boot_menu_key = "esc;down;down;down;kp_enter" + save_change = "down;kp_enter" diff --git a/qemu/tests/cfg/uefi_check_secure_mor.cfg b/qemu/tests/cfg/uefi_check_secure_mor.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a6e616e2a32c3c06918f0a93d49f5c57d9d0c779 --- /dev/null +++ b/qemu/tests/cfg/uefi_check_secure_mor.cfg @@ -0,0 +1,20 @@ +- uefi_check_secure_mor: + only q35 + only ovmf + only x86_64 + only Win2016, Win10 + type = uefi_check_secure_mor + timeout = 360 + restore_ovmf_vars = yes + auto_cpu_model = no + cpu_model = host + login_timeout = 360 + start_vm = no + check_secure_boot_enabled_cmd = 'powershell -command "Confirm-SecureBootUEFI"' + dst_path = "C:\dgreadiness" + dgreadiness_path_command = "cd ${dst_path}\dgreadiness" + 
executionPolicy_command = 'powershell -command "Set-ExecutionPolicy -ExecutionPolicy Unrestricted"' + enable_command = 'powershell -command ".\DG_Readiness_Tool_v3.6.ps1 -Enable"' + ready_command = 'powershell -command ".\DG_Readiness_Tool_v3.6.ps1 -Ready"' + check_enable_info = 'Enabling Hyper-V and IOMMU successful' + check_ready_info = 'HVCI, Credential-Guard, and Config-CI are enabled and running' diff --git a/qemu/tests/cfg/unattended_install_reboot_driftfix.cfg b/qemu/tests/cfg/unattended_install_reboot_driftfix.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4e37f2c0ffe2491e7b07e18799591d73cc922307 --- /dev/null +++ b/qemu/tests/cfg/unattended_install_reboot_driftfix.cfg @@ -0,0 +1,55 @@ +- unattended_install_reboot_driftfix: + virt_test_type = qemu libvirt + type = unattended_install_reboot_driftfix + no WinXP Win2000 Win2003 WinVista + only Windows + extra_params += "-global kvm-pit.lost_tick_policy=delay" + start_vm = no + kill_vm = yes + kill_vm_gracefully = yes + kill_vm_on_error = yes + shutdown_cleanly = no + reboot_method = shell + force_create_image = yes + images = "timer" + image_name_timer = "images/driftfix" + guest_port_unattended_install = 12323 + kernel = vmlinuz + initrd = initrd.img + inactivity_watcher = error + inactivity_treshold = 1800 + image_verify_bootable = no + image_copy_on_error = no + image_aio = native + unattended_delivery_method = cdrom + cdroms += " unattended" + serial_name = SYSTEM_DISK0 + blk_extra_params_image1 = "serial=${serial_name}" + i440fx: + cd_format_unattended = ide + cd_format_cd1 = ide + cd_format_winutils = ide + q35: + cd_format_unattended = ahci + cd_format_cd1 = ahci + cd_format_winutils = ahci + drive_index_unattended = 1 + drive_index_cd1 = 2 + boot_once = d + medium = cdrom + redirs += " unattended_install" + virtio_scsi: + # disable iothread + iothread_scheme ?= + iothreads ?= + image_iothread ?= + variants: + - default: + rtc_drift = slew + - no_driftfix: + rtc_drift = none + variants: + # The variants below share installation-related configurations.
+ - @with_installation: + variants: + - @extra_cdrom_ks: diff --git a/qemu/tests/cfg/usb.cfg b/qemu/tests/cfg/usb.cfg index ca4ea7d12e35290af60396fe1904444e842aeaf4..a109620b3620ba528a240c63d8cbaa60a4398794 100644 --- a/qemu/tests/cfg/usb.cfg +++ b/qemu/tests/cfg/usb.cfg @@ -6,16 +6,22 @@ kill_vm_on_error = yes usbs = " usbtest" usb_bus = "usbtest.0" + aarch64: + Host_RHEL: + only qemu-xhci + no usb_storage # usb controllers variants: - piix3-uhci: + only usb_boot, usb_reboot usb_type_usbtest = piix3-usb-uhci usb_controller = uhci max_ports_usbtest = 2 drive_format_stg = "usb1" no ppc64 ppc64le - piix4-uhci: + only usb_boot, usb_reboot usb_type_usbtest = piix4-usb-uhci usb_controller = uhci max_ports_usbtest = 2 @@ -23,13 +29,22 @@ no ppc64 ppc64le no Windows - ich9-uhci: + only usb_boot, usb_reboot usb_type_usbtest = ich9-usb-uhci6 usb_controller = uhci max_ports_usbtest = 2 drive_format_stg = "usb1" no ppc64 ppc64le no Host_RHEL.m6 + - pci-ohci: + only usb_boot, usb_reboot + usb_type_usbtest = pci-ohci + usb_controller = ohci + max_ports_usbtest = 2 + drive_format_stg = "usb1" + no x86_64 - ich9-ehci: + only usb_boot, usb_reboot usb_controller = ehci max_ports = 6 drive_format_stg = "usb2" @@ -45,6 +60,7 @@ only usb_hotplug..usb_negative_test, usb_multi_disk usb_type = ich9-usb-ehci2 - usb-ehci: + only usb_boot, usb_reboot usb_type_usbtest = usb-ehci usb_type = usb-ehci usb_controller = ehci @@ -64,7 +80,7 @@ no RHEL.5 no Host_RHEL.m6, Host_RHEL.m7.u0, Host_RHEL.m7.u1 no Host_RHEL.m7.u2, Host_RHEL.m7.u3 - usb_type_usbtest = nec-usb-xhci + usb_type_usbtest = qemu-xhci usb_type = qemu-xhci usb_controller = xhci max_ports = 4 @@ -76,7 +92,7 @@ variants: - without_usb_hub: - with_usb_hub: - no usb_host, usb_multi_disk + no usb_host, usb_redir, usb_multi_disk no usb-ehci usb_devices += " hub1" usb_type_hub1 = usb-hub @@ -106,7 +122,7 @@ # usb devices variants: - @usb_nodev: - only usb_storage, usb_host, usb_multi_disk + only usb_storage, usb_host, usb_redir, usb_multi_disk - usb_kbd: Host_RHEL.m6: no usb-ehci @@ -232,6 +248,8 @@ create_image_stg = yes remove_image_stg = yes image_size_stg = 16G + drive_rerror_stg = stop + drive_werror_stg = stop check_serial_option = yes check_removable_option = yes check_io_size_option = yes @@ -266,15 +284,46 @@ check_serial_option = no check_removable_option = no check_io_size_option = no + clean_partition_cmd = "echo select disk 1 > cmd &&" + clean_partition_cmd += " echo clean >> cmd &&" + clean_partition_cmd += " echo exit >> cmd &&" + clean_partition_cmd += " diskpart /s cmd" + variants: + - @default: + with_hotplug_unplug = no + - hotplug_unplug_one_time: + usb_repeat_times = 1 + with_hotplug_unplug = yes + boot_drive_stg = no + check_serial_option = no + check_removable_option = no + check_io_size_option = no + - hotplug_unplug_multi_times: + no piix3-uhci, piix4-uhci, ich9-uhci + usb_repeat_times = 300 + with_hotplug_unplug = yes + boot_drive_stg = no + check_serial_option = no + check_removable_option = no + check_io_size_option = no - usb_host: - # must configure which device should be used - #usb_host_device = ":" type = usb_host - #XXX: Now usb_host doesn't support QMP monitor. - # Limit it to human monitor only. 
- monitor_type = human - monitors = hmp1 - main_monitor = hmp1 + only Linux + usb_devices += " usbhostdev" + usb_type_usbhostdev = usb-host + variants options: + # must configure which device should be used + - @with_no_hostdev: + usb_devices = "" + only usb_negative_test + - with_vendorid_productid: + no usb_negative_test + #usbdev_option_vendorid_usbhostdev = xxxx + #usbdev_option_productid_usbhostdev = xxxx + - with_hostbus_hostaddr: + no usb_negative_test + #usbdev_option_hostbus_usbhostdev = xxxx + #usbdev_option_hostaddr_usbhostdev = xxxx variants: - usb_normal_test: - usb_negative_test: @@ -282,13 +331,15 @@ Host_RHEL.m6: # Now no output from monitor for negative test on Host_RHEL.m6 usb_reply_msg = "" - usb_reply_msg = "Property 'usb-host.productid' doesn't take value" + usb_reply_msg = "Property 'usb-host.productid' doesn't take value;" usb_reply_msg += "Parameter 'productid' expects an int64 value or range;" usb_reply_msg += "Parameter 'vendorid' expects an int64 value or range;" usb_reply_msg += "productid out of range;" usb_reply_msg += "vendorid out of range;" usb_reply_msg += "Parameter 'productid' expects uint32_t;" usb_reply_msg += "Parameter 'vendorid' expects uint32_t;" + usb_reply_msg += "Parameter 'productid' expects uint64;" + usb_reply_msg += "Parameter 'vendorid' expects uint64" usb_host_device_list = "aaa:aaa,aaa:111,21231:11231333,11231333:21231,21231:1123132233,2123133:1123132233111,1123132233111:111" - usb_check_isobufs: usb_check_isobufs = "yes" @@ -298,12 +349,47 @@ - usb_multi_times: no usb_negative_test usb_repeat_times = 20 + - usb_redir: + type = usb_redir + only Linux + usbredirdev_name = usbredir1 + chardev_backend_usbredir1 = spicevmc + chardev_name_usbredir1 = usbredir + start_vm = no + # must configure which device should be used + #usbredir_vendorid = xxxx + #usbredir_productid = xxxx + variants option: + - @basic: + display = spice + - with_bootindex: + display = spice + boot_menu = on + enable_sga = yes + boot_menu_key = "esc" + Host_RHEL.m6: + boot_menu_key = "f12" + boot_menu_hint = "Press .*(F12|ESC) for boot menu" + boot_entry_info = "Booting from Hard Disk..." 
+ usbdev_option_bootindex_usbredir1 = 0 + - with_filter: + display = spice + variants policy: + - allow: + usbdev_option_filter_usbredir1 = "'-1:0x${usbredir_vendorid}:0x${usbredir_productid}:-1:1|-1:-1:-1:-1:0'" + - deny: + usbdev_option_filter_usbredir1 = "'-1:0x${usbredir_vendorid}:0x${usbredir_productid}:-1:0|-1:-1:-1:-1:1'" + - with_negative_config: + display = spice + usbredir_unconfigured_value = -1 - usb_multi_disk: no piix3-uhci, piix4-uhci, ich9-uhci Host_RHEL.m6: no usb-ehci Linux: file_system = "ext4" + Windows: + labeltype = "msdos" type = multi_disk cmd_timeout = 1000 black_list = C: S:
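The usbdev_option_filter_usbredir1 values in the usb_redir variants above use usbredir's filter syntax: '|'-separated rules of the form class:vendor:product:bcdDevice:allow, with -1 as a wildcard. The allow policy therefore admits exactly one device and rejects everything else, and the deny policy inverts that. A small sketch for composing such a string (an illustrative helper, not part of the test):

    def usbredir_filter(vendor_id, product_id, allow=True):
        # One specific rule for the device under test, then a catch-all
        # default; fields are class:vendor:product:bcdDevice:allow.
        device_rule = "-1:0x%s:0x%s:-1:%d" % (vendor_id, product_id, int(allow))
        default_rule = "-1:-1:-1:-1:%d" % int(not allow)
        return "%s|%s" % (device_rule, default_rule)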
diff --git a/qemu/tests/cfg/usb_device_check.cfg b/qemu/tests/cfg/usb_device_check.cfg index 1f679f6df062477a666b70976e701bd267049b03..623592e04c75b8fe73e207fdaddc99013cae466a 100644 --- a/qemu/tests/cfg/usb_device_check.cfg +++ b/qemu/tests/cfg/usb_device_check.cfg @@ -19,6 +19,9 @@ usb_mouse_for_guest = "QEMU USB Mouse" usb_kbd_for_guest = "QEMU USB Keyboard" usb_tablet_for_guest = "QEMU USB Tablet" + aarch64: + Host_RHEL: + only qemu-xhci Windows: chk_usb_info_cmd = 'wmic path Win32_USBControllerDevice get Dependent | find "USB"' # usb device info for Windows guest diff --git a/qemu/tests/cfg/valgrind_memalign.cfg b/qemu/tests/cfg/valgrind_memalign.cfg index 087264314d9c6cae0797f3f1f8991d30491b10c7..230db76c0d714fa1202412b1694573bafa9ead3f 100644 --- a/qemu/tests/cfg/valgrind_memalign.cfg +++ b/qemu/tests/cfg/valgrind_memalign.cfg @@ -7,6 +7,8 @@ disable_kvm = yes enable-kvm = no start_vm = no + # bz 1800495, valgrind doesn't support the "sandbox" option + del qemu_sandbox Ubuntu: valgrind_install_cmd = "apt-get install -y valgrind" s390x: diff --git a/qemu/tests/cfg/vioinput_hotplug.cfg b/qemu/tests/cfg/vioinput_hotplug.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1d93b7b33a60361afbbeb0c272a23558b8be2c98 --- /dev/null +++ b/qemu/tests/cfg/vioinput_hotplug.cfg @@ -0,0 +1,36 @@ +- vioinput_hotplug: + type = vioinput_hotplug + driver_name = "vioinput" + extra_driver_verify = "viohidkmdf hidclass.sys hidparse.sys" + del usb_devices + input_dev_bus_type = virtio + q35: + pcie_extra_root_port = 1 + variants: + - device_keyboard: + input_name = kbd0 + input_dev_type = keyboard + key_table_file = key_to_keycode_win.json + sub_test = keyboard_test + - device_mouse: + input_name = mouse0 + input_dev_type = mouse + mice_name = "QEMU Virtio Mouse" + tolerance = 40 + mtype = rel + sub_test = mouse_test + move_rate = 80 + move_duration = 1 + btns = "left right middle side extra" + scrolls = "wheel-up wheel-down" + - device_tablet: + input_name = tablet0 + input_dev_type = tablet + mice_name = "QEMU Virtio Tablet" + tolerance = 5 + mtype = abs + sub_test = mouse_test + move_rate = 80 + move_duration = 1 + btns = "left right middle side extra" + scrolls = "wheel-up wheel-down" diff --git a/qemu/tests/cfg/vioinput_keyboard.cfg b/qemu/tests/cfg/vioinput_keyboard.cfg index 41b288a258514e3e0d9a497e45f359edc309661d..5cc9df62e4607ab81ee80b21c7c95217e63fa6de 100644 --- a/qemu/tests/cfg/vioinput_keyboard.cfg +++ b/qemu/tests/cfg/vioinput_keyboard.cfg @@ -14,7 +14,8 @@ only q35 no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 no Win2008..r2 Win2012 Win2012..r2 + intel_iommu = yes virtio_dev_iommu_platform = on + iommu_eim = on virtio_dev_ats = on machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on,device-iotlb=on" diff --git a/qemu/tests/cfg/vioinput_mice.cfg b/qemu/tests/cfg/vioinput_mice.cfg index 05e3aece1f8a42fb8d6d1777e08c827f6e5bc744..abfb61519174f0f0351c995de99825db0ddc3ca3 100644 --- a/qemu/tests/cfg/vioinput_mice.cfg +++ b/qemu/tests/cfg/vioinput_mice.cfg @@ -27,7 +27,7 @@ only q35 no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 no Win2008..r2 Win2012 Win2012..r2 + intel_iommu = yes virtio_dev_iommu_platform = on virtio_dev_ats = on machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on,device-iotlb=on" diff --git a/qemu/tests/cfg/vioser_in_use.cfg b/qemu/tests/cfg/vioser_in_use.cfg index 50bfb10c7798d4e7f8124107c0476e107f48bc77..41bbfbd3d6dd6e53ccc61161d6e1f8b27bfb25bf 100644 --- a/qemu/tests/cfg/vioser_in_use.cfg +++ b/qemu/tests/cfg/vioser_in_use.cfg @@ -27,6 +27,9 @@ file_sender = guest - host_to_guest: file_sender = host + - transfer_both: + only with_vcpu_hotplug + file_sender = both variants: - with_stop_continue: only Windows @@ -50,3 +53,10 @@ interrupt_test = live_migration_guest mig_speed = 512M pre_migrate = "mig_set_speed" + - with_vcpu_hotplug: + only Windows + only transfer_both + interrupt_test = vcpu_hotplug_guest + vcpu_hotplug = yes + smp = 2 + vcpu_maxcpus = 8 diff --git a/qemu/tests/cfg/virtio_blk_with_discard_write_zeroes.cfg b/qemu/tests/cfg/virtio_blk_with_discard_write_zeroes.cfg index 17cfc6271d365a28604368a22dac3190107c3640..fd27a62ac29940d091661774af9e1c72c4ca775a 100644 --- a/qemu/tests/cfg/virtio_blk_with_discard_write_zeroes.cfg +++ b/qemu/tests/cfg/virtio_blk_with_discard_write_zeroes.cfg @@ -28,3 +28,25 @@ check_cmd_zeroes += "dd if={0} bs=64k count=10000 | tr -d '\0' | read -n 1 && " check_cmd_zeroes += "echo "PASS (not all zeroes)" || echo "FAIL"" status_checked = "{'check_cmd_discard': 'FAIL', 'check_cmd_zeroes': 'PASS (not all zeroes)'}" + - with_boundary_value: + blk_extra_params_stg0 += ",discard=on,write-zeroes=on" + attributes_checked = "{'discard': 'true', 'write-zeroes': 'true', " + variants: + - positive_min: + blk_extra_params_stg0 += ",max-write-zeroes-sectors=1,max-discard-sectors=1" + attributes_checked += "'max-discard-sectors': '1 (0x1)', 'max-write-zeroes-sectors': '1 (0x1)'}" + - positive_max: + blk_extra_params_stg0 += ",max-write-zeroes-sectors=4194303,max-discard-sectors=4194303" + attributes_checked += "'max-discard-sectors': '4194303 (0x3fffff)', 'max-write-zeroes-sectors': '4194303 (0x3fffff)'}" + - negative_min: + start_vm = no + not_preprocess = yes + boot_drive_stg0 = no + blk_extra_params_image1 += ",max-write-zeroes-sectors=0,max-discard-sectors=0" + error_msg = invalid max-discard-sectors property (0), must be between 1 and 4194303 + - negative_max: + start_vm = no + not_preprocess = yes + boot_drive_stg0 = no + blk_extra_params_image1 += ",max-write-zeroes-sectors=4194304,max-discard-sectors=4194304" + error_msg = invalid max-discard-sectors property (4194304), must be between 1 and 4194303 diff --git a/qemu/tests/cfg/virtio_chardev_trace.cfg b/qemu/tests/cfg/virtio_chardev_trace.cfg new file mode 100644 index 0000000000000000000000000000000000000000..eea1894f95744ead186485eb829b65796b9a4fb2 --- /dev/null +++ b/qemu/tests/cfg/virtio_chardev_trace.cfg @@ -0,0 +1,11 @@ +- virtio_chardev_trace: + only Linux + type = virtio_chardev_trace + start_vm = no + smp = 2 + serials += ' agentctl ' + serial_name_agentctl = 'agent-ctl-path' + chardev_backend_agentctl = 'pipe' + chardev_path_agentctl = '/tmp/virtio-trace/agent-ctl-path' + serial_type_agentctl = 'virtserialport' + auto_create_pipe = no diff --git a/qemu/tests/cfg/virtio_driver_sign_check.cfg
b/qemu/tests/cfg/virtio_driver_sign_check.cfg index 282ee16e054eda6feb071a862c4df52a37213d2d..1cefa3d25a118c2a025e2d3776e92af5c9dfb04f 100644 --- a/qemu/tests/cfg/virtio_driver_sign_check.cfg +++ b/qemu/tests/cfg/virtio_driver_sign_check.cfg @@ -40,3 +40,27 @@ tested_driver = "vioscsi" - with_viostor: tested_driver = "viostor" + - with_viofs: + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + no Win2008 Win7 + virt_test_type = qemu + required_qemu = [4.2.0,) + filesystems = fs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir = virtio_fs_test/ + force_create_fs_source = yes + remove_fs_source = yes + fs_target = 'myfs' + fs_driver_props = {"queue-size": 1024} + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + mem-path_mem1 = /dev/shm + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + tested_driver = viofs diff --git a/qemu/tests/cfg/virtio_fs_multi_vms.cfg b/qemu/tests/cfg/virtio_fs_multi_vms.cfg new file mode 100644 index 0000000000000000000000000000000000000000..78787cf86b32404a2b4a1b64aa13363f94bfc742 --- /dev/null +++ b/qemu/tests/cfg/virtio_fs_multi_vms.cfg @@ -0,0 +1,84 @@ +- virtio_fs_multi_vms: + no s390x + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + no Win2008 Win7 + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + type = virtio_fs_multi_vms + required_qemu = [4.2.0,) + kill_vm = yes + start_vm = yes + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + io_timeout = 600 + vms = 'vm1 vm2 vm3' + clone_master = yes + master_images_clone = image1 + remove_image_image1 = yes + cmd_dd = 'dd if=/dev/urandom of=%s bs=1M count=2048 oflag=direct' + cmd_md5 = 'md5sum %s' + fs_source_type = mount + fs_driver = virtio-fs + fs_driver_props = {"queue-size": 1024} + force_create_fs_source = yes + remove_fs_source = yes + mem-path_mem1_vm1 = /dev/shm + mem-path_mem1_vm2 = /dev/shn + mem-path_mem1_vm3 = /dev/sho + guest_numa_nodes_vm1 = shm0 + guest_numa_nodes_vm2 = shn0 + guest_numa_nodes_vm3 = sho0 + numa_memdev_shm0 = mem-mem1 + numa_memdev_shn0 = mem-mem1 + numa_memdev_sho0 = mem-mem1 + numa_nodeid_shm0 = 0 + numa_nodeid_shn0 = 0 + numa_nodeid_sho0 = 0 + Windows: + driver_name = viofs + # install winfsp tool + i386, i686: + install_path = 'C:\Program Files' + x86_64: + install_path = 'C:\Program Files (x86)' + wfsp_install_cmd = 'msiexec /i WIN_UTILS:\winfsp-1.7.20172.msi /qn' + check_installed_cmd = 'dir "${install_path}" |findstr /I winfsp' + start_vfs_cmd = "start /b %s -d -1 -D C:\\viofs_log.txt" + check_virtiofs_cmd = 'wmic process where caption="virtiofs.exe" list brief' + virtio_win_media_type = iso + cdroms += " virtio" + cmd_dd = 'dd if=/dev/random of=%s bs=1M count=200' + cmd_md5 = "%s: && md5sum.exe %s" + variants: + - with_multi_fs_sources: + no Windows + filesystems_vm1 = fs1 + filesystems_vm2 = fs2 + filesystems_vm3 = fs3 + fs_target_fs1_vm1 = myfs1 + fs_target_fs2_vm2 = myfs2 + fs_target_fs3_vm3 = myfs3 + fs_source_dir_fs1_vm1 = '/tmp/virtio_fs1_test' + fs_source_dir_fs2_vm2 = '/tmp/virtio_fs2_test' + fs_source_dir_fs3_vm3 = '/tmp/virtio_fs3_test' + fs_dest_fs1_vm1 = '/mnt/${fs_target_fs1_vm1}' + fs_dest_fs2_vm2 = '/mnt/${fs_target_fs2_vm2}' + fs_dest_fs3_vm3 = '/mnt/${fs_target_fs3_vm3}' + - share_fs_source: + fs_name_list = 'fs0 fs0 fs0' + shared_fs_source_dir = '/tmp/virtio_fs_test' + filesystems_vm1 = fs0 + filesystems_vm2 = fs0 + filesystems_vm3 = fs0 + 
fs_target_fs0_vm1 = myfs1 + fs_target_fs0_vm2 = myfs2 + fs_target_fs0_vm3 = myfs3 + fs_source_dir_fs0_vm1 = ${shared_fs_source_dir} + fs_source_dir_fs0_vm2 = ${shared_fs_source_dir} + fs_source_dir_fs0_vm3 = ${shared_fs_source_dir} + fs_dest_fs0_vm1 = '/mnt/${fs_target_fs0_vm1}' + fs_dest_fs0_vm2 = '/mnt/${fs_target_fs0_vm2}' + fs_dest_fs0_vm3 = '/mnt/${fs_target_fs0_vm3}' diff --git a/qemu/tests/cfg/virtio_fs_readonly.cfg b/qemu/tests/cfg/virtio_fs_readonly.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7236e1835de7d3c29b703bc5068551c73528af50 --- /dev/null +++ b/qemu/tests/cfg/virtio_fs_readonly.cfg @@ -0,0 +1,30 @@ +- virtio_fs_readonly: + only Linux + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + no s390x + type = virtio_fs_readonly + required_qemu = [4.2.0,) + kill_vm = yes + start_vm = yes + filesystems = fs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir = virtio_fs_test/ + force_create_fs_source = yes + remove_fs_source = yes + fs_target = 'myfs' + fs_driver_props = {"queue-size": 1024} + fs_dest = '/mnt/${fs_target}' + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + mem-path_mem1 = /dev/shm + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + cmd_create_file = 'touch ${fs_dest}/fs_file' + check_str = 'Read-only file system' diff --git a/qemu/tests/cfg/virtio_fs_share_data.cfg b/qemu/tests/cfg/virtio_fs_share_data.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f3809c804cbccd5de894a53e0f15c8efae84029d --- /dev/null +++ b/qemu/tests/cfg/virtio_fs_share_data.cfg @@ -0,0 +1,134 @@ +- virtio_fs_share_data: + no s390x + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + no Win2008 Win7 + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + type = virtio_fs_share_data + virt_test_type = qemu + required_qemu = [4.2.0,) + kill_vm = yes + start_vm = yes + filesystems = fs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir = virtio_fs_test/ + force_create_fs_source = yes + remove_fs_source = yes + fs_target = 'myfs' + fs_driver_props = {"queue-size": 1024} + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + mem-path_mem1 = /dev/shm + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + io_timeout = 600 + fs_dest = '/mnt/${fs_target}' + driver_name = viofs + Windows: + # install winfsp tool + i386, i686: + install_path = 'C:\Program Files' + x86_64: + install_path = 'C:\Program Files (x86)' + install_cmd = 'msiexec /i WIN_UTILS:\winfsp-1.7.20172.msi /qn' + check_installed_cmd = 'dir "%s" |findstr /I winfsp' + start_vfs_cmd = "start /b %s -d -1 -D C:\\viofs_log.txt" + check_virtiofs_cmd = 'wmic process where caption="virtiofs.exe" list brief' + virtio_win_media_type = iso + cdroms += " virtio" + variants: + - with_cache: + variants: + - @default: + - @extra_parameters: + no run_stress..extra_parameters + variants: + - lock_posix_off: + fs_binary_extra_options += ",no_posix_lock" + - lock_posix_on: + fs_binary_extra_options += ",posix_lock" + variants: + - flock_on: + fs_binary_extra_options += ",flock" + - flock_off: + fs_binary_extra_options += ",no_flock" + variants: + - xattr_on: + fs_binary_extra_options += ",xattr" + - xattr_off: + fs_binary_extra_options += ",no_xattr" + - @default: + variants: + - auto: + fs_binary_extra_options = " -o cache=auto" + - 
always: + fs_binary_extra_options = " -o cache=always" + - none: + fs_binary_extra_options = " -o cache=none" + - with_no_writeback: + fs_binary_extra_options = " -o no_writeback " + variants: + - @default: + cmd_dd = 'dd if=/dev/urandom of=%s bs=1M count=2048 oflag=direct' + cmd_md5 = 'md5sum %s' + io_timeout = 120 + Windows: + cmd_dd = 'dd if=/dev/random of=%s bs=1M count=200' + cmd_md5 = "%s: && md5sum.exe %s" + default..with_cache.none: + io_timeout = 600 + variants: + - @default: + - with_multi_fs_sources: + no Windows + no with_multi_fs_sources..with_no_writeback + with_multi_fs_sources.with_cache.none: + io_timeout = 600 + filesystems = fs1 fs2 fs3 fs4 fs5 + fs_source_dir_fs1 = '/tmp/virtio_fs1_test' + fs_source_dir_fs2 = '/tmp/virtio_fs2_test' + fs_source_dir_fs3 = '/tmp/virtio_fs3_test' + fs_source_dir_fs4 = '/tmp/virtio_fs4_test' + fs_source_dir_fs5 = '/tmp/virtio_fs5_test' + fs_target_fs1 = 'myfs1' + fs_target_fs2 = 'myfs2' + fs_target_fs3 = 'myfs3' + fs_target_fs4 = 'myfs4' + fs_target_fs5 = 'myfs5' + fs_dest_fs1 = '/mnt/${fs_target_fs1}' + fs_dest_fs2 = '/mnt/${fs_target_fs2}' + fs_dest_fs3 = '/mnt/${fs_target_fs3}' + fs_dest_fs4 = '/mnt/${fs_target_fs4}' + fs_dest_fs5 = '/mnt/${fs_target_fs5}' + - run_stress: + variants: + - with_fio: + no with_fio..with_no_writeback + smp = 8 + aarch64: + vcpu_maxcpus = 8 + io_timeout = 2000 + fio_options = '--name=stress --filename=%s --ioengine=libaio --rw=write --direct=1 ' + fio_options += '--bs=4K --size=1G --iodepth=256 --numjobs=128 --runtime=1800' + Windows: + fio_options = '--name=stress --filename=%s --ioengine=windowsaio --rw=write --direct=1 ' + fio_options += '--bs=4K --size=1G --iodepth=256 --numjobs=128 --runtime=1800 --thread' + - with_pjdfstest: + no Windows + no with_pjdfstest..with_no_writeback + io_timeout = 1800 + with_pjdfstest..with_cache.none: + io_timeout = 7200 + pjdfstest_pkg = pjdfstest-0.1.tar.bz2 + cmd_unpack = 'tar -xvf {0}/${pjdfstest_pkg} -C {0}' + cmd_yum_deps = 'yum install -y perl-Test-Harness' + cmd_autoreconf = 'autoreconf -ifs %s/pjdfstest/' + cmd_configure = '{0}/pjdfstest/configure && ' + cmd_configure += 'mv config.* {0}/pjdfstest/ && mv Makefile {0}/pjdfstest/ && mv stamp-h1 {0}/pjdfstest/' + cmd_make = 'make %s/pjdfstest/pjdfstest' + cmd_pjdfstest = 'prove -rv %s/pjdfstest/tests'
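The cmd_* templates above chain into the usual pjdfstest flow: unpack the tarball into the shared directory, build, then run the suite under prove with the longer cache=none timeout. A simplified sketch of how they might be expanded against the virtiofs mount point (the session object, literal paths, and timeouts are assumptions; the relocation of configure's outputs from cmd_configure is elided):

    def run_pjdfstest(session, mnt, timeout=7200):
        # mnt is the guest-side virtiofs mount point, e.g. /mnt/myfs.
        session.cmd("tar -xvf {0}/pjdfstest-0.1.tar.bz2 -C {0}".format(mnt))
        session.cmd("yum install -y perl-Test-Harness")
        session.cmd("autoreconf -ifs %s/pjdfstest/" % mnt)
        session.cmd("%s/pjdfstest/configure" % mnt)
        session.cmd("make %s/pjdfstest/pjdfstest" % mnt)
        session.cmd("prove -rv %s/pjdfstest/tests" % mnt, timeout=timeout)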
'/90:e2:ba:7d:33:3c/{g;1!p;};h'|awk -F': ' '{print $2}'|head -1 + vf_filter_re = "Virtual Function" + device_driver = vfio-pci + pci_assignable_nic3 = vf + pci_assignable_nic2 = no + pci_assignable_nic1 = no + pre_command = echo 8 > /sys/devices/system/node/node1/hugepages/hugepages-1048576kB/nr_hugepages + pre_command += ; modprobe vfio; modprobe vfio-pci + pre_command += ; echo 2 > /sys/class/net/enp6s0f0/device/sriov_numvfs + post_command = modprobe -r ixgbe; modprobe ixgbe + #intel-iommu device + extra_params = " -object memory-backend-file,id=mem,size=8G,mem-path=/dev/hugepages,share=on -numa node,memdev=mem -mem-prealloc " + machine_type_extra_params = "kernel-irqchip=split" + + #set iommu_platform=on for all virtio pci device + intel_iommu = yes + virtio_dev_iommu_platform = on + iommu_caching_mode = on + #set ats=on for all virtio pci device + virtio_dev_ats = on + + # Use 'intel_iommu=on, iommu=pt' by default, tester can modify it as needed + enable_guest_iommu = yes + guest_iommu_option = pt + kernel_extra_params_add = "default_hugepagesz=1G" + + #generator host configuration + generator = 10.73.8.4 + username_generator = root + password_generator = kvmautotest + shell_client_generator = ssh + shell_port_generator = 22 + shell_prompt_generator = \[root@.{0,50}][\#\$] + generator_hugepages_cmd = echo 10 > /sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages + generator_hugepages_cmd += ; echo 10 > /sys/devices/system/node/node1/hugepages/hugepages-1048576kB/nr_hugepages + # VM configuration + package_install = tuned-profiles-cpu-partitioning dpdk + env_hugepages_cmd = echo 8 > /sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages + env_pkg = tuned-profiles-cpu-partitioning dpdk dpdk-devel dpdk-tools + nic_driver = Virtio 82599 + nic1_dpdk_driver = /usr/lib64/librte_pmd_virtio.so + nic2_dpdk_driver = /usr/lib64/librte_pmd_ixgbe.so + moongen_pkg = MoonGen.zip + moongen_dpdk_nic = 0000:05:00.0 0000:05:00.1 + testpmd_exec = start_testpmd.py + testpmd_queues = 1 diff --git a/qemu/tests/cfg/virtio_port_hotplug.cfg b/qemu/tests/cfg/virtio_port_hotplug.cfg index 3fb49a6a2d224f4bf27a026a50f2dd1a382f43ac..a6ed92eef92c01c5afdefc1e48f5d11ceb68bbd1 100644 --- a/qemu/tests/cfg/virtio_port_hotplug.cfg +++ b/qemu/tests/cfg/virtio_port_hotplug.cfg @@ -29,14 +29,21 @@ unplug_chardev_vc1 = yes unplug_chardev_vc2 = yes - @unplug_port: - serials += " vc1 vc2" - serial_type_vc1 = virtserialport - serial_type_vc2 = virtconsole - file_transfer_serial_port = vc1 - unplug_device = vc1 vc2 + serials += " vs1 vs2" + serial_type_vs1 = virtserialport + serial_type_vs2 = virtserialport + unplug_device = vs1 vs2 + check_module = no + unplug_chardev_vs1 = no + unplug_chardev_vs2 = no filesize = 100 - unplug_chardev_vc1 = no - unplug_chardev_vc2 = no + variants: + - @default: + bg_test = no + repeat_times = 1 + - with_bg_test: + Linux: + filesize = 2000 - unplug_port_chardev_pci: type = virtio_serial_unplug_port_chardev_pci sleep_time = 0.5 @@ -88,6 +95,21 @@ extra_chardevs = "channel1" extra_serials = "vs1" serial_type_vs1 = virtserialport + - max_chardevs: + type = virtio_serial_hotplug_max_chardevs + start_vm = yes + sleep_time = 1 + extra_chardevs = "channel0" + numberic_chardev = 31 + #Port number 0 on virtio-serial devices reserved for + #virtconsole devices for backward compatibility + virtio_serial_ports = 30 + - remove_pending_watches: + type = chardev_remove_pending_watches + serials += " vs1" + serial_type_vs1 = "virtserialport" + file_transfer_serial_port = 
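The shell: values above are evaluated on the host when the cfg is parsed; the sed hold-space pipeline simply maps a NIC's MAC address to its interface name, exploiting the fact that `ip link` prints the "N: <ifname>: ..." line one line above the "link/ether <mac>" line. A plain-Python equivalent for readers untangling it (helper name is ours, not the framework's):

    import subprocess

    def ifname_by_mac(mac):
        # the interface-name line precedes the matching link/ether line
        lines = subprocess.check_output(["ip", "link"], text=True).splitlines()
        for i, line in enumerate(lines):
            if mac in line and i > 0:
                return lines[i - 1].split(":")[1].strip()
        return None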
"vs1" + filesize = 2000 - hotplug_various_chardev: type = virtio_serial_various_chardev_hotplug serials = "" @@ -102,3 +124,8 @@ ringbuf_read_size = 1000 ringbuf_data = 'adcdefgh' ringbuf_format = 'utf8' + - hotplug_existed_port_pci: + type = virtio_serial_hotplug_existed_port_pci + serials += ' vs1 ' + serial_type_vs1 = 'virtserialport' + chardev_backend_channel1 = "tcp_socket" diff --git a/qemu/tests/cfg/virtio_scsi_cdrom.cfg b/qemu/tests/cfg/virtio_scsi_cdrom.cfg index 33a4235119e6561efc1aca2640101046bfd85df2..5a087608a1109a6985413c6f2f770da1aedddc55 100644 --- a/qemu/tests/cfg/virtio_scsi_cdrom.cfg +++ b/qemu/tests/cfg/virtio_scsi_cdrom.cfg @@ -20,9 +20,15 @@ only x86_64, i386, ppc64, ppc64le only virtio_scsi, virtio_blk virtio_drive_letter = 'D' + virtio_scsi: + # disable iothread for scsi devices explicitly + iothread_scheme ?= + iothreads ?= + image_iothread ?= variants: - with_installation: cd_format_cd1 = scsi-cd + image_iothread_cd1 = i440fx: Windows: cd_format_unattended = ide @@ -66,6 +72,8 @@ image_size_equal = 1G ahci: images = "image1 stg stg2 stg3 stg4 stg5" + default_bios..Windows: + images = "image1 stg stg2 stg3 stg4 stg5" variants: - aio_native: image_aio = native @@ -77,6 +85,7 @@ no WinXP Win2000 Win2003 WinVista unattended_delivery_method = cdrom cdroms += " unattended" + image_iothread_unattended = drive_index_unattended = 3 drive_index_cd1 = 1 variants: diff --git a/qemu/tests/cfg/virtio_serial_file_transfer.cfg b/qemu/tests/cfg/virtio_serial_file_transfer.cfg index 6f01604463f1ce5524643f371d344bcf9496f544..ac1bb98eee15a5c8b33f67da31fd760bc10605a7 100644 --- a/qemu/tests/cfg/virtio_serial_file_transfer.cfg +++ b/qemu/tests/cfg/virtio_serial_file_transfer.cfg @@ -30,14 +30,25 @@ - from_guest_to_host: file_sender = guest - boot_with_multiple_virtserialports: + type = boot_N_M_virtserialports + only unix_socket + filesize = 512 variants: - 1_N: - only unix_socket - type = boot_1_N_virtserialports - driver_name = vioser serials += " vs2 vs3" serial_type_vs2 = virtserialport serial_type_vs3 = virtserialport + - N_M: + serials += " vs2 vs3 vs4 vs5 vs6" + serial_type_vs2 = virtserialport + serial_type_vs3 = virtserialport + serial_type_vs4 = virtserialport + serial_bus_vs4 = "" + serial_type_vs5 = virtserialport + serial_type_vs6 = virtserialport + - N_1: + start_vm = no + numberic_bus = 26 - max_ports: only unix_socket type = virtio_serial_file_transfer_max_ports @@ -56,11 +67,12 @@ only unix_socket no WinXP WinVista Win7 Win8 Win8.1 Win2000 Win2003 no Win2008 Win2008..r2 Win2012 Win2012..r2 + intel_iommu = yes virtio_dev_iommu_platform = on + iommu_eim = on enable_guest_iommu = yes virtio_dev_ats = on machine_type_extra_params = "kernel-irqchip=split" - extra_params = "-device intel-iommu,intremap=on,eim=on,device-iotlb=on" variants: - unix_socket: - tcp_socket: diff --git a/qemu/tests/cfg/virtio_trace_pipenb.cfg b/qemu/tests/cfg/virtio_trace_pipenb.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0d5cd229ab210f01c9849d5fd23d9e778bff7e31 --- /dev/null +++ b/qemu/tests/cfg/virtio_trace_pipenb.cfg @@ -0,0 +1,7 @@ +- virtio_trace_pipenb: + only Linux + type = virtio_trace_pipenb + smp = 1 + serials += " vs2" + serial_type_vs2 = virtserialport + chardev_backend_vs2 = pipe diff --git a/qemu/tests/cfg/watchdog.cfg b/qemu/tests/cfg/watchdog.cfg index 3601c07a66a4016d49167e68299648cda87f14a4..d5ebac8bbf51aa04f1842ed0691f6acedf402e94 100644 --- a/qemu/tests/cfg/watchdog.cfg +++ b/qemu/tests/cfg/watchdog.cfg @@ -3,6 +3,9 @@ enable_watchdog = yes start_vm = 
no type = watchdog + aarch64: + no Host_RHEL + watchdog_type_check = " -M virt -watchdog '?'" variants: -i6300esb: no RHEL.4 diff --git a/qemu/tests/cfg/win_irq_check.cfg b/qemu/tests/cfg/win_irq_check.cfg index 07f3a1492219f8b647e128b42b7eebd1d83f37ef..81102b1dc371f366d4c4cc886dad07f71b2d2df1 100644 --- a/qemu/tests/cfg/win_irq_check.cfg +++ b/qemu/tests/cfg/win_irq_check.cfg @@ -87,12 +87,9 @@ no with_balloon, with_viorng msi_cmd = "reg add "HKLM\System\CurrentControlSet\Enum\%s\Device Parameters\Interrupt Management\MessageSignaledInterruptProperties" /v MSISupported /d %d /t REG_DWORD /f" - by_vectors: - only with_vioscsi, with_viostor, with_netkvm + only with_viostor, with_netkvm vectors = 0 with_viostor: virtio_blk: blk_extra_params_image1 += ",vectors=${vectors}" blk_extra_params_stg += ",vectors=${vectors}" - with_vioscsi: - bus_extra_params_image1 = "vectors=${vectors}" - bus_extra_params_stg = "vectors=${vectors}" diff --git a/qemu/tests/cfg/win_nics_teaming.cfg b/qemu/tests/cfg/win_nics_teaming.cfg new file mode 100644 index 0000000000000000000000000000000000000000..04f31c450023429306e09a1e762f0c6f2a146aa4 --- /dev/null +++ b/qemu/tests/cfg/win_nics_teaming.cfg @@ -0,0 +1,15 @@ +- win_nics_teaming: + virt_test_type = qemu + only Win2016 Win2019 + only x86_64 + no macvtap + type = win_nics_teaming + nics = "nic1 nic2 nic3 nic4" + clone_master = yes + master_images_clone = image1 + remove_image_image1 =yes + filesize = 8000 + dd_cmd = "dd if=/dev/zero of=%s oflag=direct bs=1M count=${filesize}" + tmp_dir = "C:\\" + delete_cmd = "del /f %s" + setup_cmd = 'powershell -command "New-NetLbfoTeam -Name \"Team\" -TeamMembers \"%s\",\"%s\",\"%s\",\"%s\""' diff --git a/qemu/tests/cfg/win_sigverif.cfg b/qemu/tests/cfg/win_sigverif.cfg index 78f47a3ad91c2f45d4b616bfdd1e143710dc8adf..87450a9742f63332aa017029f74f68da04c51dba 100644 --- a/qemu/tests/cfg/win_sigverif.cfg +++ b/qemu/tests/cfg/win_sigverif.cfg @@ -67,3 +67,28 @@ input_dev_bus_type_input1 = virtio input_dev_type_input1 = mouse device_name = "VirtIO Input Driver" + - with_viofs: + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 Host_RHEL.m8.u1 + no Win2008 Win7 + virt_test_type = qemu + required_qemu = [4.2.0,) + filesystems = fs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir = virtio_fs_test/ + force_create_fs_source = yes + remove_fs_source = yes + fs_target = 'myfs' + fs_driver_props = {"queue-size": 1024} + mem = 4096 + mem_devs = mem1 + backend_mem_mem1 = memory-backend-file + mem-path_mem1 = /dev/shm + size_mem1 = 4G + use_mem_mem1 = no + share_mem = yes + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + driver_name = viofs + device_name = "VirtIO FS Device" diff --git a/qemu/tests/cfg/x86_cpu_L3_cache.cfg b/qemu/tests/cfg/x86_cpu_L3_cache.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d7de0edca2789d2becf48458911f177cbf31fc03 --- /dev/null +++ b/qemu/tests/cfg/x86_cpu_L3_cache.cfg @@ -0,0 +1,6 @@ +- x86_cpu_L3_cache: + type = x86_cpu_L3_cache + only Linux + only x86_64, i386 + start_vm = no + old_machine = 'rhel7.3.0' diff --git a/qemu/tests/cfg/x86_cpu_flags.cfg b/qemu/tests/cfg/x86_cpu_flags.cfg new file mode 100644 index 0000000000000000000000000000000000000000..904a77daffbab69b14a7513d0610ba1c0c2d4c65 --- /dev/null +++ b/qemu/tests/cfg/x86_cpu_flags.cfg @@ -0,0 +1,75 @@ +- x86_cpu_flags: + type = x86_cpu_flags + cpu_model_flags += ",enforce,-mpx" + kill_vm_on_error = yes + start_vm = no + check_host_flags = yes + reboot_method = "shell" + only i386, 
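In the win_nics_teaming cfg above, the four %s slots of setup_cmd are filled with the guest's NIC connection names at run time. A hedged illustration, assuming typical Windows default names (the test discovers the real ones):

    nic_names = ("Ethernet", "Ethernet 2", "Ethernet 3", "Ethernet 4")  # illustrative
    cmd = setup_cmd % nic_names
    # -> powershell -command "New-NetLbfoTeam -Name \"Team\" -TeamMembers \"Ethernet\",\"Ethernet 2\",..."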
x86_64 + variants: + - intel: + only HostCpuVendor.intel + variants: + - memory_protection_key: + cpu_model_flags += ",+pku,check" + flags = "pku ospke" + - avx512_series_flags_5: + # support RHEL.8.2.0 guest or later + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + cpu_model_flags += ",+avx512-bf16" + flags = "avx512_bf16" + - avx512_series_flags_2: + # support RHEL.7 guest or later + no RHEL.6 + cpu_model_flags += ",+avx512bw,+avx512dq,+avx512vl" + flags = "avx512bw avx512dq avx512vl" + - avx512_series_flags_4: + # support RHEL.7 guest or later + no RHEL.6 + cpu_model_flags += ",+avx512vbmi2,+avx512vnni,+gfni,+vaes,+avx512bitalg,+vpclmulqdq" + flags = "avx512_vbmi2 avx512_vnni gfni vaes avx512_bitalg vpclmulqdq" + - arch_capabilities: + # support RHEL.7 guest or later + no RHEL.6 + cpu_model_flags += ",arch-capabilities=on,+mds-no" + flags = "arch_capabilities" + check_guest_cmd = "cat /sys/devices/system/cpu/vulnerabilities/mds | grep '%s'" + expect_items = 'Not affected' + - amd: + only HostCpuVendor.amd + variants: + - flag_1: + # support RHEL.7 guest or later + no RHEL.6 + cpu_model_flags += ",+xsaveerptr,+clzero" + flags = "clzero xsaveerptr" + - others: + variants: + - flag_disable: + only Linux + type = x86_cpu_flag_disable + check_clock = 'cat /sys/devices/system/clocksource/clocksource0/available_clocksource' + cpu_model_flags += ",-nx,-kvmclock" + no_flags = "nx" + flags = "nx" + - test_smap: + # support RHEL.7 guest or later + no RHEL.6 + flags = "smap" + - test_dies: + # support RHEL.8.1.0 host or later + no Host_RHEL.m6 Host_RHEL.m7 Host_RHEL.m8.u0 + # support RHEL.7.8 guest or later + no RHEL.6 RHEL.7.1 RHEL.7.2 RHEL.7.3 RHEL.7.4 RHEL.7.5 RHEL.7.6 RHEL.7.7 + type = x86_cpu_test_dies + # fixed qemu version: qemu-kvm-4.1.0-1.module+el8.1.0+3966+4a23dca1 + required_qemu = [4.1.0, ) + vcpu_maxcpus = 0 + start_vm = no + check_die_id = 'cat /sys/devices/system/cpu/cpu*/topology/die_id | sort | uniq -c' + check_die_cpus_list = 'cat /sys/devices/system/cpu/cpu*/topology/die_cpus_list | sort | uniq -c' + - nonstop_tsc: + type = x86_cpu_flag_nonstop_tsc + check_clock = 'cat /sys/devices/system/clocksource/clocksource0/available_clocksource | grep tsc' + cpu_model_flags += ",+invtsc" + flags = "nonstop_tsc" diff --git a/qemu/tests/cfg/x86_cpu_model.cfg b/qemu/tests/cfg/x86_cpu_model.cfg index 703d171051e9f068ec5fb888e6f0442797de980d..42a323c1496d7cfd45ffa61712b2d1a85bcd2c65 100644 --- a/qemu/tests/cfg/x86_cpu_model.cfg +++ b/qemu/tests/cfg/x86_cpu_model.cfg @@ -10,50 +10,110 @@ get_model_cmd = "wmic cpu get name" Linux: get_model_cmd = "lscpu | grep 'Model name'" - check_flag_cmd = "lscpu | grep Flags | awk -F ':' '{print $2}'" variants model: + - host: + type = boot + cpu_model = host + start_vm = yes + cpu_model_flags += ",check" + - EPYC-Rome: + only HostCpuVendor.amd + flags = "movbe rdrand rdtscp fxsr_opt cr8_legacy osvw fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 arat f16c xsaveerptr clzero rdpid perfctr_core ibpb wbnoinvd stibp clwb umip xsaves" + model_pattern = "AMD EPYC-Rome Processor%s" - EPYC: + only HostCpuVendor.amd flags = "movbe rdrand rdtscp fxsr_opt cr8_legacy osvw fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 arat f16c" model_pattern = "AMD EPYC Processor%s" - Opteron_G5: + only HostCpuVendor.amd flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de f16c avx xsave aes popcnt sse4_2 sse4_1 cx16 fma ssse3 pclmulqdq lm pdpe1gb nx syscall tbm fma4 
xop 3dnowprefetch misalignsse sse4a abm lahf_lm fpu" model_pattern = "AMD Opteron 63xx class CPU%s" - Opteron_G4: + only HostCpuVendor.amd flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de avx xsave aes popcnt sse4_2 sse4_1 cx16 ssse3 pclmulqdq lm pdpe1gb nx syscall fma4 xop 3dnowprefetch misalignsse sse4a abm lahf_lm fpu" model_pattern = "AMD Opteron 62xx class CPU%s" - Opteron_G3: + only HostCpuVendor.amd flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de popcnt cx16 lm nx syscall misalignsse sse4a abm lahf_lm fpu" model_pattern = "AMD Opteron 23xx \(Gen 3 Class Opteron%s\)" + - Cooperlake: + only HostCpuVendor.intel + # support 'BFloat16' with RHEL8.2 guest or later + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + flags = "avx512_bf16 stibp arch_capabilities" + model_pattern = "Intel Xeon Processor \(Cooperlake%s\)" + check_cmd = "cat /sys/devices/system/cpu/vulnerabilities/%s" + vulnerabilities = "ls /sys/devices/system/cpu/vulnerabilities/" + check_items = "itlb_multihit mds tsx_async" + expect_result = 'Not affected' - Icelake-Server: + only HostCpuVendor.intel flags = "la57 wbnoinvd avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_bitalg avx512_vpopcntdq clflushopt pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" model_pattern = "Intel Xeon Processor \(Icelake%s\)" + - Icelake-Server-noTSX: + only HostCpuVendor.intel + flags = "la57 wbnoinvd avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_bitalg avx512_vpopcntdq clflushopt pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" + model_pattern = "Intel Xeon Processor \(Icelake%s\)" + no_flags = "hle rtm" - Icelake-Client: + only HostCpuVendor.intel + flags = "wbnoinvd avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_bitalg avx512_vpopcntdq" + model_pattern = "Intel Core Processor \(Icelake%s\)" + - Icelake-Client-noTSX: + only HostCpuVendor.intel flags = "wbnoinvd avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_bitalg avx512_vpopcntdq" model_pattern = "Intel Core Processor \(Icelake%s\)" + no_flags = "hle rtm" - Cascadelake-Server: + only HostCpuVendor.intel flags = "avx512_vnni clflushopt ibrs ibpb pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" model_pattern = "Intel Xeon Processor \(Cascadelake%s\)" + - Cascadelake-Server-noTSX: + only HostCpuVendor.intel + flags = "avx512_vnni clflushopt ibrs ibpb pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" + model_pattern = "Intel Xeon Processor \(Cascadelake%s\)" + no_flags = "hle rtm" - Skylake-Server: + only HostCpuVendor.intel flags = "pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" model_pattern = "Intel Xeon Processor \(Skylake%s\)" + - Skylake-Server-noTSX: + only HostCpuVendor.intel + flags = "pdpe1gb clwb avx512f avx512dq avx512bw avx512cd avx512vl" + model_pattern = "Intel Xeon Processor \(Skylake%s(, no TSX)?\)" + no_flags = "hle rtm" - Skylake-Client: + only HostCpuVendor.intel flags = "xsavec xgetbv1" model_pattern = "Intel Core Processor \(Skylake%s\)" + - Skylake-Client-noTSX: + only HostCpuVendor.intel + flags = "xsavec xgetbv1" + model_pattern = "Intel Core Processor \(Skylake%s(, no TSX)?\)" + no_flags = "hle rtm" - Broadwell: + only HostCpuVendor.intel flags = "adx rdseed 3dnowprefetch smap hle rtm" model_pattern = "Intel Core Processor \(Broadwell%s\)" - Broadwell-noTSX: + only HostCpuVendor.intel flags = "adx rdseed 3dnowprefetch smap" model_pattern = "Intel Core Processor \(Broadwell, no TSX%s\)" + 
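Each model entry above pairs a flags whitelist (and, for the noTSX models, a no_flags blacklist) with a model_pattern matched against the guest's reported model name. A minimal sketch of the guest-side flag check these entries drive (the real helper lives in the test module; this only shows the shape of it):

    def missing_flags(session, flags):
        # every flag the guest CPU advertises is listed in /proc/cpuinfo
        out = session.cmd_output("grep -m1 '^flags' /proc/cpuinfo")
        present = set(out.split(':', 1)[1].split())
        return [f for f in flags.split() if f not in present]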
no_flags = "hle rtm" - Haswell: + only HostCpuVendor.intel flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de fpu avx xsave aes tsc_deadline_timer fma movbe popcnt x2apic sse4_2 sse4_1 cx16 ssse3 pclmulqdq pni f16c rdrand fsgsbase bmi1 hle avx2 smep bmi2 erms invpcid rtm lm rdtscp nx syscall lahf_lm xsaveopt arat pcid" model_pattern = "Intel Core Processor \(Haswell%s\)" - Haswell-noTSX: + only HostCpuVendor.intel flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de fpu avx xsave aes tsc_deadline_timer fma movbe popcnt x2apic sse4_2 sse4_1 cx16 ssse3 pclmulqdq pni f16c rdrand fsgsbase bmi1 avx2 smep bmi2 erms invpcid lm rdtscp nx syscall lahf_lm xsaveopt arat pcid" model_pattern = "Intel Core Processor \(Haswell, no TSX%s\)" + no_flags = "hle rtm" - IvyBridge: + only HostCpuVendor.intel flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de fpu avx xsave aes tsc_deadline_timer popcnt x2apic sse4_2 sse4_1 cx16 ssse3 pclmulqdq pni rdrand fsgsbase smep erms lm rdtscp nx syscall lahf_lm xsaveopt arat f16c" model_pattern = "Intel Xeon E3-12xx v2 \(Ivy Bridge%s\)" - SandyBridge: + only HostCpuVendor.intel flags = "vme sse2 sse fxsr mmx clflush pse36 pat cmov mca pge mtrr sep apic cx8 mce pae msr tsc pse de fpu avx xsave aes tsc_deadline_timer popcnt x2apic sse4_2 sse4_1 cx16 ssse3 pclmulqdq pni lm rdtscp nx syscall lahf_lm xsaveopt arat" model_pattern = "Intel Xeon E312xx \(Sandy Bridge%s\)" diff --git a/qemu/tests/cgroup.py b/qemu/tests/cgroup.py index 3fa29ebdbae0b8ff7e7b95fac75b22c75e504872..3db731d6bb12a4a20a203446665d72cd6969ee45 100644 --- a/qemu/tests/cgroup.py +++ b/qemu/tests/cgroup.py @@ -672,6 +672,7 @@ def run(test, params, env): # Create first VM params['smp'] = 1 params["vcpu_sockets"] = 1 + params["vcpu_maxcpus"] = host_cpus params['vms'] = "vm0" preprocess(test, params, env) @@ -837,6 +838,7 @@ def run(test, params, env): host_cpus = open('/proc/cpuinfo').read().count('processor') # when smp <= 0 use smp = no_host_cpus vm_cpus = int(params.get('smp', 0)) # cpus per VM + params["vcpu_maxcpus"] = host_cpus # Use smp = no_host_cpu if vm_cpus <= 0 or params.get('cgroup_use_max_smp') == "yes": params['smp'] = host_cpus @@ -929,7 +931,7 @@ def run(test, params, env): dist = distance(min(stats[i][:host_cpus]), max(stats[i][:host_cpus])) # less vms, lower limit. Maximal limit is 0.2 - if dist > min(0.10 + 0.01 * len(vms), 0.2): + if dist > min(0.15 + 0.01 * len(vms), 0.2): err += "1, " logging.error("1st part's limits broken. Utilisation should be" " equal. stats = %s, distance = %s", stats[i], @@ -940,7 +942,7 @@ def run(test, params, env): i += 1 dist = distance(min(stats[i]), max(stats[i])) if host_cpus % no_speeds == 0 and no_speeds <= host_cpus: - if dist > min(0.10 + 0.01 * len(vms), 0.2): + if dist > min(0.15 + 0.01 * len(vms), 0.2): err += "2, " logging.error("2nd part's limits broken, Utilisation " "should be equal. stats = %s, distance = %s", @@ -958,7 +960,7 @@ def run(test, params, env): norm_stats = [float(stats[i][_]) / speeds[_] for _ in range(len(stats[i]))] dist = distance(min(norm_stats), max(norm_stats)) - if dist > min(0.10 + 0.02 * len(vms), 0.25): + if dist > min(0.15 + 0.02 * len(vms), 0.25): err += "3, " logging.error("3rd part's limits broken; utilisation " "should be in accordance to self.speeds. 
" @@ -1183,6 +1185,7 @@ def run(test, params, env): sessions[-1].cmd("touch /tmp/cgroup-cpu-lock") sessions[-1].sendline(cmd) + cpu_time_type = params.get_numeric('cpu_time_type') try: logging.info("Test") for i in range(len(cpusets)): @@ -1195,7 +1198,8 @@ def run(test, params, env): _load = get_load_per_cpu() time.sleep(test_time) # Stats after test_time - stats.append(get_load_per_cpu(_load)[1:]) + _load_diff = get_load_per_cpu(_load) + stats.append(list(map(list, zip(*_load_diff)))[cpu_time_type][1:]) serial.cmd("rm -f /tmp/cgroup-cpu-lock") err = "" @@ -1701,7 +1705,7 @@ def run(test, params, env): timeout = int(params.get("login_timeout", 360)) vm = env.get_all_vms()[0] - vm_cpus = int(params.get('smp', 0)) # cpus per VM + vm_cpus = vm.cpuinfo.smp # cpus per VM serial = vm.wait_for_serial_login(timeout=timeout) sessions = [] for _ in range(vm_cpus): diff --git a/qemu/tests/change_media.py b/qemu/tests/change_media.py index 14b130db0b4cd5860c01be53976d8c51af0c3b79..f0919b9bc5da689afcae294444702e577ffa82b4 100644 --- a/qemu/tests/change_media.py +++ b/qemu/tests/change_media.py @@ -54,6 +54,11 @@ def run(test, params, env): output = str(err) return output + def get_qdev_by_filename(filename): + for info_dict in vm.monitor.info("block"): + if filename in str(info_dict): + return info_dict['qdev'] + vm = env.get_vm(params["main_vm"]) vm.verify_alive() monitor = vm.get_monitors_by_type('qmp') @@ -148,9 +153,8 @@ def run(test, params, env): new_img_name) if vm.check_capability(Flags.BLOCKDEV): sys_image = QemuImg(params, data_dir.get_data_dir(), params['images'].split()[0]) - device_name = vm.get_block({"filename": sys_image.image_filename}) change_insert_cmd = ("blockdev-change-medium id=%s,filename=%s" % ( - vm.devices.get_qdev_by_drive(device_name), new_img_name)) + get_qdev_by_filename(sys_image.image_filename), new_img_name)) output = change_block(change_insert_cmd) if "is not removable" not in output: test.fail("Could remove non-removable device!") diff --git a/qemu/tests/chardev_acpi.py b/qemu/tests/chardev_acpi.py new file mode 100644 index 0000000000000000000000000000000000000000..1fde97d404ad340079f417a5cf04ff975ac03cf3 --- /dev/null +++ b/qemu/tests/chardev_acpi.py @@ -0,0 +1,38 @@ +from virttest import error_context +from virttest import env_process + +from avocado.utils import process + + +@error_context.context_aware +def run(test, params, env): + """ + acpi description of serial and parallel ports incorrect + with -chardev/-device: + 1) Check device resources(io port and irq) on host + 2) Boot guest A with isa-serial with tty chardev backend + 3) Check device resources inside guest A + 4) Boot guest B with -serial /dev/ttyS0 + 5) Check device resources inside guest B + 6) Check if the result are same for host, A and B. + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + outputs = [] + check_cmd = params['check_cmd'] + host_output = process.getoutput(check_cmd)[:35] + outputs.append(host_output) + for x in range(2): + if x >= 1: + params['serials'] = " ".join(params['serials'].split()[:-1]) + params['extra_params'] = params.get('extra_params', '') + ' -serial /dev/ttyS0' + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + vm_output = session.cmd_status_output(check_cmd)[1][:35] + outputs.append(vm_output) + vm.destroy() + assert outputs.count(outputs[0]) == len(outputs), \ + "Host: {} and VM 1: {} and VM 2: {} are not the same".\ + format(outputs[0], outputs[1], outputs[2]) diff --git a/qemu/tests/chardev_free_port.py b/qemu/tests/chardev_free_port.py new file mode 100644 index 0000000000000000000000000000000000000000..b007d0a92c579dce2035a6a15dbce90277c4c476 --- /dev/null +++ b/qemu/tests/chardev_free_port.py @@ -0,0 +1,56 @@ +import logging + +from virttest import error_context, env_process +from virttest import utils_misc + +from qemu.tests.virtio_console import add_chardev + + +@error_context.context_aware +def run(test, params, env): + """ + qemu should try to find a free port value by to= with unix socket and tcp options: + 1) boot guest with socket 'host=127.0.0.1,port=num' + 2) query chardev and check port number + 3) boot another guest with socket 'host=127.0.0.1,port=num,to=num+' + 4) query chardev and check port number, should different from 2) + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + chardev_infos = [] + vms = params.get("vms").split() + _vm = env.get_vm(vms[0]) + char_device = add_chardev(_vm, params)[0] + chardev_params = char_device.params + for vm_ind, vm in enumerate(vms): + if vm_ind == 1: + host = chardev_params['host'] + chardev_to = utils_misc.find_free_ports(int(chardev_params['port']) + 1, 6000, 1, host) + chardev_params['to'] = str(chardev_to[0]) + + extra_params = ' ' + char_device.cmdline() + params['extra_params_%s' % vm] = params.get('extra_params', '') + extra_params + params['start_vm_%s' % vm] = "yes" + env_process.preprocess(test, params, env) + for vm in vms: + _vm = env.get_vm(vm) + chardev_infos.append(_vm.monitor.info("chardev")) + _port, _to = int(chardev_params['port']), int(chardev_params['to']) + for char_ind, chardevs in enumerate(chardev_infos): + in_chardev = False + for chardev in chardevs: + if chardev['label'] == chardev_params['id']: + tmp_pnum = int(chardev['filename'].split(':')[-1].split(',')[0]) + error_context.context("Get port %d for vm%d from monitor" % (tmp_pnum, char_ind), logging.info) + break + if char_ind == 0: + error_context.context("The expect port for vm%d is %d" % (char_ind, _port), logging.info) + if tmp_pnum == _port: + in_chardev = True + else: + error_context.context("The expect port for vm%d is in [%d, %d]" % (char_ind, _port + 1, _to), logging.info) + if tmp_pnum > _port and tmp_pnum <= _to: + in_chardev = True + assert in_chardev is True, 'The actual port does not match with the expect port in VM %d' % char_ind diff --git a/qemu/tests/chardev_hotplug.py b/qemu/tests/chardev_hotplug.py index b1abaf5d902ebc0e0b1c4d66037ba98ee05ceeee..377cb4c84fefea6bfc8d156b41d88dbf6fdd1de1 100644 --- a/qemu/tests/chardev_hotplug.py +++ b/qemu/tests/chardev_hotplug.py @@ -3,6 +3,7 @@ import os from avocado.utils import process +from virttest import arch from virttest import error_context @@ -91,32 +92,37 @@ def run(test, params, env): 
vm.verify_alive() session = vm.wait_for_login() session.cmd_status("dmesg -c") + ppc_host = 'ppc' in params.get('vm_arch_name', arch.ARCH) error_context.context("Test null chardev", logging.info) chardev_add(vm, "chardev-null", "null", {}) - chardev_use(vm, "chardev-null") + if not ppc_host: + chardev_use(vm, "chardev-null") chardev_del(vm, "chardev-null") error_context.context("Test file chardev", logging.info) filename = "/tmp/chardev-file-%s" % vm.instance args = {'out': filename} chardev_add(vm, "chardev-file", "file", args) - chardev_use(vm, "chardev-file") + if not ppc_host: + chardev_use(vm, "chardev-file") chardev_del(vm, "chardev-file") - output = process.system_output("cat %s" % filename).decode() - if output.find("Hello virttest world") == -1: - test.fail("Guest message not found [%s]" % output) + if not ppc_host: + output = process.system_output("cat %s" % filename).decode() + if output.find("Hello virttest world") == -1: + test.fail("Guest message not found [%s]" % output) error_context.context("Test pty chardev", logging.info) reply = chardev_add(vm, "chardev-pty", "pty", {}) filename = reply["return"]["pty"] logging.info("host pty device is '%s'" % filename) - fd_dst = os.open(filename, os.O_RDWR | os.O_NONBLOCK) - chardev_use(vm, "chardev-pty") - output = os.read(fd_dst, 256).decode() - os.close(fd_dst) - if output.find("Hello virttest world") == -1: - test.fail("Guest message not found [%s]" % output) + if not ppc_host: + fd_dst = os.open(filename, os.O_RDWR | os.O_NONBLOCK) + chardev_use(vm, "chardev-pty") + output = os.read(fd_dst, 256).decode() + os.close(fd_dst) + if output.find("Hello virttest world") == -1: + test.fail("Guest message not found [%s]" % output) chardev_del(vm, "chardev-pty") error_context.context("Cleanup", logging.info) diff --git a/qemu/tests/chardev_legacy_unplug.py b/qemu/tests/chardev_legacy_unplug.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa3be9bb1885d68866c4f99f82a05e00c8cad9a --- /dev/null +++ b/qemu/tests/chardev_legacy_unplug.py @@ -0,0 +1,38 @@ +from virttest import error_context, env_process +from virttest.qemu_monitor import QMPCmdError + + +@error_context.context_aware +def run(test, params, env): + """ + Unplug chardevs while the guest is attached to an isa-serial device (RHEL and x86 only): + isa-serial devices cannot be hotplugged or unplugged, so this is treated as negative testing + 1) Start guest with isa-serial backed by a unix socket chardev + 2) Log in to the guest and do some operations + 3) Try to unplug the chardev in use by the isa-serial device + 4) Repeat steps 1 to 3 with tcp socket and pty backends. + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + + serial_id = params.objects('serials')[-1] + params['start_vm'] = 'yes' + for backend in ['unix_socket', 'tcp_socket', 'pty']: + params['chardev_backend_%s' % serial_id] = backend + vm = params['main_vm'] + env_process.preprocess_vm(test, params, env, vm) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + serial_device = vm.devices.get(serial_id) + chardev_qid = serial_device.get_param("chardev") + chardev_device = vm.devices.get_by_qid(chardev_qid)[0] + try: + chardev_device.unplug(vm.monitor) + except QMPCmdError as e: + if e.data["desc"] != "Chardev '%s' is busy" % chardev_qid: + test.fail('It is not the expected error') + else: + test.fail("Should not be unplug successfully") + vm.verify_kernel_crash() + vm.destroy() diff --git a/qemu/tests/chardev_remove_pending_watches.py b/qemu/tests/chardev_remove_pending_watches.py new file mode 100644 index 0000000000000000000000000000000000000000..97bcc2127f7de4417eb6571cfe60c0bdf3ecc0c6 --- /dev/null +++ b/qemu/tests/chardev_remove_pending_watches.py @@ -0,0 +1,63 @@ +import aexpect + +from virttest import error_context +from virttest import utils_test +from virttest.utils_virtio_port import VirtioPortTest +from qemu.tests.virtio_serial_file_transfer import generate_data_file + + +@error_context.context_aware +def run(test, params, env): + """ + Remove pending watches after virtserialport unplug. + + 1) Start guest with virtio serial device(s). + 2) Open the chardev on the host + 3) Send 2g file from guest to host + 4) Hot-unplug the port on the host + 5) After step 4, read transferred data on host + 6) Guest has no crash or panic + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + os_type = params["os_type"] + file_size = params.get_numeric("filesize") + guest_dir = params.get("guest_script_folder", '/var/tmp/') + port_name = params["file_transfer_serial_port"] + + virtio_test = VirtioPortTest(test, env, params) + (vm, guest_worker, port) = virtio_test.get_vm_with_single_port() + port.open() + session = vm.wait_for_login() + guest_file_name = generate_data_file(guest_dir, file_size, session) + if os_type == "windows": + vport_name = '\\\\.\\' + port_name + guest_file_name = guest_file_name.replace("/", "") + guest_send_cmd = "copy %s > con %s" % (guest_file_name, vport_name) + driver_name = params["driver_name"] + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name) + else: + vport_name = '/dev/virtio-ports/%s' % port_name + guest_send_cmd = "cat %s > %s" % (guest_file_name, vport_name) + + try: + session.cmd(guest_send_cmd) + # large data transfer won't exit because of small ringbuf + except aexpect.ShellTimeoutError: + pass + + try: + port_unplug = vm.devices.get(port_name) + vm.devices.simple_unplug(port_unplug, vm.monitor) + if port.sock.recv(4096) is None: + test.fail("Host can't receive data !") + finally: + clean_cmd = params['clean_cmd'] + port.close() + session.cmd('%s %s' % (clean_cmd, guest_file_name)) + session.close() + vm.verify_alive() + vm.verify_kernel_crash() diff --git a/qemu/tests/chardev_serial_login.py b/qemu/tests/chardev_serial_login.py new file mode 100644 index 0000000000000000000000000000000000000000..14a23960cd1b62f2e08dc793a212434998c6bbd1 --- /dev/null +++ b/qemu/tests/chardev_serial_login.py @@ -0,0 +1,78 @@ +import re +import os + +from virttest import error_context +from virttest import remote +from virttest import qemu_monitor +from virttest import env_process + + 
+@error_context.context_aware +def run(test, params, env): + """ + Verify the login function of chardev-serial (RHEL only): + 1) Start guest with chardev-serial with backend + 2) for pty and file backend: + 2.1) open and close chardev + 3) for unix_socket and tcp_socket + 3.1) Login guest. + 3.2) move, create files inside guest + 4) Hot-unplug chardev which is in use, should fail. + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def check_guest(): + session.cmd('touch file.txt') + session.cmd('mkdir -p tmp') + session.cmd('command cp file.txt ./tmp/test.txt') + + serial_id = params.objects('serials')[-1] + prompt = params.get("shell_prompt") + if params['serial_type'] == 'spapr-vty' \ + and params['inactivity_watcher'] == 'none': + params['vga'] = 'none' + params['start_vm'] = 'yes' + for backend in ['tcp_socket', 'unix_socket', 'pty', 'file']: + params['chardev_backend_%s' % serial_id] = backend + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + vm.wait_for_login() + serial_device = vm.devices.get(serial_id) + chardev_qid = serial_device.get_param("chardev") + chardev_device = vm.devices.get_by_qid(chardev_qid)[0] + if backend == 'tcp_socket': + session = remote.remote_login(client='nc', + host=chardev_device.params['host'], + port=chardev_device.params['port'], + username='root', + password='kvmautotest', + prompt=prompt, + timeout=240) + check_guest() + elif backend == 'unix_socket': + session = vm.wait_for_serial_login() + check_guest() + elif backend == 'pty': + chardev_info = vm.monitor.human_monitor_cmd('info chardev') + hostfile = re.findall('%s: filename=pty:(/dev/pts/\\d)?' % + "serial0", chardev_info)[0] + if not hostfile: + test.fail("Guest boot fail with pty backend.") + fd_pty = os.open(hostfile, os.O_RDWR | os.O_NONBLOCK) + os.close(fd_pty) + elif backend == 'file': + filename = chardev_device.params['path'] + f = open(filename) + if 'Linux' not in f.read(): + f.close() + test.fail("Guest boot fail with file backend.") + f.close() + try: + vm.devices.simple_unplug(chardev_device, vm.monitor) + except qemu_monitor.QMPCmdError as e: + if 'is busy' not in e.data['desc']: + test.fail(e.data['desc']) + else: + test.fail("Hot-unplug should fail.") + vm.destroy() diff --git a/qemu/tests/cpu_device_hotplug_during_boot.py b/qemu/tests/cpu_device_hotplug_during_boot.py index d825ef0d74eb149c3da913de9fb8f16a165f614b..bba85c27195b23513964dfce75abfa35fa2d1a69 100644 --- a/qemu/tests/cpu_device_hotplug_during_boot.py +++ b/qemu/tests/cpu_device_hotplug_during_boot.py @@ -21,7 +21,7 @@ def run(test, params, env): """ vcpu_devices = params.objects("vcpu_devices") unplug_during_boot = params.get_boolean("unplug_during_boot") - boot_patterns = [r".*Linux version.*", r".*Kernel command line:.*"] + boot_patterns = [r".*Started udev Wait for Complete Device Initialization.*"] reboot_patterns = [r".*[Rr]ebooting.*", r".*[Rr]estarting system.*", r".*[Mm]achine restart.*"] diff --git a/qemu/tests/cpu_device_hotplug_maximum.py b/qemu/tests/cpu_device_hotplug_maximum.py index 52b9beb1e910058426f78a4f5d742cc9bab3e057..7549e574168ed2fe81b177d2c2c837071d0f7e67 100644 --- a/qemu/tests/cpu_device_hotplug_maximum.py +++ b/qemu/tests/cpu_device_hotplug_maximum.py @@ -30,6 +30,7 @@ def run(test, params, env): os_type = params["os_type"] machine_type = params["machine_type"] reboot_timeout = params.get_numeric("reboot_timeout") + offline_vcpu_after_hotplug = 
params.get_boolean("offline_vcpu_after_hotplug") mismatch_text = "Actual number of guest CPUs is not equal to the expected" not_equal_text = "CPU quantity mismatched! Guest got %s but expected is %s" # Many vCPUs will be plugged, it takes some time to bring them online. @@ -42,11 +43,11 @@ def run(test, params, env): utils_qemu.get_maxcpus_hard_limit(qemu_binary, current_machine)) if not params.get_boolean("allow_pcpu_overcommit"): - supported_maxcpus = min(supported_maxcpus, cpu.online_cpus_count()) + supported_maxcpus = min(supported_maxcpus, cpu.online_count()) logging.info("Define the CPU topology of guest") vcpu_devices = [] - if (cpu.get_cpu_vendor_name() == "amd" and + if (cpu.get_vendor() == "amd" and params.get_numeric("vcpu_threads") != 1): test.cancel("AMD cpu does not support multi threads") elif machine_type.startswith("pseries"): @@ -72,6 +73,7 @@ def run(test, params, env): session = vm.wait_for_login() cpuinfo = vm.cpuinfo smp = cpuinfo.smp + vcpus_count = vm.params.get_numeric("vcpus_count") error_context.context("Hotplug all vCPU devices", logging.info) for vcpu_device in vcpu_devices: @@ -91,12 +93,20 @@ def run(test, params, env): if not cpu_utils.check_guest_cpu_topology(session, os_type, cpuinfo): test.fail("CPU topology of guest is not as expected after reboot.") - error_context.context("Hotunplug all vCPU devices", logging.info) - for vcpu_device in reversed(vcpu_devices): - vm.hotunplug_vcpu_device(vcpu_device) - if not utils_misc.wait_for(lambda: vm.get_cpu_count() == smp, - verify_wait_timeout, first=5, step=10): - logging.error(not_equal_text, vm.get_cpu_count(), smp) - test.fail(mismatch_text) - logging.info("CPU quantity is as expected after hotunplug: %s", smp) - session.close() + if os_type == "linux": + error_context.context("Hotunplug all vCPU devices", logging.info) + if offline_vcpu_after_hotplug: + hotplugged_vcpu = range(smp, supported_maxcpus) + vcpu_list = "%d-%d" % (hotplugged_vcpu[0], hotplugged_vcpu[-1]) + logging.info("Offline vCPU: %s.", vcpu_list) + session.cmd("chcpu -d %s" % vcpu_list, timeout=len(hotplugged_vcpu)) + if vm.get_cpu_count() != smp: + test.error("Failed to offline all hotplugged vCPU.") + for vcpu_device in reversed(vcpu_devices): + vm.hotunplug_vcpu_device(vcpu_device, 10 * vcpus_count) + if not utils_misc.wait_for(lambda: vm.get_cpu_count() == smp, + verify_wait_timeout, first=5, step=10): + logging.error(not_equal_text, vm.get_cpu_count(), smp) + test.fail(mismatch_text) + logging.info("CPU quantity is as expected after hotunplug: %s", smp) + session.close() diff --git a/qemu/tests/cpu_device_hotpluggable.py b/qemu/tests/cpu_device_hotpluggable.py index 02fadefe975e56966564a3d265b5fe446b42381a..777f27f4a777c4aaccb1185cf338995645638922 100644 --- a/qemu/tests/cpu_device_hotpluggable.py +++ b/qemu/tests/cpu_device_hotpluggable.py @@ -113,6 +113,7 @@ def run(test, params, env): hotpluggable_test = params["hotpluggable_test"] verify_wait_timeout = params.get_numeric("verify_wait_timeout", 60) sub_test_type = params.get("sub_test_type") + check_cpu_topology = params.get_boolean("check_cpu_topology", True) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -159,9 +160,9 @@ def run(test, params, env): if vm.is_alive(): session = vm.wait_for_login(timeout=login_timeout) check_guest_cpu_count(expected_vcpus) - if (expected_vcpus == maxcpus and - not cpu_utils.check_guest_cpu_topology(session, os_type, - vm.cpuinfo)): - session.close() - test.fail("CPU topology of guest is inconsistent with " - "expectations.") + if expected_vcpus 
== maxcpus and check_cpu_topology: + if not cpu_utils.check_guest_cpu_topology(session, os_type, + vm.cpuinfo): + session.close() + test.fail("CPU topology of guest is inconsistent with " + "expectations.") diff --git a/qemu/tests/cpu_device_hotpluggable_with_stress.py b/qemu/tests/cpu_device_hotpluggable_with_stress.py index 03b0bcc8ec342c9b69173ad64baf2d0214906de0..22421ad4c8d733fbd4b0ced699123b2b3b472b8c 100644 --- a/qemu/tests/cpu_device_hotpluggable_with_stress.py +++ b/qemu/tests/cpu_device_hotpluggable_with_stress.py @@ -5,6 +5,7 @@ import logging from provider import cpu_utils +from virttest import arch from virttest import error_context from virttest import utils_misc from virttest import utils_package @@ -39,6 +40,7 @@ def run(test, params, env): session.cmd(install_cmd) os_type = params["os_type"] + vm_arch_name = params.get('vm_arch_name', arch.ARCH) login_timeout = params.get_numeric("login_timeout", 360) stress_duration = params.get_numeric("stress_duration", 180) verify_wait_timeout = params.get_numeric("verify_wait_timeout", 60) @@ -88,14 +90,16 @@ def run(test, params, env): "%.2f%%" % (cpu_id, cpu_usage_rate)) logging.info("Usage rate of vCPU(%s) is: %.2f%%", cpu_id, cpu_usage_rate) - for vcpu_dev in vcpu_devices: - error_context.context("Hotunplug vcpu device: %s" % vcpu_dev, - logging.info) - vm.hotunplug_vcpu_device(vcpu_dev) - # Drift the running stress task to other vCPUs - time.sleep(random.randint(5, 10)) - if vm.get_cpu_count() != smp: - test.fail("Actual number of guest CPUs is not equal to expected") + if not vm_arch_name.startswith("s390"): + for vcpu_dev in vcpu_devices: + error_context.context("Hotunplug vcpu device: %s" % vcpu_dev, + logging.info) + vm.hotunplug_vcpu_device(vcpu_dev) + # Drift the running stress task to other vCPUs + time.sleep(random.randint(5, 10)) + if vm.get_cpu_count() != smp: + test.fail("Actual number of guest CPUs is not equal to " + "expected") stress_tool.unload_stress() stress_tool.clean() else: diff --git a/qemu/tests/cpu_info_check.py b/qemu/tests/cpu_info_check.py new file mode 100644 index 0000000000000000000000000000000000000000..b5aa3853d1172213834eaf4e34f69e666e557ba6 --- /dev/null +++ b/qemu/tests/cpu_info_check.py @@ -0,0 +1,137 @@ +import re +import logging + +from avocado.utils import process + +from virttest import cpu +from virttest import env_process +from virttest import error_context +from virttest import utils_misc + +from virttest.utils_version import VersionInterval + + +@error_context.context_aware +def run(test, params, env): + """ + cpuinfo query test: + 1). run query cmd. e.g -cpu ?cpuid + 2). check the expected info is included in the cmd output. + 3). Boot guest and check the output of qmp command "qom-list-types" + 4). Check the output of qmp command "query-cpu-definitions" + 5). 
Check the output of qmp command "query-cpu-model-expansion" + """ + + def remove_models(model_list): + """ + Remove models from cpu_types + :param model_list: The list of models to be removed + """ + for model in model_list: + try: + cpu_types.remove(model) + except ValueError: + logging.warning('The model to be removed is not' + ' in the list: %s' % model) + continue + + def get_patterns(p_list): + """ + Return all possible patterns for given flags + :param p_list: The list of flags + """ + r_list = [] + replace_char = [('_', ''), ('_', '-'), ('.', '-'), + ('.', ''), ('.', '_')] + for p in p_list: + r_list.extend(list(map(lambda x: p.replace(*x), replace_char))) + return set(r_list) + + cpu_types = [] + list(map(cpu_types.extend, list(cpu.CPU_TYPES.values()))) + + qemu_path = utils_misc.get_qemu_binary(params) + qemu_version = env_process._get_qemu_version(qemu_path) + match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', + qemu_version) + host_qemu = match.group(0) + remove_models(params.objects('remove_list')) + if host_qemu in VersionInterval('[,4.2.0)'): + remove_models(params.objects('cpu_model_8')) + if host_qemu in VersionInterval('[,3.1.0)'): + remove_models(params.objects('cpu_model_3_1_0')) + if host_qemu in VersionInterval('[,2.12.0)'): + remove_models(params.objects('cpu_model_2_12_0')) + qemu_binary = utils_misc.get_qemu_binary(params) + logging.info('Query cpu models by qemu command') + query_cmd = "%s -cpu ? | awk '{print $2}'" % qemu_binary + qemu_binary_output = process.system_output( + query_cmd, shell=True).decode().splitlines() + cpuid_index = qemu_binary_output.index('CPUID') + cpu_models_binary = qemu_binary_output[1: cpuid_index - 1] + cpu_flags_binary = qemu_binary_output[cpuid_index + 1:] + params['start_vm'] = 'yes' + vm_name = params['main_vm'] + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + # query cpu model supported by qemu + logging.info('Query cpu models supported by qemu via the qemu monitor') + qmp_model_output = str(vm.monitor.cmd('qom-list-types')) + qmp_def_output = str(vm.monitor.cmd('query-cpu-definitions')) + + # Check if all the outputs contain the expected cpu models + output_list = {'qemu-kvm': cpu_models_binary, + 'qom-list-types': qmp_model_output, + 'query-cpu-definitions': qmp_def_output} + missing = {key: [] for key in output_list} + for cpu_model in cpu_types: + logging.info('Check cpu model %s from qemu command output and' + ' qemu monitor output' % cpu_model) + for key, value in output_list.items(): + if cpu_model not in value: + missing[key].append(cpu_model) + for key, value in missing.items(): + if value: + test.fail('%s is missing in the %s output: %s\n' % + (', '.join(value), key, output_list[key])) + + # Check if qemu command output matches qmp output + missing = [] + logging.info('Check if qemu command output matches qemu monitor output') + for cpu_model in cpu_models_binary: + if cpu_model not in qmp_model_output: + missing.append(cpu_model) + if missing: + test.fail('The qemu monitor output does not include all the cpu' + ' models in qemu command output, missing: \n %s' % + ', '.join(missing)) + + # Check if the flags in qmp output match expectation + args = {'type': 'full', 'model': {'name': vm.cpuinfo.model}} + output = vm.monitor.cmd('query-cpu-model-expansion', args) + model = output.get('model') + model_name = model.get('name') + if model_name != vm.cpuinfo.model: + test.fail('Command query-cpu-model-expansion returned' + ' wrong model: %s' % model_name) + model_prop = model.get('props') + 
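The expansion consumed below reports every property of the resolved model as a boolean; an illustrative, heavily trimmed QMP exchange:

    # -> {"execute": "query-cpu-model-expansion",
    #     "arguments": {"type": "full", "model": {"name": "Cascadelake-Server"}}}
    # <- {"return": {"model": {"name": "Cascadelake-Server",
    #                "props": {"avx512vnni": true, "clwb": true, "hle": false}}}}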
for flag in cpu.CPU_TYPES_RE.get(model_name).split(','): + logging.info('Check flag %s from qemu monitor output' % flag) + flags = get_patterns(flag.split('|')) + for f in flags: + if model_prop.get(f) is True: + break + else: + test.fail('Check cpu model props failed, %s is not True' % flag) + + # Check if the flags in qmp output matches qemu command output + missing = [] + logging.info('Check if the flags in qemu monitor output matches' + ' qemu command output') + for flag in cpu_flags_binary: + if flag not in str(output): + missing.append(flag) + if missing: + test.fail('The monitor output does not included all the cpu flags' + ' in qemu command output, missing: \n %s' % + ', '.join(missing)) diff --git a/qemu/tests/cpu_model_inter_generation.py b/qemu/tests/cpu_model_inter_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..309d950b241bccce330f4f9b042efbea7b7b0ac0 --- /dev/null +++ b/qemu/tests/cpu_model_inter_generation.py @@ -0,0 +1,70 @@ +import logging + +from virttest import cpu +from virttest import utils_misc +from virttest import env_process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Boot latest cpu model on old host + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + + def start_with_model(test_model): + """ + Start vm with tested model + :param test_model: The model been tested + """ + vm = None + params['cpu_model'] = test_model + logging.info('Start vm with cpu model %s' % test_model) + try: + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + output = vm.process.get_output() + if warning_text not in output: + test.fail("Qemu should output warning for lack flags" + " while it does not.") + except Exception as e: + if boot_expected == 'no': + logging.info('Expect vm boot up failed when enforce is set.') + if warning_text not in str(e): + raise + else: + raise + else: + if boot_expected == 'no': + test.fail('The vm should not boot successfully' + ' when cpu enforce mode is on') + finally: + if vm and vm.is_alive(): + vm.verify_kernel_crash() + vm.destroy() + + fd = open("/proc/cpuinfo") + cpu_info = fd.read() + fd.close() + vendor = cpu.get_cpu_vendor(cpu_info) + cpu_model_list = cpu.CPU_TYPES.get(vendor) + latest_cpu_model = cpu_model_list[-1] + for cpu_model in cpu_model_list: + qemu_binary = utils_misc.get_qemu_binary(params) + if cpu_model in cpu.get_qemu_cpu_models(qemu_binary): + latest_cpu_model = cpu_model + break + + host_cpu_model = cpu.get_qemu_best_cpu_model(params) + if host_cpu_model.startswith(latest_cpu_model): + test.cancel('The host cpu is not old enough for this test.') + + vm_name = params['main_vm'] + warning_text = params.get('warning_text') + boot_expected = params.get('boot_expected', 'yes') + params['start_vm'] = 'yes' + start_with_model(latest_cpu_model) diff --git a/qemu/tests/cpu_model_negative.py b/qemu/tests/cpu_model_negative.py new file mode 100644 index 0000000000000000000000000000000000000000..9de7140ebe89b43255ae79f42da5e8e71d897224 --- /dev/null +++ b/qemu/tests/cpu_model_negative.py @@ -0,0 +1,58 @@ +import re +import logging + +from avocado.utils import process + +from virttest import cpu +from virttest import error_context +from virttest import utils_qemu +from virttest import utils_misc + + +@error_context.context_aware +def run(test, params, env): + """ + Runs CPU negative test: + + 1. 
Launch qemu with improper cpu configuration + 2. Verify qemu failed to start + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + + enforce_flag = params.get('enforce_flag') + if enforce_flag and 'CPU_MODEL' in params['wrong_cmd']: + if enforce_flag in cpu.get_host_cpu_models(): + test.cancel('This case only test on the host without the flag' + ' %s.' % enforce_flag) + cpu_model = cpu.get_qemu_best_cpu_model(params) + params['wrong_cmd'] = params['wrong_cmd'].replace('CPU_MODEL', + cpu_model) + + qemu_bin = utils_misc.get_qemu_binary(params) + if 'OUT_OF_RANGE' in params['wrong_cmd']: + machine_type = params['machine_type'].split(':')[-1] + m_types = utils_qemu.get_machines_info(qemu_bin)[machine_type] + m_type = re.search(r'\(alias of (\S+)\)', m_types)[1] + max_value = utils_qemu.get_maxcpus_hard_limit(qemu_bin, m_type) + smp = str(max_value + 1) + params['wrong_cmd'] = params['wrong_cmd'].replace( + 'MACHINE_TYPE', machine_type).replace('OUT_OF_RANGE', smp) + msg = params['warning_msg'].replace('SMP_VALUE', smp).replace( + 'MAX_VALUE', str(max_value)).replace('MACHINE_TYPE', m_type) + params['warning_msg'] = msg + + warning_msg = params['warning_msg'] + wrong_cmd = '%s %s' % (qemu_bin, params['wrong_cmd']) + logging.info('Start qemu with command: %s' % wrong_cmd) + status, output = process.getstatusoutput(wrong_cmd) + logging.info('Qemu prompt output:\n%s' % output) + if status == 0: + test.fail('Qemu guest boots up while it should not.') + if warning_msg not in output: + test.fail('Does not get expected warning message.') + else: + logging.info('Test passed as qemu does not boot up and' + ' prompts expected message.') diff --git a/qemu/tests/cpu_offline_online.py b/qemu/tests/cpu_offline_online.py new file mode 100644 index 0000000000000000000000000000000000000000..1397b8d6c2521869b643a2ff7d04fd17879509af --- /dev/null +++ b/qemu/tests/cpu_offline_online.py @@ -0,0 +1,38 @@ +import logging + +from avocado.utils import cpu + +from virttest import error_context +from virttest.cpu import check_if_vm_vcpu_match + + +@error_context.context_aware +def run(test, params, env): + """ + vCPU offline/online test: + 1) Launch a guest with many CPU. + 2) Offline all CPUs except 0. + 3) Online them again. 
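For example, on a host with 8 online CPUs the guest boots with smp=8 and the test runs, inside the guest (illustrative range):

    chcpu -d 1-7   # leaves only CPU 0 online
    chcpu -e 1-7   # brings the rest back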
+ """ + host_cpu = cpu.online_count() + cpu_range = range(host_cpu) + cpu_list = "{}-{}".format(cpu_range[1], cpu_range[-1]) + params["smp"] = params["vcpu_maxcpus"] = host_cpu + params["start_vm"] = "yes" + vm = env.get_vm(params['main_vm']) + vm.create(params=params) + vm.verify_alive() + session = vm.wait_for_login() + + error_context.base_context("Offline CPUs: {}".format(cpu_list), + logging.info) + session.cmd("chcpu -d {}".format(cpu_list), timeout=len(cpu_range)) + if not check_if_vm_vcpu_match(1, vm): + test.fail("CPU quantity on guest mismatch after offline") + logging.info("{} have been offline.".format(cpu_list)) + + error_context.context("Online CPUs: {}".format(cpu_list), logging.info) + session.cmd("chcpu -e {}".format(cpu_list), timeout=len(cpu_range)) + if not check_if_vm_vcpu_match(host_cpu, vm): + test.fail("CPU quantity on guest mismatch after online again") + logging.info("{} have been online.".format(cpu_list)) diff --git a/qemu/tests/cpu_rdrand.py b/qemu/tests/cpu_rdrand.py new file mode 100644 index 0000000000000000000000000000000000000000..1a47061ae4a2a040c19eedb656c7c8ef2660f4d3 --- /dev/null +++ b/qemu/tests/cpu_rdrand.py @@ -0,0 +1,38 @@ +import logging + +from virttest import error_context +from virttest import data_dir +from virttest import utils_misc + + +@error_context.context_aware +def run(test, params, env): + """ + Runs CPU rdrand test: + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + test_bin = params['test_bin'] + source_file = params['source_file'] + guest_path = params['guest_path'] + host_path = utils_misc.get_path(data_dir.get_deps_dir('rdrand'), source_file) + vm.copy_files_to(host_path, '%s%s' % (guest_path, source_file)) + if params['os_type'] == 'linux': + build_cmd = params.get('build_cmd', 'cd %s; gcc -lrt %s -o %s') + error_context.context("build binary file 'rdrand'", logging.info) + session.cmd(build_cmd % (guest_path, source_file, test_bin)) + s, o = session.cmd_status_output('%s%s' % (guest_path, test_bin)) + session.cmd(params['delete_cmd']) + if s != 0: + test.fail('rdrand failed with status %s' % s) + if params['os_type'] == 'linux': + try: + if not int(float(o)) in range(-101, 101): + test.fail('rdrand output is %s, which is not expected' % o) + except ValueError as e: + test.fail('rdrand should output a float: %s' % str(e)) diff --git a/qemu/tests/cpu_topology_test.py b/qemu/tests/cpu_topology_test.py new file mode 100644 index 0000000000000000000000000000000000000000..813a56b972799d6690fde24411a7e8351c705b66 --- /dev/null +++ b/qemu/tests/cpu_topology_test.py @@ -0,0 +1,75 @@ +import re +import random + +from avocado.utils import cpu + +from virttest import error_context +from virttest import env_process + +from provider.cpu_utils import check_guest_cpu_topology + + +@error_context.context_aware +def run(test, params, env): + """ + Check guest gets correct vcpu num, cpu cores, processors, sockets, siblings + + 1) Boot guest with options: -smp n,cores=x,threads=y,sockets=z... + 2) Check cpu topology + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. 
+ """ + + def check(p_name, exp, check_cmd): + """ + Check the cpu property inside guest + :param p_name: Property name + :param exp: The expect value + :param check_cmd: The param to get check command + """ + res = session.cmd_output(params[check_cmd]).strip() + if int(res) != int(exp): + test.fail('The vcpu %s number inside guest is %s,' + ' while it is set to %s' % (p_name, res, exp)) + + vm_name = params['main_vm'] + os_type = params['os_type'] + vcpu_threads_list = [1, 2] + if params['machine_type'] == 'pseries': + vcpu_threads_list = [1, 2, 4, 8] + host_cpu = cpu.online_count() + params['vcpu_cores'] = vcpu_cores = random.randint(1, min(6, host_cpu//2)) + for vcpu_threads in vcpu_threads_list: + vcpu_sockets = min(max(host_cpu // (vcpu_cores * vcpu_threads), 1), + random.randint(1, 6)) + vcpu_sockets = 2 if (os_type == 'Windows' and + vcpu_sockets > 2) else vcpu_sockets + params['vcpu_sockets'] = vcpu_sockets + params['vcpu_threads'] = vcpu_threads + params['smp'] = params['vcpu_maxcpus'] = (vcpu_cores * + vcpu_threads * vcpu_sockets) + params['start_vm'] = 'yes' + try: + env_process.preprocess_vm(test, params, env, vm_name) + except Exception as e: + # The cpu topology sometimes will be changed by + # qemu_vm.VM.make_create_command, and thus cause qemu vm fail to + # start, which is expected; Modify the value and restart vm in + # this case, and verify cpu topology inside guest after that + if 'qemu-kvm: cpu topology' in str(e): + sockets = int(re.findall(r'sockets\s+\((\d)\)', str(e))[0]) + threads = int(re.findall(r'threads\s+\((\d)\)', str(e))[0]) + cores = int(re.findall(r'cores\s+\((\d)\)', str(e))[0]) + params['smp'] = params['vcpu_maxcpus'] = (sockets * + threads * cores) + env_process.preprocess_vm(test, params, env, vm_name) + else: + raise + vm = env.get_vm(vm_name) + session = vm.wait_for_login() + check_guest_cpu_topology(session, os_type, vm.cpuinfo) + if params.get('check_siblings_cmd'): + check('sibling', vcpu_threads * vcpu_cores, 'check_siblings_cmd') + vm.destroy() diff --git a/qemu/tests/create_large_raw_img.py b/qemu/tests/create_large_raw_img.py index 609bb5e94890f87eb3b00c1f7896d9f8524670d6..bfc8ec8fda056701b43a918cdb07d2d2bacf1e7e 100755 --- a/qemu/tests/create_large_raw_img.py +++ b/qemu/tests/create_large_raw_img.py @@ -1,5 +1,6 @@ import logging import os +import six from avocado import TestError from avocado.utils import partition as p @@ -25,6 +26,7 @@ def run(test, params, env): loop_img = os.path.join(root_dir, "loop.img") loop_size = int(params["loop_file_size"]) file_sys = params["file_sys"] + err_info = params["err_info"].split(";") mnt_dir = os.path.join(root_dir, "tmp") large = QemuImg(params.object_params(large_img), mnt_dir, large_img) @@ -38,11 +40,14 @@ def run(test, params, env): try: large.create(large.params) except TestError as err: - if params["err_info"] not in str(err): + for info in err_info: + if info in six.text_type(err): + break + else: test.fail("CML failed with unexpected output: %s" % err) else: test.fail("There is no error when creating an image with large size.") - - part.unmount() - os.rmdir(mnt_dir) - os.remove(loop_img) + finally: + part.unmount() + os.rmdir(mnt_dir) + os.remove(loop_img) diff --git a/qemu/tests/curl_cookie_with_secret.py b/qemu/tests/curl_cookie_with_secret.py new file mode 100644 index 0000000000000000000000000000000000000000..8c68820525b6c3c373cb12822d0bf7954223f66d --- /dev/null +++ b/qemu/tests/curl_cookie_with_secret.py @@ -0,0 +1,70 @@ +import os +import signal +import logging + +from virttest import 
utils_misc +from virttest import qemu_storage +from virttest import error_context + +from avocado.utils import process + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Start tcpdump to capture the request + 2) Access libcurl image by qemu-img + 3) Wait till tcpdump finished + 4) tcpdump should catch the cookie data + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def _get_tcpdump_pid(dump_file): + cmd = ("ps -ef|grep tcpdump|grep %s|grep -v grep|awk '{print $2}'" + % dump_file) + return process.system_output(cmd, shell=True, + ignore_status=True).strip() + + def _wait_for_tcpdump_done(dump_file): + response_timeout = params.get_numeric('response_timeout', 10) + if not utils_misc.wait_for(lambda: not _get_tcpdump_pid(dump_file), + response_timeout, 0, 1): + test.fail('tcpdump is running unexpectedly') + + def _cleanup(dump_file): + if os.path.exists(dump_file): + os.unlink(dump_file) + + pid = _get_tcpdump_pid(dump_file) + if pid: + os.kill(int(pid), signal.SIGKILL) + + tag = params['remote_image_tag'] + img_params = params.object_params(tag) + img_obj = qemu_storage.QemuImg(img_params, None, tag) + dump_file = utils_misc.generate_tmp_file_name('%s_access_tcpdump' % tag, + 'out') + + logging.info('start tcpdump, save packets in %s' % dump_file) + process.system( + params['tcpdump_cmd'].format(server=img_params['curl_server'], + dump_file=dump_file), + shell=True, ignore_status=True, ignore_bg_processes=True + ) + + try: + img_obj.info() + _wait_for_tcpdump_done(dump_file) + with open(dump_file, 'rb') as fd: + for line in fd: + line = line.decode('utf-8', 'ignore') + if 'Cookie: %s' % img_params['curl_cookie_secret'] in line: + logging.info('get "%s" from "%s"' + % (img_params['curl_cookie_secret'], line)) + break + else: + test.fail('Failed to get cookie data from tcpdump output') + finally: + _cleanup(dump_file) diff --git a/qemu/tests/device_option_check.py b/qemu/tests/device_option_check.py index aaebf6640de710b02ac1a3dfcfe308cb27ff632c..8f35935610232887de2da3d7e4778c30dd26d85d 100644 --- a/qemu/tests/device_option_check.py +++ b/qemu/tests/device_option_check.py @@ -27,6 +27,7 @@ def run(test, params, env): parameter_prefix = params.get("parameter_prefix", "") check_cmds = params["check_cmds"] convert_str = params.get("convert_str") + sg_vpd_cmd = params.get("sg_vpd_cmd") if params.get("start_vm") == "no": if parameter_value == "random": @@ -109,6 +110,17 @@ def run(test, params, env): " Guest output is '%s'" % (params_name, output)) + if sg_vpd_cmd: + error_context.context("Check serial number length with command %s" + % sg_vpd_cmd, logging.info) + sg_vpd_cmd = utils_misc.set_winutils_letter(session, sg_vpd_cmd) + output = session.cmd_output(sg_vpd_cmd) + actual_len = sum(len(_.split()[-1]) for _ in output.splitlines()[1:3]) + expected_len = len(params.get("drive_serial_image1")) + 4 + if actual_len != expected_len: + test.fail("Incorrect serial number length return." 
+ " Guest output serial number is %s" % actual_len) + session.close() if failed_log: diff --git a/qemu/tests/disk_extension.py b/qemu/tests/disk_extension.py index 6c84e81f4000074e875151a98c69eb516c405eab..4ba10a5ece909d31997fbc96800e4b5a00f9108f 100644 --- a/qemu/tests/disk_extension.py +++ b/qemu/tests/disk_extension.py @@ -69,6 +69,10 @@ def run(test, params, env): size_unit = params["increment_size"][-1] guest_cmd = params["guest_cmd"] + loop_device = process.run("losetup -f").stdout.decode().strip() + params["image_name_stg1"] = loop_device + params["loop_device"] = loop_device + loop_device_backend_img_tag = params["loop_device_backend_img_tag"] loop_device_img_tag = params["loop_device_img_tag"] diff --git a/qemu/tests/dump_guest_memory.py b/qemu/tests/dump_guest_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..dc78f09b8419419c18922739ae2ea293332a35b6 --- /dev/null +++ b/qemu/tests/dump_guest_memory.py @@ -0,0 +1,197 @@ +import logging +import operator +import os + +from avocado.utils import process + +from virttest import utils_misc +from virttest import utils_package + + +def run(test, params, env): + """ + Test dump-guest-memory, this case will: + + 1) Start VM with qmp enable. + 2) Check if host kernel are same with guest + 3) Connect to qmp port then run qmp_capabilities command. + 4) Initiate the qmp command defined in config (qmp_cmd) + 5) Verify that qmp command works as designed. + 6) Verify dump file with crash + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environmen. + """ + + def check_env(): + """ + Check if host kernel version is same with guest + """ + guest_kernel_version = session.cmd("uname -r").strip() + if host_kernel_version != guest_kernel_version: + test.cancel("Please update your host and guest kernel " + "to same version") + + def check_list(qmp_o, key, val=None, check_item_in_pair=True): + """ + Check if the expect key, val are contained in QMP output qmp_o. + + :param qmp_o: output of QMP command + :type qmp_o: list + :param key: expect result + :type key: str + :param val: expect result + :type val: str or None(if check_item_in_pair=False) + :param check_item_in_pair: expect result is dict (True) or str (False) + :type check_item_in_pair: bool. + + :return check result + :rtype: bool + """ + for element in qmp_o: + if isinstance(element, dict): + if _check_dict(element, key, val, check_item_in_pair): + return True + elif isinstance(element, list): + if check_list(element, key, val, check_item_in_pair): + return True + elif element != '' and not check_item_in_pair: + if key in str(element): + return True + return False + + def _check_dict(dic, key, val, check_item_in_pair=True): + """ + Check if the expect key, val are contained in QMP output dic. + + :param dic: content of QMP command return value + :type dic: dict + :param key: expect result + :type key: str + :param val: expect result + :type val: str or None(if check_item_in_pair=False) + :param check_item_in_pair: If expect result is dict or str + :type check_item_in_pair: bool. Means expect result is dict or str. 
+ + :return check result + :rtype: bool + """ + if key in dic and not check_item_in_pair: + return True + elif key in dic and val == dic[key]: + return True + else: + for value in dic.values(): + if isinstance(value, dict): + if _check_dict(value, key, val, check_item_in_pair): + return True + elif isinstance(value, list): + if check_list(value, key, val, check_item_in_pair): + return True + elif value != '' and not check_item_in_pair: + if key in str(value): + return True + return False + + def check_result(qmp_o, expect_o=None): + """ + Check test result with difference way according to result_check. + result_check = equal, expect_o should equal to qmp_o. + result_check = contain, expect_o should be contained in qmp_o + + :param qmp_o: output from qmp_cmd. + :type qmp_o: list + :param expect_o: the expect result. + :type expect_o: dict + + :return check result + :rtype: bool + """ + logging.info("Expect result is %s" % expect_o) + logging.info("Actual result that get from qmp_cmd is %s" % qmp_o) + result = None + if result_check == "equal": + if not operator.eq(qmp_o, expect_o): + test.fail("QMP output does not equal to the expect result.\n " + "Expect result: '%s'\n" + "Actual result: '%s'" % (expect_o, qmp_o)) + elif result_check == "contain": + if len(expect_o) == 0: + result = True + elif isinstance(expect_o, dict): + for key, val in expect_o.items(): + result = check_list(qmp_o, key, val) + return result + + def execute_qmp_cmd(qmp_cmd, expect_result): + """ + Execute qmp command and check if result as expect + + :param qmp_cmd: qmp command + :type qmp_cmd: str + :param expect_result: expect result of qmp command + :type expect_result: str + + :return check result + :rtype: bool + """ + # qmp command + try: + # Testing command + logging.info("Run qmp command '%s'.", qmp_cmd) + qmp_o = qmp_port.send_args_cmd(qmp_cmd) + logging.debug("QMP command:'%s' \n Output: '%s'", qmp_cmd, [qmp_o]) + except Exception as err: + qmp_o = err.data + logging.info(err) + + if result_check: + logging.info("Verify qmp command '%s'." 
% qmp_cmd) + return check_result([qmp_o], eval(expect_result)) + + def check_dump_file(): + """ + Use crash to check dump file + """ + process.getstatusoutput("echo bt > %s" % crash_script) + process.getstatusoutput("echo quit >> %s" % crash_script) + crash_cmd = "crash -i %s /usr/lib/debug/lib/modules/%s/vmlinux " + crash_cmd %= (crash_script, host_kernel_version) + crash_cmd += dump_file + status, output = process.getstatusoutput(crash_cmd) + os.remove(crash_script) + logging.debug(output) + if status != 0 or 'error' in output: + test.fail("vmcore corrupt") + + # install crash/gdb/kernel-debuginfo in host + utils_package.package_install(["crash", "gdb", "kernel-debuginfo*"]) + + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + host_kernel_version = process.getoutput("uname -r").strip() + check_env() + qmp_port = vm.monitor + + qmp_cmd = params.get("qmp_cmd") + query_qmp_cmd = params.get("query_qmp_cmd") + dump_file = params.get("dump_file") + crash_script = params.get("crash_script") + check_dump = params.get("check_dump") + result_check = params.get("cmd_result_check") + query_cmd_return_value = params.get("query_cmd_return_value") + expect_result = params.get("cmd_return_value", "[]") + dump_file_timeout = params.get("dump_file_timeout") + + # execute qmp command + execute_qmp_cmd(qmp_cmd, expect_result) + + if check_dump == "True": + # query dump status and wait for dump completed + utils_misc.wait_for(lambda: execute_qmp_cmd(query_qmp_cmd, + query_cmd_return_value), dump_file_timeout) + check_dump_file() + os.remove(dump_file) + + session.close() diff --git a/qemu/tests/eject_media.py b/qemu/tests/eject_media.py index 6ee30ef952fd1a6483f6599d1ace25c46b28c065..982adcc23e78a5e5830f87f3c6dc791b9627fcc0 100644 --- a/qemu/tests/eject_media.py +++ b/qemu/tests/eject_media.py @@ -5,7 +5,7 @@ from virttest import error_context from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange from virttest import data_dir from virttest.qemu_capabilities import Flags -from virttest.qemu_storage import QemuImg +from virttest.qemu_storage import QemuImg, get_image_json @error_context.context_aware @@ -36,6 +36,17 @@ def run(test, params, env): def check_block(block): return True if block in str(vm.monitor.info("block")) else False + def eject_non_cdrom(device_name, force=False): + if vm.check_capability(Flags.BLOCKDEV): + for info_dict in vm.monitor.info("block"): + if device_name in str(info_dict): + qdev = info_dict['qdev'] + break + vm.monitor.blockdev_open_tray(qdev, force) + return vm.monitor.blockdev_remove_medium(qdev) + else: + vm.eject_cdrom(device_name, force) + orig_img_name = params.get("cdrom_cd1") p_dict = {"file": orig_img_name} device_name = vm.get_block(p_dict) @@ -98,15 +109,20 @@ def run(test, params, env): p_dict = {"removable": False} device_name = vm.get_block(p_dict) if vm.check_capability(Flags.BLOCKDEV): - sys_image = QemuImg(params, data_dir.get_data_dir(), params['images'].split()[0]) - device_name = vm.get_block({"filename": sys_image.image_filename}) + img_tag = params['images'].split()[0] + root_dir = data_dir.get_data_dir() + sys_image = QemuImg(params, root_dir, img_tag) + filename = sys_image.image_filename + if sys_image.image_format == 'luks': + filename = get_image_json(img_tag, params, root_dir) + device_name = vm.get_block({"filename": filename}) if device_name is None: test.error("Could not find non-removable device") try: if params.get("force_eject", "no") == "yes": - vm.eject_cdrom(device_name, force=True) + 
eject_non_cdrom(device_name, force=True) else: - vm.eject_cdrom(device_name) + eject_non_cdrom(device_name) except Exception as e: if "is not removable" not in str(e): test.fail(e) diff --git a/qemu/tests/ept_test.py b/qemu/tests/ept_test.py new file mode 100644 index 0000000000000000000000000000000000000000..053614a80ffc909300e2fe70c4b17065758197c7 --- /dev/null +++ b/qemu/tests/ept_test.py @@ -0,0 +1,42 @@ +from avocado.utils import process, cpu +from virttest import env_process + + +def run(test, params, env): + """ + ept test: + 1) Turn off ept on host + 2) Check if reading kvm_intel parameter crash host + 3) Launch a guest + 3) Check no error in guest + 4) Restore env, turn on ept + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + if cpu.get_cpu_vendor_name() != 'intel': + test.cancel("This test is supposed to run on Intel host") + + unload_cmd = params["unload_cmd"] + load_cmd = params["load_cmd"] + read_cmd = params["read_cmd"] + ept_value = process.getoutput(read_cmd % "ept") + + try: + process.system(unload_cmd) + process.system(load_cmd % "0") + process.system(read_cmd % "vmentry_l1d_flush") + + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.preprocess_vm(test, params, env, vm.name) + timeout = float(params.get("login_timeout", 240)) + + vm.wait_for_login(timeout=timeout) + vm.verify_kernel_crash() + finally: + vm.destroy() + process.system(unload_cmd) + process.system(load_cmd % ept_value) diff --git a/qemu/tests/fio_linux.py b/qemu/tests/fio_linux.py index b06cc806b3df9b0985b9b22ab71e84da06b2a419..b06b98020581f3b83ff730398de56e29ebccebd8 100644 --- a/qemu/tests/fio_linux.py +++ b/qemu/tests/fio_linux.py @@ -40,6 +40,10 @@ def run(test, params, env): test.error("Failed to get '%s' drive path" % data_image) yield drive_path[5:] + def _run_fio_test(target): + for option in params['fio_options'].split(';'): + fio.run('--filename=%s %s' % (target, option)) + data_images = params["images"].split()[1:] info = [] for image in data_images: @@ -53,9 +57,11 @@ def run(test, params, env): session = vm.wait_for_login(timeout=float(params.get("login_timeout", 240))) fio = generate_instance(params, vm, 'fio') try: - for did in _get_data_disks(): - for option in params['fio_options'].split(';'): - fio.run('--filename=%s %s' % (did, option)) + if params.get('image_backend') == 'nvme_direct': + _run_fio_test(params.get('fio_filename')) + else: + for did in _get_data_disks(): + _run_fio_test(did) finally: fio.clean() session.close() diff --git a/qemu/tests/fio_windows.py b/qemu/tests/fio_windows.py index 5c61754425b00aa9491f58999098a318c43a6d9b..522d1c80408e870981c3ec39b634d18aa51012ef 100644 --- a/qemu/tests/fio_windows.py +++ b/qemu/tests/fio_windows.py @@ -33,9 +33,10 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.context("Format disk", logging.info) - utils_misc.format_windows_disk(session, params["disk_index"], - mountpoint=params["disk_letter"]) + if not params.get('image_backend') == 'nvme_direct': + error_context.context("Format disk", logging.info) + utils_misc.format_windows_disk(session, params["disk_index"], + mountpoint=params["disk_letter"]) try: installed = session.cmd_status(check_installed_cmd) == 0 if not installed: diff --git a/qemu/tests/format_disk.py b/qemu/tests/format_disk.py index fc660da657216827d2f883d2542a0f974552a656..4780edd7a2db56f28ab1c707b9cc7d6f028296cf 100644 --- 
a/qemu/tests/format_disk.py +++ b/qemu/tests/format_disk.py @@ -33,6 +33,9 @@ def run(test, params, env): cmd_timeout = int(params.get("cmd_timeout", 360)) os_type = params["os_type"] + if os_type == 'linux': + session.cmd("dmesg -C") + drive_path = "" if os_type == 'linux': drive_name = params.objects("images")[-1] @@ -151,6 +154,14 @@ def run(test, params, env): logging.debug("The mounted devices are: %s" % mount_list) test.fail("Failed to umount with error: %s" % output) + # Clean partition on disk + clean_partition_cmd = params.get("clean_partition_cmd") + if clean_partition_cmd: + status, output = session.cmd_status_output(clean_partition_cmd, + timeout=cmd_timeout) + if status != 0: + test.fail("Failed to clean partition with error: %s" % output) + output = "" try: output = session.cmd("dmesg -c") diff --git a/qemu/tests/guest_iommu_test.py b/qemu/tests/guest_iommu_test.py index e7458a1953106f3ef461e0dbd784279cc5e0ca4a..caaef5a7abb82bf0ab26775955589a9f1f143776 100644 --- a/qemu/tests/guest_iommu_test.py +++ b/qemu/tests/guest_iommu_test.py @@ -103,7 +103,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - if cpu.get_cpu_vendor_name() != 'intel': + if cpu.get_vendor() != 'intel': test.cancel("This case only support Intel platform") vm = env.get_vm(params["main_vm"]) diff --git a/qemu/tests/guest_memory_dump_analysis.py b/qemu/tests/guest_memory_dump_analysis.py deleted file mode 100644 index 02da901b93d0875ac60249b89c47ca8535ef9ecb..0000000000000000000000000000000000000000 --- a/qemu/tests/guest_memory_dump_analysis.py +++ /dev/null @@ -1,295 +0,0 @@ -""" -Integrity test of a big guest vmcore, using the dump-guest-memory QMP -command and the "crash" utility. - -:copyright: 2013 Red Hat, Inc. -:author: Laszlo Ersek - -Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=990118 -""" - -import logging -import os -import gzip -import threading - -from aexpect import ShellCmdError - - -REQ_GUEST_MEM = 4096 # exact size of guest RAM required -REQ_GUEST_ARCH = "x86_64" # the only supported guest arch -REQ_GUEST_DF = 6144 # minimum guest disk space required -# after package installation -LONG_TIMEOUT = 10 * 60 # timeout for long operations -VMCORE_BASE = "vmcore" # basename of the host-side file the -# guest vmcore is written to, .gz -# suffix will be appended. No -# metacharacters or leading dashes -# please. -VMCORE_FD_NAME = "vmcore_fd" # fd identifier used in the monitor -CRASH_SCRIPT = "crash.cmd" # guest-side filename of the minimal -# crash script - - -def run(test, params, env): - """ - Verify the vmcore written by dump-guest-memory by a big guest. - - :param test: QEMU test object. - :param params: Dictionary with the test parameters. - :param env: Dictionary with test environment. - """ - def check_requirements(vm, session): - """ - Check guest RAM size and guest architecture. - - :param vm: virtual machine. - :param session: login shell session. - :raise: error.TestError if the test is misconfigured. - """ - mem_size = vm.get_memory_size() - if (mem_size != REQ_GUEST_MEM): - test.error("the guest must have %d MB RAM exactly " - "(current: %d MB)" % (REQ_GUEST_MEM, - mem_size)) - arch = session.cmd("uname -m").rstrip() - if (arch != REQ_GUEST_ARCH): - test.error("this test only supports %s guests " - "(current: %s)" % (REQ_GUEST_ARCH, arch)) - - def install_kernel_debuginfo(vm, session, login_timeout): - """ - In the guest, install a kernel debuginfo package that matches - the running kernel. 
- - Debuginfo packages are available for the most recent kernels - only, so this step may need a kernel upgrade and a corresponding - VM reboot. Also, the "debuginfo-install" yum utility is not good - enough for this, because its exit status doesn't seem to reflect - any failure to find a matching debuginfo package. Only "yum - install" seems to do that, and only if an individual package is - requested. - - :param vm: virtual machine. Can be None if the caller demands a - debuginfo package for the running kernel. - :param session: login shell session. - :param login_timeout: passed to vm.reboot() as timeout. Can be - None if vm is None. - :return: If the debuginfo package has been successfully - installed, None is returned. If no debuginfo package - matching the running guest kernel is available. - If vm is None, an exception is raised; otherwise, the - guest kernel is upgraded, and a new session is returned - for the rebooted guest. In this case the next call to - this function should succeed, using the new session and - with vm=None. - :raise: error.TestError (guest uname command failed), - ShellCmdError (unexpected guest yum command failure), - exceptions from vm.reboot(). - """ - def install_matching_debuginfo(session): - try: - guest_kernel = session.cmd("uname -r").rstrip() - except ShellCmdError as details: - test.error("guest uname command failed: %s" % details) - return session.cmd("yum -y install --enablerepo='*debuginfo' " - "kernel-debuginfo-%s" % guest_kernel, - timeout=LONG_TIMEOUT) - - try: - output = install_matching_debuginfo(session) - logging.debug("%s", output) - new_sess = None - except ShellCmdError as details: - if (vm is None): - raise - logging.info("failed to install matching debuginfo, " - "upgrading kernel") - logging.debug("shell error was: %s", details) - output = session.cmd("yum -y upgrade kernel", - timeout=LONG_TIMEOUT) - logging.debug("%s", output) - new_sess = vm.reboot(session, timeout=login_timeout) - return new_sess - - def install_crash(session): - """ - Install the "crash" utility in the guest. - - :param session: login shell session. - :raise: exceptions from session.cmd(). - """ - output = session.cmd("yum -y install crash") - logging.debug("%s", output) - - def check_disk_space(session): - """ - Check free disk space in the guest before uploading, - uncompressing and analyzing the vmcore. - - :param session: login shell session. - :raise: exceptions from session.cmd(); error.TestError if free - space is insufficient. - """ - output = session.cmd("rm -f -v %s %s.gz" % (VMCORE_BASE, VMCORE_BASE)) - logging.debug("%s", output) - output = session.cmd("yum clean all") - logging.debug("%s", output) - output = session.cmd("LC_ALL=C df --portability --block-size=1M .") - logging.debug("%s", output) - df_megs = int(output.split()[10]) - if (df_megs < REQ_GUEST_DF): - test.error("insufficient free disk space: %d < %d" % - (df_megs, REQ_GUEST_DF)) - - def dump_and_compress(qmp_monitor, vmcore_host): - """ - Dump the guest vmcore on the host side and compress it. - - Use the "dump-guest-memory" QMP command with paging=false. Start - a new Python thread that compresses data from a file descriptor - to a host file. Create a pipe and pass its writeable end to qemu - for vmcore dumping. Pass the pipe's readable end (with full - ownership) to the compressor thread. Track references to the - file descriptions underlying the pipe end fds carefully. 
- - Compressing the vmcore on the fly, then copying it to the guest, - then decompressing it inside the guest should be much faster - than dumping and copying a huge plaintext vmcore, especially on - rotational media. - - :param qmp_monitor: QMP monitor for the guest. - :param vmcore_host: absolute pathname of gzipped destination - file. - :raise: all sorts of exceptions. No resources should be leaked. - """ - def compress_from_fd(input_fd, gzfile): - # Run in a separate thread, take ownership of input_fd. - try: - buf = os.read(input_fd, 4096) - while (buf): - gzfile.write(buf) - buf = os.read(input_fd, 4096) - finally: - # If we've run into a problem, this causes an EPIPE in - # the qemu process, preventing it from blocking in - # write() forever. - os.close(input_fd) - - def dump_vmcore(qmp_monitor, vmcore_fd): - # Temporarily create another reference to vmcore_fd, in the - # qemu process. We own the duplicate. - qmp_monitor.cmd(cmd="getfd", - args={"fdname": "%s" % VMCORE_FD_NAME}, - fd=vmcore_fd) - try: - # Includes ownership transfer on success, no need to - # call the "closefd" command then. - qmp_monitor.cmd(cmd="dump-guest-memory", - args={"paging": False, - "protocol": "fd:%s" % VMCORE_FD_NAME}, - timeout=LONG_TIMEOUT) - except: - qmp_monitor.cmd(cmd="closefd", - args={"fdname": "%s" % VMCORE_FD_NAME}) - raise - - gzfile = gzip.open(vmcore_host, "wb", 1) - try: - try: - (read_by_gzip, written_by_qemu) = os.pipe() - try: - compressor = threading.Thread(target=compress_from_fd, - name="compressor", - args=(read_by_gzip, gzfile)) - compressor.start() - # Compressor running, ownership of readable end has - # been transferred. - read_by_gzip = -1 - try: - dump_vmcore(qmp_monitor, written_by_qemu) - finally: - # Close Python's own reference to the writeable - # end as well, so that the compressor can - # experience EOF before we try to join it. - os.close(written_by_qemu) - written_by_qemu = -1 - compressor.join() - finally: - if (read_by_gzip != -1): - os.close(read_by_gzip) - if (written_by_qemu != -1): - os.close(written_by_qemu) - finally: - # Close the gzipped file first, *then* delete it if - # there was an error. - gzfile.close() - except: - os.unlink(vmcore_host) - raise - - def verify_vmcore(vm, session, host_compr, guest_compr, guest_plain): - """ - Verify the vmcore with the "crash" utility in the guest. - - Standard output needs to be searched for "crash:" and "WARNING:" - strings; the test is successful iff there are no matches and - "crash" exits successfully. - - :param vm: virtual machine. - :param session: login shell session. - :param host_compr: absolute pathname of gzipped vmcore on host, - source file. - :param guest_compr: single-component filename of gzipped vmcore - on guest, destination file. - :param guest_plain: single-component filename of gunzipped - vmcore on guest that guest-side gunzip is expected to - create. - :raise: vm.copy_files_to() and session.cmd() exceptions; - error.TestFail if "crash" meets trouble in the vmcore. 
- """ - vm.copy_files_to(host_compr, guest_compr) - output = session.cmd("gzip -d -v %s" % guest_compr, - timeout=LONG_TIMEOUT) - logging.debug("%s", output) - - session.cmd("{ echo bt; echo quit; } > %s" % CRASH_SCRIPT) - output = session.cmd("crash -i %s " - "/usr/lib/debug/lib/modules/$(uname -r)/vmlinux " - "%s" % (CRASH_SCRIPT, guest_plain)) - logging.debug("%s", output) - if (output.find("crash:") >= 0 or - output.find("WARNING:") >= 0): - test.fail("vmcore corrupt") - - vm = env.get_vm(params["main_vm"]) - vm.verify_alive() - - qmp_monitor = vm.get_monitors_by_type("qmp") - if qmp_monitor: - qmp_monitor = qmp_monitor[0] - else: - test.error('Could not find a QMP monitor, aborting test') - - login_timeout = int(params.get("login_timeout", 240)) - session = vm.wait_for_login(timeout=login_timeout) - try: - check_requirements(vm, session) - - new_sess = install_kernel_debuginfo(vm, session, login_timeout) - if (new_sess is not None): - session = new_sess - install_kernel_debuginfo(None, session, None) - - install_crash(session) - check_disk_space(session) - - vmcore_compr = "%s.gz" % VMCORE_BASE - vmcore_host = os.path.join(test.tmpdir, vmcore_compr) - dump_and_compress(qmp_monitor, vmcore_host) - try: - verify_vmcore(vm, session, vmcore_host, vmcore_compr, VMCORE_BASE) - finally: - os.unlink(vmcore_host) - finally: - session.close() diff --git a/qemu/tests/hotplug_mem_negative.py b/qemu/tests/hotplug_mem_negative.py new file mode 100644 index 0000000000000000000000000000000000000000..23590bbff8e9e2f8a6373e49091a2976f3002940 --- /dev/null +++ b/qemu/tests/hotplug_mem_negative.py @@ -0,0 +1,83 @@ +import logging +import re + +from avocado.utils import memory + +from virttest import env_process +from virttest import error_context + +from virttest.qemu_devices import qdevices +from virttest.utils_numeric import normalize_data_size +from virttest.utils_test.qemu import MemoryHotplugTest + + +@error_context.context_aware +def run(test, params, env): + """ + Qemu memory hotplug test: + 1) Boot guest with -m option. + 2) Hotplug memory with invalid params. + 3) Check qemu prompt message. + 4) Check vm is alive after hotplug. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + @error_context.context_aware + def _hotplug_memory(vm, name): + hotplug_test = MemoryHotplugTest(test, params, env) + devices = vm.devices.memory_define_by_params(params, name) + for dev in devices: + if isinstance(dev, qdevices.Dimm): + if params["set_addr"] == "yes": + addr = params["addr_dimm_%s" % name] + else: + addr = hotplug_test.get_mem_addr(vm, dev.get_qid()) + dev.set_param("addr", addr) + error_context.context("Hotplug %s '%s' to VM" % + ("pc-dimm", dev.get_qid()), logging.info) + vm.devices.simple_hotplug(dev, vm.monitor) + hotplug_test.update_vm_after_hotplug(vm, dev) + return devices + + def collect_hotplug_info(): + details = {} + for target_mem in params.objects("target_mems"): + try: + _hotplug_memory(vm, target_mem) + except Exception as e: + error_context.context("Error happen %s: %s" % + (target_mem, e), logging.info) + details.update({target_mem: str(e)}) + else: + error_context.context("Hotplug memory successful", + logging.info) + details.update({target_mem: "Hotplug memory successful"}) + return details + + def check_msg(keywords, msg): + if not re.search(r"%s" % keywords, msg): + test.fail("No valid keywords were found in the qemu prompt message") + + if params["size_mem"] == "": + overcommit_mem = normalize_data_size("%sK" % (memory.memtotal() * 2), "G") + params["size_mem"] = "%sG" % round(float(overcommit_mem)) + if params["policy_mem"] == "bind": + params["host-nodes"] = str(max(memory.numa_nodes()) + 1) + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + vm.wait_for_login() + + msg = collect_hotplug_info() + if len(params.objects("target_mems")) == 1: + error_context.context("Check qemu prompt message.", logging.info) + check_msg(params["keywords"], msg[params["target_mems"]]) + else: + for target_mem in params.objects("target_mems"): + mem_params = params.object_params(target_mem) + error_context.context("Check %s qemu prompt " + "message." 
% target_mem, logging.info) diff --git a/qemu/tests/hpt_max_page_size.py b/qemu/tests/hpt_max_page_size.py index 5aa6f972d1132843073a99c9d9f110799f5176b9..f91feb3b7db62fc30df1310a6a7040121dfe2967 100644 --- a/qemu/tests/hpt_max_page_size.py +++ b/qemu/tests/hpt_max_page_size.py @@ -26,12 +26,13 @@ def run(test, params, env): """ def _check_meminfo(key): meminfo = session.cmd_output("grep %s /proc/meminfo" % key) - actual_value = re.search(r'\d+', meminfo) + actual_value = re.search(r'\d{4,}', meminfo) return actual_value.group(0) if actual_value else "" + timeout = params.get_numeric("login_timeout", 240) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - session = vm.wait_for_login() + session = vm.wait_for_login(timeout=timeout) error_context.context("Check output Hugepage size.", logging.info) if _check_meminfo("Hugepagesize") != params["expected_value"]: diff --git a/qemu/tests/hugepage_mem_stress.py b/qemu/tests/hugepage_mem_stress.py index bd5aa3efab0baedf036030efada837f706d3fcb1..7ba40f5fd112492748d4039f936b5bcbdc575d8e 100644 --- a/qemu/tests/hugepage_mem_stress.py +++ b/qemu/tests/hugepage_mem_stress.py @@ -1,9 +1,13 @@ import logging +import tempfile +from virttest import env_process from virttest import error_context from virttest import utils_misc from virttest import utils_test + from virttest.utils_test import BackgroundTest +from virttest.utils_test import utils_memory @error_context.context_aware @@ -13,8 +17,9 @@ def run(test, params, env): Steps: 1) System setup hugepages on host. 2) Mount this hugepage to /mnt/kvm_hugepage. - 3) Run memory heavy stress inside guest. - 4) Check guest call trace in dmesg log. + 3) Check that HugePages do not leak when a non-existent mem-path is used. + 4) Run memory heavy stress inside guest. + 5) Check guest call trace in dmesg log. :params test: QEMU test object. :params params: Dictionary with the test parameters. :params env: Dictionary with test environment. @@ -29,40 +34,56 @@ def run(test, params, env): install_cmd = params["install_cmd"] % winutil_drive session.cmd(install_cmd) + if params.get_boolean("non_existent_point"): + dir = tempfile.mkdtemp(prefix='hugepage_') + error_context.context("Hugepage is not mounted on path %s."
% + dir, logging.info) + params["extra_params"] = " -mem-path %s" % dir + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + os_type = params["os_type"] - verify_wait_timeout = params.get_numeric("verify_wait_timeout", 60) + stress_duration = params.get_numeric("stress_duration", 60) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() - error_context.context("Run memory heavy stress in guest", logging.info) - if os_type == "linux": - stress_args = params["stress_custom_args"] % ( - params.get_numeric("mem") / 512) - stress_test = utils_test.VMStress(vm, "stress", - params, stress_args=stress_args) - try: - stress_test.load_stress_tool() - utils_misc.wait_for(lambda: (stress_test.app_running is False), 30) - stress_test.unload_stress() - utils_misc.verify_dmesg(session=session) - finally: - stress_test.clean() - else: - install_path = params["install_path"] - test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path - heavyload_install() - error_context.context("Run heavyload inside guest.", logging.info) - heavyload_bin = r'"%s\heavyload.exe" ' % install_path - heavyload_options = ["/MEMORY %d" % (params.get_numeric("mem") / 512), - "/DURATION 30", - "/AUTOEXIT", - "/START"] - start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, 30, 30)) - stress_tool.start() - if not utils_misc.wait_for(stress_tool.is_alive, verify_wait_timeout): - test.error("Failed to start heavyload process.") - stress_tool.join(30) + try: + error_context.context("Run memory heavy stress in guest", logging.info) + if os_type == "linux": + stress_args = params["stress_custom_args"] % ( + params.get_numeric("mem") / 512) + stress_test = utils_test.VMStress(vm, "stress", + params, stress_args=stress_args) + try: + stress_test.load_stress_tool() + utils_misc.wait_for(lambda: (stress_test.app_running is False), 30) + stress_test.unload_stress() + utils_misc.verify_dmesg(session=session) + finally: + stress_test.clean() + else: + install_path = params["install_path"] + test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path + heavyload_install() + error_context.context("Run heavyload inside guest.", logging.info) + heavyload_bin = r'"%s\heavyload.exe" ' % install_path + heavyload_options = ["/MEMORY %d" % (params.get_numeric("mem") / 512), + "/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START"] + start_cmd = heavyload_bin + " ".join(heavyload_options) + stress_tool = BackgroundTest(session.cmd, (start_cmd, + stress_duration, stress_duration)) + stress_tool.start() + if not utils_misc.wait_for(stress_tool.is_alive, stress_duration): + test.error("Failed to start heavyload process.") + stress_tool.join(stress_duration) - session.close() + if params.get_boolean("non_existent_point"): + error_context.context("Check large memory pages free on host.", + logging.info) + if utils_memory.get_num_huge_pages() != utils_memory.get_num_huge_pages_free(): + test.fail("HugePages leaked.") + finally: + session.close() diff --git a/qemu/tests/hugepage_specify_node.py b/qemu/tests/hugepage_specify_node.py new file mode 100644 index 0000000000000000000000000000000000000000..d208e5d750efe7aa58897ac7cb3992c23e3ddfaa --- /dev/null +++ b/qemu/tests/hugepage_specify_node.py @@ -0,0 +1,86 @@ +import logging +import math + +from avocado.utils import memory + +from virttest import env_process +from virttest import error_context +from virttest import test_setup +from virttest 
import utils_misc + +from virttest.utils_numeric import normalize_data_size + + +@error_context.context_aware +def run(test, params, env): + """ + Qemu allocate hugepage from specify node. + Steps: + 1) Setup total of 4G mem hugepages for specify node. + 2) Setup total of 1G mem hugepages for idle node. + 3) Mount this hugepage to /mnt/kvm_hugepage. + 4) Boot guest only allocate hugepage from specify node. + 5) Check the hugepage used from every node. + :params test: QEMU test object. + :params params: Dictionary with the test parameters. + :params env: Dictionary with test environment. + """ + memory.drop_caches() + hugepage_size = memory.get_huge_page_size() + mem_size = int(normalize_data_size("%sM" % params["mem"], "K")) + idle_node_mem = int(normalize_data_size("%sM" % params["idle_node_mem"], "K")) + + error_context.context("Get host numa topological structure.", logging.info) + host_numa_node = utils_misc.NumaInfo() + node_list = host_numa_node.get_online_nodes_withmem() + idle_node_list = node_list.copy() + node_meminfo = host_numa_node.get_all_node_meminfo() + + for node_id in node_list: + error_context.base_context("Check preprocess HugePages Free on host " + "numa node %s." % node_id, logging.info) + node_memfree = int(node_meminfo[node_id]["MemFree"]) + if node_memfree < idle_node_mem: + idle_node_list.remove(node_id) + if node_memfree < mem_size: + node_list.remove(node_id) + + if len(idle_node_list) < 2 or not node_list: + test.cancel("Host node does not have enough nodes to run the test, " + "skipping test...") + + for node_id in node_list: + error_context.base_context("Specify qemu process only allocate " + "HugePages from node%s." % node_id, logging.info) + params["target_nodes"] = "%s" % node_id + params["target_num_node%s" % node_id] = math.ceil(mem_size / hugepage_size) + error_context.context("Setup huge pages for specify node%s." % + node_id, logging.info) + check_list = [_ for _ in idle_node_list if _ != node_id] + for idle_node in check_list: + params["target_nodes"] += " %s" % idle_node + params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem / hugepage_size) + error_context.context("Setup huge pages for idle node%s." % + idle_node, logging.info) + params["setup_hugepages"] = "yes" + hp_config = test_setup.HugePageConfig(params) + hp_config.setup() + params["qemu_command_prefix"] = "numactl --membind=%s" % node_id + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + try: + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + vm.wait_for_login() + + meminfo = host_numa_node.get_all_node_meminfo() + for index in check_list: + error_context.base_context("Check process HugePages Free on host " + "numa node %s." % index, logging.info) + hugepages_free = int(meminfo[index]["HugePages_Free"]) + if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free: + test.fail("Qemu still use HugePages from other node." + "Expect: node%s, used: node%s." 
% (node_id, index)) + finally: + vm.destroy() + hp_config.cleanup() diff --git a/qemu/tests/hv_check_cpu_utilization.py b/qemu/tests/hv_check_cpu_utilization.py new file mode 100644 index 0000000000000000000000000000000000000000..08c5f302efc73056e89410ead73de49fec7101d1 --- /dev/null +++ b/qemu/tests/hv_check_cpu_utilization.py @@ -0,0 +1,149 @@ +import time +import threading +import re +import logging + +from virttest import utils_misc +from virttest import utils_test +from virttest import error_context +from avocado.utils import process + + +def _check_cpu_usage(session): + """ + Check windows guest cpu usage by wmic. This function is used within + utils_misc.wait_for(), to check cpu usage repeatedly. + + param session: a session object to send wmic commands + """ + status, output = session.cmd_status_output( + "wmic cpu get loadpercentage /value") + if not status: + result = re.search(r'LoadPercentage=(\d+)', output) + if result: + percent = int(result.group(1)) + if percent > 1: + logging.warning("Guest cpu usage :%s%%", percent) + + +def _check_cpu_thread_func(session, timeout): + """ + Repeatedlly checking guest cpu usage, until timeout has arrived. + + param session: a session object to send commands + param timeout: total checking time + """ + utils_misc.wait_for(lambda: _check_cpu_usage(session), timeout, 0, 5) + + +def _pin_vm_threads(vm, node): + """ + Pin guest to certain numa node. + + param vm: a vm object + param node: a numa node to pin to + """ + node = utils_misc.NumaNode(node) + utils_test.qemu.pin_vm_threads(vm, node) + + +def _stop_service(test, params, session, service): + """ + Check & stop windows system service + + param session: a session to send commands + param service: the name of the service to stop + """ + service_check_cmd = params.get("service_check_cmd") + service_stop_cmd = params.get("service_stop_cmd") + s, o = session.cmd_status_output("sc query") + if s: + test.error("Failed to query service list, " + "status=%s, output=%s" % (s, o)) + service_item = re.search( + r'SERVICE_NAME:\s+%s' % service, o, re.I | re.M) + if not service_item: + return + + s, o = session.cmd_status_output(service_check_cmd % service) + if s: + test.error("Failed to get status for service: %s, " + "status=%s, output=%s" % (service, s, o)) + if re.search(r'STOPPED', o, re.I | re.M): + return + session.cmd(service_stop_cmd.format(service)) + + +@error_context.context_aware +def run(test, params, env): + """ + Cpu utilization test with hv flags. + + 1)Start a Windows guest vm. + 2)Pin the vm to certain numa node, to keep accuracy. + 3)Stop serval Windows services & background processes on guest. + to lower the cpu usage to minimum. + 4)Reboot vm to apply changes, then wait for serveral minutes to make + sure the cpu is chill down. + 5)Start both checking the guest&host's cpu usage, monitoring the value. + 6)Compare the average utilization value to standard values. 
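The guest-side probe behind step 5 is _check_cpu_usage() above; a runnable sketch of its wmic parse, using the same regex and an invented sample line:

    import re

    # typical line from `wmic cpu get loadpercentage /value` (sample invented)
    sample = "LoadPercentage=3"
    m = re.search(r'LoadPercentage=(\d+)', sample)
    if m and int(m.group(1)) > 1:
        print("guest cpu usage above idle: %s%%" % m.group(1))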
+ + param test: the test object + param params: the params of the test + param env: the testing environment object + """ + vm = env.get_vm(params["main_vm"]) + + # pin guest vcpus/memory/vhost threads to last numa node of host + _pin_vm_threads(vm, params.get_numeric("numa_node", -1)) + + vm.verify_alive() + + timeout = params.get_numeric("login_timeout", 240) + host_check_times = params.get_numeric("host_check_times", 900) + host_check_interval = params.get_numeric("host_check_interval", 2) + guest_check_timeout = host_check_times * host_check_interval + thread_cpu_level = params.get_numeric("thread_cpu_level", 5) + session = vm.wait_for_serial_login(timeout=timeout) + do_migration = params.get("do_migration", "no") == "yes" + + service_names = params.get("serives_to_stop").split() + + # check and stop services + for service in service_names: + _stop_service(test, params, session, service) + + # stop windows defender + session.cmd(params["reg_cmd"]) + + session = vm.reboot(session, timeout=timeout, serial=True) + + if do_migration: + vm.migrate(env=env) + session = vm.wait_for_serial_login(timeout=timeout) + + # wait for the guest to chill + time.sleep(900) + + # start background checking guest cpu usage + thread = threading.Thread(target=_check_cpu_thread_func, + args=(session, guest_check_timeout)) + thread.start() + time.sleep(60) + + # start checking host cpu usage + pid = vm.get_pid() + process.system(params["host_check_cmd"] % pid, shell=True) + thread.join(guest_check_timeout + 360) + + vcpu_thread_pattern = params.get("vcpu_thread_pattern", + r'thread_id.?[:|=]\s*(\d+)') + vcpu_ids = vm.get_vcpu_pids(vcpu_thread_pattern) + for thread_id in vcpu_ids: + # output result + host_cpu_usage = process.system_output( + params["thread_process_cmd"] % thread_id, shell=True) + host_cpu_usage = float(host_cpu_usage.decode()) + if host_cpu_usage > thread_cpu_level: + test.fail("The cpu usage of thread %s is %s" + " > %s" % (thread_id, host_cpu_usage, thread_cpu_level)) diff --git a/qemu/tests/hv_crash.py b/qemu/tests/hv_crash.py new file mode 100644 index 0000000000000000000000000000000000000000..111a17ab300a55332599ff05ba2758fc2fe7d059 --- /dev/null +++ b/qemu/tests/hv_crash.py @@ -0,0 +1,69 @@ +import logging + +from virttest import error_context +from virttest import env_process +from virttest import utils_misc + + +@error_context.context_aware +def run(test, params, env): + """ + Test the hv_crash flag avaliability + + 1) boot the guest with hv_crash flag + 2) use nmi to make the guest crash, the qemu process should quit + 3) boot the guest without hv_crash flag + 4) use nmi again, the qemu should not quit + + param test: the test object + param params: the test params + param env: the test env object + """ + + def _boot_guest_with_cpu_flag(hv_flag): + """ + Boot the guest, with param cpu_model_flags set to hv_flag + + param hv_flag: the hv flags to set to cpu + return: the booted vm + """ + params["cpu_model_flags"] = hv_flag + params["start_vm"] = "yes" + vm_name = params["main_vm"] + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.verify_alive() + session = vm.wait_for_login(timeout=timeout) + return vm, session + + def _trigger_crash(vm, session): + """ + Trigger a system crash by nmi + """ + session.cmd(set_nmi_cmd) + vm.reboot(session=session, timeout=timeout) + vm.monitor.nmi() + + timeout = params.get("timeout", 360) + hv_crash_flag = params["hv_crash_flag"] + set_nmi_cmd = params["set_nmi_cmd"] + flags_without_hv_crash = 
params["cpu_model_flags"] + flags_with_hv_crash = flags_without_hv_crash + "," + hv_crash_flag + + error_context.context("Boot the guest with hv_crash flag", logging.info) + vm, session = _boot_guest_with_cpu_flag(flags_with_hv_crash) + + error_context.context("Make the guest crash", logging.info) + _trigger_crash(vm, session) + logging.info("Check the qemu process is quit") + if not utils_misc.wait_for(vm.is_dead, 10, 1, 1): + test.fail("The qemu still active after crash") + + error_context.context("Boot the guest again", logging.info) + vm, session = _boot_guest_with_cpu_flag(flags_without_hv_crash) + + error_context.context("Make the guest crash again", logging.info) + _trigger_crash(vm, session) + logging.info("Check the qemu process is not quit") + if utils_misc.wait_for(vm.is_dead, 10, 1, 1): + test.fail("The qemu is quit after crash") diff --git a/qemu/tests/hv_kvm_unit_test.py b/qemu/tests/hv_kvm_unit_test.py new file mode 100644 index 0000000000000000000000000000000000000000..418861743358a0169a70fe0577575b096e556c34 --- /dev/null +++ b/qemu/tests/hv_kvm_unit_test.py @@ -0,0 +1,45 @@ +import logging +import re +import json + +from virttest import error_context +from virttest import data_dir +from virttest import cpu +from avocado.utils import process + + +@error_context.context_aware +def run(test, params, env): + """ + Run kvm-unit-tests for Hyper-V testdev device + + 1) compile kvm-unit-tests tools source code + 2) Run each unit tests by compiled binary tools + 3) For each unit test, compare the test result to expected value + """ + tmp_dir = data_dir.get_tmp_dir() + kvm_unit_tests_dir = data_dir.get_deps_dir("kvm_unit_tests") + compile_cmd = params["compile_cmd"] % (tmp_dir, kvm_unit_tests_dir) + test_cmd = params["test_cmd"] + unit_tests_mapping = params["unit_tests_mapping"] + skip_tests = params.get("skip_tests", "").split() + cpu_flags = params["cpu_model_flags"] + cpu_model = cpu.get_qemu_best_cpu_model(params) + cpu_param = cpu_model + cpu_flags + + error_context.context("Copy & compile kvm-unit-test tools", logging.info) + process.system(compile_cmd, shell=True) + + error_context.context("Run unit tests", logging.info) + for unit_test, unit_test_result in json.loads(unit_tests_mapping).items(): + if unit_test in skip_tests: + continue + logging.info("Start running unit test %s" % unit_test) + unit_test_cmd = test_cmd % (tmp_dir, unit_test, cpu_param) + result_output = process.system_output(unit_test_cmd, shell=True) + result_output = result_output.decode() + find_result = re.findall('^%s' % unit_test_result[0], result_output, re.M) + if len(find_result) != int(unit_test_result[1]): + test.fail("Unit test result mismatch target, " + "target=%s, output=%s" % + (unit_test_result[1], result_output)) diff --git a/qemu/tests/hv_reset.py b/qemu/tests/hv_reset.py new file mode 100644 index 0000000000000000000000000000000000000000..c654f67c47bcd3c548c6688d57fc6c26c54ebd6c --- /dev/null +++ b/qemu/tests/hv_reset.py @@ -0,0 +1,17 @@ +def run(test, params, env): + """ + Reboot test with hv_reset flag: + 1) Log into a guest + 2) Send a reboot command in guest + 3) Wait until the guest is up again + 4) Log into the guest to verify it's up again + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ + """ + vm_name = params["main_vm"] + vm = env.get_vm(vm_name) + vm.reboot(vm.wait_for_login(timeout=360)) + vm.verify_alive() diff --git a/qemu/tests/hv_time.py b/qemu/tests/hv_time.py new file mode 100644 index 0000000000000000000000000000000000000000..327038f7364e48c7eb4e160c14c1c8ce5a8f240e --- /dev/null +++ b/qemu/tests/hv_time.py @@ -0,0 +1,135 @@ +import logging +import re + +from virttest import error_context +from virttest import utils_misc +from virttest import env_process + + +@error_context.context_aware +def run(test, params, env): + """ + Test hv_time flag avaliability and effectiveness. + + 1) boot the guest, setup the testing environment + 2) reboot the guest without hv_time flag + 3) run gettime_cycles.exe to acquire cpu cycles of IO operations + 4) reboot the guest with hv_time flag + 5) run the gettime_cycles.exe again, then compare the cycles + to previous result + + param test: the test object + param params: the test params + param env: the test env object + """ + + def _setup_environments(): + """ + Setup the guest test environment, includes close the useplatformclock, + and copy gettime_cycles.exe related files to guest + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login(timeout=timeout) + + logging.info("Turn off the useplatformclock attribute") + session.cmd(close_pltclk_cmd) + + logging.info("Reboot to check the useplatformclock is off") + session = vm.reboot(session, timeout=timeout) + s, o = session.cmd_status_output(check_pltclk_cmd) + if s: + test.error("Failed to check the useplatfromclock after reboot, " + "status=%s, output=%s" % (s, o)) + use_pltck = re.search(r'useplatformclock\s+no', o, re.I | re.M) + if not use_pltck: + test.error("The useplatfromclock isn't off after reboot, " + "output=%s" % o) + + logging.info("Copy the related files to the guest") + for f in gettime_filenames: + copy_file_cmd = utils_misc.set_winutils_letter(session, + copy_cmd % f) + session.cmd(copy_file_cmd) + vm.graceful_shutdown(timeout=timeout) + + def _run_gettime(session): + """ + Run the gettime_cycles.exe to acquire cpu cycles + + return: the cpu cycles amount of certain IO operation + """ + o = session.cmd_output_safe(run_gettime_cmd, timeout=timeout) + cycles = int(re.search(r'\d+', o).group(0)) + logging.info("The cycles with out hv_time is %d" % cycles) + return cycles + + def _boot_guest_with_cpu_flag(hv_flag): + """ + Boot the guest, with param cpu_model_flags set to hv_flag + + param hv_flag: the hv flags to set to cpu + + return: the booted vm + """ + params["cpu_model_flags"] = hv_flag + params["start_vm"] = "yes" + vm_name = params["main_vm"] + env_process.preprocess(test, params, env) + return env.get_vm(vm_name) + + def _get_cycles_with_flags(cpu_flag): + """ + Boot the guest with cpu_model_flags set to cpu_flag, + then run gettime_cycle.exe to acquire cpu cycles. + + param cpu_flag: the cpu flags to set + return: the cpu cycles returned by gettime_cycle.exe + """ + logging.info("Boot the guest with cpu_model_flags= %s" % cpu_flag) + vm = _boot_guest_with_cpu_flag(cpu_flag) + session = vm.wait_for_login(timeout=timeout) + logging.info("Run gettime_cycle.exe") + cycles = _run_gettime(session) + vm = env.get_vm(params["main_vm"]) + vm.graceful_shutdown(timeout=timeout) + return cycles + + def _check_result(cycles_without_flag, cycles_with_flag): + """ + Calculate the factor of optimization for the hv_time flag usage, + and check if the factor is as effective as we want. 
+ param cycles_without_flag: the cpu cycles acquired by + gettime_cycles.exe, without hv_time flag set + param cycles_with_flag: the cpu cycles acquired by gettime_cycles.exe, + with hv_time flag set + """ + factor = cycles_with_flag / float(cycles_without_flag) + if factor > 0.1: + test.fail("Cycles with flag is %d, cycles without flag is %d, " + "the factor is %f > 0.1" % (cycles_with_flag, + cycles_without_flag, + factor)) + + close_pltclk_cmd = params["close_pltclk_cmd"] + check_pltclk_cmd = params["check_pltclk_cmd"] + gettime_filenames = params["gettime_filenames"].split() + copy_cmd = params["copy_cmd"] + run_gettime_cmd = params["run_gettime_cmd"] + timeout = params.get("timeout", 360) + hv_time_flags = params["hv_time_flags"].split() + flags_with_hv_time = params["cpu_model_flags"] + flags_without_hv_time = ','.join( + [_ for _ in flags_with_hv_time.split(',') if _ not in hv_time_flags]) + + error_context.context("Setting up environments", logging.info) + _setup_environments() + + error_context.context("Get cpu cycles without hv_time flag", logging.info) + cycles_without_flag = _get_cycles_with_flags(flags_without_hv_time) + + error_context.context("Get cpu cycles with hv_time flag", logging.info) + cycles_with_flag = _get_cycles_with_flags(flags_with_hv_time) + + error_context.context("Check the optimization factor", logging.info) + _check_result(cycles_without_flag, cycles_with_flag) diff --git a/qemu/tests/hv_tlbflush.py b/qemu/tests/hv_tlbflush.py new file mode 100644 index 0000000000000000000000000000000000000000..434079ba645a59c2e915e7e7d352df36fd6408fc --- /dev/null +++ b/qemu/tests/hv_tlbflush.py @@ -0,0 +1,202 @@ +import logging +import time +import datetime +import os + +from virttest import error_context +from virttest import env_process +from virttest import utils_misc +from virttest import utils_numeric +from virttest import data_dir +from virttest import utils_test +from avocado.utils import cpu + + +@error_context.context_aware +def run(test, params, env): + """ + Test hv_tlbflush flag improvement + 1) Prepare test related tools on host and guest, including + hv_tlbflush.exe and related files, and stress tool + 2) Boot the guest without hv_tlbflush and other related flags + 3) Run stress tool on host, then run hv_tlbflush.exe on guest, + the total running time is acquired + 4) Shutdown and reboot guest with all hv flags + 5) Run stress tool and hv_tlbflush.exe again on host & guest, + another time is acquired + 6) Compare the two times and calculate the improvement factor, + then judge the result depending on the architecture of the guest + + param test: the test object + param params: the test params + param env: the test env object + """ + + def _prepare_test_environment(): + """ + Prepare the test tools, such as hv_tlbflush & stress + + return: a running HostStress object + """ + + copy_tlbflush_cmd = params["copy_tlbflush_cmd"] + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login(timeout=timeout) + + logging.info("Copy tlbflush tool related files") + for f in tlbflush_filenames: + copy_file_cmd = utils_misc.set_winutils_letter( + session, copy_tlbflush_cmd % f) + session.cmd(copy_file_cmd) + + logging.info("Create a large file for test") + create_test_file_cmd = params["create_test_file_cmd"] + test_file_size = params["test_file_size"] + test_file_size = utils_numeric.normalize_data_size( + test_file_size, order_magnitude="B") + session.cmd(create_test_file_cmd % test_file_size) + vm.graceful_shutdown(timeout=timeout) + + stress_type = 
params.get("stress_type", "stress") + stress_pkg_name = params.get("stress_pkg_name", "stress-1.0.4.tar.gz") + stress_root_dir = data_dir.get_deps_dir("stress") + downloaded_file_path = os.path.join(stress_root_dir, stress_pkg_name) + host_cpu_count = cpu.total_cpus_count() + + host_stress = utils_test.HostStress( + stress_type, params, + download_type="tarball", + downloaded_file_path=downloaded_file_path, + stress_args="--cpu %s > /dev/null 2>&1& " % host_cpu_count) + return host_stress + + def _clean_test_environment(host_stress): + """ + Remove the test related files + + param host_stress: the HostStress object + """ + delete_tlbflush_cmd = params["delete_tlbflush_cmd"] + delete_test_file_cmd = params["delete_test_file_cmd"] + + _stop_host_stress(host_stress) + logging.info("Cleanup the stress tool on host") + host_stress.clean() + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + session = vm.wait_for_login(timeout=timeout) + logging.info("Delete tlbflush files") + for f in tlbflush_filenames: + session.cmd(delete_tlbflush_cmd % f) + + logging.info("Delete test file") + session.cmd(delete_test_file_cmd) + + def _start_host_stress(host_stress): + """ + Start running stress tool on host + + param host_stress: the HostStress object + """ + if not host_stress.app_running(): + host_stress.load_stress_tool() + + if not host_stress.app_running(): + test.error("Can't start the stress tool on host") + + def _stop_host_stress(host_stress): + """ + Stop the stress tool on host + + param host_stress: the running HostStress object + """ + if host_stress.app_running(): + host_stress.unload_stress() + + def _boot_guest_with_cpu_flag(hv_flag): + """ + Boot the guest, with param cpu_model_flags set to hv_flag + + param hv_flag: the hv flags to set to cpu + + return: the booted vm and a loggined session + """ + params["cpu_model_flags"] = hv_flag + params["start_vm"] = "yes" + vm_name = params["main_vm"] + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + session = vm.wait_for_login(timeout=timeout) + return (vm, session) + + def _run_tlbflush(session, host_stress): + """ + Start running the hv_tlvflush tool on guest. 
+
+        param session: a logged-in session to send commands
+        param host_stress: the running HostStress object
+
+        return: the total run time in seconds, as a float
+        """
+        run_tlbflush_cmd = params["run_tlbflush_cmd"]
+        run_tlbflush_timeout = params.get_numeric("run_tlbflush_timeout", 3600)
+
+        logging.info("Start stress on host")
+        _start_host_stress(host_stress)
+
+        logging.info("Start run hv_tlbflush.exe on guest")
+        s, o = session.cmd_status_output(run_tlbflush_cmd,
+                                         run_tlbflush_timeout)
+        logging.info("Stop stress on host")
+        _stop_host_stress(host_stress)
+
+        if s:
+            test.error("Run tlbflush error: status = %s, output = %s"
+                       % (s, o))
+        time_str = o.strip().split('\n')[-1]
+        time_str = time_str.split(".")[0]
+        s_t = time.strptime(time_str, "%H:%M:%S")
+        total_time = datetime.timedelta(hours=s_t.tm_hour,
+                                        minutes=s_t.tm_min,
+                                        seconds=s_t.tm_sec).total_seconds()
+        logging.info("Running result: %f", total_time)
+        return total_time
+
+    timeout = params.get_numeric("timeout", 360)
+    tlbflush_filenames = params["tlbflush_filenames"].split()
+    cpu_model_flags = params["cpu_model_flags"]
+    hv_flags_to_ignore = params["hv_flags_to_ignore"].split()
+
+    error_context.context("Prepare test environment", logging.info)
+    host_stress = _prepare_test_environment()
+
+    try:
+        error_context.context("Boot guest without hv_tlbflush related flags")
+        hv_flag_without_tlbflush = ','.join(
+            [_ for _ in cpu_model_flags.split(',')
+             if _ not in hv_flags_to_ignore])
+        vm, session = _boot_guest_with_cpu_flag(hv_flag_without_tlbflush)
+
+        error_context.context("Run tlbflush without hv_tlbflush", logging.info)
+        time_without_flag = _run_tlbflush(session, host_stress)
+        vm.graceful_shutdown(timeout=timeout)
+
+        error_context.context("Boot guest with hv_tlbflush related flags")
+        vm, session = _boot_guest_with_cpu_flag(cpu_model_flags)
+        error_context.context("Run tlbflush with hv_tlbflush", logging.info)
+        time_with_flag = _run_tlbflush(session, host_stress)
+
+        error_context.context("Compare the results of the 2 runs")
+        factor = time_with_flag / time_without_flag
+        vm_arch = params.get("vm_arch_name")
+        threshold = 0.5 if vm_arch == "x86_64" else 1.0
+        if factor >= threshold:
+            test.fail("The improvement factor=%f is not enough. 
" + "Time WITHOUT flag: %s, " + "Time WITH flag: %s" % + (factor, time_without_flag, time_with_flag)) + + finally: + _clean_test_environment(host_stress) diff --git a/qemu/tests/hv_vapic_test.py b/qemu/tests/hv_vapic_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a8f03b126e10651cbac820b188b6f834eb2942c9 --- /dev/null +++ b/qemu/tests/hv_vapic_test.py @@ -0,0 +1,131 @@ +import logging +import re +import os +import time + +from virttest import error_context +from virttest import utils_disk +from virttest import env_process +from virttest import data_dir +from virttest.qemu_storage import QemuImg + +from provider.storage_benchmark import generate_instance + + +@error_context.context_aware +def run(test, params, env): + """ + Test the hv_vapic flag improvement + 1) Create tmpfs data disk in host + 2) Mount&format the disk in guest, then prepare the fio test + environment + 3) Boot the guest with all hv flags + 4) Run fio test, record the result's bw value + 5) Shutdown and boot the guest again without hv_vapic flag + 6) Run fio test again, record the result's bw value + 7) Calculate the improvement value of the 2 fio tests, + then check if it is obvious enough + + param test: the test object + param params: the test params + param env: the test env object + """ + + def _create_tmpfs_data_disk(): + """ + Create a tmpfs data disk + """ + logging.info("Create tmpfs data disk") + disk_name_key = params["disk_name_key"] + tmp_dir = data_dir.get_tmp_dir() + tmpfs_dir = os.path.join(tmp_dir, "tmpfs") + if not os.path.isdir(tmpfs_dir): + os.makedirs(tmpfs_dir) + params[disk_name_key] = os.path.join(tmpfs_dir, "data") + tmpfs_image_name = params["tmpfs_image_name"] + img_param = params.object_params(tmpfs_image_name) + img = QemuImg(img_param, data_dir.get_data_dir(), tmpfs_image_name) + img.create(img_param) + + def _format_tmpfs_disk(): + """ + Format the new tmpfs disk in guest + + return: the formatted drive letter of the disk + """ + logging.info("Boot the guest to setup tmpfs disk") + vm, session = _boot_guest_with_cpu_flag(cpu_model_flags) + logging.info("Format tmpfs disk") + disk_size = params["image_size_" + params["tmpfs_image_name"]] + disk_id = utils_disk.get_windows_disks_index(session, disk_size)[0] + drive_letter = utils_disk.configure_empty_windows_disk( + session, disk_id, disk_size)[0] + vm.graceful_shutdown(timeout=timeout) + return drive_letter + + def _boot_guest_with_cpu_flag(hv_flag): + """ + Boot the guest, with param cpu_model_flags set to hv_flag + + param hv_flag: the hv flags to set to cpu + + return: the booted vm and a loggined session + """ + params["cpu_model_flags"] = hv_flag + params["start_vm"] = "yes" + vm_name = params["main_vm"] + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.verify_alive() + session = vm.wait_for_login(timeout=timeout) + return (vm, session) + + def _run_fio(session, drive_letter): + """ + First format tmpfs disk to wipe out cache, + then run fio test, and return the result's bw value + + param session: a session loggined to guest os + drive_letter: the drive to run the fio job on + return: the bw value of the running result(bw=xxxB/s) + """ + bw_search_reg = params["bw_search_reg"] + logging.info("Format tmpfs data disk") + utils_disk.create_filesystem_windows(session, drive_letter, "ntfs") + logging.info("Start fio test") + fio = generate_instance(params, vm, 'fio') + o = fio.run(params["fio_options"] % drive_letter) + return int(re.search(bw_search_reg, o, re.M).group(1)) + 
+ timeout = params.get_numeric("timeout", 360) + cpu_model_flags = params["cpu_model_flags"] + + error_context.context("Create tmpfs data disk in host", logging.info) + _create_tmpfs_data_disk() + + error_context.context("Prepare tmpfs in guest", logging.info) + drive_letter = _format_tmpfs_disk() + + error_context.context("Boot guest with all the hv flags") + vm, session = _boot_guest_with_cpu_flag(cpu_model_flags) + time.sleep(300) + error_context.context("Start fio in guest", logging.info) + bw_with_hv_vapic = _run_fio(session, drive_letter) + + error_context.context("Shutdown guest and boot without hv_vapnic", + logging.info) + vm.graceful_shutdown(timeout=timeout) + cpu_model_flags = cpu_model_flags.replace(",hv_vapic", "") + vm, session = _boot_guest_with_cpu_flag(cpu_model_flags) + time.sleep(300) + error_context.context("Start fio in guest again", logging.info) + bw_without_hv_vapic = _run_fio(session, drive_letter) + + error_context.context("Check the improvement of hv_vapic", logging.info) + improvement = (float)(bw_with_hv_vapic - bw_without_hv_vapic) + improvement /= bw_without_hv_vapic + if improvement < 0.05: + test.fail("Improvement not above 5%%." + " bw with hv_vapic: %s," + " bw without hv_vapic: %s" % + (bw_with_hv_vapic, bw_without_hv_vapic)) diff --git a/qemu/tests/image_create_with_preallocation.py b/qemu/tests/image_create_with_preallocation.py new file mode 100644 index 0000000000000000000000000000000000000000..0235421123b540da5dae548b4e12b8f950d5f575 --- /dev/null +++ b/qemu/tests/image_create_with_preallocation.py @@ -0,0 +1,79 @@ +import json +import os + +from avocado import fail_on +from avocado.utils import process + +from virttest import data_dir +from virttest import qemu_storage + +from provider import qemu_img_utils as img_utils + + +def run(test, params, env): + """ + Check parameter preallocation when creating an image. + 1. Create a qcow2 image with preallocation=off, full, falloc, metadata. + 2. Create a raw image with preallocation=off, full, falloc. + 3. Create a luks image with preallocation=off, full, falloc. + 4. check points: + 4.1 preallocation=off + Image create successfully, the actual_size is less than specified size. + 4.2 preallocation=full + Image create successfully, the actual_size is greater than or equal to + specified size. + (It's normal because of the temporary predictive preallocation of XFS.) + 4.3 preallocation=falloc + Image create successfully, invoked fallocate system call, + the actual_size is greater than or equal to specified size. + (It's normal because of the temporary predictive preallocation of XFS.) + 4.4 preallocation=metadata + Image create successfully, the actual_size is less than specified size. + + :param test: VT test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + + def check_fallocate_syscall(trace_event): + """ + check whether invoke fallocate system call + when creating an image with preallocation=falloc + """ + strace_log = os.path.join(test.debugdir, "fallocate.log") + with img_utils.strace(img_stg, trace_event.split(), strace_log, True): + fail_on((process.CmdError,))(img_stg.create)(image_stg_params) + with open(strace_log) as fd: + if trace_event not in fd.read(): + test.fail("Not invoked fallocate system call when " + "creating an image with preallocation=falloc") + + def check_actual_size_field(): + """ + check whether 'actual-size' field from qemu-img info is as expected. 
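The expectation encoded in this helper reduces to a single comparison against the virtual size, keyed on the preallocation mode. A standalone sketch of that rule, with a fabricated JSON sample standing in for real `qemu-img info --output=json` output:

```python
import json

def actual_size_ok(info_json, preallocation, virtual_size):
    # full/falloc should consume at least the virtual size (XFS may even
    # preallocate a bit more); off/metadata should stay below it.
    actual = json.loads(info_json)["actual-size"]
    if preallocation in ("full", "falloc"):
        return actual >= virtual_size
    return actual < virtual_size

sample = '{"actual-size": 1073741824}'  # fabricated qemu-img info output
assert actual_size_ok(sample, "full", 1073741824)
```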
+ """ + cmd_result = img_stg.info(output="json") + actual_size = int(params["actual_size"]) + info = json.loads(cmd_result) + if params["preallocated_stg"] in ["full", "falloc"]: + if info["actual-size"] < actual_size: + test.fail("The 'actual-size' field from qemu-img info " + "is not greater than or equal to %s. " + "The actual output is %s" + % (actual_size, cmd_result)) + elif params["preallocated_stg"] in ["off", "metadata"]: + if info["actual-size"] >= actual_size: + test.fail("The 'actual-size' field from qemu-img info " + "is not less than %s. The actual output is %s" + % (actual_size, cmd_result)) + + trace_event = params.get("trace_event") + image_stg = params["images"] + root_dir = data_dir.get_data_dir() + image_stg_params = params.object_params(image_stg) + img_stg = qemu_storage.QemuImg(image_stg_params, root_dir, image_stg) + if trace_event: + check_fallocate_syscall(trace_event) + else: + img_stg.create(image_stg_params) + check_actual_size_field() diff --git a/qemu/tests/image_creation_luks_with_non_utf8_secret.py b/qemu/tests/image_creation_luks_with_non_utf8_secret.py new file mode 100755 index 0000000000000000000000000000000000000000..0b22bb8df0d81804c41298b6293bf512411b8d6d --- /dev/null +++ b/qemu/tests/image_creation_luks_with_non_utf8_secret.py @@ -0,0 +1,41 @@ +import os +import re + +from avocado.utils import process + +from virttest import data_dir +from virttest import utils_misc + + +def run(test, params, env): + """ + Negative test. + Luks image creation with non_utf8_secret: + 1. It should be failed to create the image. + 2. The error information should be corret. + e.g. Data from secret sec0 is not valid UTF-8 + + :param test: Qemu test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + image_stg_name = params["image_name_stg"] + root_dir = data_dir.get_data_dir() + image_stg_path = utils_misc.get_path(root_dir, image_stg_name) + if os.path.exists(image_stg_path): + os.remove(image_stg_path) + err_info = params["err_info"] + tmp_dir = data_dir.get_tmp_dir() + non_utf8_secret_file = os.path.join(tmp_dir, "non_utf8_secret") + non_utf8_secret = params["echo_non_utf8_secret_cmd"] % non_utf8_secret_file + process.run(non_utf8_secret, shell=True) + qemu_img_create_cmd = params["qemu_img_create_cmd"] % (non_utf8_secret_file, + image_stg_path) + cmd_result = process.run(qemu_img_create_cmd, + ignore_status=True, shell=True) + if os.path.exists(image_stg_path): + test.fail("The image '%s' should not exist. Since created" + " it with non_utf8_secret." % image_stg_path) + if not re.search(err_info, cmd_result.stderr.decode(), re.I): + test.fail("Failed to get error information. The actual error " + "information is %s." % cmd_result.stderr.decode()) diff --git a/qemu/tests/insert_media.py b/qemu/tests/insert_media.py new file mode 100644 index 0000000000000000000000000000000000000000..7da4f88c7432de27a122f7bd2486359fbaf7606b --- /dev/null +++ b/qemu/tests/insert_media.py @@ -0,0 +1,57 @@ +from virttest import env_process +from virttest import utils_misc + +from virttest.qemu_capabilities import Flags +from virttest.qemu_devices import qdevices + + +def run(test, params, env): + """ + Steps: + 1. Boot guest with scsi-cd without file, not dummy image. + 2. Add drive layer and insert media. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + def move_tary(action, dev_id): + getattr(vm.monitor, 'blockdev_%s_tray' % action)(dev_id) + if not utils_misc.wait_for( + lambda: vm.monitor.get_event(tray_move_event), 60, 0, 3): + test.fail('Failed to get event %s after %s tray.' % + (tray_move_event, action)) + + tray_move_event = params.get('tray_move_event') + dev_id = params.get('cdroms').split()[0] + params["start_vm"] = "yes" + vm_name = params.get('main_vm') + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + + if not vm.check_capability(Flags.BLOCKDEV): + test.cancel("Unsupported the insertion media.") + vm.verify_alive() + + drive = vm.devices[dev_id] + format_node = vm.devices[drive.get_param('drive')] + nodes = [format_node] + nodes.extend((n for n in format_node.get_child_nodes())) + for node in nodes: + vm.devices.remove(node, True if isinstance( + node, qdevices.QBlockdevFormatNode) else False) + if not isinstance(node, qdevices.QBlockdevFormatNode): + format_node.del_child_node(node) + drive.set_param('drive', None) + + vm.destroy(False) + vm = vm.clone(copy_state=True) + vm.create() + + move_tary('open', dev_id) + vm.monitor.blockdev_remove_medium(dev_id) + for node in reversed(nodes): + vm.devices.simple_hotplug(node, vm.monitor) + vm.monitor.blockdev_insert_medium(dev_id, format_node.get_qid()) + move_tary('close', dev_id) + vm.destroy() diff --git a/qemu/tests/interrupt_check.py b/qemu/tests/interrupt_check.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe9b45823dfccc5e198609b286ac2ea9f0a7810 --- /dev/null +++ b/qemu/tests/interrupt_check.py @@ -0,0 +1,124 @@ +import time +import logging + +from virttest import error_context +from virttest import utils_net +from virttest import utils_disk +from virttest import utils_misc + + +@error_context.context_aware +def run(test, params, env): + """ + Check number of interrupt after do some test. + 1) Launch a guest + 2) Check number of interrupts with specified pattern + 3) Do sub test on guest + 4) Recheck number of interrupts + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + + def get_irq_info(): + """ + Get interrupt information using specified pattern + """ + return session.cmd_output("grep '%s' /proc/interrupts" % irq_pattern, + print_func=logging.info).split() + + def analyze_interrupts(irq_before_test, irq_after_test): + """ + Compare interrupt information and analyze them + """ + error_context.context("Analyzing interrupts", logging.info) + filtered_result = [x for x in zip(irq_before_test, irq_after_test) + if x[0] != x[1]] + if not filtered_result: + test.fail("Number of interrupts on the CPUs have not changed after" + " test execution") + elif any([int(x[1]) < int(x[0]) for x in filtered_result]): + test.fail("The number of interrupts has decreased") + + def dd_test(): + """ + dd test to increase the number of interrupts + """ + vm_disks = utils_disk.get_linux_disks(session) + extra_disk = list(vm_disks.keys())[0] if vm_disks else None + if not extra_disk: + test.error("No additional disks found") + + error_context.context("Execute dd write test", logging.info) + session.cmd(params["dd_write"] % extra_disk) + irq_info_after_dd_write = get_irq_info() + analyze_interrupts(irq_info_before_test, irq_info_after_dd_write) + + error_context.context("Execute dd read test", logging.info) + session.cmd(params["dd_read"] % extra_disk) + irq_info_after_dd_read = get_irq_info() + analyze_interrupts(irq_info_after_dd_write, irq_info_after_dd_read) + + def ping_test(): + """ + ping test to increase the number of interrupts + """ + error_context.context("Execute ping test", logging.info) + utils_net.ping(guest_ip, 10, session=session) + irq_info_after_ping = get_irq_info() + analyze_interrupts(irq_info_before_test, irq_info_after_ping) + + def hotplug_test(): + """ + hotplug test to increase the number of interrupts + """ + current_cpu = vm.get_cpu_count() + vcpu_devices = params.objects("vcpu_devices") + error_context.context("Execute hotplug CPU test", logging.info) + for vcpu_device in vcpu_devices: + vm.hotplug_vcpu_device(vcpu_device) + if not utils_misc.wait_for( + lambda: vm.get_cpu_count() == current_cpu + len(vcpu_devices), + 30): + test.fail("Actual number of guest CPUs is not equal to expected") + guest_cpus = vm.get_cpu_count() + irq_info_after_hotplug = get_irq_info() + if (len(irq_info_after_hotplug) != (len(irq_info_before_test) + + len(vcpu_devices))): + test.fail("Number of CPUs for %s is incorrect" % irq_pattern) + + irq_num_before_hotplug = irq_info_before_test[1: (current_cpu+1)] + irq_num_after_hotplug = irq_info_after_hotplug[1: (guest_cpus+1)] + if (sum(map(int, irq_num_after_hotplug)) <= + sum(map(int, irq_num_before_hotplug))): + test.fail("Abnormal number of interrupts") + + def standby_test(): + """ + Guest standby and then check number of interrupts again + """ + time.sleep(params.get_numeric("standby_time")) + irq_info_after_standby = get_irq_info() + analyze_interrupts(irq_info_before_test, irq_info_after_standby) + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + guest_ip = vm.get_address() + guest_ifname = utils_net.get_linux_ifname(session, vm.get_mac_address(0)) + irq_pattern = params["irq_pattern"].format(ifname=guest_ifname) + test_execution = {"dd": dd_test, "ping": ping_test, + "hotplug": hotplug_test, "standby": standby_test} + + error_context.base_context("Get interrupt info before executing test", + logging.info) + irq_info_before_test = get_irq_info() + + error_context.context("Execute test to verify increased interrupts") + try: + test_execution[params["increase_test"]]() + 
logging.info("The number of interrupts increased correctly") + finally: + session.close() + vm.destroy() diff --git a/qemu/tests/ioeventfd.py b/qemu/tests/ioeventfd.py index 40445aec42f6a8e853409c92cfc31f66f6eea2d1..94d9c9eeb76ffac775d9b0ea0aa151416a231066 100644 --- a/qemu/tests/ioeventfd.py +++ b/qemu/tests/ioeventfd.py @@ -175,8 +175,20 @@ def run(test, params, env): ioeventfds = (params['orig_ioeventfd'], params['new_ioeventfd']) for ioeventfd in ioeventfds: dev_id = _set_ioeventfd_options() + # Disable iothread when ioeventfd=off + if ioeventfd == "ioeventfd=off" and params.get( + "iothread_scheme"): + error_context.context("Disable iothread under %s" % ioeventfd, + logging.info) + clone_params = params.copy() + clone_params["iothread_scheme"] = None + clone_params["image_iothread"] = None + clone_params["iothreads"] = "" + else: + clone_params = params + error_context.context('Boot a guest with "%s".' % ioeventfd, logging.info) - env_process.preprocess_vm(test, params, env, params["main_vm"]) + env_process.preprocess_vm(test, clone_params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) diff --git a/qemu/tests/kernbench.py b/qemu/tests/kernbench.py index 7f62f4706b5cab218b47ff7cd459a4b4181c9f61..bbe86a5c56eb83e05adcb478f747a622e0121ced 100644 --- a/qemu/tests/kernbench.py +++ b/qemu/tests/kernbench.py @@ -127,7 +127,7 @@ def run(test, params, env): if "guest" in test_type: cpu_num = params.get("smp") else: - cpu_num = cpu.online_cpus_count() + cpu_num = cpu.online_count() test_cmd = params.get("test_cmd") % (int(cpu_num) * cpu_multiplier) logging.info("Start making the kernel ....") (s, o) = cmd_status_output(test_cmd, timeout=cmd_timeout) diff --git a/qemu/tests/kvm_unit_test_nested.py b/qemu/tests/kvm_unit_test_nested.py new file mode 100644 index 0000000000000000000000000000000000000000..c85af0dfd05bb936036d49b41e76900feb1d29d3 --- /dev/null +++ b/qemu/tests/kvm_unit_test_nested.py @@ -0,0 +1,38 @@ +import logging +import os + +from avocado.utils import process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Run nested relstead tests in kvm-unit-test test suite + + 1) Start multiple(4) L1 guest vms + 2) Clone kvm-unit-tests test suite from repo + 3) Compile test suite + 4) Run vmx/svm test suite with multiple running vms on host + """ + + vms = env.get_all_vms() + for vm in vms: + vm.verify_alive() + + kvm_unit_test_dir = os.path.join(test.logdir, "kvm_unit_tests/") + logging.info("kvm_unit_test_dir: %s", kvm_unit_test_dir) + clone_cmd = params["clone_cmd"] % kvm_unit_test_dir + process.system(clone_cmd) + compile_cmd = params["compile_cmd"] % kvm_unit_test_dir + process.system(compile_cmd, shell=True) + + error_context.context("Run kvm_unit_tests on host", logging.info) + timeout = params.get_numeric("kvm_unit_test_timeout", 60) + run_cmd = params["test_cmd"] % kvm_unit_test_dir + logging.info("Run command %s ", run_cmd) + status, output = process.getstatusoutput(run_cmd, timeout) + + if output: + test.fail("kvm_unit_tests failed, status: %s, output: %s" % + (status, output)) diff --git a/qemu/tests/macvtap_guest_communicate.py b/qemu/tests/macvtap_guest_communicate.py new file mode 100644 index 0000000000000000000000000000000000000000..38818ccbb18eeac0620755cf48b1d2c7b839ce67 --- /dev/null +++ b/qemu/tests/macvtap_guest_communicate.py @@ -0,0 +1,112 @@ +import logging +import os + +from virttest import error_context +from virttest import data_dir 
+from virttest import utils_net +from virttest import utils_misc +from virttest import utils_netperf + + +@error_context.context_aware +def run(test, params, env): + """ + Test Step: + 1. Boot up two guest with vnic over macvtap, mode vepa, and vhost=on + 2. Ping from guest1 to guest2 for 30 counts + 3. Run netperf stress test between two guest + Params: + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def ping_test(): + #Ping from guest1 to guest2 for 30 counts + status, output = utils_net.ping(dest=addresses[1], count=30, + timeout=60, + session=sessions[0]) + if status: + test.fail("ping %s unexpected, output %s" % (vms[1], output)) + + def netperf_test(): + """ + Netperf stress test between two guest. + """ + n_client = utils_netperf.NetperfClient( + addresses[0], params["client_path"], + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_client_link")), + client=params.get("shell_client"), + port=params.get("shell_port"), + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), + username=params.get("username"), + password=params.get("password"), + linesep=params.get("shell_linesep", "\n").encode().decode( + 'unicode_escape'), + status_test_command=params.get("status_test_command", ""), + compile_option=params.get("compile_option_client", "")) + + n_server = utils_netperf.NetperfServer( + addresses[1], + params["server_path"], + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_server_link")), + username=params.get("username"), + password=params.get("password"), + client=params.get("shell_client"), + port=params.get("shell_port"), + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), + linesep=params.get("shell_linesep", "\n").encode().decode( + 'unicode_escape'), + status_test_command=params.get("status_test_command", "echo $?"), + compile_option=params.get("compile_option_server", "")) + + try: + n_server.start() + # Run netperf with message size defined in range. 
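The netperf invocation assembled in the block below reduces to a small option string (`-l` length, `-f` output unit, `-t` test name). A minimal sketch of that assembly with illustrative defaults:

```python
def build_netperf_options(duration, protocol="TCP_STREAM", unit=None):
    # -l: test length in seconds, -f: output unit, -t: test name,
    # matching the option handling in the test below.
    option = " -l %s" % duration
    if unit in list("GMKgmk"):
        option += " -f %s" % unit
    return "%s -t %s" % (option, protocol)

assert build_netperf_options(60, unit="M") == " -l 60 -f M -t TCP_STREAM"
```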
+ netperf_test_duration = params.get_numeric("netperf_test_duration") + test_protocols = params.get("test_protocols", "TCP_STREAM") + netperf_output_unit = params.get("netperf_output_unit") + test_option = params.get("test_option", "") + test_option += " -l %s" % netperf_test_duration + if netperf_output_unit in "GMKgmk": + test_option += " -f %s" % netperf_output_unit + t_option = "%s -t %s" % (test_option, test_protocols) + n_client.bg_start(addresses[1], + t_option, + params.get_numeric("netperf_para_sessions"), + params.get("netperf_cmd_prefix", ""), + package_sizes=params.get("netperf_sizes")) + if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1, + "Wait netperf test start"): + logging.info("Netperf test start successfully.") + else: + test.error("Can not start netperf client.") + utils_misc.wait_for( + lambda: not n_client.is_netperf_running(), + netperf_test_duration, 0, 5, + "Wait netperf test finish %ss" % netperf_test_duration) + finally: + n_server.stop() + n_server.cleanup(True) + n_client.cleanup(True) + + login_timeout = params.get_numeric("login_timeout", 360) + sessions = [] + addresses = [] + vms = [] + error_context.context("Init boot the vms") + for vm_name in params.objects("vms"): + vm = env.get_vm(vm_name) + vms.append(vm) + vm.verify_alive() + sessions.append(vm.wait_for_login(timeout=login_timeout)) + addresses.append(vm.get_address()) + + try: + ping_test() + netperf_test() + finally: + for session in sessions: + if session: + session.close() diff --git a/qemu/tests/memhp_threads.py b/qemu/tests/memhp_threads.py new file mode 100644 index 0000000000000000000000000000000000000000..81e250b2c621af69d0fab02236e4495186653587 --- /dev/null +++ b/qemu/tests/memhp_threads.py @@ -0,0 +1,75 @@ +import time +import logging + +from avocado.utils import process +from virttest import error_context +from virttest import utils_test +from virttest.qemu_devices.qdevices import Memory +from virttest.utils_test.qemu import MemoryHotplugTest + + +@error_context.context_aware +def run(test, params, env): + """ + prealloc-threads test: + 1) Boot guest in paused status + 2) Get qemu threads number + 3) Hotplug memory backend with a large size and option prealloc-threads + 4) Get qemu threads number during step 3 + 5) Check if qemu threads number in step 4 is expected, if not, fail test + 6) Otherwise, hotplug pc-dimm device + 7) Resume vm + 8) Check guest memory + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def get_qemu_threads(cmd, timeout=60): + """ + Get qemu threads when it's stable + """ + threads = 0 + start_time = time.time() + end_time = time.time() + float(timeout) + while time.time() < end_time: + cur_threads = int(process.system_output(cmd, shell=True)) + if cur_threads != threads: + threads = cur_threads + time.sleep(1) + else: + return threads + test.error("Can't get stable qemu threads number in %ss." 
% timeout)
+
+    vm = env.get_vm(params["main_vm"])
+    logging.info("Get qemu threads number at beginning")
+    get_threads_cmd = params["get_threads_cmd"] % vm.get_pid()
+    pre_threads = get_qemu_threads(get_threads_cmd)
+    mem = params.get("target_mems")
+    new_params = params.object_params(mem).object_params("mem")
+    attrs = Memory.__attributes__[new_params["backend"]][:]
+    new_params = new_params.copy_from_keys(attrs)
+    dev = Memory(new_params["backend"], new_params)
+    dev.set_param("id", "%s-%s" % ("mem", mem))
+    args = [vm.monitor]
+    bg = utils_test.BackgroundTest(dev.hotplug, args)
+    logging.info("Hotplug memory backend '%s' to guest" % dev["id"])
+    bg.start()
+    threads_num = int(new_params["prealloc-threads"])
+    logging.info("Get qemu threads number again")
+    post_threads = get_qemu_threads(get_threads_cmd)
+    if post_threads - pre_threads != threads_num:
+        test.fail("QEMU threads number is not right, pre is %s, post is %s"
+                  % (pre_threads, post_threads))
+    bg.join()
+    memhp_test = MemoryHotplugTest(test, params, env)
+    memhp_test.update_vm_after_hotplug(vm, dev)
+    dimm = vm.devices.dimm_device_define_by_params(params.object_params(mem),
+                                                   mem)
+    dimm.set_param("memdev", dev["id"])
+    logging.info("Hotplug pc-dimm '%s' to guest" % dimm["id"])
+    vm.devices.simple_hotplug(dimm, vm.monitor)
+    memhp_test.update_vm_after_hotplug(vm, dimm)
+    logging.info("Resume vm and check memory inside guest")
+    vm.resume()
+    memhp_test.check_memory(vm)
diff --git a/qemu/tests/microcode_test.py b/qemu/tests/microcode_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..9082d779d6120ad8d4b049c664fbef658ce8cfb7
--- /dev/null
+++ b/qemu/tests/microcode_test.py
@@ -0,0 +1,47 @@
+import re
+import logging
+
+from avocado.utils import process
+from virttest import error_context
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Microcode test:
+    1) Get microcode version on host
+    2) Boot guest with '-cpu host'
+    3) Check if the microcode version inside the guest matches the host
+
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+    """
+    def get_microcode_ver(cmd, session=None):
+        """
+        Get the microcode version in the guest or on the host
+        """
+        if session:
+            output = session.cmd_output(cmd)
+        else:
+            output = process.getoutput(cmd, shell=True)
+        ver = re.findall(r":\s*(0x[0-9A-Fa-f]+)", output)[0]
+        return ver
+
+    cmd = params["get_microcode_cmd"]
+    error_context.context("Get microcode version on host", logging.info)
+    host_ver = get_microcode_ver(cmd)
+    logging.info("The microcode version on host is %s", host_ver)
+
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login()
+    try:
+        error_context.context("Get microcode version in guest", logging.info)
+        guest_ver = get_microcode_ver(cmd, session)
+        logging.info("The microcode version in guest is %s", guest_ver)
+        if guest_ver != host_ver:
+            test.fail("The microcode version in guest does not match host")
+    finally:
+        if session:
+            session.close()
diff --git a/qemu/tests/migration.py b/qemu/tests/migration.py
index 693ade0ab4ccf6f52f87d434607903c3f6dca874..3856172902421c880993b3db0dc83c5a21e6058f 100644
--- a/qemu/tests/migration.py
+++ b/qemu/tests/migration.py
@@ -199,11 +199,6 @@ def run(test, params, env):
         test.error("migration bg check command failed")
         session2.close()
 
-    # run some functions before migrate start.
- pre_migrate = get_functions(params.get("pre_migrate"), globals()) - for func in pre_migrate: - func(vm, params, test) - # Start stress test in guest. guest_stress_test = params.get("guest_stress_test") if guest_stress_test: @@ -219,10 +214,14 @@ def run(test, params, env): target_mig_parameters = params.get("target_migrate_parameters", "None") target_mig_parameters = ast.literal_eval(target_mig_parameters) migrate_parameters = (mig_parameters, target_mig_parameters) + pre_migrate = get_functions(params.get("pre_migrate"), globals()) # Migrate the VM ping_pong = params.get("ping_pong", 1) for i in range(int(ping_pong)): + # run some functions before migrate start + for func in pre_migrate: + func(vm, params, test) if i % 2 == 0: logging.info("Round %s ping..." % str(i / 2)) else: diff --git a/qemu/tests/migration_with_netperf.py b/qemu/tests/migration_with_netperf.py index 96f119158d000a59cfdfdbd991fa32ff468aab5e..07153b397cc3acb31c5709b26b21a6ee8f68b5a2 100644 --- a/qemu/tests/migration_with_netperf.py +++ b/qemu/tests/migration_with_netperf.py @@ -1,8 +1,8 @@ import logging +import os from virttest import error_context from virttest import utils_netperf -from virttest import utils_misc from virttest import data_dir from virttest import utils_net @@ -34,97 +34,73 @@ def run(test, params, env): guest_address = vm.get_address() host_address = utils_net.get_host_ip_address(params) remote_ip = params.get("remote_host", host_address) - netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), - params.get("netperf_link")) - md5sum = params.get("pkg_md5sum") + netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_link")) netperf_server_link = params.get("netperf_server_link_win") if netperf_server_link: - netperf_server_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), - netperf_server_link) - server_md5sum_win = params.get("server_md5sum") + netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), + netperf_server_link) netperf_client_link = params.get("netperf_client_link_win", netperf_link) - client_md5sum_win = params.get("client_md5sum", md5sum) - netperf_client_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), + netperf_client_link) server_path = params.get("server_path", "/var/tmp/") client_path = params.get("client_path", "/var/tmp/") server_path_win = params.get("server_path_win") client_path_win = params.get("client_path_win") - username = params.get("username", "root") - password = params.get("password", "redhat") - passwd = params.get("hostpassword", "redhat") - client = params.get("shell_client", "ssh") - port = params.get("shell_port", "22") - prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") - linesep = params.get( - "shell_linesep", "\n").encode().decode('unicode_escape') - status_test_command = params.get("status_test_command", "echo $?") - compile_option_client_h = params.get("compile_option_client_h", "") - compile_option_server_h = params.get("compile_option_server_h", "") - compile_option_client_g = params.get("compile_option_client_g", "") - compile_option_server_g = params.get("compile_option_server_g", "") if params.get("os_type") == "linux": session.cmd("iptables -F", ignore_all_errors=True) g_client_link = netperf_link g_server_link = netperf_link g_server_path = server_path g_client_path = client_path - g_server_md5sum = md5sum - g_client_md5sum = md5sum elif params.get("os_type") == 
"windows": g_client_link = netperf_client_link g_server_link = netperf_server_link g_server_path = server_path_win g_client_path = client_path_win - g_server_md5sum = server_md5sum_win - g_client_md5sum = client_md5sum_win + netperf_client_g = None netperf_client_h = None netperf_server_g = None netperf_server_h = None try: - netperf_client_g = utils_netperf.NetperfClient(guest_address, - g_client_path, - g_client_md5sum, - g_client_link, - client=client, - port=port, - username=username, - password=password, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_client_g) - netperf_server_h = utils_netperf.NetperfServer(remote_ip, - server_path, - md5sum, - netperf_link, - password=passwd, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - install=False, - compile_option=compile_option_server_h) - netperf_client_h = utils_netperf.NetperfClient(remote_ip, client_path, - md5sum, netperf_link, - password=passwd, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_client_h) - netperf_server_g = utils_netperf.NetperfServer(guest_address, - g_server_path, - g_server_md5sum, - g_server_link, - client=client, - port=port, - username=username, - password=password, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_server_g) + netperf_client_g = utils_netperf.NetperfClient( + guest_address, g_client_path, + netperf_source=g_client_link, + client=params.get("shell_client"), + port=params.get("shell_port"), + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), + username=params.get("username"), + password=params.get("password"), + linesep=params.get("shell_linesep", "\n").encode().decode( + 'unicode_escape'), + status_test_command=params.get("status_test_command", ""), + compile_option=params.get("compile_option_client_g", "")) + netperf_server_h = utils_netperf.NetperfServer( + remote_ip, + server_path, + netperf_source=netperf_link, + password=params.get("hostpassword"), + compile_option=params.get("compile_option", "")) + netperf_client_h = utils_netperf.NetperfClient( + remote_ip, client_path, + netperf_source=netperf_link, + password=params.get("hostpassword"), + compile_option=params.get("compile_option", "")) + netperf_server_g = utils_netperf.NetperfServer( + guest_address, + g_server_path, + netperf_source=g_server_link, + username=params.get("username"), + password=params.get("password"), + client=params.get("shell_client"), + port=params.get("shell_port"), + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), + linesep=params.get("shell_linesep", "\n").encode().decode( + 'unicode_escape'), + status_test_command=params.get("status_test_command", "echo $?"), + compile_option=params.get("compile_option_server_g", "")) error_context.base_context("Run netperf test between host and guest") error_context.context("Start netserver in guest.", logging.info) netperf_server_g.start() diff --git a/qemu/tests/migration_with_numa.py b/qemu/tests/migration_with_numa.py new file mode 100644 index 0000000000000000000000000000000000000000..c2123d56e5a106c58804a19de6d5a341c1eb869b --- /dev/null +++ b/qemu/tests/migration_with_numa.py @@ -0,0 +1,114 @@ +import re +import logging + +from avocado.utils import process +from virttest import env_process +from virttest import error_context +from virttest.utils_misc import get_mem_info +from virttest.utils_misc import normalize_data_size +from 
virttest.utils_misc import NumaInfo + + +@error_context.context_aware +def run(test, params, env): + """ + Bind guest node0 and node1 to 2 host nodes, do migration test + + 1. Boot src guest with 2 numa node and all bind to 2 host numa nodes + 2. Migration + 3. Check the numa memory size in guest, linux guest only + 4. Check the numa memory policy in dest host + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment + """ + + def get_nodes_size(size_type='MemTotal', session=None): + """ + Get the node size of each node in host/guest, descending sort with size + + :param size_type: the type of the node size + :param session: ShellSession object + + :return: a list of tuple include node id and node size(M) + :rtype: list + """ + numa_info = NumaInfo(session=session) + nodes_size = {} + numa_nodes = numa_info.online_nodes + for node in numa_nodes: + node_size = numa_info.online_nodes_meminfo[node][size_type] + nodes_size[node] = float(normalize_data_size('%s KB' % node_size)) + nodes_size = sorted(nodes_size.items(), key=lambda item: item[1], reverse=True) + return nodes_size + + host_nodes_size = get_nodes_size(size_type='MemFree') + mem_devs = params.objects('mem_devs') + if len(host_nodes_size) < len(mem_devs): + test.cancel("Host do not have enough nodes for testing!") + for mem_dev in mem_devs: + size_mem = params.object_params(mem_dev).get('size_mem') + size_mem = float(normalize_data_size(size_mem)) + if host_nodes_size[0][1] >= size_mem: + params['host-nodes_mem_%s' % mem_dev] = str(host_nodes_size[0][0]) + del host_nodes_size[0] + else: + test.cancel("host nodes do not have enough memory for testing!") + + params['start_vm'] = 'yes' + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + # do migration + mig_timeout = float(params.get("mig_timeout", "3600")) + mig_protocol = params.get("migration_protocol", "tcp") + vm.migrate(mig_timeout, mig_protocol, env=env) + session = vm.wait_for_login() + + os_type = params["os_type"] + if os_type == 'linux': + error_context.context("Check the numa memory size in guest", logging.info) + # Use 30 plus the gap of 'MemTotal' in OS and '-m' in cli as threshold + mem_total = get_mem_info(session, 'MemTotal') + mem_total = float(normalize_data_size('%s KB' % mem_total)) + error_context.context("MemTotal in guest os is %s MB" + % mem_total, logging.info) + threshold = float(params.get_numeric("mem") - mem_total) + 30 + error_context.context("The acceptable threshold is: %s" + % threshold, logging.info) + guest_nodes_size = get_nodes_size(size_type='MemTotal', session=session) + guest_nodes_size = dict(guest_nodes_size) + for nodenr, node in enumerate(params.objects('guest_numa_nodes')): + mdev = params.get("numa_memdev_node%d" % nodenr) + if mdev: + mdev = mdev.split('-')[1] + size = float(normalize_data_size(params.get("size_mem_%s" % mdev))) + if abs(size - guest_nodes_size[nodenr]) > threshold: + test.fail("[Guest]Wrong size of numa node %d: %f. 
Expected:" + " %s" % (nodenr, guest_nodes_size[nodenr], size)) + + error_context.context("Check the numa memory policy in dest host", logging.info) + qemu_pid = vm.get_pid() + for mem_dev in mem_devs: + memdev_params = params.object_params(mem_dev) + size_mem = memdev_params.get('size_mem') + size_mem = int(float(normalize_data_size(size_mem, 'K'))) + smaps = process.getoutput("grep -E -B1 '^Size: *%d' /proc/%d/smaps" + % (size_mem, qemu_pid)) + mem_start_pattern = r'(\w+)-\w+\s+\w+-\w+\s+\w+\s+\w+:\w+\s\w+\s+\n'\ + r'Size:\s+%d' % size_mem + match = re.search(mem_start_pattern, smaps) + if not match: + test.error("Failed to get the mem start address in smaps: %s" % smaps) + mem_start = match.groups()[0] + numa_maps = process.getoutput("grep %s /proc/%d/numa_maps" + % (mem_start, qemu_pid)) + node_match = re.search(r'bind:(\d+)', numa_maps) + if not node_match: + test.fail("Failed to get the bind node in numa_maps: %s" % numa_maps) + bind_node = node_match.groups()[0] + expected_node = memdev_params.get('host-nodes_mem') + if bind_node != expected_node: + test.fail("Host node for memdev %s in numa_maps is %s, while the " + "expected is:%s" % (mem_dev, bind_node, expected_node)) diff --git a/qemu/tests/migration_with_speed_measurement.py b/qemu/tests/migration_with_speed_measurement.py index 8c5e47074626e49ca82a69e48c2446dd4d113334..e0700d70d67a813c55588c6b920213a051ebb774 100644 --- a/qemu/tests/migration_with_speed_measurement.py +++ b/qemu/tests/migration_with_speed_measurement.py @@ -87,6 +87,8 @@ def run(test, params, env): last_transfer_mem = 0 transfered_mem = 0 mig_stat = Statistic() + while vm.monitor.get_migrate_progress() == 0: + pass for _ in range(30): o = vm.monitor.info("migrate") warning_msg = ("Migration already ended. Migration speed is" @@ -127,7 +129,7 @@ def run(test, params, env): vm.monitor.migrate_set_speed(mig_speed) cmd = ("%s/cpuflags-test --stressmem %d,%d" % - (os.path.join(install_path, "cpu_flags"), + (os.path.join(install_path, "cpu_flags", "src"), vm_mem * 4, vm_mem / 2)) logging.debug("Sending command: %s" % (cmd)) session.sendline(cmd) @@ -137,7 +139,8 @@ def run(test, params, env): clonevm = vm.migrate(mig_timeout, mig_protocol, not_wait_for_migration=True, env=env) - mig_speed = int(utils_misc.normalize_data_size(mig_speed, "M")) + mig_speed = int(float( + utils_misc.normalize_data_size(mig_speed, "M"))) mig_stat = get_migration_statistic(vm) diff --git a/qemu/tests/mmu_basic.py b/qemu/tests/mmu_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..dd20eff1e115b39aeeb10d74b7323e37bc033b78 --- /dev/null +++ b/qemu/tests/mmu_basic.py @@ -0,0 +1,55 @@ +import logging +import re + +from virttest import error_context +from virttest import utils_test + + +@error_context.context_aware +def run(test, params, env): + """ + Test to disabling radix MMU mode on guest. + Steps: + 1) There are two options, boot up a native(radix)guest or HPT guest. + 2) Check the MMU mode in the guest. + 3) Adding disable radix to guest's kernel line directly then reboot guest. + 4) Check again the MMU mode in the guest. + 5) Check guest call trace in dmesg log. + + + :params test: QEMU test object. + :params params: Dictionary with the test parameters. + :params env: Dictionary with test environment. 
+ """ + + def cpu_info_match(pattern): + match = re.search(pattern, session.cmd_output("cat /proc/cpuinfo")) + return True if match else False + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + + error_context.context("Check the MMU mode.", logging.info) + if cpu_info_match('MMU'): + if cpu_info_match('POWER9'): + if cpu_info_match('Radix') is False: + test.fail("mmu mode is not Radix, doesn't meet expectations.") + else: + if cpu_info_match('Hash') is False: + test.fail("mmu mode is not Hash, doesn't meet expectations.") + else: + if params["mmu_option"] == 'yes': + test.fail("There should be MMU mode.") + utils_test.update_boot_option(vm, args_added="disable_radix") + session = vm.wait_for_login() + + error_context.context("Check the MMU mode.", logging.info) + if cpu_info_match('MMU'): + if cpu_info_match('Hash') is False: + test.fail("mmu mode is not Hash, mmu mode disabled failure.") + else: + if params["mmu_option"] == 'yes': + test.fail("There should be MMU mode.") + + vm.verify_dmesg() diff --git a/qemu/tests/msi_change_flag.py b/qemu/tests/msi_change_flag.py new file mode 100644 index 0000000000000000000000000000000000000000..48bc81b3cdc23c22f4a94c3ddd3f96a8cbc8d0f9 --- /dev/null +++ b/qemu/tests/msi_change_flag.py @@ -0,0 +1,131 @@ +import logging +import os +import re +import ctypes + +from avocado.utils import crypto, process +from virttest import utils_misc +from virttest import utils_test +from virttest import error_context +from provider import win_dev + + +@error_context.context_aware +def run(test, params, env): + """ + vhost is no longer disabled when guest does not use MSI-X. + The vhostforce flag is no longer required. + + 1) Start guest with different NIC option + 2) Check virtio device's irq number,irq number should be greater than one. + 3) Disable msi of guest + 4) Reboot guest,check if msi is disabled and irq number should be equal to 1. + 5) Check network and vhost process (transfer data). + 6) Check md5 value of both sides. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def irq_check(session, device_name, devcon_folder): + hwid = win_dev.get_hwids(session, device_name, devcon_folder, + login_timeout)[0] + get_irq_cmd = params["get_irq_cmd"] % (devcon_folder, hwid) + irq_list = re.findall(r':\s+(\d+)', session.cmd_output(get_irq_cmd), re.M) + if not irq_list: + test.error("device %s's irq checked fail" % device_name) + return irq_list + + def get_file_md5sum(file_name, session, timeout): + """ + return: Return the md5sum value of the guest. 
+ """ + logging.info("Get md5sum of the file:'%s'", file_name) + s, o = session.cmd_status_output("md5sum %s" % file_name, + timeout=timeout) + if s != 0: + test.error("Get file md5sum failed as %s" % o) + return re.findall(r"\w{32}", o)[0] + + tmp_dir = params["tmp_dir"] + filesize = int(params.get("filesize")) + dd_cmd = params["dd_cmd"] + delete_cmd = params["delete_cmd"] + file_md5_check_timeout = int(params.get("file_md5_check_timeout")) + login_timeout = int(params.get("login_timeout", 360)) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_serial_login() + + if params.get("os_type") == "linux": + error_context.context("Check the pci msi in guest", logging.info) + pci_id = session.cmd("lspci |grep Eth |awk {'print $1'}").strip() + status = session.cmd("lspci -vvv -s %s|grep MSI-X" % pci_id).strip() + enable_status = re.search(r'Enable\+', status, re.M | re.I) + if enable_status.group() == "Enable+": + error_context.context("Disable pci msi in guest", logging.info) + utils_test.update_boot_option(vm, args_added="pci=nomsi") + session_msi = vm.wait_for_serial_login(timeout=login_timeout) + pci_id = session_msi.cmd("lspci |grep Eth |awk {'print $1'}").strip() + status = session_msi.cmd("lspci -vvv -s %s|grep MSI-X" % pci_id).strip() + session_msi.close() + change_status = re.search(r'Enable\-', status, re.M | re.I) + if change_status.group() != "Enable-": + test.fail("virtio device's statuts is not correct") + elif enable_status.group() != "Enable+": + test.fail("virtio device's statuts is not correct") + else: + driver = params.get("driver_name") + device_name = params["device_name"] + devcon_folder = utils_misc.set_winutils_letter(session, + params["devcon_folder"]) + error_context.context("Boot guest with %s device" % driver, + logging.info) + session = utils_test.qemu.windrv_check_running_verifier(session, vm, + test, driver, + login_timeout) + error_context.context("Check %s's irq number" % device_name, + logging.info) + irq_list = irq_check(session, device_name, devcon_folder) + irq_nums = len(irq_list) + if not irq_nums > 1 and\ + max(ctypes.c_int32(int(irq)).value for irq in irq_list) < 0: + test.fail("%s's irq is not correct." % device_name) + if params.get("msi_cmd"): + error_context.context("Disable MSI in guest", logging.info) + hwid_msi = win_dev.get_hwids(session, device_name, devcon_folder, + login_timeout)[0] + session.cmd(params["msi_cmd"] % (hwid_msi, 0)) + session = vm.reboot(session=session) + error_context.context("Check %s's irq number" % device_name, + logging.info) + irq_list = irq_check(session, device_name, devcon_folder) + irq_nums = len(irq_list) + if not irq_nums == 1 and \ + min(ctypes.c_int32(int(irq)).value for irq in irq_list) > 0: + test.fail("%s's irq is not correct." 
% device_name) + + # prepare test data + guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) + host_path = os.path.join(test.tmpdir, "tmp-%s" % + utils_misc.generate_random_string(8)) + logging.info("Test setup: Creating %dMB file on host", filesize) + process.run(dd_cmd % host_path, shell=True) + + try: + src_md5 = crypto.hash_file(host_path, algorithm="md5") + logging.info("md5 value of data from src: %s" % src_md5) + # transfer data + error_context.context("Transfer data from host to %s" % vm.name, + logging.info) + vm.copy_files_to(host_path, guest_path) + dst_md5 = get_file_md5sum(guest_path, session, + timeout=file_md5_check_timeout) + logging.info("md5 value of data in %s: %s" % (vm.name, dst_md5)) + if dst_md5 != src_md5: + test.fail("File changed after transfer host -> %s" % vm.name) + finally: + os.remove(host_path) + session.cmd(delete_cmd % guest_path, + timeout=login_timeout, ignore_all_errors=True) + session.close() diff --git a/qemu/tests/multi_disk.py b/qemu/tests/multi_disk.py index b81ea3bb317036ee078b967feada056568206128..f9f9344125357bea95cf6cf64358cbf146d3b8e8 100644 --- a/qemu/tests/multi_disk.py +++ b/qemu/tests/multi_disk.py @@ -201,8 +201,9 @@ def run(test, params, env): error_context.context("Start the guest with those disks", logging.info) vm = env.get_vm(params["main_vm"]) - vm.create(timeout=max(10, stg_image_num), params=params) login_timeout = int(params.get("login_timeout", 360)) + create_timeout = int(params.get("create_timeout", 1800)) + vm.create(timeout=create_timeout, params=params) session = vm.wait_for_login(timeout=login_timeout) n_repeat = int(params.get("n_repeat", "1")) @@ -338,7 +339,7 @@ def run(test, params, env): test.fail("Fail to shut down guest.") error_context.context("Start the guest again.", logging.info) vm = env.get_vm(params["main_vm"]) - vm.create(params=params) + vm.create(timeout=create_timeout, params=params) session = vm.wait_for_login(timeout=login_timeout) error_context.context("Delete partitions in guest.", logging.info) for disk in disks: diff --git a/qemu/tests/multi_disk_random_hotplug.py b/qemu/tests/multi_disk_random_hotplug.py index 87c71476a070644e09b2471bd2d9c6cacface510..52d081a14c70ab5b406b288b77ac30b5bf667fd6 100644 --- a/qemu/tests/multi_disk_random_hotplug.py +++ b/qemu/tests/multi_disk_random_hotplug.py @@ -6,22 +6,20 @@ multi_disk_random_hotplug test for Autotest framework. import logging import random import time -import threading +import re from virttest import error_context from virttest import funcatexit from virttest import data_dir from virttest import qemu_qtree from virttest import utils_test +from virttest import utils_disk from virttest import env_process -from virttest.qemu_devices import utils from virttest.remote import LoginTimeoutError -from virttest.qemu_monitor import MonitorError -from virttest.qemu_capabilities import Flags +from virttest.qemu_monitor import Monitor - -# qdev is not thread safe so in case of dangerous ops lock this thread -LOCK = None +from provider.block_devices_plug import BlockDevicesPlug +from provider.storage_benchmark import generate_instance def stop_stresser(vm, stop_cmd): @@ -47,7 +45,7 @@ def convert_params(params, args): :note: This is only temporarily solution until qtree vs. qdev verification is available. 
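The signedness trick used in the MSI checks of msi_change_flag.py above: Windows reports message-signaled interrupts as large DWORD values that become negative once reinterpreted as signed 32-bit integers, which is how the test tells MSI apart from line-based IRQs. A minimal sketch:

```python
import ctypes

# 4294967294 (0xFFFFFFFE) reinterpreted as a signed 32-bit value is -2.
assert ctypes.c_int32(int("4294967294")).value == -2
```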
:param params: Dictionary with the test parameters - :type param: virttest.utils_params.Params + :type params: virttest.utils_params.Params :param args: Dictionary of images_define_by_params arguments :type args: dictionary :return: Updated dictionary with the test parameters @@ -65,7 +63,17 @@ def convert_params(params, args): name = args.pop('name') params['images'] += " %s" % name params['image_name_%s' % name] = args.pop('filename') - params['image_raw_device_%s' % name] = 'yes' + params["image_size_%s" % name] = params['stg_image_size'] + params['remove_image_%s' % name] = 'yes' + params['boot_drive_%s' % name] = 'no' + if params.get('image_format_%s' % name): + params['image_format_%s' % name] = params.get('image_format_%s' % name) + else: + params['image_format_%s' % name] = params.get('image_format') + if params.get('image_iothread_%s' % name): + params['image_iothread_%s' % name] = params.get('image_iothread_%s' % name) + else: + params['image_iothread_%s' % name] = params.get('image_iothread') for key, value in args.items(): params["%s_%s" % (convert.get(key, key), name)] = value return params @@ -115,29 +123,38 @@ def run(test, params, env): test.fail("%s errors occurred while verifying" " qtree vs. params" % err) - def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices): - """ - Inserts no_disks disks int qdev using randomized args from param_matrix - :param qdev: qemu devices container - :type qdev: virttest.qemu_devices.qcontainer.DevContainer - :param param_matrix: Matrix of randomizable params - :type param_matrix: list of lists - :param no_disks: Desired number of disks - :type no_disks: integer - :param params: Dictionary with the test parameters - :type params: virttest.utils_params.Params - :return: (newly added devices, number of added disks) - :rtype: tuple(list, integer) - """ - dev_idx = 0 - _new_devs_fmt = "" - pci_bus = {'aobject': 'pci.0'} - _formats = param_matrix.pop('fmt', [params.get('drive_format')]) + def _create_params_matrix(): + matrix = {} + stg_image_name = params['stg_image_name'] + if not stg_image_name[0] == "/": + stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name) + matrix['stg_image_name'] = stg_image_name + stg_params = params.get('stg_params', '').split(' ') + for i in range(len(stg_params)): + if not stg_params[i].strip(): + continue + if stg_params[i][-1] == '\\': + stg_params[i] = '%s %s' % (stg_params[i][:-1], + stg_params.pop(i + 1)) + if not stg_params[i].strip(): + continue + (cmd, parm) = stg_params[i].split(':', 1) + # ',' separated list of values + parm = parm.split(',') + for j in range(len(parm)): + if parm[j][-1] == '\\': + parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) + matrix[cmd] = parm + return matrix + + def configure_images_params(params): + params_matrix = _create_params_matrix() + _formats = params_matrix.pop('fmt', [params.get('drive_format')]) formats = _formats[:] - if len(new_devices) == 1: - strict_mode = None - else: - strict_mode = True + usb_port_occupied = 0 + usb_max_port = params.get('usb_max_port', 6) + set_drive_bus = params.get('set_drive_bus', 'yes') == 'yes' + no_disks = int(params['stg_image_num']) i = 0 while i < no_disks: # Set the format @@ -150,8 +167,11 @@ def run(test, params, env): _formats, i) break name = 'stg%d' % i - args = {'name': name, 'filename': stg_image_name % i, 'pci_bus': pci_bus} + args = {'name': name, 'filename': params_matrix['stg_image_name'] % i} fmt = random.choice(formats) + drive_bus = None + if set_drive_bus and fmt != 'virtio': + 
drive_bus = str(i) if fmt == 'virtio_scsi': args['fmt'] = 'scsi-hd' args['scsi_hba'] = 'virtio-scsi-pci' @@ -161,232 +181,88 @@ def run(test, params, env): elif fmt == 'spapr_vscsi': args['fmt'] = 'scsi-hd' args['scsi_hba'] = 'spapr-vscsi' + elif fmt == 'usb2': + usb_port_occupied += 1 + if usb_port_occupied > int(usb_max_port): + continue + args['fmt'] = fmt else: args['fmt'] = fmt - args['imgfmt'] = params['image_format_%s' % name] if params.get( - 'image_format_%s' % name) else params['image_format'] + args['drive_bus'] = drive_bus # Other params - for key, value in param_matrix.items(): + for key, value in params_matrix.items(): args[key] = random.choice(value) - - try: - devs = qdev.images_define_by_variables(**args) - # parallel test adds devices in mixed order, force bus/addrs - qdev.insert(devs, strict_mode) - except utils.DeviceError: - for dev in devs: - if dev in qdev: - qdev.remove(dev, recursive=True) - formats.remove(fmt) - continue - - params = convert_params(params, args) - env_process.preprocess_image(test, params.object_params(name), - name) - new_devices[dev_idx].extend(devs) - dev_idx = (dev_idx + 1) % len(new_devices) - _new_devs_fmt += "%s(%s) " % (name, fmt) + env_process.preprocess_image( + test, convert_params(params, args).object_params(name), name) i += 1 - if _new_devs_fmt: - logging.info("Using disks: %s", _new_devs_fmt[:-1]) - param_matrix['fmt'] = _formats - return new_devices, params - - def _hotplug(new_devices, monitor, prefix=""): - """ - Do the actual hotplug of the new_devices using monitor monitor. - :param new_devices: List of devices which should be hotplugged - :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice - :param monitor: Monitor which should be used for hotplug - :type monitor: virttest.qemu_monitor.Monitor - """ - hotplug_outputs = [] - hotplug_sleep = float(params.get('wait_between_hotplugs', 0)) - for device in new_devices: # Hotplug all devices - time.sleep(hotplug_sleep) - hotplug_outputs.append(device.hotplug(monitor)) - time.sleep(hotplug_sleep) - failed = [] - passed = [] - unverif = [] - for device in new_devices: # Verify the hotplug status - out = hotplug_outputs.pop(0) - out = device.verify_hotplug(out, monitor) - if out is True: - passed.append(str(device)) - elif out is False: - failed.append(str(device)) - else: - unverif.append(str(device)) - if not failed and not unverif: - logging.debug("%sAll hotplugs verified (%s)", prefix, len(passed)) - elif not failed: - logging.warn("%sHotplug status:\nverified %s\nunverified %s", - prefix, passed, unverif) - else: - logging.error("%sHotplug status:\nverified %s\nunverified %s\n" - "failed %s", prefix, passed, unverif, failed) - logging.error("qtree:\n%s", monitor.info("qtree", debug=False)) - test.fail("%sHotplug of some devices failed." 
% prefix) - - def hotplug_serial(new_devices, monitor): - _hotplug(new_devices[0], monitor) - - def hotplug_parallel(new_devices, monitors): - threads = [] - for i in range(len(new_devices)): - name = "Th%s: " % i - logging.debug("%sworks with %s devices", name, - [_.str_short() for _ in new_devices[i]]) - thread = threading.Thread(target=_hotplug, name=name[:-2], - args=(new_devices[i], monitors[i], name)) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() - logging.debug("All threads finished.") def _postprocess_images(): # remove and check the images _disks = [] for disk in params['images'].split(' '): if disk.startswith("stg"): - env_process.postprocess_image(test, params.object_params(disk), - disk) + env_process.postprocess_image( + test, params.object_params(disk), disk) else: _disks.append(disk) params['images'] = " ".join(_disks) - def _unplug(new_devices, qdev, monitor, prefix=""): - """ - Do the actual unplug of new_devices using monitor monitor - :param new_devices: List of devices which should be hotplugged - :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice - :param qdev: qemu devices container - :type qdev: virttest.qemu_devices.qcontainer.DevContainer - :param monitor: Monitor which should be used for hotplug - :type monitor: virttest.qemu_monitor.Monitor - """ - unplug_sleep = float(params.get('wait_between_unplugs', 0)) - unplug_outs = [] - unplug_devs = [] - for device in new_devices[::-1]: # unplug all devices - if device in qdev: # Some devices are removed with previous one - time.sleep(unplug_sleep) - unplug_devs.append(device) - try: - output = device.unplug(monitor) - except MonitorError: - # In new versions of qemu, to unplug a disk, cmd - # '__com.redhat_drive_del' is not necessary; while it's - # necessary in old qemu verisons. Following update is to - # pass the error caused by using the cmd in new - # qemu versions. - if device.get_qid() not in monitor.info("block", - debug=False): - pass - else: - raise - unplug_outs.append(output) - # Remove from qdev even when unplug failed because further in - # this test we compare VM with qdev, which should be without - # these devices. We can do this because we already set the VM - # as dirty. - if LOCK: - LOCK.acquire() - qdev.remove( - device, False if vm.check_capability(Flags.BLOCKDEV) else True) - if LOCK: - LOCK.release() - time.sleep(unplug_sleep) - failed = [] - passed = [] - unverif = [] - for device in unplug_devs: # Verify unplugs - _out = unplug_outs.pop(0) - # unplug effect can be delayed as it waits for OS respone before - # it removes the device form qtree - for _ in range(50): - out = device.verify_unplug(_out, monitor) - if out is True: - break - time.sleep(0.1) - if out is True: - passed.append(str(device)) - elif out is False: - failed.append(str(device)) - else: - unverif.append(str(device)) - - if not failed and not unverif: - logging.debug("%sAll unplugs verified (%s)", prefix, len(passed)) - elif not failed: - logging.warn("%sUnplug status:\nverified %s\nunverified %s", - prefix, passed, unverif) - else: - logging.error("%sUnplug status:\nverified %s\nunverified %s\n" - "failed %s", prefix, passed, unverif, failed) - logging.error("qtree:\n%s", monitor.info("qtree", debug=False)) - test.fail("%sUnplug of some devices failed." 
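The removed `_unplug` helper retried `verify_unplug` up to 50 times because the guest OS acknowledges removal asynchronously, so a single check can race the unplug. The retry core, extracted as a sketch (same 50 x 0.1 s budget as the old loop):

```python
import time

def wait_for_unplug_verify(device, unplug_out, monitor, retries=50, step=0.1):
    """Poll device.verify_unplug(): True = verified, False = failed,
    None = unverifiable; completion can lag behind the QMP reply."""
    result = None
    for _ in range(retries):
        result = device.verify_unplug(unplug_out, monitor)
        if result is True:
            break
        time.sleep(step)
    return result
```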
% prefix) - - def unplug_serial(new_devices, qdev, monitor): - _unplug(new_devices[0], qdev, monitor) - - def unplug_parallel(new_devices, qdev, monitors): - threads = [] - for i in range(len(new_devices)): - name = "Th%s: " % i - logging.debug("%sworks with %s devices", name, - [_.str_short() for _ in new_devices[i]]) - thread = threading.Thread(target=_unplug, - args=(new_devices[i], qdev, monitors[i])) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() - logging.debug("All threads finished.") - def verify_qtree_unsupported(params, info_qtree, info_block, qdev): return logging.warn("info qtree not supported. Can't verify qtree vs. " "guest disks.") + def enable_driver_verifier(driver, timeout=300): + return utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver, timeout) + + def _initial_win_drives(): + size = params['stg_image_size'] + disks = utils_disk.get_windows_disks_index(session, size) + if not utils_disk.update_windows_disk_attributes(session, disks): + test.fail("Failed to update windows disk attributes.") + for disk in disks[1:24]: + yield utils_disk.configure_empty_windows_disk(session, disk, size)[0] + + def run_stress_iozone(): + error_context.context("Run iozone stress after hotplug", logging.info) + iozone = generate_instance(params, vm, 'iozone') + try: + iozone_cmd_option = params['iozone_cmd_option'] + iozone_timeout = float(params['iozone_timeout']) + for letter in _initial_win_drives(): + iozone.run(iozone_cmd_option.format(letter), iozone_timeout) + finally: + iozone.clean() + + def run_stress_dd(): + error_context.context("Run dd stress after hotplug", logging.info) + output = session.cmd_output(params.get("get_dev_cmd", "ls /dev/[svh]d*")) + system_dev = re.findall(r"/dev/[svh]d\w+(?=\d+)", output)[0] + for dev in re.split(r"\s+", output): + if not dev: + continue + if not re.findall(system_dev, dev): + session.cmd(params['dd_cmd'].format(dev), + int(params['dd_timeout'])) + + Monitor.CONNECT_TIMEOUT = params.get_numeric('connect_timeout', 60) + BlockDevicesPlug.ACQUIRE_LOCK_TIMEOUT = params.get_numeric( + 'acquire_lock_timeout', 20) + BlockDevicesPlug.VERIFY_UNPLUG_TIMEOUT = params.get_numeric( + 'verify_unplug_timeout', 60) + + configure_images_params(params) + params['start_vm'] = 'yes' + env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params['main_vm']) - qdev = vm.devices session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) + is_windows = params['os_type'] == 'windows' + if is_windows: + session = enable_driver_verifier(params['driver_name']) out = vm.monitor.human_monitor_cmd("info qtree", debug=False) if "unknown command" in str(out): verify_qtree = verify_qtree_unsupported - stg_image_name = params['stg_image_name'] - if not stg_image_name[0] == "/": - stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name) - stg_image_num = int(params['stg_image_num']) - stg_params = params.get('stg_params', '').split(' ') - i = 0 - while i < len(stg_params) - 1: - if not stg_params[i].strip(): - i += 1 - continue - if stg_params[i][-1] == '\\': - stg_params[i] = '%s %s' % (stg_params[i][:-1], - stg_params.pop(i + 1)) - i += 1 - - param_matrix = {} - for i in range(len(stg_params)): - if not stg_params[i].strip(): - continue - (cmd, parm) = stg_params[i].split(':', 1) - # ',' separated list of values - parm = parm.split(',') - j = 0 - while j < len(parm) - 1: - if parm[j][-1] == '\\': - parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) - j += 1 - - 
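`run_stress_dd` above identifies the system disk as the only device that carries a partition, then excludes everything sharing that prefix from the `dd` targets. A worked example of the filtering with a hypothetical device listing:

```python
import re

output = "/dev/sda /dev/sda1 /dev/sdb /dev/sdc"   # hypothetical 'ls /dev/[svh]d*'
system_dev = re.findall(r"/dev/[svh]d\w+(?=\d+)", output)[0]
print(system_dev)   # /dev/sda  (the only disk followed by a partition number)
targets = [dev for dev in re.split(r"\s+", output)
           if dev and not re.findall(system_dev, dev)]
print(targets)      # ['/dev/sdb', '/dev/sdc']  (safe targets for dd)
```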
param_matrix[cmd] = parm - # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: @@ -402,23 +278,14 @@ def run(test, params, env): rp_times = int(params.get("repeat_times", 1)) queues = params.get("multi_disk_type") == "parallel" + timeout = params.get_numeric('plug_timeout', 300) if queues: # parallel - queues = range(len(vm.monitors)) - hotplug = hotplug_parallel - unplug = unplug_parallel - monitor = vm.monitors - global LOCK - LOCK = threading.Lock() + hotplug, unplug = 'hotplug_devs_threaded', 'unplug_devs_threaded' else: # serial - queues = range(1) - hotplug = hotplug_serial - unplug = unplug_serial - monitor = vm.monitor + hotplug, unplug = 'hotplug_devs_serial', 'unplug_devs_serial' + context_msg = "Running sub test '%s' %s" - error_context.context("Verify disk before test", logging.info) - info_qtree = vm.monitor.info('qtree', False) - info_block = vm.monitor.info_block(False) - verify_qtree(params, info_qtree, info_block, qdev) + plug = BlockDevicesPlug(vm) for iteration in range(rp_times): error_context.context("Hotplugging/unplugging devices, iteration %d" % iteration, logging.info) @@ -428,30 +295,22 @@ def run(test, params, env): logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) - error_context.context("Insert devices into qdev", logging.debug) - qdev.set_dirty() - new_devices = [[] for _ in queues] - new_devices, params = insert_into_qdev(qdev, param_matrix, - stg_image_num, params, - new_devices) - error_context.context("Hotplug the devices", logging.debug) - hotplug(new_devices, monitor) + getattr(plug, hotplug)(timeout=timeout) time.sleep(float(params.get('wait_after_hotplug', 0))) error_context.context("Verify disks after hotplug", logging.debug) info_qtree = vm.monitor.info('qtree', False) info_block = vm.monitor.info_block(False) vm.verify_alive() - verify_qtree(params, info_qtree, info_block, qdev) - qdev.set_clean() + verify_qtree(params, info_qtree, info_block, vm.devices) sub_type = params.get("sub_type_after_plug") if sub_type: error_context.context(context_msg % (sub_type, "after hotplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) - + run_stress_iozone() if is_windows else run_stress_dd() sub_type = params.get("sub_type_before_unplug") if sub_type: error_context.context(context_msg % (sub_type, "before hotunplug"), @@ -461,7 +320,7 @@ def run(test, params, env): error_context.context("Unplug and remove the devices", logging.debug) if stress_cmd: session.cmd(params["stress_stop_cmd"]) - unplug(new_devices, qdev, monitor) + getattr(plug, unplug)(timeout=timeout) if stress_cmd: session.cmd(params["stress_cont_cmd"]) _postprocess_images() @@ -471,16 +330,14 @@ def run(test, params, env): info_qtree = vm.monitor.info('qtree', False) info_block = vm.monitor.info_block(False) vm.verify_alive() - verify_qtree(params, info_qtree, info_block, qdev) - # we verified the unplugs, set the state to 0 - for _ in range(qdev.get_state()): - qdev.set_clean() + verify_qtree(params, info_qtree, info_block, vm.devices) sub_type = params.get("sub_type_after_unplug") if sub_type: error_context.context(context_msg % (sub_type, "after hotunplug"), logging.info) utils_test.run_virt_sub_test(test, params, env, sub_type) + configure_images_params(params) # Check for various KVM failures error_context.context("Validating VM after all disk hotplug/unplugs", diff --git a/qemu/tests/multi_vms_nics.py b/qemu/tests/multi_vms_nics.py index 
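All the removed `hotplug_*`/`unplug_*` plumbing is now delegated to the `BlockDevicesPlug` provider, with `multi_disk_type` selecting the serial or threaded variant. A minimal sketch of the pattern the test follows, assuming only the provider method names used above:

```python
from provider.block_devices_plug import BlockDevicesPlug

def plug_cycle(vm, parallel=False, timeout=300):
    plug = BlockDevicesPlug(vm)
    suffix = 'threaded' if parallel else 'serial'
    getattr(plug, 'hotplug_devs_%s' % suffix)(timeout=timeout)
    # ... run workload / qtree verification between plug and unplug ...
    getattr(plug, 'unplug_devs_%s' % suffix)(timeout=timeout)
```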
a5e27208b3fdf77696e02c391d4855376d2cff94..6b9926b406a1f9b93c3e31a306ffe866641f1515 100644 --- a/qemu/tests/multi_vms_nics.py +++ b/qemu/tests/multi_vms_nics.py @@ -130,7 +130,7 @@ def run(test, params, env): "Active: active|running") vms = params["vms"].split() host_mem = utils_memory.memtotal() // (1024 * 1024) - host_cpu_count = cpu.total_cpus_count() + host_cpu_count = cpu.total_count() vhost_count = 0 if params.get("vhost"): vhost_count = 1 @@ -139,7 +139,8 @@ def run(test, params, env): "pcus: %d, minimum of vcpus and vhost: %d" % (host_cpu_count, (1 + vhost_count) * len(vms))) params['mem'] = host_mem // len(vms) * 1024 - params['smp'] = host_cpu_count // len(vms) - vhost_count + params['smp'] = params['vcpu_maxcpus'] = \ + host_cpu_count // len(vms) - vhost_count if params['smp'] % 2 != 0: params['vcpu_sockets'] = 1 params["start_vm"] = "yes" diff --git a/qemu/tests/nbd_long_export_name.py b/qemu/tests/nbd_long_export_name.py new file mode 100644 index 0000000000000000000000000000000000000000..6d1825d377639976114070289ae700eb926d8390 --- /dev/null +++ b/qemu/tests/nbd_long_export_name.py @@ -0,0 +1,69 @@ +import socket + +from avocado.utils import process + +from virttest import qemu_storage +from virttest import error_context + +from provider.nbd_image_export import QemuNBDExportImage + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Create a local file with qemu-img command + 2) Export the file in raw format with qemu-nbd, + the length of export name is the max 4096 + 3) Access the exported nbd file with qemu-image, + 3.1) export name is exactly the same as 2) + 3.2) length of export name is 4097 + 3.3) length of export name is 4000 + 3.4) length of export name is 4095 + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
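The export names of exact lengths come from the `create_export_name_cmd` shell helper, whose definition lives in the cfg file rather than in this diff. A python-only stand-in that conveys the intent, with the 4096-byte maximum named in the docstring above:

```python
def make_export_name(length, fill='a'):
    """Stand-in for create_export_name_cmd: a name of an exact length."""
    return fill * length

assert len(make_export_name(4096)) == 4096   # accepted: maximum export name
# 4097 is expected to be rejected by qemu-nbd/qemu-img per the test's checks
```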
+ """ + def _make_export_name(length): + return process.run( + params['create_export_name_cmd'].format(length=length), + ignore_status=True, + shell=True + ).stdout.decode().strip() + + tag = params["images"].split()[0] + params['nbd_export_name'] = _make_export_name( + params['max_export_name_len']) + + nbd_export = QemuNBDExportImage(params, tag) + nbd_export.export_image() + + nbd_image_tag = params['nbd_image_tag'] + nbd_image_params = params.object_params(nbd_image_tag) + localhost = socket.gethostname() + nbd_image_params['nbd_server'] = localhost if localhost else 'localhost' + qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag) + + try: + # Access image with the export name, just make sure + # qemu-img info can access image successfully + out = qemu_img.info() + if 'file format: raw' not in out: + test.fail('Failed to access image, output(%s)' % out) + + # Access image with wrong export names + for length in params['access_export_name_lens'].split(): + nbd_image_params['nbd_export_name'] = _make_export_name(length) + qemu_img = qemu_storage.QemuImg(nbd_image_params, + None, nbd_image_tag) + + try: + out = qemu_img.info() + except process.CmdError as e: + if params['errmsg_check_%s' % length] not in str(e): + test.fail('Failed to get export name(%s) from output(%s)' + % (qemu_img.params['nbd_export_name'], out)) + else: + test.fail('qemu-img should fail due to wrong export name') + finally: + nbd_export.stop_export() diff --git a/qemu/tests/nested_libguestfs_unittest.py b/qemu/tests/nested_libguestfs_unittest.py new file mode 100644 index 0000000000000000000000000000000000000000..21c20b1a8ce62218bd928e07de791688af53723b --- /dev/null +++ b/qemu/tests/nested_libguestfs_unittest.py @@ -0,0 +1,88 @@ +import re +import os +import logging + +from avocado.utils import cpu + +from virttest import arch +from virttest import error_context +from virttest import utils_package + + +@error_context.context_aware +def run(test, params, env): + """ + Execute the libguestfs-test-tool unittest inside L1 guest. + + 1) Launch a guest and check if libguestfs-tools is installed. + 2) Execute the libguestfs-test-tool directly launching qemu. + 3) Analyze the result of libguestfs-test-tool. + 4) Check the nested file exists. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + kvm_module = arch.get_kvm_module_list()[-1].replace('-', '_') + is_kvm_mode = params["nested_flag"] == "nested_flag_on" + nested_file = os.path.join("/sys/module/", kvm_module, + "parameters/nested") + unittest_timeout = params.get_numeric("unittest_timeout") + + cpu_vendor = cpu.get_vendor() + cpu_arch = cpu.get_arch() + if cpu_arch == "powerpc" and int(cpu.get_family().strip("power")) < 9: + test.cancel("Nested feature requires a POWER9 CPU") + elif cpu_arch == "x86_64": + flag = "vmx" if cpu_vendor == "intel" else "svm" + params["cpu_model_flags"] = params["cpu_model_flags"].format(flag) + + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + vm.create(params=params) + vm.verify_alive() + session = vm.wait_for_login() + + error_context.context("Check if libguestfs-tools is installed.", + logging.info) + sm = utils_package.RemotePackageMgr(session, "libguestfs-tools") + if not (sm.is_installed("libguestfs-tools") or sm.install()): + test.cancel("Unable to install libguestfs-tools inside guest.") + + try: + error_context.context("Execute the libguestfs-test-tool unittest " + "directly launching qemu.", logging.info) + stderr_file = "/tmp/lgf_stderr" + lgf_cmd = ("LIBGUESTFS_BACKEND=direct libguestfs-test-tool " + "--timeout {} 2> {}".format(unittest_timeout, + stderr_file)) + lgf_s, lgf_o = session.cmd_status_output(lgf_cmd, + timeout=unittest_timeout) + logging.debug("libguestfs-test-tool stdout:\n%s", lgf_o) + lgf_stderr = session.cmd_output("cat " + stderr_file) + lgf_tcg = re.search("Back to tcg accelerator", lgf_stderr) + + error_context.context("Analyze the libguestfs-test-tool test result.", + logging.info) + fail_msg = ("the exit status is non-zero" if lgf_s else + "back to tcg accelerator" if lgf_tcg and is_kvm_mode else "") + if fail_msg: + logging.debug("libguestfs-test-tool stderr:\n%s", lgf_stderr) + test.fail("libguestfs-test-tool execution failed due to: %s. 
" + % fail_msg) + + error_context.context("Check the nested file status.", logging.info) + file_s, file_o = session.cmd_status_output("cat " + nested_file) + if re.match(r"[1Y]", file_o) and is_kvm_mode: + logging.info("Guest runs with nested flag, the nested feature has " + "been enabled.") + elif file_s == 1 and not is_kvm_mode: + logging.info("Guest runs without nested flag, so the nested file " + "does not exist.") + else: + logging.error("Nested file status: %s, output: %s", file_s, file_o) + test.fail("Getting the status of nested file has unexpected " + "result.") + finally: + session.cmd("rm -f " + stderr_file, ignore_all_errors=True) + session.close() diff --git a/qemu/tests/nested_test.py b/qemu/tests/nested_test.py index b25c0e0b6969661ad9846b9c967185c8ce6049d1..2e129739a20706d4c272f27687343c0f49fa373e 100644 --- a/qemu/tests/nested_test.py +++ b/qemu/tests/nested_test.py @@ -8,6 +8,7 @@ from avocado.utils import software_manager from virttest import error_context from virttest import data_dir as virttest_data_dir +from virttest import cpu as virttest_cpu @error_context.context_aware @@ -48,11 +49,13 @@ def run(test, params, env): return invent_file.name def copy_network_script(env): + login_timeout = params.get_numeric("login_timeout", 360) deps_dir = virttest_data_dir.get_deps_dir() file_name = os.path.basename(setup_bridge_sh) br_file = os.path.join(deps_dir, file_name) for vm in get_live_vms(env): + vm.wait_for_login(timeout=login_timeout) vm.copy_files_to(br_file, setup_bridge_sh) def generate_parameter_file(params): @@ -62,14 +65,21 @@ def run(test, params, env): guest_password = params.get("password") bootstrap_options = params.get("nested_bs_options") + accept_cancel = params.get_boolean("accept_cancel") kar_cmd = "python3 ./ConfigTest.py " test_type = params.get("test_type") - if test_type: - case_name = params.get("case_name") - if case_name: - kar_cmd += " --%s=%s " % (test_type, case_name) + variant_name = params.get("nested_test") + case_name = params.get("case_name", "") + + if variant_name == "check_cpu_model_l2": + host_cpu_models = virttest_cpu.get_host_cpu_models() + case_name = ','.join(["%s.%s" % (case_name, i) + for i in host_cpu_models]) + + kar_cmd += " --%s=%s " % (test_type, case_name) + l2_guest_name = params.get("l2_guest_name") if l2_guest_name: kar_cmd += " --guestname=%s" % l2_guest_name @@ -79,6 +89,12 @@ def run(test, params, env): else: kar_cmd += " --clone=no" + l2_kar_options = params.get("l2_kar_options") + if l2_kar_options: + kar_cmd += " %s" % l2_kar_options + + logging.info("Kar cmd: %s" % kar_cmd) + results_dir = test.logdir logging.info("Result_dir: %s" % results_dir) @@ -87,6 +103,7 @@ def run(test, params, env): data = {"guest_password": guest_password, "bootstrap_options": bootstrap_options, + "accept_cancel": accept_cancel, "command_line": kar_cmd, "setup_br_sh": setup_bridge_sh, "host_log_files_dir": results_dir, @@ -100,7 +117,7 @@ def run(test, params, env): return json_file.name if (params.get('check_vendor', 'no') == 'yes' and - cpu.get_cpu_vendor_name() != 'intel'): + cpu.get_vendor() != 'intel'): test.cancel("We only test this case with Intel platform now") sm = software_manager.SoftwareManager() diff --git a/qemu/tests/nic_hotplug.py b/qemu/tests/nic_hotplug.py index 00b36093a39dae8559838addcbc46deac0d2d775..869738e1f56c34f1141acace81c62d52f08615f2 100644 --- a/qemu/tests/nic_hotplug.py +++ b/qemu/tests/nic_hotplug.py @@ -115,9 +115,10 @@ def run(test, params, env): root_port_id = bus.get_free_root_port() if root_port_id: 
pci_add_cmd += ",bus=%s" % root_port_id - root_port = vm.devices.get_buses({"aobject": root_port_id})[0] - root_port.insert(qdevices.QBaseDevice(pci_model, - aobject=device_id)) + if used_sameid != "yes": + root_port = vm.devices.get_buses({"aobject": root_port_id})[0] + root_port.insert(qdevices.QBaseDevice(pci_model, + aobject=device_id)) else: test.error("No free root port for device %s to plug." % device_id) diff --git a/qemu/tests/nic_opt.py b/qemu/tests/nic_opt.py index df38e5b324ef5266dcf2890b99ce8975bfb48d77..a53b0f77e3d9d1639a56456639ea2de1485e584e 100644 --- a/qemu/tests/nic_opt.py +++ b/qemu/tests/nic_opt.py @@ -128,7 +128,7 @@ def run(test, params, env): check_type = params.get("check_type") smp_value = params.get_numeric("smp") or params.get_numeric("vcpu_maxcpus") - if cpu.online_cpus_count() < 2 * smp_value: + if cpu.online_count() < 2 * smp_value: test.cancel("The number of smp counts in this host is not big enough") vm = env.get_vm(params["main_vm"]) vm.verify_alive() diff --git a/qemu/tests/numa_consistency.py b/qemu/tests/numa_consistency.py index 0fb569ddf054d32f24d3bb2f5be6c2ca1bdd9114..19987f24d0cdb33063dd0ca3b01ce6fee0d8b962 100644 --- a/qemu/tests/numa_consistency.py +++ b/qemu/tests/numa_consistency.py @@ -57,6 +57,7 @@ def run(test, params, env): vm.verify_alive() vcpu_threads = vm.vcpu_threads session = vm.wait_for_login(timeout=timeout) + threshold = params.get_numeric("threshold", target_type=float) dd_size = 256 if dd_size * len(vcpu_threads) > int(params['mem']): @@ -97,7 +98,7 @@ def run(test, params, env): page_size = resource.getpagesize() / 1024 memory_allocated = (memory_used_after - memory_used_before) * page_size / 1024 - if 1 - float(memory_allocated) / float(dd_size) > 0.05: + if 1 - float(memory_allocated) / float(dd_size) > threshold: numa_hardware_cmd = params.get("numa_hardware_cmd") if numa_hardware_cmd: numa_info = process.system_output(numa_hardware_cmd, diff --git a/qemu/tests/numa_cpu.py b/qemu/tests/numa_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..86206ffd7e2572d53002fbbae360d725fce3b58a --- /dev/null +++ b/qemu/tests/numa_cpu.py @@ -0,0 +1,237 @@ +import logging +import re + +from virttest import error_context +from virttest import utils_package +from virttest.utils_misc import NumaInfo + + +@error_context.context_aware +def run(test, params, env): + """ + Assign cpu to numa node with "-numa cpu", check the numa info in monitor + and guest os match with the qemu cli + """ + + def convert_cpu_topology_to_ids(socketid=None, dieid=None, coreid=None, + threadid=None): + """ + Convert the cpu topology to cpu id list + """ + def _get_boundary(value, max_value, weight): + """ + Get the data range of one bit + + :param value: the current value of the bit + :param max_value: the max value of the bit + :param weight: the weight of the bit + """ + min_boundary = int(value if value is not None else 0) * weight + max_boundary = int(value if value is not None else (max_value - 1)) * weight + return (min_boundary, max_boundary) + + if vm_arch in ('x86_64', 'i386'): + socket_min, socket_max = _get_boundary(socketid, vcpu_sockets, socket_weight) + die_min, die_max = _get_boundary(dieid, vcpu_dies, die_weight) + core_min, core_max = _get_boundary(coreid, vcpu_cores, core_weight) + thread_min, thread_max = _get_boundary(threadid, vcpu_threads, thread_weight) + cpu_min = socket_min + die_min + core_min + thread_min + cpu_max = socket_max + die_max + core_max + thread_max + elif vm_arch in ('ppc64', 'ppc64le'): + cpu_min = 
int(coreid) + cpu_max = int(coreid) + vcpu_threads - 1 + cpu_list = list(range(cpu_min, cpu_max + 1)) + return cpu_list + + def numa_cpu_guest(): + """ + Get the cpu id list for each node in guest os, sort with node id. + """ + error_context.context("Get cpus in guest os", logging.info) + numa_cpu_guest = [] + if vm_arch in ('ppc64', 'ppc64le'): + numa_info_guest = NumaInfo(session=session) + nodes_guest = numa_info_guest.online_nodes + for node in nodes_guest: + numa_cpus = numa_info_guest.online_nodes_cpus[node] + numa_cpus = sorted([int(v) for v in numa_cpus.split()]) + numa_cpu_guest.append(numa_cpus) + else: + error_context.context("Get SRAT ACPI table", logging.info) + if not utils_package.package_install("acpidump", session): + test.cancel("Please install acpidump in guest to proceed") + content = session.cmd_output('cd /tmp && acpidump -n SRAT -b && ' + 'iasl -d srat.dat && cat srat.dsl') + pattern = re.compile(r'Proximity Domain Low\(8\)\s+:\s+([0-9A-Fa-f]+)' + r'\n.*Apic ID\s+:\s+([0-9A-Fa-f]+)') + node_cpus = pattern.findall(content) + + tmp = {} + for item in node_cpus: + nodeid = int(item[0], 16) + cpuid = int(item[1], 16) + if nodeid in tmp.keys(): + tmp[nodeid] += [cpuid] + else: + tmp[nodeid] = [cpuid] + for item in sorted(tmp.items(), key=lambda item: item[0]): + numa_cpu_guest.append(sorted(item[1])) + return numa_cpu_guest + + def numa_cpu_cli(): + """ + Get the cpu id list for each node according to the qemu cli, sort with nodeid. + """ + error_context.context("Get the expected cpus in qemu command line", logging.info) + numa_cpus = params.objects("guest_numa_cpus") + numa_cpu_cli = [] + tmp = {} + for numa_cpu in numa_cpus: + numa_cpu_params = params.object_params(numa_cpu) + nodeid = numa_cpu_params["numa_cpu_nodeid"] + socket = numa_cpu_params.get("numa_cpu_socketid") + die = numa_cpu_params.get("numa_cpu_dieid") + core = numa_cpu_params.get("numa_cpu_coreid") + thread = numa_cpu_params.get("numa_cpu_threadid") + cpu_list = convert_cpu_topology_to_ids(socket, die, core, thread) + if nodeid in tmp.keys(): + tmp[nodeid] += cpu_list + else: + tmp[nodeid] = cpu_list + for item in sorted(tmp.items(), key=lambda item: item[0]): + numa_cpu_cli.append(sorted(item[1])) + return numa_cpu_cli + + def numa_cpu_setted(numa_cpu_options): + """ + Get the new setted cpu id list for each node according to the set options, + sort with nodeid. + """ + numa_cpu_setted = [] + tmp = {} + for cpu in numa_cpu_options: + nodeid = cpu['node_id'] + socket = cpu.get("socket_id") + die = cpu.get("die_id") + core = cpu.get("core_id") + thread = cpu.get("thread_id") + cpu_list = convert_cpu_topology_to_ids(socket, die, core, thread) + if nodeid in tmp.keys(): + tmp[nodeid] += cpu_list + else: + tmp[nodeid] = cpu_list + for item in sorted(tmp.items(), key=lambda item: item[0]): + numa_cpu_setted.append(sorted(item[1])) + return numa_cpu_setted + + def get_hotpluggable_cpus(): + """ + Get the specified cpu id list for each node that sort with node id and + unspecified cpu topology with the output of "query-hotpluggable-cpus". 
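`convert_cpu_topology_to_ids` maps a partially specified topology to cpu ids on x86 by weighting each level, with an unset level spanning its full range. Worked numbers for an assumed 2 sockets x 1 die x 2 cores x 2 threads topology (weights 4/4/2/1):

```python
vcpu_sockets, vcpu_dies, vcpu_cores, vcpu_threads = 2, 1, 2, 2
socket_weight = vcpu_dies * vcpu_cores * vcpu_threads   # 4
die_weight = vcpu_cores * vcpu_threads                  # 4
core_weight = vcpu_threads                              # 2

# socket 1, die 0, core 1, thread unset -> thread spans 0..(threads-1)
cpu_min = 1 * socket_weight + 0 * die_weight + 1 * core_weight + 0
cpu_max = (1 * socket_weight + 0 * die_weight + 1 * core_weight
           + (vcpu_threads - 1))
print(list(range(cpu_min, cpu_max + 1)))   # [6, 7]
```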
+ """ + error_context.context("Get the hotpluggable cpus", logging.info) + specified_cpus = [] + unspecified_cpus = [] + tmp = {} + out = vm.monitor.info("hotpluggable-cpus") + for vcpu_info in out: + vcpus_count = vcpu_info["vcpus-count"] + vcpu_info = vcpu_info["props"] + nodeid = vcpu_info.get("node-id") + socket = vcpu_info.get("socket-id") + die = vcpu_info.get("die-id") + core = vcpu_info.get("core-id") + thread = vcpu_info.get("thread-id") + if nodeid is not None: + cpu_list = convert_cpu_topology_to_ids(socket, die, core, thread) + if nodeid in tmp.keys(): + tmp[nodeid] += cpu_list + else: + tmp[nodeid] = cpu_list + else: + options = {'socket_id': socket, 'die_id': die, + 'core_id': core, 'thread_id': thread} + for key in list(options.keys()): + if options[key] is None: + del options[key] + unspecified_cpus.append(options) + + for item in sorted(tmp.items(), key=lambda item: item[0]): + specified_cpus.append(sorted(item[1])) + return specified_cpus, unspecified_cpus + + vm = env.get_vm(params["main_vm"]) + qemu_preconfig = params.get_boolean("qemu_preconfig") + os_type = params["os_type"] + vm_arch = params["vm_arch_name"] + + vcpu_threads = params.get_numeric('vcpu_threads') + if vm_arch in ('x86_64', 'i386'): + vcpu_sockets = params.get_numeric('vcpu_sockets') + vcpu_dies = params.get_numeric('vcpu_dies') + vcpu_cores = params.get_numeric('vcpu_cores') + + socket_weight = vcpu_dies * vcpu_cores * vcpu_threads + die_weight = vcpu_cores * vcpu_threads + core_weight = vcpu_threads + thread_weight = 1 + + numa_cpu_cli = numa_cpu_cli() + specified_cpus, unspecified_cpus = get_hotpluggable_cpus() + + if specified_cpus != numa_cpu_cli: + test.fail("cpu ids for each node with 'info hotpluggable-cpus' is: %s," + "but the seting in qemu cli is: %s" + % (specified_cpus, numa_cpu_cli)) + + if qemu_preconfig: + node_ids = [] + for node in params.objects('guest_numa_nodes'): + node_params = params.object_params(node) + node_ids.append(node_params.get_numeric('numa_nodeid')) + node_ids = sorted(node_ids) + + # Set unspecified cpus from node 0 to max, and set the left cpus to node 0 + set_numa_node_options = [] + for index, cpu_option in enumerate(unspecified_cpus): + try: + cpu_option.update({'node_id': node_ids[index]}) + except IndexError: + cpu_option.update({'node_id': 0}) + set_numa_node_options.append(cpu_option) + + for options in set_numa_node_options: + vm.monitor.set_numa_node('cpu', **options) + + numa_cpu_setted = numa_cpu_setted(set_numa_node_options) + + expected_cpus = [] + # All nodes have corresponding cpus in qemu cli at the initial state + numa_cpu_setted.extend([[]] * (len(numa_cpu_cli) - len(numa_cpu_setted))) + for item in zip(numa_cpu_cli, numa_cpu_setted): + expected_cpus.append(sorted(item[0] + item[1])) + + new_specified_cpus = get_hotpluggable_cpus()[0] + if new_specified_cpus != expected_cpus: + test.fail("cpu ids for each node with 'info hotpluggable-cpus' after" + "numa_cpu_set is %s, but expected result is: %s" + % (new_specified_cpus, expected_cpus)) + + vm.monitor.exit_preconfig() + vm.resume() + else: + expected_cpus = numa_cpu_cli + + numa_cpu_monitor = [sorted(list(item[1])) for item in vm.monitor.info_numa()] + if numa_cpu_monitor != expected_cpus: + test.fail("cpu ids for each node with 'info numa' after setted is: %s, " + "but expected result is: %s" % (numa_cpu_monitor, expected_cpus)) + + # check numa cpus in guest os, only for Linux + if os_type == 'linux': + session = vm.wait_for_login() + numa_cpu_guest = numa_cpu_guest() + session.close() + if 
numa_cpu_guest != expected_cpus: + test.fail("cpu ids for each node in guest os is: %s, but the " + "expected result is: %s" % (numa_cpu_guest, expected_cpus)) diff --git a/qemu/tests/numa_dist.py b/qemu/tests/numa_dist.py new file mode 100644 index 0000000000000000000000000000000000000000..1993c9361f5f1cad074e9707116324afb49b9eb3 --- /dev/null +++ b/qemu/tests/numa_dist.py @@ -0,0 +1,48 @@ +import ast + +from virttest import error_context +from virttest.utils_misc import NumaInfo + + +@error_context.context_aware +def run(test, params, env): + """ + Simple test to check if NUMA dist options are being parsed properly + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + os_type = params["os_type"] + session = vm.wait_for_login() + if os_type == 'windows': + return + + expected_numa_dist = {} + guest_numa_nodes = params.objects("guest_numa_nodes") + for numa_node in guest_numa_nodes: + numa_node_dist_value = ['unset' for i in range(len(guest_numa_nodes))] + numa_params = params.object_params(numa_node) + numa_nodeid = numa_params["numa_nodeid"] + numa_dist = ast.literal_eval(numa_params.get("numa_dist", "[]")) + for dist in numa_dist: + dst_node = dist[0] + distance_value = dist[1] + numa_node_dist_value[dst_node] = str(distance_value) + expected_numa_dist[int(numa_nodeid)] = numa_node_dist_value + + for src_id, dist_info in expected_numa_dist.items(): + # The distance from a node to itself is always 10 + dist_info[src_id] = '10' + for dst_id, val in enumerate(dist_info): + if val == 'unset': + # when distances are only given in one direction for each pair + # of nodes, the distances in the opposite directions are assumed + # to be the same + expected_numa_dist[src_id][dst_id] = expected_numa_dist[dst_id][src_id] + + numa_info_guest = NumaInfo(session=session) + session.close() + + guest_numa_dist = numa_info_guest.distances + if guest_numa_dist != expected_numa_dist: + test.fail("The actual numa distance info in guest os is: %s, but the " + "expected result is: %s" % (guest_numa_dist, expected_numa_dist)) diff --git a/qemu/tests/numa_negative.py b/qemu/tests/numa_negative.py new file mode 100644 index 0000000000000000000000000000000000000000..26e94e8e24149477cd4ce994b4f6439209aa268c --- /dev/null +++ b/qemu/tests/numa_negative.py @@ -0,0 +1,39 @@ +import logging +import re + +from virttest import error_context +from virttest import virt_vm + + +@error_context.context_aware +def run(test, params, env): + """ + numa options negative test: + 1) Boot vm with incorrect numa options + 2) Check if can get the expected qemu output + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
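The two distance rules applied in `numa_dist.py` above, shown on a hypothetical two-node table: a node's distance to itself is fixed at 10, and a direction left 'unset' inherits the value of the opposite direction:

```python
expected = {0: ['unset', '21'], 1: ['unset', 'unset']}
for src, row in expected.items():
    row[src] = '10'                        # node-to-itself is always 10
for src, row in expected.items():
    for dst, val in enumerate(row):
        if val == 'unset':
            row[dst] = expected[dst][src]  # mirror the opposite direction
print(expected)   # {0: ['10', '21'], 1: ['21', '10']}
```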
+ """ + + vm = env.get_vm(params["main_vm"]) + params['start_vm'] = 'yes' + negative_type = params.get('negative_type') + error_msg = params.get('error_msg', '') + try: + vm.create(params=params) + output = vm.process.get_output() + except virt_vm.VMCreateError as e: + output = str(e) + if negative_type == 'non-fatal': + test.fail("Create VM failed as unexpected: %s" % output) + + error_context.context("Check the expected error message: %s" + % error_msg, logging.info) + if not re.search(error_msg, output): + test.fail("Can not get expected error message: %s" % error_msg) + + if negative_type == 'non-fatal': + vm.verify_alive() + vm.verify_kernel_crash() diff --git a/qemu/tests/numa_opts.py b/qemu/tests/numa_opts.py index 3a1d489cb13f3828fe6426055349f8b337f9af70..0c08d75e42509c6ac3258bdccbb38b279bcfdb6d 100644 --- a/qemu/tests/numa_opts.py +++ b/qemu/tests/numa_opts.py @@ -1,46 +1,111 @@ import logging - -logger = logging.getLogger(__name__) -dbg = logger.debug +from virttest import error_context +from virttest.utils_misc import normalize_data_size +from virttest.utils_misc import get_mem_info +from virttest.utils_misc import NumaInfo +@error_context.context_aware def run(test, params, env): """ Simple test to check if NUMA options are being parsed properly + 1) Boot vm with different numa nodes + 2) With qemu monitor, check if size and cpus for every node match with cli + 3) In guest os, check if size and cpus for every node match with cli - This _does not_ test if NUMA information is being properly exposed to the - guest. + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment """ - dbg("starting numa_opts test...") + def numa_info_guest(): + """ + The numa info in guest os, linux only + + return: An array of (ram, cpus) tuples, where ram is the RAM size in + MB and cpus is a set of CPU numbers + """ + + numa_info_guest = NumaInfo(session=session) + + numa_guest = [] + nodes_guest = numa_info_guest.online_nodes + for node in nodes_guest: + node_size = numa_info_guest.online_nodes_meminfo[node]['MemTotal'] + node_size = float(normalize_data_size('%s KB' % node_size)) + node_cpus = numa_info_guest.online_nodes_cpus[node] + node_cpus = set([int(v) for v in node_cpus.split()]) + numa_guest.append((node_size, node_cpus)) + + # It is a known WONTFIX issue for x86, node info of node0 and node1 is + # opposite in guest os when vm have 2 nodes + if (vm_arch in ("x86_64", "i686") and len(numa_guest) == 2): + numa_guest.reverse() + return numa_guest vm = env.get_vm(params["main_vm"]) + os_type = params["os_type"] + vm_arch = params["vm_arch_name"] + session = vm.wait_for_login() + + error_context.context("starting numa_opts test...", logging.info) - numa = vm.monitors[0].info_numa() - dbg("info numa reply: %r", numa) - - numa_nodes = params.get("numa_nodes") - if numa_nodes: - numa_nodes = int(params.get("numa_nodes")) - if len(numa) != numa_nodes: - test.fail( - "Wrong number of numa nodes: %d. Expected: %d" % - (len(numa), numa_nodes)) - - for nodenr, node in enumerate(numa): - size = params.get("numa_node%d_size" % (nodenr)) - if size is not None: - size = int(size) - if size != numa[nodenr][0]: - test.fail( - "Wrong size of numa node %d: %d. 
Expected: %d" % - (nodenr, numa[nodenr][0], size)) - - cpus = params.get("numa_node%d_cpus" % (nodenr)) + # Get numa info from monitor + numa_monitor = vm.monitors[0].info_numa() + error_context.context("numa info in monitor: %r" % numa_monitor, logging.info) + monitor_expect_nodes = params.get_numeric("monitor_expect_nodes") + if len(numa_monitor) != monitor_expect_nodes: + test.fail("[Monitor]Wrong number of numa nodes: %d. Expected: %d" % + (len(numa_monitor), monitor_expect_nodes)) + + if os_type == 'linux': + # Get numa info in guest os, only for Linux + numa_guest = numa_info_guest() + error_context.context("numa info in guest: %r" % numa_guest, logging.info) + guest_expect_nodes = int(params.get("guest_expect_nodes", + monitor_expect_nodes)) + if len(numa_guest) != guest_expect_nodes: + test.fail("[Guest]Wrong number of numa nodes: %d. Expected: %d" % + (len(numa_guest), guest_expect_nodes)) + # Use 30 plus the gap of 'MemTotal' in OS and '-m' in cli as threshold + MemTotal = get_mem_info(session, 'MemTotal') + MemTotal = float(normalize_data_size('%s KB' % MemTotal)) + error_context.context("MemTotal in guest os is %s MB" + % MemTotal, logging.info) + threshold = float(params.get_numeric("mem") - MemTotal) + 30 + error_context.context("The acceptable threshold is: %s" + % threshold, logging.info) + else: + numa_guest = numa_monitor + session.close() + + for nodenr, node in enumerate(numa_guest): + mdev = params.get("numa_memdev_node%d" % (nodenr)) + if mdev: + mdev = mdev.split('-')[1] + size = float(normalize_data_size(params.get("size_%s" % mdev))) + else: + size = params.get_numeric("mem") + + cpus = params.get("numa_cpus_node%d" % (nodenr)) if cpus is not None: - cpus = set([int(v) for v in cpus.split()]) - if cpus != numa[nodenr][1]: - test.fail( - "Wrong CPU set on numa node %d: %s. Expected: %s" % - (nodenr, numa[nodenr][1], cpus)) + cpus = set([int(v) for v in cpus.split(",")]) + else: + cpus = set([int(v) for v in range(params.get_numeric('smp'))]) + + if len(numa_monitor) != 0: + if size != numa_monitor[nodenr][0]: + test.fail("[Monitor]Wrong size of numa node %d: %f. Expected: %f" + % (nodenr, numa_monitor[nodenr][0], size)) + if cpus != numa_monitor[nodenr][1]: + test.fail("[Monitor]Wrong CPU set on numa node %d: %s. Expected: %s" + % (nodenr, numa_monitor[nodenr][1], cpus)) + + if os_type == 'linux': + if size - numa_guest[nodenr][0] > threshold: + test.fail("[Guest]Wrong size of numa node %d: %f. Expected: %f" + % (nodenr, numa_guest[nodenr][0], size)) + if cpus != numa_guest[nodenr][1]: + test.fail("[Guest]Wrong CPU set on numa node %d: %s. 
Expected: %s" + % (nodenr, numa_guest[nodenr][1], cpus)) diff --git a/qemu/tests/numa_stress.py b/qemu/tests/numa_stress.py index 56d061ccd6176153f8d0ad01693d82e8292cea10..d8edf416a7f1878592bc8fe8e28f8fb708653589 100644 --- a/qemu/tests/numa_stress.py +++ b/qemu/tests/numa_stress.py @@ -83,10 +83,10 @@ def run(test, params, env): if test_count < len(host_numa_node.online_nodes): test_count = len(host_numa_node.online_nodes) - tmpfs_size = 0 + tmpfs_size = params.get_numeric("tmpfs_size") for node in host_numa_node.nodes: node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal")) - if tmpfs_size < node_mem: + if tmpfs_size == 0: tmpfs_size = node_mem tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test") tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path) diff --git a/qemu/tests/nvdimm.py b/qemu/tests/nvdimm.py index 62d119b58fd38ec8946e9809718dfd4d8c924b9a..37f6dc8be59d43dc1e412ff5ede62e25c86aadb9 100644 --- a/qemu/tests/nvdimm.py +++ b/qemu/tests/nvdimm.py @@ -1,10 +1,12 @@ import os import logging +import time from avocado.utils import process from virttest import env_process from virttest import error_context +from virttest.utils_test.qemu import MemoryHotplugTest class NvdimmTest(object): @@ -37,18 +39,20 @@ class NvdimmTest(object): self.test.fail("Execute command '%s' failed, output: %s" % (cmd, output)) return output.strip() - def verify_nvdimm(self, vm): + def verify_nvdimm(self, vm, mems): """ verify nvdimm in monitor and guest :params vm: VM object + :params mems: memory objects """ - dimms_expect = set(["dimm-%s" % dev for dev in self.params.objects("mem_devs")]) + dimms_expect = set("dimm-%s" % mem for mem in mems) + logging.info("Check if dimm %s in memory-devices" % dimms_expect) dimms_monitor = set([info["data"]["id"] for info in vm.monitor.info("memory-devices")]) if not dimms_expect.issubset(dimms_monitor): invisible_dimms = dimms_expect - dimms_monitor self.test.fail("%s dimms are invisible in monitor" % invisible_dimms) - check_cmd = "test -b %s" % self.params.get("pmem", "/dev/pmem0") + check_cmd = "test -b %s" % self.params.get("dev_path", "/dev/pmem0") self.run_guest_cmd(check_cmd) def format_nvdimm(self): @@ -73,7 +77,7 @@ class NvdimmTest(object): """ Umount nvdimm device in guest. 
""" - umount_cmd = "umount %s" % self.params["pmem"] + umount_cmd = "umount %s" % self.params["dev_path"] self.run_guest_cmd(umount_cmd) def md5_hash(self, file): @@ -139,8 +143,19 @@ def run(test, params, env): error_context.context("Login to the guest", logging.info) login_timeout = int(params.get("login_timeout", 360)) nvdimm_test.session = vm.wait_for_login(timeout=login_timeout) + mems = params.objects("mem_devs") + target_mems = params.objects("target_mems") + if target_mems: + hotplug_test = MemoryHotplugTest(test, params, env) + for mem in target_mems: + hotplug_test.hotplug_memory(vm, mem) + time.sleep(10) + mems += target_mems error_context.context("Verify nvdimm in monitor and guest", logging.info) - nvdimm_test.verify_nvdimm(vm) + nvdimm_ns_create_cmd = params.get("nvdimm_ns_create_cmd") + if nvdimm_ns_create_cmd: + nvdimm_test.run_guest_cmd(nvdimm_ns_create_cmd) + nvdimm_test.verify_nvdimm(vm, mems) error_context.context("Format and mount nvdimm in guest", logging.info) nvdimm_test.mount_nvdimm() nv_file = params.get("nv_file", "/mnt/nv") @@ -152,7 +167,7 @@ def run(test, params, env): nvdimm_test.umount_nvdimm() nvdimm_test.session = vm.reboot() error_context.context("Verify nvdimm after reboot", logging.info) - nvdimm_test.verify_nvdimm(vm) + nvdimm_test.verify_nvdimm(vm, mems) nvdimm_test.mount_nvdimm(format_device="no") new_md5 = nvdimm_test.md5_hash(nv_file) error_context.context("Compare current md5 to original md5", logging.info) diff --git a/qemu/tests/nvdimm_mapsync.py b/qemu/tests/nvdimm_mapsync.py new file mode 100644 index 0000000000000000000000000000000000000000..eada6c459ace876d0214ecf403e89afac3a535cd --- /dev/null +++ b/qemu/tests/nvdimm_mapsync.py @@ -0,0 +1,57 @@ +import re +import os +import pathlib + +from avocado.utils import process + +from virttest import env_process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Run nvdimm cases: + 1) Mount nvdimm device on host + 2) Create a file in the mount point + 3) Boot guest with nvdimm backed by the file + 4) Check flag 'sf' is present in qemu smaps + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + dev_path = params["dev_path"] + p = pathlib.Path(dev_path) + if not p.is_block_device(): + test.error("There is no nvdimm device in host, please add kernel param" + "'memmap' to emulate one") + + format_cmd = params["format_command"] + mount_cmd = params["mount_command"] + truncate_cmd = params["truncate_command"] + check_cmd = params["check_command"] + clean_cmd = params["clean_command"] + try: + process.run(format_cmd) + process.run(mount_cmd, shell=True) + process.run(truncate_cmd) + except Exception as e: + test.error(e) + else: + try: + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params['main_vm']) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + vm.wait_for_login() + vm_pid = vm.get_pid() + + output = process.system_output(check_cmd % vm_pid).decode() + if 'sf' not in re.findall('(?:VmFlags: )(.*)', output)[0].split(): + test.fail("Flag 'sf' is not present in smaps file") + finally: + vm.destroy() + finally: + if os.path.ismount(params["mount_dir"]): + process.run(clean_cmd, shell=True) diff --git a/qemu/tests/nvdimm_mode.py b/qemu/tests/nvdimm_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..9af2005c654143ba803c2a61b0e810fdcc822990 --- /dev/null +++ b/qemu/tests/nvdimm_mode.py @@ -0,0 +1,35 @@ +from virttest import utils_package +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Run nvdimm cases: + 1) Boot guest with two nvdimm devices + 2) Change the two nvdimm devices to dax mode inside guest + 3) Check if both devices are dax mode + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + if not utils_package.package_install("ndctl", session): + test.cancel("Please install ndctl inside guest to proceed") + create_dax_cmd = params["create_dax_cmd"] + nvdimm_number = len(params["mem_devs"].split()) + try: + for i in range(nvdimm_number): + session.cmd(create_dax_cmd % i) + output = session.cmd_output(params["ndctl_check_cmd"]) + output = eval(output) + for item in output: + if item['mode'] != 'devdax': + test.fail("Change both nvdimm to dax mode failed") + finally: + utils_package.package_remove("ndctl", session) + session.close() + vm.destroy() diff --git a/qemu/tests/nvme_plug.py b/qemu/tests/nvme_plug.py new file mode 100644 index 0000000000000000000000000000000000000000..4c67aa103f4ef90b9d740af248d16d32ae7869fd --- /dev/null +++ b/qemu/tests/nvme_plug.py @@ -0,0 +1,56 @@ +from virttest import env_process +from virttest import utils_disk +from virttest.tests import unattended_install + +from provider.block_devices_plug import BlockDevicesPlug +from provider.storage_benchmark import generate_instance + + +def run(test, params, env): + """ + Test hot plug and unplug NVMe device. + + Steps: + 1. Install guest with local filesystem. + 2. Hot plug NVMe device to guest. + 3. Check if the NVMe device exists in qemu side. + 4. Check if the NVMe has been successfully added to guest. + 5. Run fio in the hot plugged NVMe device in guest. + 6. Unplug the NVMe device. + 7. Check if the NVMe device still exists. + 8. Check if the NVMe has been successfully removed from guest. + 9. Reboot guest. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + unattended_install.run(test, params, env) + + if params.get('remove_options'): + for option in params.get('remove_options').split(): + del params[option] + params['cdroms'] = params.get('default_cdroms') + + params['start_vm'] = 'yes' + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + + plug = BlockDevicesPlug(vm) + plug.hotplug_devs_serial() + target = '/dev/%s' % plug[0] + os_type = params['os_type'] + data_img_size = params.get('image_size_%s' % params.get('data_img_tag')) + if os_type == 'windows': + utils_disk.update_windows_disk_attributes(session, plug[0]) + drive_letter = utils_disk.configure_empty_disk(session, plug[0], + data_img_size, + os_type)[0] + target = r'%s\:\\%s' % (drive_letter, params.get('fio_filename')) + fio = generate_instance(params, vm, 'fio') + for option in params['fio_options'].split(';'): + fio.run('--filename=%s %s' % (target, option)) + plug.unplug_devs_serial() + vm.reboot(session) diff --git a/qemu/tests/pci_bridge.py b/qemu/tests/pci_bridge.py index 5feb6e53d51a877f87de985afd5aafa8d8dcce42..458370daa9646a3655de1156a4e5fbebb79671b6 100644 --- a/qemu/tests/pci_bridge.py +++ b/qemu/tests/pci_bridge.py @@ -5,6 +5,7 @@ from virttest import env_process from virttest import utils_test from virttest import utils_misc from virttest import utils_disk +from virttest.qemu_capabilities import Flags def prepare_pci_bridge(test, params, pci_bridge_num): @@ -152,6 +153,8 @@ def disk_hotplug(test, params, vm, session, image_name, if drive_format == 'virtio': return [devices[-1]] else: + if Flags.BLOCKDEV in vm.devices.caps: + return devices[::3] return devices[::2] diff --git a/qemu/tests/pcie_hotplug_opt.py b/qemu/tests/pcie_hotplug_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..ff6d76d046c65465e25adfe0cb7a36cf2c071be5 --- /dev/null +++ b/qemu/tests/pcie_hotplug_opt.py @@ -0,0 +1,131 @@ +import logging + +from virttest import error_context +from virttest.qemu_monitor import QMPCmdError + + +@error_context.context_aware +def run(test, params, env): + """ + Hot-unplug/Hot-plug virtio-blk-pci, virtio-scsi-pci, virtio-net-pci while + the parent pcie-root-port use 'hotplug=off' + + 1) Boot guest with virtio-blk-pci, virtio-scsi-pci, virtio-net-pci device, + and the parent pcie-root-port use 'hotplug=off' + 2) Hot unplug those devices + 3) Hot plug new virtio-blk-pci, virtio-scsi-pci, virtio-net-pci device to + pcie-root-port that use 'hotplug=off' + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + + def hotplug_blk(): + """ + Hot-plug virtio-blk-pci + """ + virtio_blk_pci_dev = image_devs[-1] + virtio_blk_pci_dev.set_param("bus", free_root_port_id) + virtio_blk_pci_dev.hotplug(vm.monitor) + + def hotplug_scsi(): + """ + Hot-plug virtio-scsi-pci + """ + pci_add_cmd = "device_add driver=virtio-scsi-pci, id=plug" + pci_add_cmd += ",bus=%s" % free_root_port_id + vm.monitor.send_args_cmd(pci_add_cmd) + + def hotplug_nic(): + """ + Hot-plug virtio-net-pci + """ + nic_name = 'plug' + nic_params = params.object_params(nic_name) + nic_params["nic_model"] = 'virtio-net-pci' + nic_params["nic_name"] = nic_name + vm.hotplug_nic(**nic_params) + + def unplug_device(device): + """ + Hot unplug device + + :param device: QDevice object + """ + parent_bus = device.get_param('bus') + driver = device.get_param('driver') + device_id = device.get_param('id') + error_context.context("Hot-unplug %s" % driver, logging.info) + try: + device.unplug(vm.monitor) + except QMPCmdError as e: + if e.data["desc"] != unplug_error % parent_bus: + test.fail("Hot-unplug failed but '%s' isn't the expected error" + % e.data["desc"]) + error_context.context("Hot-unplug %s failed as expected: %s" + % (driver, e.data["desc"]), logging.info) + else: + test.fail("Hot-unplug %s should not success" % driver) + + def plug_device(driver): + """ + Hot plug device + + :param driver: the driver name + """ + error_context.context("Hot-plug %s" % driver, logging.info) + try: + callback[driver]() + except QMPCmdError as e: + if e.data["desc"] != hotplug_error % free_root_port_id: + test.fail("Hot-plug failed but '%s' isn't the expected error" + % e.data["desc"]) + error_context.context("Hot-plug %s failed as expected: %s" + % (driver, e.data["desc"]), logging.info) + else: + test.fail("Hot-plug %s should not success" % driver) + + vm = env.get_vm(params["main_vm"]) + vm.wait_for_login() + images = params.objects('images') + hotplug_error = params.get('hotplug_error') + unplug_error = params.get('unplug_error') + unplug_devs = [] + + blk_image = images[1] + blk_pci_dev = vm.devices.get_by_qid(blk_image)[0] + unplug_devs.append(blk_pci_dev) + + # In this case only one virtio-scsi-pci device, and the drive name is + # fixed 'virtio-scsi-pci' for q35 + scsi_pci_dev = vm.devices.get_by_params({'driver': 'virtio-scsi-pci'})[0] + unplug_devs.append(scsi_pci_dev) + + nic_id = vm.virtnet[0].device_id + nic_dev = vm.devices.get_by_qid(nic_id)[0] + unplug_devs.append(nic_dev) + for dev in unplug_devs: + unplug_device(dev) + + # TODO: eject device in windows guest + + # one free root port is enough, use the default one provided by framework + bus = vm.devices.get_buses({'aobject': 'pci.0'})[0] + free_root_port_dev = bus.get_free_root_port() + free_root_port_id = free_root_port_dev.child_bus[0].busid + plug_image = images[-1] + plug_image_params = params.object_params(plug_image) + image_devs = vm.devices.images_define_by_params(plug_image, + plug_image_params, + 'disk') + error_context.context("Hot-plug the Drive/BlockdevNode first, " + "will be used by virtio-blk-pci", logging.info) + for image_dev in image_devs[:-1]: + vm.devices.simple_hotplug(image_dev, vm.monitor) + + callback = {"virtio-blk-pci": hotplug_blk, + "virtio-scsi-pci": hotplug_scsi, + "virtio-net-pci": hotplug_nic} + for driver in ['virtio-blk-pci', 'virtio-scsi-pci', 'virtio-net-pci']: + plug_device(driver) diff --git a/qemu/tests/pktgen.py b/qemu/tests/pktgen.py index 23887a5ddd441619dd7667ba44b54f4c8a6375bc..96a8d651bd7cb7ede83aa11e111454527f05874b 100644 --- 
a/qemu/tests/pktgen.py +++ b/qemu/tests/pktgen.py @@ -55,7 +55,7 @@ def run(test, params, env): vm_pktgen = env.get_vm(pktgen_server) vm_pktgen.verify_alive() server_session = vm_pktgen.wait_for_login(timeout=login_timeout) - runner = server_session.cmd_output_safe + runner = server_session.cmd pktgen_ip = vm_pktgen.get_address() pktgen_mac = vm_pktgen.get_mac_address() server_interface = utils_net.get_linux_ifname(server_session, @@ -66,7 +66,7 @@ def run(test, params, env): server_session = remote.wait_for_login(s_shell_client, pktgen_ip, s_shell_port, s_username, s_passwd, s_shell_prompt) - runner = server_session.cmd_output_safe + runner = server_session.cmd server_interface = params.get("server_interface") if not server_interface: test.cancel("Must config server interface before test") diff --git a/qemu/tests/ple_test.py b/qemu/tests/ple_test.py new file mode 100644 index 0000000000000000000000000000000000000000..08dea532675e2604cd20aa2a3a05373ffb3b3289 --- /dev/null +++ b/qemu/tests/ple_test.py @@ -0,0 +1,81 @@ +import re +import logging + +from avocado.utils import cpu +from avocado.utils import process +from virttest import env_process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + ple test: + 1) Check if ple is enabled on host, if not, enable it + 2) Boot guest and run unixbench inside guest + 3) Record benchmark scores and shutdown guest + 4) Disable ple on host + 5) Boot guest and run unixbench inside guest again + 6) Compare benchmark scores with step 3) + 7) Restore env, set ple back + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def reload_module(value): + """ + Reload module + """ + process.system("rmmod %s" % module) + cmd = "modprobe %s %s=%s" % (module, mod_param, value) + process.system(cmd) + + def run_unixbench(cmd): + """ + Run unixbench inside guest, return benchmark scores + """ + error_context.context("Run unixbench inside guest", logging.info) + output = session.cmd_output_safe(cmd, timeout=4800) + scores = re.findall(r"System Benchmarks Index Score\s+(\d+\.?\d+)", + output) + return [float(i) for i in scores] + + module = params["module_name"] + mod_param = params["mod_param"] + read_cmd = "cat /sys/module/%s/parameters/%s" % (module, mod_param) + origin_ple = process.getoutput(read_cmd) + error_context.context("Enable ple on host if it's disabled", logging.info) + if origin_ple == 0: + reload_module(params["ple_value"]) + + host_cpu_count = cpu.online_count() + params["smp"] = host_cpu_count + params["vcpu_maxcpus"] = host_cpu_count + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + session.cmd(params["get_unixbench"]) + try: + cmd = params["run_unixbench"] + scores_on = run_unixbench(cmd) + logging.info("Unixbench scores are %s when ple is on" % scores_on) + vm.destroy() + + error_context.context("Disable ple on host", logging.info) + reload_module(0) + vm.create(params=params) + session = vm.wait_for_login() + scores_off = run_unixbench(cmd) + logging.info("Unixbench scores are %s when ple is off" % scores_off) + scores_off = [x*0.96 for x in scores_off] + if scores_on[0] < scores_off[0] or scores_on[1] < scores_off[1]: + test.fail("Scores is much lower when ple is on than off") + finally: + session.cmd_output_safe("rm -rf %s" % params["unixbench_dir"]) + session.close() + 
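The ple comparison above multiplies the ple-off scores by 0.96 before comparing, so the ple-on run may score up to 4% lower without failing. Worked numbers with hypothetical Unixbench index scores:

```python
scores_on = [1480.0, 2010.0]                          # ple enabled
scores_off = [x * 0.96 for x in [1500.0, 2050.0]]     # -> [1440.0, 1968.0]
# passes: each ple-on score is within 4% of the ple-off score
assert scores_on[0] >= scores_off[0] and scores_on[1] >= scores_off[1]
```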
vm.destroy() + reload_module(origin_ple) diff --git a/qemu/tests/plug_cdrom.py b/qemu/tests/plug_cdrom.py index 6802e14f819affec36f10789dd9c082fe0a13e4e..8b8deee02c44054ab609ad6a88559734a04cacc8 100644 --- a/qemu/tests/plug_cdrom.py +++ b/qemu/tests/plug_cdrom.py @@ -44,11 +44,8 @@ def run(test, params, env): def _reboot_vm(session): """Reboot vm.""" - reboot_cmd = params["reboot_cmd"] - error_context.context( - "Rebooting VM by \"%s\"." % reboot_cmd, logging.info) - session.sendline(reboot_cmd) - return vm.wait_for_login(timeout=360) + error_context.context("Rebooting VM.", logging.info) + return vm.reboot(session=session, timeout=360) def _check_cdrom_info_by_qmp(items): """Check the cdrom device info by qmp.""" diff --git a/qemu/tests/pmu_nmi_watchdog.py b/qemu/tests/pmu_nmi_watchdog.py new file mode 100644 index 0000000000000000000000000000000000000000..8e25984158b7ab39550d29e7ab08d918b6453676 --- /dev/null +++ b/qemu/tests/pmu_nmi_watchdog.py @@ -0,0 +1,148 @@ +""" +pmu nmi watchdog +""" +import logging +import time +import os.path + +from virttest import utils_test +from virttest import env_process +from virttest import error_context +from virttest import data_dir + +@error_context.context_aware +def run(test, params, env): + """ + Test the function of pmu nmi watchdog + + Test Step: + 1. see every function step + + :param test: qemu test object + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + # pylint: disable=R0914, C0103, W1201, W0641, R0915 + if "custom_smp" in params: + params["smp"] = params["custom_smp"] + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + vm.create(params=params) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + end_time = time.time() + timeout + + deadlock_test_link = params.get("deadlock_test_link") + deadlock_test_path = params.get("deadlock_test_path") + src_link = os.path.join(data_dir.get_deps_dir(""), + deadlock_test_link) + vm.copy_files_to(src_link, deadlock_test_path, timeout=60) + pre_cmd = params["pre_cmd"] + session.cmd(pre_cmd) + + deadlock_test_cmd = params["deadlock_test_cmd"] + + def _nmi_watchdog_check(session): + """ + check if pmu_nmi_watchdog run successfully + """ + session.cmd(deadlock_test_cmd, ignore_all_errors=True) + + qmp_monitors = vm.get_monitors_by_type("qmp") + if qmp_monitors: + qmp_monitor = qmp_monitors[0] + else: + test.error("Could not find a QMP monitor, aborting test.") + + while time.time() < end_time: + if qmp_monitor.get_event("RESET"): + logging.info("pmu_nmi_watchdog run successfully.") + return True + + return False + + def nmi_watchdog_test(): + """ + Basic functions + """ + res = _nmi_watchdog_check(session) + if not res: + logging.error("pmu_nmi_watchdog doesn't run successfully.") + + def nmi_watchdog_edit(): + """ + Test whether this switch takes effect. + """ + switch_cmd0 = params["switch_cmd0"] + session.cmd(switch_cmd0) + deadlock_test_cmd = params["deadlock_test_cmd"] + session.cmd(deadlock_test_cmd, ignore_all_errors=True) + + rmmod_deadlock = params["rmmod_deadlock_cmd"] + session.cmd(rmmod_deadlock) + + switch_cmd1 = params["switch_cmd1"] + session.cmd(switch_cmd1) + + res = _nmi_watchdog_check(session) + if not res: + logging.error("pmu_nmi_watchdog doesn't run successfully.") + + def cmdline_test(): + """ + Test pmu nmi watchdog work with different cmdline, + such as set "irqchip.gicv3_pseudo_nmi=0",then pmu nmi watchdog cannot run. 
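`_nmi_watchdog_check` above treats a QMP RESET event arriving before the deadline as proof that the pmu nmi watchdog fired on the deadlocked guest. The polling core as a sketch:

```python
import time

def saw_reset(qmp_monitor, end_time):
    """Poll the QMP monitor for a RESET event until the deadline passes."""
    while time.time() < end_time:
        if qmp_monitor.get_event('RESET'):
            return True   # the NMI watchdog fired and rebooted the guest
    return False
```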
+ """ + boot_option_added = params.get("boot_option_added") + boot_option_removed = params.get("boot_option_removed") + + utils_test.update_boot_option(vm, + args_removed=boot_option_removed, + args_added=boot_option_added) + + res = _nmi_watchdog_check(session) + if not res: + logging.debug("pmu_nmi_watchdog doesn't run successfully.") + else: + logging.error("pmu_nmi_watchdog run successfully is not our target!") + + def workwith_i6300esb(): + """ + Testing if i6300esb can work with pmu nmi watchdog + """ + trigger_cmd = params.get("trigger_cmd", "echo c > /dev/watchdog") + watchdog_action = params.get("watchdog_action", "reset") + + def _trigger_watchdog(session, trigger_cmd=None): + """ + Trigger watchdog action + Param session: guest connect session. + Param trigger_cmd: cmd trigger the watchdog + """ + if trigger_cmd is not None: + error_context.context(("Trigger Watchdog action using:'%s'." % + trigger_cmd), logging.info) + session.sendline(trigger_cmd) + + # Testing i6300esb + _trigger_watchdog(session, trigger_cmd) + if watchdog_action == "reset": + logging.info("Try to login the guest after reboot") + vm.wait_for_login(timeout=timeout) + logging.info("Watchdog action '%s' come into effect." % + watchdog_action) + + res = _nmi_watchdog_check(session) + if not res: + logging.error("pmu_nmi_watchdog doesn't run successfully.") + + # main procedure + test_type = params.get("test_type") + + if test_type in locals(): + test_running = locals()[test_type] + test_running() + else: + test.error("Oops test %s doesn't exist, have a check please." + % test_type) diff --git a/qemu/tests/ppc_check_cpu_and_mmu.py b/qemu/tests/ppc_check_cpu_and_mmu.py new file mode 100644 index 0000000000000000000000000000000000000000..6083d0f4d30b082300837f927b0bd78aa41edf9d --- /dev/null +++ b/qemu/tests/ppc_check_cpu_and_mmu.py @@ -0,0 +1,45 @@ +import re +import logging + +from avocado.utils import process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Check the CPU model and MMU mode of host and guest are matched. + + 1) Launch a guest. + 2) Get CPU information both of host and guest. + 3) Assert that CPU model and MMU node are matched. + + :param test: the test object. + :param params: the test params. + :param env: test environment. 
+ """ + def get_cpu_mmu(session=None): + cmd_func = session.cmd if session else process.getoutput + cpu_info = cmd_func("tail -n 11 /proc/cpuinfo") + cpu_info = re.findall(r"(?:cpu\s+:\s+(\w+\d+)).*(?:MMU\s+:\s+(\w+))", + cpu_info, re.S) + if cpu_info: + return cpu_info[0] + test.error("Unable to get the CPU information of this system.") + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + guest_session = vm.wait_for_login() + + error_context.base_context("Get CPU information of host and guest.", + logging.info) + host_cpu_model, host_mmu_mode = get_cpu_mmu() + guest_cpu_model, guest_mmu_mode = get_cpu_mmu(guest_session) + + error_context.context("Assert CPU model and MMU mode of host and guest.", + logging.info) + assert (guest_cpu_model == host_cpu_model), ("The CPU model of the host " + "and guest do not match") + assert (guest_mmu_mode == host_mmu_mode), ("The MMU mode of the host and " + "guest do not match") + logging.info("CPU model and MMU mode of host and guest are matched.") diff --git a/qemu/tests/ppc_ic_mode_check.py b/qemu/tests/ppc_ic_mode_check.py new file mode 100644 index 0000000000000000000000000000000000000000..24a65f06aec0eb5c2820ee25e8c5ec14df4c4a19 --- /dev/null +++ b/qemu/tests/ppc_ic_mode_check.py @@ -0,0 +1,48 @@ +import re +import logging + +from virttest import error_context +from virttest.virt_vm import VMCreateError + + +@error_context.context_aware +def run(test, params, env): + """ + Check the interrupt controller mode. + + 1) Launch a guest with kernel-irqchip=on/off and ic-mode=xics/xive. + 2) Get pic info from human monitor and get interrupts info inside guest. + 3) Check whether irqchip and ic-mode match what we set. + + :param test: the test object. + :param params: the test params. + :param env: test environment. + """ + ic_mode = params["ic_mode"] + kernel_irqchip = params["kernel_irqchip"] + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + + error_context.base_context("Try to create a qemu instance...", logging.info) + try: + vm.create(params=params) + except VMCreateError as e: + if re.search(r"kernel_irqchip requested but unavailable|" + r"XIVE-only machines", e.output): + test.cancel(e.output) + raise + else: + vm.verify_alive() + session = vm.wait_for_login() + + error_context.context("Get irqchip and ic-mode information.", logging.info) + pic_o = vm.monitor.info("pic") + irqchip_match = re.search(r"^irqchip: %s" % kernel_irqchip, pic_o, re.M) + ic_mode_match = session.cmd_status("grep %s /proc/interrupts" + % ic_mode.upper()) == 0 + + error_context.context("Check wherever irqchip/ic-mode match.", logging.info) + if not irqchip_match: + test.fail("irqchip does not match to '%s'." % kernel_irqchip) + elif not ic_mode_match: + test.fail("ic-mode does not match to '%s'." 
% ic_mode) diff --git a/qemu/tests/pxe_query_cpus.py b/qemu/tests/pxe_query_cpus.py index 25129b1a7e8eb554ee79dc45485e092315302bb2..d820f3e80c6577f4f4cdea0856546de259cf5f22 100644 --- a/qemu/tests/pxe_query_cpus.py +++ b/qemu/tests/pxe_query_cpus.py @@ -15,7 +15,7 @@ from virttest import env_process @error_context.context_aware def _capture_tftp(test, vm, timeout): error_context.context("Snoop packet in the tap device", logging.info) - output = aexpect.run_fg("tcpdump -nli %s" % vm.get_ifname(), + output = aexpect.run_fg("tcpdump -nli %s port '(tftp or bootps)'" % vm.get_ifname(), logging.debug, "(pxe capture) ", timeout)[1] error_context.context("Analyzing the tcpdump result", logging.info) @@ -84,7 +84,7 @@ def run(test, params, env): while True: count += 1 try: - vm.monitor.info("cpus") + vm.monitor.info("cpus", debug=False) vm.verify_status("running") if not bg.is_alive(): break diff --git a/qemu/tests/qemu_disk_img_info.py b/qemu/tests/qemu_disk_img_info.py index 2530a6f25158e766685c7b85d1fe802e04a51c99..5034b5fd34e92d87f98f897dc3feaaea06ad8c30 100644 --- a/qemu/tests/qemu_disk_img_info.py +++ b/qemu/tests/qemu_disk_img_info.py @@ -59,6 +59,10 @@ def run(test, params, env): p.get(opt, default))) enable_ceph = params.get("enable_ceph") == "yes" enable_iscsi = params.get("enable_iscsi") == "yes" + enable_gluster = params.get("enable_gluster") == "yes" + enable_nbd = params.get("enable_nbd") == "yes" + enable_curl = params.get("enable_curl") == "yes" + enable_ssh = params.get("enable_ssh") == "yes" if enable_ceph: update_params.update({ "enable_ceph_%s" % base_image: optval("enable_ceph", @@ -79,12 +83,51 @@ def run(test, params, env): base_image, params, "no"), "lun_%s" % base_image: optval("lun", base_image, params, "0")}) + elif enable_gluster: + update_params.update({ + "enable_gluster_%s" % base_image: optval("enable_gluster", + base_image, + params, "no"), + "storage_type_%s" % base_image: optval("storage_type", + base_image, + params, "filesystem")}) + elif enable_nbd: + update_params.update({ + "enable_nbd_%s" % base_image: optval("enable_nbd", + base_image, + params, "no"), + "nbd_port_%s" % base_image: optval("nbd_port", + base_image, + params, "10809"), + "storage_type_%s" % base_image: optval("storage_type", + base_image, + params, "filesystem")}) + elif enable_curl: + update_params.update({ + "enable_curl_%s" % base_image: optval("enable_curl", + base_image, + params, "no"), + "storage_type_%s" % base_image: optval("storage_type", + base_image, + params, "filesystem")}) + elif enable_ssh: + update_params.update({ + "enable_ssh_%s" % base_image: optval("enable_ssh", + base_image, + params, "no"), + "storage_type_%s" % base_image: optval("storage_type", + base_image, + params, "filesystem")}) params.update(update_params) image_chain = params.get("image_chain", "").split() check_files = [] md5_dict = {} for idx, tag in enumerate(image_chain): + # VM cannot boot up from a readonly image + if params.object_params(tag).get('image_readonly') == 'yes': + continue + params["image_chain"] = " ".join(image_chain[:idx + 1]) info_test = InfoTest(test, params, env, tag) n_params = info_test.create_snapshot() diff --git a/qemu/tests/qemu_guest_agent.py b/qemu/tests/qemu_guest_agent.py index 3f30824c32cc859b3e92ac83fa5cb2d8dec99dea..9ccd5ae61dfd4cef1bae6dc5666e88612464bad7 100644 --- a/qemu/tests/qemu_guest_agent.py +++ b/qemu/tests/qemu_guest_agent.py @@ -155,6 +155,29 @@ class QemuGuestAgentTest(BaseVirtTest): get latest qemu-guest-agent rpm package url. 
:return: rpm pkg list """ + def get_mdl_tag_build_status(get_mdl_tag_cmd): + """ + Get module tag and qemu-kvm build status. + """ + logging.info("Get the needed module tag.") + mdl_tag = process.system_output(get_mdl_tag_cmd, + shell=True, + timeout=query_timeout + ).strip().split()[0].decode() + logging.info("Check whether the qemu-kvm build is ready") + get_qemu_name_cmd = "brew list-tagged %s" % mdl_tag + get_qemu_name_cmd += " | grep qemu-kvm" + qemu_build_name = process.system_output(get_qemu_name_cmd, + shell=True, + timeout=query_timeout + ).strip().split()[0].decode() + get_build_ready_cmd = "brew buildinfo %s | grep State" % qemu_build_name + output = process.system_output(get_build_ready_cmd, + shell=True, + timeout=query_timeout + ).strip().decode() + return mdl_tag, "COMPLETE" in output + virt_module_stream = self.params.get("virt_module_stream", "") guest_name = self.params.get("guest_name") arch = self.params["vm_arch_name"] @@ -168,7 +191,7 @@ class QemuGuestAgentTest(BaseVirtTest): except avo_path.CmdNotFoundError as detail: raise TestCancel(str(detail)) - error_context.context("Get latest virt module tag of %s" + error_context.context("Get the latest qemu-guest-agent pkg of %s" " stream." % virt_module_stream, logging.info) # target release,such as 810,811 @@ -184,14 +207,23 @@ class QemuGuestAgentTest(BaseVirtTest): tag_version) get_latest_mdl_tag_cmd = "brew list-targets |grep" get_latest_mdl_tag_cmd += " %s |sort -r |head -n 1" % platform_tag - latest_mdl_tag = process.system_output(get_latest_mdl_tag_cmd, - shell=True, - timeout=query_timeout - ).strip().split()[0].decode() - error_context.context("Get qemu-guest-agent rpm pkg url.", - logging.info) + mdl_tag, build_s = get_mdl_tag_build_status(get_latest_mdl_tag_cmd) + + if not build_s: + logging.info("The qemu-kvm build's status is not ready," + " so we will check it in the previous virt module") + get_pre_mdl_tag_cmd = "brew list-targets |grep %s" % platform_tag + get_pre_mdl_tag_cmd += " |sort -r |head -n 2 |tail -n 1" + mdl_tag, build_s = get_mdl_tag_build_status( + get_pre_mdl_tag_cmd) + if not build_s: + self.test.error("Please check why the recent two modules'" + " qemu-kvm build is not ready.") + + error_context.context("Get qemu-guest-agent rpm pkg" + " url of %s." % mdl_tag, logging.info) get_brew_latest_pkg_cmd = "brew --quiet --topdir=%s" % download_root - get_brew_latest_pkg_cmd += " list-tagged %s" % latest_mdl_tag + get_brew_latest_pkg_cmd += " list-tagged %s" % mdl_tag get_brew_latest_pkg_cmd += " --path --arch=%s" % arch get_brew_latest_pkg_cmd += " |grep qemu-guest-agent-[0-9]" @@ -311,6 +343,29 @@ class QemuGuestAgentTest(BaseVirtTest): self.gagent.verify_responsive() logging.info(self.gagent.cmd("guest-info")) + @error_context.context_aware + def gagent_setsebool_value(self, value, params, vm): + ''' + Set selinux boolean 'virt_qemu_ga_read_nonsecurity_files' + to 'on' or 'off' so that a linux guest can access the filesystem + successfully, and restore the guest's original env when the test is over. + + :param value: value of selinux boolean. + :param params: Dictionary with the test parameters + :param vm: Virtual machine object. + ''' + session = self._get_session(params, vm) + self._open_session_list.append(session) + error_context.context("Turn %s virt_qemu_ga_read_nonsecurity_files."
% + value, logging.info) + set_selinux_bool_cmd = params["setsebool_cmd"] % value + session.cmd(set_selinux_bool_cmd).strip() + get_sebool_cmd = params['getsebool_cmd'] + value_selinux_bool_guest = session.cmd_output(get_sebool_cmd).strip() + if value_selinux_bool_guest != value: + self.test.error("Set boolean virt_qemu_ga_read_nonsecurity_files " + "failed.") + @error_context.context_aware def setup(self, test, params, env): BaseVirtTest.setup(self, test, params, env) @@ -779,6 +834,13 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): def gagent_check_set_time(self, test, params, env): """ Execute "guest-set-time" command to guest agent + steps: + 1) Query the timestamp of current time in guest + 2) Move the guest time one week into the past with command "guest-set-time" + 3) Check if the guest time is set + 4) Set a invalid guest time if needed + 5) Set the system time from the hwclock for rhel guest + :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. @@ -790,7 +852,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): guest_time_before = session.cmd_output(get_guest_time_cmd) if not guest_time_before: test.error("can't get the guest time for contrast") - error_context.context("the time before being moved back into past is '%d' " + error_context.context("the time before being moved back into past is '%d' " % int(guest_time_before), logging.info) # Need to move the guest time one week into the past target_time = (int(guest_time_before) - 604800) * 1000000000 @@ -801,7 +863,29 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): delta = abs(int(guest_time_after) - target_time / 1000000000) if delta > 3: test.fail("the time set for guest is not the same with target") - # Set the system time from the hwclock + + # set invalid guest time if needed + invalid_time_test = params.get_boolean("invalid_time_test") + if invalid_time_test: + error_context.context("Set time to an invalid value.", + logging.info) + guest_time_before_invalid = session.cmd_output(get_guest_time_cmd) + target_time_invalid = int(guest_time_before) * 1000000000000 + try: + self.gagent.set_time(target_time_invalid) + except guest_agent.VAgentCmdError as e: + expected = "Invalid parameter type" + if expected not in e.edata["desc"]: + test.fail(str(e)) + guest_time_after_invalid = session.cmd_output(get_guest_time_cmd) + delta = abs(int(guest_time_after_invalid) - int( + guest_time_before_invalid)) + # time should have no change after invalid time set, 1min is + # acceptable as there are some check during test + if delta > 60: + test.fail("The guest time is changed after invalid time set.") + return + # Only for linux guest, set the system time from the hwclock if params["os_type"] != "windows": move_time_cmd = params["move_time_cmd"] session.cmd("hwclock -w") @@ -1039,6 +1123,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): timeout = float(params.get("login_timeout", 240)) session = self.vm.wait_for_login(timeout=timeout) device_name = get_guest_discard_disk(session) + self.gagent_setsebool_value('on', params, self.vm) error_context.context("format disk '%s' in guest" % device_name, logging.info) format_disk_cmd = params["format_disk_cmd"] @@ -1072,6 +1157,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): error_context.context("execute the guest-fstrim cmd", logging.info) self.gagent.fstrim() + self.gagent_setsebool_value('off', params, self.vm) # check the bitmap after trim bitmap_after_trim = get_allocation_bitmap() @@ 
-1280,8 +1366,10 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): for windows guest. """ if self.params.get("os_type") == "linux": - black_list = self.params["black_list"] - for black_cmd in black_list.split(): + cmd_black_list = self.params["black_list"] + cmd_blacklist_backup = self.params["black_list_backup"] + session.cmd(cmd_blacklist_backup) + for black_cmd in cmd_black_list.split(): bl_check_cmd = self.params["black_list_check_cmd"] % black_cmd bl_change_cmd = self.params["black_list_change_cmd"] % black_cmd session.cmd(bl_change_cmd) @@ -1295,6 +1383,14 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.test.fail("Could not restart qemu-ga in VM after changing" " list, detail: %s" % o) + def _change_bl_back(self, session): + """ + Change the blacklist_bck back for recovering guest env. + """ + if self.params.get("os_type") == "linux": + cmd_change_bl_back = self.params["recovery_black_list"] + session.cmd(cmd_change_bl_back) + def _read_check(self, ret_handle, content, count=None): """ Read file and check if the content read is correct. @@ -1383,6 +1479,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.gagent.guest_file_close(ret_handle) cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) session.cmd(cmd_del_file) + self._change_bl_back(session) @error_context.context_aware def gagent_check_file_write(self, test, params, env): @@ -1433,6 +1530,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.gagent.guest_file_close(ret_handle) cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) session.cmd(cmd_del_file) + self._change_bl_back(session) @error_context.context_aware def gagent_check_file_read(self, test, params, env): @@ -1544,6 +1642,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) session.cmd(cmd_del_file) + self._change_bl_back(session) @error_context.context_aware def gagent_check_with_fsfreeze(self, test, params, env): @@ -1589,6 +1688,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.gagent.guest_file_close(ret_handle) cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) session.cmd(cmd_del_file) + self._change_bl_back(session) @error_context.context_aware def gagent_check_with_selinux(self, test, params, env): @@ -1693,6 +1793,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): " guest.", logging.info) session.cmd("setenforce 0") result_check_permissive() + self._change_bl_back(session) @error_context.context_aware def gagent_check_guest_exec(self, test, params, env): @@ -1830,10 +1931,12 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): capture_output=True) if result["exitcode"] == 0: test.fail("The cmd should be failed with wrong args.") + self._change_bl_back(session) @error_context.context_aware def _action_before_fsfreeze(self, *args): session = self._get_session(self.params, None) + session.cmd("restorecon -Rv /", timeout=180) self._open_session_list.append(session) @error_context.context_aware @@ -1873,26 +1976,34 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): logging.info("FS is thawed as expected, can write in guest.") @error_context.context_aware - def gagent_check_fsfreeze(self, test, params, env): + def _fsfreeze(self, fsfreeze_list=False, mountpoints=None, + check_mountpoints=None): """ - Test guest agent commands "guest-fsfreeze-freeze/status/thaw" + Test guest agent commands "guest-fsfreeze-freeze/status/thaw/ + fsfreeze-list" Test steps: 1) Check the FS is thawed. 2) Freeze the FS. 
- 3) Check the FS is frozen from both guest agent side and guest os side. + 3) Check the FS is frozen from both guest agent side and + guest os side. 4) Thaw the FS. - 5) Check the FS is unfrozen from both guest agent side and guest os side. - - :param test: kvm test object - :param params: Dictionary with the test parameters - :param env: Dictionary with test environmen. - """ - error_context.context("Check guest agent command " - "'guest-fsfreeze-freeze/thaw'", - logging.info) - write_cmd = params.get("gagent_fs_test_cmd", "") - write_cmd_timeout = int(params.get("write_cmd_timeout", 60)) + 5) Check the FS is unfrozen from both guest agent side and + guest os side. + + :param fsfreeze_list: Freeze fs with guest-fsfreeze-freeze or + guest-fsfreeze-freeze-list + :param mountpoints: an array of mountpoints of filesystems to be frozen. + it's the parameter for guest-fsfreeze-freeze-list. + if omitted, every mounted filesystem is frozen + :param check_mountpoints: an array of mountpoints, to check if they are + frozen/thawed, used for the following two scenarios. + a.fsfreeze_list is true and mountpoints is none. + b.fsfreeze_list is true and mountpoints has + invalid value and valid value (only linux guest) + """ + write_cmd = self.params.get("gagent_fs_test_cmd", "") + write_cmd_timeout = int(self.params.get("write_cmd_timeout", 60)) try: expect_status = self.gagent.FSFREEZE_STATUS_THAWED self.gagent.verify_fsfreeze_status(expect_status) @@ -1901,10 +2012,28 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.gagent.fsthaw(check_status=False) self._action_before_fsfreeze() - error_context.context("Freeze the FS.", logging.info) - self.gagent.fsfreeze() + error_context.context("Freeze the FS when fsfreeze_list is" + " %s and mountpoints is %s." % + (fsfreeze_list, mountpoints), logging.info) + self.gagent.fsfreeze(fsfreeze_list=fsfreeze_list, + mountpoints=mountpoints) try: - self._action_after_fsfreeze(write_cmd, write_cmd_timeout) + if fsfreeze_list: + if check_mountpoints: + # only for invalid mount_points + # or mountpoints is none + mountpoints = check_mountpoints + write_cmd_list = [] + for mpoint in mountpoints: + mpoint = "/tmp" if mpoint == "/" else mpoint + write_cmd_m = write_cmd % mpoint + write_cmd_list.append(write_cmd_m) + write_cmd_guest = ";".join(write_cmd_list) + else: + mountpoint_def = self.params["mountpoint_def"] + write_cmd_guest = write_cmd % mountpoint_def + + self._action_after_fsfreeze(write_cmd_guest, write_cmd_timeout) # Next, thaw guest fs. self._action_before_fsthaw() error_context.context("Thaw the FS.", logging.info) @@ -1918,8 +2047,128 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): logging.warn("Finally failed to thaw guest fs," " detail: '%s'", detail) raise + # check after fsthaw + self._action_after_fsthaw(write_cmd_guest, write_cmd_timeout) + + @error_context.context_aware + def gagent_check_fsfreeze(self, test, params, env): + """ + Test guest agent commands "guest-fsfreeze-freeze" + + Test steps: + 1) Check the FS is thawed. + 2) Freeze the FS. + 3) Check the FS is frozen from both guest agent side and + guest os side. + 4) Thaw the FS. + 5) Check the FS is unfrozen from both guest agent side and + guest os side. - self._action_after_fsthaw(write_cmd, write_cmd_timeout) + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment.
+ """ + self._fsfreeze() + + @error_context.context_aware + def gagent_check_fsfreeze_list(self, test, params, env): + """ + Test guest agent commands "guest-fsfreeze-freeze-list" + + Test steps: + 1) Check the FS is thawed. + 2) Freeze the FS without mountpoint. + 3) Check the FS is frozen from both guest agent side and + guest os side. + 4) Thaw the FS. + 5) Check the FS is unfrozen from both guest agent side and + guest os side. + 6) Freeze the FS with one valid mountpoint. + 7) repeate step4-5. + 8) Freeze the FS with two valid mountpoints + 9) repeate step4-5. + 8) Freeze the FS with one valid mountpoint and + one invalid mountpoint. + 9) Check the result. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environmen. + """ + session = self._get_session(params, self.vm) + self._open_session_list.append(session) + image_size_stg0 = params["image_size_stg0"] + + error_context.context("Format the new data disk and mount it.", + logging.info) + if params.get("os_type") == "linux": + self.gagent_setsebool_value('on', params, self.vm) + disk_data = list(utils_disk.get_linux_disks(session).keys()) + mnt_point_data = utils_disk.configure_empty_disk( + session, disk_data[0], image_size_stg0, "linux", + labeltype="msdos")[0] + mount_points = ["/", mnt_point_data] + else: + disk_index = utils_misc.wait_for( + lambda: utils_disk.get_windows_disks_index(session, + image_size_stg0), + 120) + if disk_index: + logging.info("Clear readonly for disk and online it in" + " windows guest.") + if not utils_disk.update_windows_disk_attributes(session, + disk_index): + test.error("Failed to update windows disk attributes.") + mnt_point_data = utils_disk.configure_empty_disk( + session, disk_index[0], image_size_stg0, "windows", + labeltype="msdos")[0] + mount_points = ["C:\\", "%s:\\" % mnt_point_data] + else: + test.error("Didn't find any disk_index except system disk.") + + error_context.context("Freeze fs without parameter of mountpoints.", + logging.info) + self._fsfreeze(fsfreeze_list=True, check_mountpoints=mount_points) + error_context.context("Freeze fs with two mount point.", + logging.info) + self._fsfreeze(fsfreeze_list=True, mountpoints=mount_points) + error_context.context("Freeze fs with every mount point.", + logging.info) + for mpoint in mount_points: + mpoint = ["%s" % mpoint] + self._fsfreeze(fsfreeze_list=True, mountpoints=mpoint) + + error_context.context("Freeze fs with one valid mountpoint and" + " one invalid mountpoint.", logging.info) + if params.get("os_type") == "linux": + mount_points_n = ["/", "/invalid"] + check_mp = ["/"] + self._fsfreeze(fsfreeze_list=True, mountpoints=mount_points_n, + check_mountpoints=check_mp) + self.gagent_setsebool_value('off', params, self.vm) + else: + mount_points_n = ["C:\\", "X:\\"] + logging.info("Make sure the current status is thaw.") + try: + expect_status = self.gagent.FSFREEZE_STATUS_THAWED + self.gagent.verify_fsfreeze_status(expect_status) + except guest_agent.VAgentFreezeStatusError: + # Thaw guest FS if the fs status is incorrect. 
+ self.gagent.fsthaw(check_status=False) + try: + self.gagent.fsfreeze(fsfreeze_list=True, + mountpoints=mount_points_n) + except guest_agent.VAgentCmdError as e: + expected = "failed to add X:\\ to snapshot set" + if expected not in e.edata["desc"]: + test.fail(e) + else: + test.fail("Cmd 'guest-fsfreeze-freeze-list' is executed" + " successfully, but it should return error.") + finally: + if self.gagent.get_fsfreeze_status() == \ + self.gagent.FSFREEZE_STATUS_FROZEN: + self.gagent.fsthaw(check_status=False) @error_context.context_aware def gagent_check_thaw_unfrozen(self, test, params, env): @@ -2045,7 +2294,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): session, disk_index[0], image_size_stg0, "windows", labeltype="msdos") session.cmd(disk_write_cmd % mnt_point[0]) error_context.context("Unplug the added disk", logging.info) - self.vm.devices.simple_unplug(devs[0], self.vm.monitor) + self.vm.devices.simple_unplug(devs[-1], self.vm.monitor) finally: if self.gagent.get_fsfreeze_status() == self.gagent.FSFREEZE_STATUS_FROZEN: try: @@ -2232,6 +2481,27 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): check_value_frontend_open(out, False) session.close() + @error_context.context_aware + def gagent_check_qgastatus_after_remove_qga(self, test, params, env): + """ + Check the qga.service status after removing qga. + """ + session = self._get_session(self.params, None) + self._open_session_list.append(session) + + error_context.context("Remove qga.service.", logging.info) + self.gagent_uninstall(session, self.vm) + + error_context.context("Check qga.service after removing it.", logging.info) + try: + if self._check_ga_service(session, params.get("gagent_status_cmd")): + test.fail("QGA service should be removed.") + finally: + error_context.context("Recover test env that start qga.", logging.info) + self.gagent_install(session, self.vm) + self.gagent_start(session, self.vm) + self.gagent_verify(params, self.vm) + @error_context.context_aware def gagent_check_frozen_io(self, test, params, env): """ @@ -2325,30 +2595,91 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): mountpoint,disk's name and serial number. steps: - 1) check file system type of every mount point. - 2) check disk name. - 3) check disk's serial number. + 1) Check filesystem usage statistics + 2) check file system type of every mount point. + 3) check disk name. + 4) check disk's serial number. :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def qga_guest_diskusage(mountpoint): + """ + Send cmd in guest to get disk usage. + :param mountpoint: the mountpoint of filesystem + """ + cmd_get_diskusage = params["cmd_get_disk_usage"] % mountpoint + disk_usage_guest = session.cmd(cmd_get_diskusage).strip().split() + disk_total_guest = int(disk_usage_guest[0]) + if params["os_type"] == "windows": + # Just can get total and freespace disk usage from windows. + disk_freespace_guest = int(disk_usage_guest[1]) + disk_used_guest = int(disk_total_guest - disk_freespace_guest) + else: + disk_used_guest = int(disk_usage_guest[1]) + disk_total_qga = int(fs["total-bytes"]) + disk_used_qga = int(fs["used-bytes"]) + diff_total_qga_guest = abs(disk_total_guest - disk_total_qga) + diff_used_qga_guest = abs(disk_used_guest - disk_used_qga) + return (diff_total_qga_guest, diff_used_qga_guest) + + def check_usage_qga_guest(mount_point): + """ + Contrast disk usage from guest and qga that needed + to call previous function 'qga_guest_diskusage'. 
+ :param mountpoint: the mountpoint of filesystem + """ + disk_usage_guest = qga_guest_diskusage(mount_point) + diff_total_qgaguest = int(disk_usage_guest[0]) + diff_used_qgaguest = int(disk_usage_guest[1]) + if diff_total_qgaguest != 0: + test.fail("File System %s Total bytes doesn't match." % + mount_point) + if diff_used_qgaguest != 0: + if mount_point != 'C:' and mount_point != '/': + test.fail("File system %s used bytes doesn't match." % + mount_point) + else: + # Disk 'C:' and '/' used space usage have a floating interval, + # so set a safe value '10485760'. + logging.info("Need to check the floating interval for C: or /.") + if diff_used_qgaguest > 10485760: + test.fail("File System floating interval is too large," + "Something must go wrong.") + else: + logging.info("File system '%s' usages are within the safe " + "floating range." % mount_point) + session = self._get_session(params, None) self._open_session_list.append(session) - serial_num = params["blk_extra_params"].split("=")[1] + serial_num = params["blk_extra_params_image1"].split("=")[1] error_context.context("Check all file system info in a loop.", logging.info) fs_info_qga = self.gagent.get_fsinfo() for fs in fs_info_qga: + device_id = fs["name"] mount_pt = fs["mountpoint"] - if params["os_type"] == "windows": + if (params["os_type"] == "windows" and + mount_pt != "System Reserved"): mount_pt = mount_pt[:2] + error_context.context("Check file system '%s' usage statistics." % + mount_pt, logging.info) + if mount_pt != 'System Reserved': + # disk usage statistic for System Reserved + # volume is not supported. + check_usage_qga_guest(mount_pt) + else: + logging.info("'%s' disk usage statistic is not supported" % mount_pt) + error_context.context("Check file system type of '%s' mount point." % mount_pt, logging.info) fs_type_qga = fs["type"] - cmd_get_disk = params["cmd_get_disk"] % mount_pt + cmd_get_disk = params["cmd_get_disk"] % mount_pt.replace("/", r"\/") + if params["os_type"] == "windows": + cmd_get_disk = params["cmd_get_disk"] % device_id.replace("\\", r"\\") disk_info_guest = session.cmd(cmd_get_disk).strip().split() fs_type_guest = disk_info_guest[1] if fs_type_qga != fs_type_guest: @@ -2373,6 +2704,9 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): logging.info("Disk name is %s which is expected." 
% disk_name_qga) error_context.context("Check serial number of some disk.", logging.info) + if fs_type_qga == "UDF" or fs_type_qga == "CDFS": + logging.info("Only check block disk's serial info, no cdrom.") + continue serial_qga = fs["disk"][0]["serial"] if not re.findall(serial_num, serial_qga): test.fail("Serial name is not correct via qga.\n" @@ -2451,6 +2785,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): self.gagent.guest_file_read(ret_handle) log_check("guest-file-read") + self._change_bl_back(session) @error_context.context_aware def gagent_check_with_migrate(self, test, params, env): @@ -2515,6 +2850,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): error_context.context("Format the new data disk and mount it.", logging.info) if params.get("os_type") == "linux": + self.gagent_setsebool_value('on', params, self.vm) disk_data = list(utils_disk.get_linux_disks(session).keys()) mnt_point = utils_disk.configure_empty_disk( session, disk_data[0], image_size_stg0, "linux", @@ -2538,6 +2874,7 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): test.error("Didn't find any disk_index except system disk.") error_context.context("Freeze fs.", logging.info) + session.cmd("restorecon -Rv /", timeout=180) self.gagent.fsfreeze() error_context.context("Umount fs or offline disk in guest.", @@ -2584,14 +2921,17 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): error_context.context("Mount fs or online disk in guest.", logging.info) if params.get("os_type") == "linux": - if not utils_disk.mount(src, mnt_point[0], session=session): - if params['os_variant'] != 'rhel6': - test.fail("For rhel7+ guest, mount fs should success" - " after fsthaw.") - else: - if params['os_variant'] == 'rhel6': - test.fail("For rhel6 guest, mount fs should fail after" - " fsthaw.") + try: + if not utils_disk.mount(src, mnt_point[0], session=session): + if params['os_variant'] != 'rhel6': + test.fail("For rhel7+ guest, mount fs should success" + " after fsthaw.") + else: + if params['os_variant'] == 'rhel6': + test.fail("For rhel6 guest, mount fs should fail after" + " fsthaw.") + finally: + self.gagent_setsebool_value('off', params, self.vm) else: if not utils_disk.update_windows_disk_attributes(session, disk_index): @@ -2686,6 +3026,345 @@ class QemuGuestAgentBasicCheck(QemuGuestAgentTest): cmd = "mv -f /etc/sysconfig/qemu-ga-bk /etc/sysconfig/qemu-ga" session.cmd(cmd) + @error_context.context_aware + def gagent_check_virtio_device(self, test, params, env): + """ + check virtio device in windows guest. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + session = self._get_session(params, None) + self._open_session_list.append(session) + + def _result_check(rsult_qga, rsult_guest): + if rsult_qga != rsult_guest: + msg = "The result is different between qga and guest\n" + msg += "from qga: %s\n" % rsult_qga + msg += "from guest: %s\n" % rsult_guest + test.fail(msg) + + devs_list = self.gagent.get_virtio_device() + check_driver_cmd_org = params["check_driver_powershell_cmd"] + for device in devs_list: + driver_name = device["driver-name"] + error_context.context("Check %s info." 
% driver_name, logging.info) + + driver_date = device["driver-date"] + driver_version = device["driver-version"] + device_address = device["address"]["data"] + device_id = device_address["device-id"] + vendor_id = device_address["vendor-id"] + + filter_name = "friendlyname" if "Ethernet" in driver_name \ + else "devicename" + check_driver_cmd = check_driver_cmd_org % (filter_name, driver_name) + + driver_info_guest = session.cmd_output(check_driver_cmd) + # check driver date + # driverdate : 20200219000000.******+*** + date_group = re.search(r"driverdate.*\:\s(\d{4})(\d{2})(\d{2})", + driver_info_guest, re.I).groups() + driver_date_guest = "-".join(date_group) + _result_check(driver_date, driver_date_guest) + + # check driver version + driver_ver_guest = re.search(r"driverversion.*\:\s(\S+)", + driver_info_guest, re.I).group(1) + _result_check(driver_version, driver_ver_guest) + + # check vender id and device id + pattern_dev = r"deviceid.*VEN_([A-Za-z0-9]+)&DEV_([A-Za-z0-9]+)&" + id_dev = re.search(pattern_dev, driver_info_guest, re.I) + vender_id_guest = int(id_dev.group(1), 16) + device_id_guest = int(id_dev.group(2), 16) + _result_check(vendor_id, vender_id_guest) + _result_check(device_id, device_id_guest) + + @error_context.context_aware + def gagent_check_os_basic_info(self, test, params, env): + """ + Get hostname, timezone and currently active users on the vm. + Steps: + 1) Check host name. + 2) Check host name after setting new host name + 3) Check timezone name. + 4) check timezone's offset to UTS in seconds. + 5) Check all active users number. + 6) Check every user info. + 7) Check every user's domain(windows only) + 8) Get the earlier loggin time for the same user. + 9) Check the login time for every user. + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + session = self._get_session(params, None) + self._open_session_list.append(session) + + def _result_check(rsult_qga, rsult_guest): + if rsult_qga != rsult_guest: + msg = "The result is different between qga and guest\n" + msg += "from qga: %s\n" % rsult_qga + msg += "from guest: %s\n" % rsult_guest + test.fail(msg) + + error_context.context("Check host name of guest.", logging.info) + host_name_ga = self.gagent.get_host_name()["host-name"] + cmd_get_host_name = params["cmd_get_host_name"] + host_name_guest = session.cmd_output(cmd_get_host_name).strip() + _result_check(host_name_ga, host_name_guest) + + if params["os_type"] == "linux": + # this step that set new hostname and + # check it out just for linux. + error_context.context("Check host name after setting new host name.", + logging.info) + cmd_set_host_name = params["cmd_set_host_name"] + host_name_guest = session.cmd_output(cmd_set_host_name).strip() + host_name_ga = self.gagent.get_host_name()["host-name"] + _result_check(host_name_ga, host_name_guest) + + error_context.context("Check timezone of guest.", logging.info) + timezone_ga = self.gagent.get_timezone() + timezone_name_ga = timezone_ga["zone"] + timezone_offset_ga = timezone_ga["offset"] + + logging.info("Check timezone name.") + cmd_get_timezone_name = params["cmd_get_timezone_name"] + timezone_name_guest = session.cmd_output( + cmd_get_timezone_name).strip() + if params["os_type"] == "windows": + # there are standard name and daylight name for windows os, + # both are accepted. 
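+ # e.g. qga may report 'China Standard Time' while the guest query + # returns the daylight variant; either name counts as a match + # (names above are illustrative)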
+ cmd_dlight_name = params["cmd_get_timezone_dlight_name"] + timezone_dlight_name_guest = session.cmd_output( + cmd_dlight_name).strip() + timezone_name_list = [timezone_name_guest, + timezone_dlight_name_guest] + if timezone_name_ga not in timezone_name_list: + msg = "The result is different between qga and guest\n" + msg += "from qga: %s\n" % timezone_name_ga + msg += "from guest: %s\n" % timezone_name_list + test.fail(msg) + else: + _result_check(timezone_name_ga, timezone_name_guest) + + logging.info("Check timezone offset.") + cmd_get_timezone_offset = params["cmd_get_timezone_offset"] + timezone_offset_guest = session.cmd_output( + cmd_get_timezone_offset).strip() + # +08:00 + # (UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi + pattern = r"(\S)(\d\d):\d\d" + timezone_list = re.findall(pattern, timezone_offset_guest, re.I) + # if it's daylight save time, offset should be 1h early + if "daylight" in timezone_name_ga.lower(): + offset_seconds = (int(timezone_list[0][1]) - 1) * 3600 + else: + offset_seconds = int(timezone_list[0][1]) * 3600 + if timezone_list[0][0] == "-": + timezone_offset_guest_seconds = int(timezone_list[0][0] + + str(offset_seconds)) + else: + timezone_offset_guest_seconds = int(offset_seconds) + _result_check(timezone_offset_ga, timezone_offset_guest_seconds) + + error_context.context("Check the current active users number.", + logging.info) + user_qga_list = self.gagent.get_users() + user_num_qga = len(user_qga_list) + cmd_get_users = params["cmd_get_users"] + user_guest = session.cmd_output(cmd_get_users).strip() + user_guest_list = user_guest.splitlines() + + logging.info("Get all users name in guest.") + if params["os_type"] == "linux": + cmd_get_user_name = params["cmd_get_users_name"] + user_name_guest = session.cmd_output(cmd_get_user_name).strip() + user_name_list_guest = user_name_guest.splitlines() + else: + user_name_list_guest = [] + for user in user_guest_list: + user = user.strip(' >') + user_name = user.split()[0] + user_name_list_guest.append(user_name) + # get non duplicate user name + user_num_guest = len(set(user_name_list_guest)) + + if user_num_qga != user_num_guest: + msg = "Currently active users number are different" + msg += " between qga and guest\n" + msg += "from qga: %s\n" % len(user_num_qga) + msg += "from guest: %s\n" % len(user_num_guest) + test.fail(msg) + + error_context.context("Check the current active users info.", + logging.info) + for user_qga in user_qga_list: + login_time_qga = user_qga["login-time"] + user_name_qga = user_qga["user"] + + error_context.context("Check %s user info." % user_name_qga, + logging.info) + # only have domain key in windows guest + if params["os_type"] == "windows": + # username is lowercase letters in windows guest + user_name = user_name_qga.lower() + logging.info("Check domain name of %s user." % user_name) + domain_qga = user_qga["domain"] + cmd_get_user_domain = params["cmd_get_user_domain"] % user_name + domain_guest = session.cmd_output(cmd_get_user_domain).strip() + _result_check(domain_qga, domain_guest) + else: + user_name = user_name_qga + + # get this user's info in vm, maybe the same user + # loggin many times. 
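+ # (the guest may list one record per login session for the same user; + # only the earliest timestamp is kept for the comparison below)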
+ cmd_get_user = params["cmd_get_user"] % user_name + records = session.cmd_output(cmd_get_user).strip().splitlines() + error_context.context("Check active users logging time, if " + "multiple instances of the user are " + "logged in, record the earliest one.", + logging.info) + first_login = float('inf') + time_pattern = params["time_pattern"] + cmd_time_trans = params["cmd_time_trans"] + for record in records: + login_time_guest = re.search(time_pattern, record).group(1) + cmd_time_trans_guest = cmd_time_trans % login_time_guest + login_time_guest = session.cmd_output( + cmd_time_trans_guest).strip() + first_login = min(first_login, float(login_time_guest)) + + delta = abs(float(login_time_qga) - float(first_login)) + if delta > 60: + msg = "%s login time are different between" % user_name_qga + msg += " qga and guest\n" + msg += "from qga: %s\n" % login_time_qga + msg += "from guest: %s\n" % first_login + test.fail(msg) + + @error_context.context_aware + def gagent_check_os_info(self, test, params, env): + """ + Get operating system info of vm. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def _result_check(rsult_qga, rsult_guest): + if rsult_qga.lower() != rsult_guest.lower(): + msg = "The result is different between qga and guest\n" + msg += "from qga: %s\n" % rsult_qga + msg += "from guest: %s\n" % rsult_guest + test.fail(msg) + + session = self._get_session(params, None) + self._open_session_list.append(session) + + error_context.context("Get os information from qga.", logging.info) + + os_info_qga = self.gagent.get_osinfo() + os_id_qga = os_info_qga["id"] + os_name_qga = os_info_qga["name"] + os_pretty_name_qga = os_info_qga["pretty-name"] + os_version_qga = os_info_qga["version"] + os_version_id_qga = os_info_qga["version-id"] + kernel_version_qga = os_info_qga["kernel-version"] + kernel_release_qga = os_info_qga["kernel-release"] + # x86_64 or x86 + machine_type_qga = os_info_qga["machine"] + + cmd_get_full_name = params["cmd_get_full_name"] + os_name_full_guest = session.cmd_output(cmd_get_full_name).strip() + + error_context.context("Check os basic id and name.", logging.info) + os_type = params["os_type"] + os_id = params["os_id"] + if os_type == "windows": + os_name = "Microsoft Windows" + else: + os_name = re.search(r'(Red Hat.*) release', + os_name_full_guest, re.I).group(1) + _result_check(os_id_qga, os_id) + _result_check(os_name_qga, os_name) + + error_context.context("Check os pretty name.", logging.info) + if os_type == "windows": + os_pretty_name_guest = re.search( + r'Microsoft (.*)', os_name_full_guest, re.M).group(1) + else: + os_pretty_name_guest = os_name_full_guest + if "release" in os_name_full_guest: + os_pretty_name_guest = re.sub(r'release ', '', + os_name_full_guest) + _result_check(os_pretty_name_qga, os_pretty_name_guest) + + error_context.context("Check os version info.", logging.info) + # 2019, 8.1, 2012 R2, 8 + pattern = r"(\d+(.)?(?(2)(\d+))( R2)?)" + os_version_id_guest = re.search(pattern, + os_name_full_guest, re.I).group(1) + if os_type == "windows": + os_version_guest = re.search(r'(Microsoft.*\d)', + os_name_full_guest, re.I).group(1) + # 2012 R2 + if "R2" in os_version_id_guest: + os_version_id_guest = re.sub(r' R2', 'R2', + os_version_id_guest) + else: + os_version_guest = re.search(r'release (\d.*)', + os_name_full_guest, re.I).group(1) + if "Beta" in os_version_guest: + os_version_guest = re.sub(r'Beta ', '', os_version_guest) + + 
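+ # e.g. 'Red Hat Enterprise Linux release 8.2 Beta (Ootpa)' is reduced + # by the pattern match and 'Beta ' strip above to the version '8.2 (Ootpa)'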
_result_check(os_version_qga, os_version_guest) + _result_check(os_version_id_qga, os_version_id_guest) + + error_context.context("Check kernel version and release version.", + logging.info) + cmd_get_kernel_ver = params["cmd_get_kernel_ver"] + kernel_info_guest = session.cmd_output(cmd_get_kernel_ver).strip() + if os_type == "windows": + kernel_g = re.search(r'(\d+\.\d+)\.(\d+)', + kernel_info_guest, re.I) + kernel_version_guest = kernel_g.group(1) + kernel_release_guest = kernel_g.group(2) + else: + kernel_version_guest = kernel_info_guest + cmd_get_kernel_rel = params["cmd_get_kernel_rel"] + kernel_release_guest = session.cmd_output( + cmd_get_kernel_rel).strip() + _result_check(kernel_version_qga, kernel_version_guest) + _result_check(kernel_release_qga, kernel_release_guest) + + error_context.context("Check variant and machine type.", logging.info) + if self.params.get("os_variant", "") != 'rhel8': + # for rhel8+ there is no variant info + # server or client + variant_qga = os_info_qga["variant"] + variant_id_qga = os_info_qga["variant-id"] + variant_guest = "server" \ + if "server" in os_name_full_guest.lower() else "client" + _result_check(variant_qga, variant_guest) + _result_check(variant_id_qga, variant_guest) + + cmd_get_machine_type = params["cmd_get_machine_type"] + machine_type_guest = session.cmd_output(cmd_get_machine_type).strip() + if os_type == "windows": + # one of x86, x86_64, arm, ia64 + if "32-bit" in machine_type_guest: + machine_type_guest = "x86" + elif "64-bit" in machine_type_guest: + machine_type_guest = "x86_64" + else: + test.error("Only support x86 and x86_64 in this auto test now.") + + _result_check(machine_type_qga, machine_type_guest) + def run_once(self, test, params, env): QemuGuestAgentTest.run_once(self, test, params, env) @@ -2778,6 +3457,7 @@ class QemuGuestAgentBasicCheckWin(QemuGuestAgentBasicCheck): else: logging.info("qemu-ga service is not running.") self.gagent_start(session, self.vm) + time.sleep(5) args = [params.get("gagent_serial_type"), params.get("gagent_name")] self.gagent_create(params, self.vm, *args) diff --git a/qemu/tests/qemu_img.py b/qemu/tests/qemu_img.py index 1d50829094acdde41fd06cbece9c6d966eb3dbab..931315f1eda57057e200c9aebccbaeed0e01319a 100644 --- a/qemu/tests/qemu_img.py +++ b/qemu/tests/qemu_img.py @@ -35,6 +35,7 @@ def run(test, params, env): image_format = params["image_format"] image_size = params.get("image_size", "10G") enable_gluster = params.get("enable_gluster", "no") == "yes" + enable_nvme = params.get("enable_nvme", "no") == "yes" image_name = storage.get_image_filename(params, data_dir.get_data_dir()) def remove(path): @@ -43,12 +44,14 @@ def run(test, params, env): except OSError: pass - def _get_image_filename(img_name, enable_gluster=False, img_fmt=None): + def _get_image_filename(img_name, enable_gluster=False, + enable_nvme=False, img_fmt=None): """ Generate an image path. :param image_name: Force name of image. :param enable_gluster: Enable gluster or not. + :param enable_nvme: Enable nvme or not. :param image_format: Format for image. 
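+ :return: the generated image filename (a URI for gluster/nvme backends).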
""" if enable_gluster: @@ -56,6 +59,8 @@ def run(test, params, env): image_filename = "%s%s" % (gluster_uri, img_name) if img_fmt: image_filename += ".%s" % img_fmt + elif enable_nvme: + image_filename = image_name else: if img_fmt: img_name = "%s.%s" % (img_name, img_fmt) @@ -170,7 +175,7 @@ def run(test, params, env): device = params.get("device") if not device: img = _get_image_filename(image_large, enable_gluster, - image_format) + enable_nvme, image_format) else: img = device _create(cmd, img_name=img, fmt=image_format, @@ -524,13 +529,13 @@ def run(test, params, env): " support 'rebase' subcommand") sn_fmt = params.get("snapshot_format", "qcow2") sn1 = params["image_name_snapshot1"] - sn1 = _get_image_filename(sn1, enable_gluster, sn_fmt) + sn1 = _get_image_filename(sn1, enable_gluster, img_fmt=sn_fmt) base_img = storage.get_image_filename(params, data_dir.get_data_dir()) _create(cmd, sn1, sn_fmt, base_img=base_img, base_img_fmt=image_format) # Create snapshot2 based on snapshot1 sn2 = params["image_name_snapshot2"] - sn2 = _get_image_filename(sn2, enable_gluster, sn_fmt) + sn2 = _get_image_filename(sn2, enable_gluster, img_fmt=sn_fmt) _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt) rebase_mode = params.get("rebase_mode", "safe") @@ -648,7 +653,7 @@ def run(test, params, env): except Exception: image_filename = _get_image_filename(img_name, enable_gluster, - img_fmt) + img_fmt=img_fmt) backup_img_chain(image_filename) raise finally: diff --git a/qemu/tests/qemu_img_check_fragmentation.py b/qemu/tests/qemu_img_check_fragmentation.py new file mode 100755 index 0000000000000000000000000000000000000000..aa67088184e69acfc7c45dd2ef67ee47a6818ca5 --- /dev/null +++ b/qemu/tests/qemu_img_check_fragmentation.py @@ -0,0 +1,44 @@ +import re + +from avocado.utils import process + +from virttest import data_dir +from virttest import storage + + +def run(test, params, env): + """ + Check file fragmentation. + 1. Create a raw image with 10GiB. + 2. Create a badly fragmented file with qemu-img bench. + 3. Check file fragmentation. The extents should less than 10000. + With 1 MiB extents, the theoretical maximum for a 10 GiB image + is 10000 extents (10000 * 1 MiB = 10 GiB) + + :param test: VT test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + image_stg = params["images"] + root_dir = data_dir.get_data_dir() + image_stg_name = storage.get_image_filename(params.object_params(image_stg), + root_dir) + timeout = float(params.get("timeout", 1800)) + offset = params["offsets"].split() + fragmentation_maximum = params["fragmentation_maximum"] + qemu_img_bench_cmd = params["qemu_img_bench_cmd"] + for o in offset: + process.run(qemu_img_bench_cmd % (image_stg_name, o), + timeout=timeout, shell=True) + check_fragmentation_cmd = params["check_fragmentation_cmd"] % image_stg_name + cmd_result = process.run(check_fragmentation_cmd, shell=True) + extents_number_pattern = params["extents_number_pattern"] + fragmentation_maximum = int(params["fragmentation_maximum"]) + extents_number = re.search(extents_number_pattern, + cmd_result.stdout.decode()) + if not extents_number: + test.fail("Failed to get extents number. " + "The output is '%s'." % cmd_result.stdout.decode()) + if int(extents_number.group(1)) >= fragmentation_maximum: + test.fail("The extents should less than %s, the actual result is %s." 
+ % (fragmentation_maximum, extents_number.group(1))) diff --git a/qemu/tests/qemu_img_measure_convert_image.py b/qemu/tests/qemu_img_measure_convert_image.py index 3048c7dee6c6fd83c92f8450958ae5c96dc1bd43..65120bb238f545dc2139fe8eaacdf94d789e8538 100644 --- a/qemu/tests/qemu_img_measure_convert_image.py +++ b/qemu/tests/qemu_img_measure_convert_image.py @@ -4,6 +4,7 @@ import json from avocado.utils import process from virttest import data_dir from virttest.qemu_storage import QemuImg +from virttest.qemu_storage import get_image_json from virttest.qemu_io import QemuIOSystem @@ -34,8 +35,14 @@ def run(test, params, env): def _qemu_io(img, cmd): """Run qemu-io cmd to a given img.""" - logging.info("Run qemu-io %s" % img.image_filename) - q = QemuIOSystem(test, params, img.image_filename) + image_filename = img.image_filename + logging.info("Run qemu-io %s" % image_filename) + if img.image_format == "luks": + image_secret_object = img._secret_objects[-1] + image_json_str = get_image_json(img.tag, img.params, img.root_dir) + image_json_str = " '%s'" % image_json_str + image_filename = image_secret_object + image_json_str + q = QemuIOSystem(test, params, image_filename) q.cmd_output(cmd, 120) def _get_file_size(img): diff --git a/qemu/tests/qmp_command.py b/qemu/tests/qmp_command.py index 9f3516387ec97c423b9c80353e0c87ee01350c8f..a9114050e87f113c863a6a60943bc9fa09b136d3 100644 --- a/qemu/tests/qmp_command.py +++ b/qemu/tests/qmp_command.py @@ -7,6 +7,7 @@ from avocado.utils import process from virttest import utils_misc from virttest import qemu_monitor +from virttest.qemu_capabilities import Flags def run(test, params, env): @@ -241,10 +242,22 @@ def run(test, params, env): image_params = params.object_params(image) image_format = image_params['image_format'] image_drive = "drive_%s" % image - image_info['device'] = image_drive + if vm.check_capability(Flags.BLOCKDEV): + image_info['node-name'] = image_drive + else: + image_info['device'] = image_drive image_info['qdev'] = image image_info['format'] = image_format expect_o.append(image_info) + elif qmp_cmd == "query-target": + host_arch = platform.machine() + if host_arch == "ppc64le": + host_arch = host_arch[:5] + expect_o = [{"arch": host_arch}] + elif qmp_cmd == "query-machines": + # Remove avocado machine type + vm_machines = params["machine_type"].split(':', 1)[-1] + expect_o = [{'alias': vm_machines}] check_result(qmp_o, expect_o) elif result_check.startswith("post_"): logging.info("Verify post qmp command '%s' works as designed." % post_cmd) diff --git a/qemu/tests/queues_number_test.py b/qemu/tests/queues_number_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4464f477114970fefb60aa7abef8301829e988 --- /dev/null +++ b/qemu/tests/queues_number_test.py @@ -0,0 +1,213 @@ +import logging +import re +import os + +from virttest import error_context +from virttest import data_dir +from virttest import utils_net +from virttest import utils_test +from virttest import utils_misc +from virttest import utils_netperf + + +@error_context.context_aware +def run(test, params, env): + """ + MULTI_QUEUE change queues number test + 1) Boot up VM and log in to guest + 2) Enable the queues in guest + 3) Run netperf_and_ping test + 4) Change queues number repeatedly during netperf_and_ping stress testing + 5) Reboot VM + 6) Repeat above 1-4 steps + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment.
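+ (step 4 amounts to repeated 'ethtool -L <ifname> combined <n>' calls, + each validated against the 'Combined' limits reported by 'ethtool -l')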
+ """ + def change_queues_number(ifname, q_number, queues_status=None): + """ + change queues number + """ + if not queues_status: + queues_status = get_queues_status(ifname) + mq_set_cmd = "ethtool -L %s combined %d" % (ifname, q_number) + output = session_serial.cmd_output_safe(mq_set_cmd) + cur_queues_status = get_queues_status(ifname) + + err_msg = "" + expect_q_number = q_number + if q_number != queues_status[1] and q_number <= queues_status[0]: + if (cur_queues_status[1] != q_number + or cur_queues_status[0] != queues_status[0]): + err_msg = "Param is valid, but change queues failed, " + elif cur_queues_status != queues_status: + if q_number != queues_status[1]: + err_msg = "Param is invalid, " + err_msg += "Current queues value is not expected, " + expect_q_number = queues_status[1] + + if len(err_msg) > 0: + err_msg += "current queues set is %s, " % cur_queues_status[1] + err_msg += "max allow queues set is %s, " % cur_queues_status[0] + err_msg += "when run cmd: '%s', " % mq_set_cmd + err_msg += "expect queues are %s," % expect_q_number + err_msg += "expect max allow queues are %s, " % queues_status[0] + err_msg += "output: '%s'" % output + test.fail(err_msg) + + return cur_queues_status + + def get_queues_status(ifname): + """ + Get queues status + """ + mq_get_cmd = "ethtool -l %s" % ifname + nic_mq_info = session_serial.cmd_output_safe(mq_get_cmd) + queues_reg = re.compile(r"Combined:\s+(\d)", re.I) + queues_info = queues_reg.findall(" ".join(nic_mq_info.splitlines())) + if len(queues_info) != 2: + err_msg = "Oops, get guest queues info failed, " + err_msg += "make sure your guest support MQ.\n" + err_msg += "Check cmd is: '%s', " % mq_get_cmd + err_msg += "Command output is: '%s'." % nic_mq_info + test.cancel(err_msg) + return [int(x) for x in queues_info] + + def ping_test(dest_ip, ping_time, ping_lost_ratio): + """ + ping guest from host,until change queues finished. + """ + _, output = utils_net.ping(dest=dest_ip, timeout=ping_time) + packets_lost = utils_test.get_loss_ratio(output) + if packets_lost > ping_lost_ratio: + err = " %s%% packages lost during ping. " % packets_lost + err += "Ping command log:\n %s" % "\n".join( + output.splitlines()[-3:]) + test.fail(err) + + def netperf_test(): + """ + Netperf stress test for nic option. + """ + try: + n_server.start() + # Run netperf with message size defined in range. 
+ netperf_test_duration = params.get_numeric("netperf_test_duration") + test_protocols = params.get("test_protocols", "TCP_STREAM") + netperf_output_unit = params.get("netperf_output_unit") + test_option = params.get("test_option", "") + test_option += " -l %s" % netperf_test_duration + if netperf_output_unit in "GMKgmk": + test_option += " -f %s" % netperf_output_unit + t_option = "%s -t %s" % (test_option, test_protocols) + n_client.bg_start(utils_net.get_host_ip_address(params), + t_option, + params.get_numeric("netperf_para_sessions"), + params.get("netperf_cmd_prefix", ""), + package_sizes=params.get("netperf_sizes")) + if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1, + "Wait netperf test start"): + logging.info("Netperf test start successfully.") + else: + test.error("Can not start netperf client.") + utils_misc.wait_for( + lambda: not n_client.is_netperf_running(), + netperf_test_duration, 0, 5, + "Wait netperf test finish %ss" % netperf_test_duration) + finally: + n_server.stop() + + login_timeout = params.get_numeric("login_timeout", 360) + netperf_stress = params.get("run_bgstress") + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + vm.wait_for_serial_login(timeout=login_timeout) + guest_ip = vm.get_address() + n_client = utils_netperf.NetperfClient( + guest_ip, params.get("client_path"), + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_client_link")), + client=params.get("shell_client"), + username=params.get("username"), + password=params.get("password"), + compile_option=params.get("compile_option", "")) + n_server = utils_netperf.NetperfServer( + utils_net.get_host_ip_address(params), + params.get("server_path", "/var/tmp"), + netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), + params.get("netperf_server_link")), + password=params.get("hostpassword"), + compile_option=params.get("compile_option", "")) + wait_time = params.get_numeric("wait_bg_time") + ping_lost_ratio = params.get_numeric("background_ping_package_lost_ratio", 5) + ping_time = params.get_numeric("background_ping_time") + required_reboot = True + bg_test = True + try: + while bg_test: + session_serial = vm.wait_for_serial_login(timeout=login_timeout) + n_client.session = session_serial + error_context.context("Enable multi queues in guest.", logging.info) + for nic in vm.virtnet: + ifname = utils_net.get_linux_ifname(session_serial, nic.mac) + queues = int(nic.queues) + change_queues_number(ifname, queues) + error_context.context("Run test %s background" % netperf_stress, + logging.info) + stress_thread = utils_misc.InterruptedThread(netperf_test) + stress_thread.start() + utils_misc.wait_for(lambda: wait_time, 0, 5, + "Wait %s start background" % netperf_stress) + + # ping test + error_context.context("Ping guest from host", logging.info) + args = (guest_ip, ping_time, ping_lost_ratio) + bg_ping = utils_misc.InterruptedThread(ping_test, args) + bg_ping.start() + + error_context.context("Change queues number repeatedly", + logging.info) + repeat_counts = params.get_numeric("repeat_counts") + for nic in vm.virtnet: + queues = int(nic.queues) + if queues == 1: + logging.info("Nic with single queue, skip and continue") + continue + ifname = utils_net.get_linux_ifname(session_serial, nic.mac) + change_list = params.get("change_list").split(",") + for repeat_num in range(repeat_counts): + error_context.context("Change queues number -- %sth" + % repeat_num, logging.info) + queues_status = get_queues_status(ifname) + for q_number in change_list: + 
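+                        # e.g. change_list = "1,2,4" tries each queue count
+                        # in turn, feeding the previous status back in
+                        # (illustrative value)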
queues_status = change_queues_number(ifname, + int(q_number), + queues_status) + + logging.info("wait for background test finish") + try: + stress_thread.join() + except Exception as err: + err_msg = "Run %s test background error!\n " + err_msg += "Error Info: '%s'" + test.error(err_msg % (netperf_stress, err)) + + logging.info("Wait for background ping test finish.") + try: + bg_ping.join() + except Exception as err: + txt = "Fail to wait background ping test finish. " + txt += "Got error message %s" % err + test.fail(txt) + + if required_reboot: + logging.info("Rebooting guest ...") + vm.reboot() + required_reboot = False + else: + bg_test = False + finally: + n_server.cleanup(True) + n_client.cleanup(True) + if session_serial: + session_serial.close() diff --git a/qemu/tests/remote_convert.py b/qemu/tests/remote_convert.py index a96793b7cff191c1b32f98e1cb45920dcdb3e268..f93a667ae38cf036cf6e85fd25ac9db658fdcfa1 100644 --- a/qemu/tests/remote_convert.py +++ b/qemu/tests/remote_convert.py @@ -69,13 +69,17 @@ def run(test, params, env): target_filename = storage.get_image_filename(target_params, root_dir) storage.file_remove(target_params, target_filename) + # skip nbd image creation + skip_target_creation = target_params.get_boolean("skip_target_creation") + # Convert source to target cache_mode = params.get("cache_mode") source_cache_mode = params.get("source_cache_mode") logging.info("Convert %s to %s", source, target) fail_on((process.CmdError,))(source_image.convert)( params, root_dir, cache_mode=cache_mode, - source_cache_mode=source_cache_mode) + source_cache_mode=source_cache_mode, + skip_target_creation=skip_target_creation) _check_file(target, md5_value) diff --git a/qemu/tests/remote_image_guestfish_access.py b/qemu/tests/remote_image_guestfish_access.py new file mode 100644 index 0000000000000000000000000000000000000000..dc8162ab00eec165249f9137dc44baea2215f66a --- /dev/null +++ b/qemu/tests/remote_image_guestfish_access.py @@ -0,0 +1,57 @@ +import logging + +from avocado.utils import process + +from virttest import qemu_storage +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Start VM to make sure it's a bootable system image, shutdown VM + 2) Write a file into the image by guestfish without booting up vm + 3) Read the file and check the content is exactly what we write + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
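+
+    write_cmd and read_cmd come from the cfg file and are expected to be
+    guestfish invocations over the image URI, roughly of this shape
+    (an illustrative sketch, not the exact cfg commands):
+
+        guestfish --format={fmt} -a {uri} -i write /tmp/test 'hello'
+        guestfish --format={fmt} -a {uri} -i cat /tmp/test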
+ """ + image_tag = params.get("images").split()[0] + image_object = qemu_storage.QemuImg(params.object_params(image_tag), + None, image_tag) + if image_object.image_access: + test.cancel('Access remote image with tls-creds is ' + 'not supported by guestfish, skip the test') + + # Make sure the image holds an OS instance + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + try: + session = vm.wait_for_login( + timeout=params.get_numeric("login_timeout", 360)) + session.close() + finally: + vm.destroy() + + msg = params['msg_check'] + testfile = params['guest_file_name'] + write_cmd = params['write_cmd'].format(fmt=image_object.image_format, + uri=image_object.image_filename) + read_cmd = params['read_cmd'].format(fmt=image_object.image_format, + uri=image_object.image_filename) + + logging.info("Write file '%s'" % testfile) + result = process.run(write_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + test.fail('Failed to write a file, error message: %s' + % result.stderr.decode()) + + logging.info("Read file '%s'" % testfile) + result = process.run(read_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + test.fail('Failed to read a file, error message: %s' + % result.stderr.decode()) + elif result.stdout.decode().strip() != msg: + test.fail("Message '%s' mismatched with '%s'" + % (msg, result.stdout.decode())) diff --git a/qemu/tests/remote_image_multiwrite.py b/qemu/tests/remote_image_multiwrite.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ace9bcaeb2d1d507824cca20130f1065ad8ca1 --- /dev/null +++ b/qemu/tests/remote_image_multiwrite.py @@ -0,0 +1,45 @@ +import re +import logging + +from virttest import error_context +from virttest import utils_disk +from virttest import utils_misc + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Start guest with both data disk and system disk. + 2) Format a data disk(ext4 for rhel6+ and xfs for rhel7+) + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + session = None + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 240))) + + stg_tag = params["images"].split()[-1] + stg_params = params.object_params(stg_tag) + stg_fstype = stg_params["disk_format"] + stg_size = stg_params["image_size"] + stg_extra_params = stg_params.get("blk_extra_params", "") + match = re.search(r"(serial|wwn)=(\w+)", stg_extra_params, re.M) + + try: + drive_id = match.group(2) + drive_path = utils_misc.get_linux_drive_path(session, drive_id) + did = drive_path[5:] + logging.info("Format %s(size=%s) with %s type." % (did, stg_size, + stg_fstype)) + mnts = utils_disk.configure_empty_linux_disk(session, did, stg_size, + fstype=stg_fstype) + if not mnts: + test.fail("Failed to create %s on disk %s." 
% (stg_fstype, did)) + finally: + if session: + session.close() + vm.destroy() diff --git a/qemu/tests/remote_image_ncat_access.py b/qemu/tests/remote_image_ncat_access.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b8a83c66903b85cfa79e9c6d1a034d00125006 --- /dev/null +++ b/qemu/tests/remote_image_ncat_access.py @@ -0,0 +1,89 @@ +import socket + +from avocado.utils import process + +from virttest import error_context +from virttest import qemu_storage +from virttest import storage +from virttest import data_dir + +from provider.nbd_image_export import InternalNBDExportImage + +from provider import qemu_img_utils as img_utils + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Clone system image with qemu-img + 2) Export the image with qemu internal NBD server + 3) ncate ip -p port or ncat -U /socket/path + 4) Boot from the exported nbd image + 5) Log into VM + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + def _create_image(): + result = qemu_storage.QemuImg( + params, None, params['images'].split()[0]).dd( + output=storage.get_image_filename( + params.object_params(params["local_image_tag"]), + data_dir.get_data_dir() + ), + bs=1024*1024 + ) + + if result.exit_status != 0: + test.fail('Failed to clone the system image, error: %s' + % result.stderr.decode()) + + def _start_vm_without_image(): + params['images'] = '' + vm = None + try: + vm = img_utils.boot_vm_with_images(test, params, env) + vm.verify_alive() + finally: + # let VT remove it + params['images'] = ' %s' % params['local_image_tag'] + return vm + + def _make_ncat_cmd(): + ncat = '' + if params.get('nbd_unix_socket_%s' % params['nbd_image_tag']): + ncat = params['ncat_cmd'] + else: + localhost = socket.gethostname() + params['nbd_server'] = localhost if localhost else 'localhost' + ncat = params['ncat_cmd'].format(localhost=params['nbd_server']) + return ncat + + _create_image() + vm = _start_vm_without_image() + + nbd_export = InternalNBDExportImage(vm, params, params['local_image_tag']) + nbd_export.hotplug_tls() + nbd_export.hotplug_image() + nbd_export.export_image() + params['nbd_export_name'] = nbd_export.get_export_name() + + ncat_cmd = _make_ncat_cmd() + result = process.run(ncat_cmd, ignore_status=True, shell=True) + if params['errmsg_check'] not in result.stderr.decode().strip(): + test.fail('Failed to read message(%s) from output(%s)' + % (params['errmsg_check'], result.stderr.decode())) + + vm2 = None + try: + # Start another VM from the nbd exported image + vm2 = img_utils.boot_vm_with_images(test, params, env, + (params["nbd_image_tag"],), + 'vm2') + session = vm2.wait_for_login( + timeout=params.get_numeric("login_timeout", 480)) + session.close() + finally: + if vm2: + vm2.destroy() diff --git a/qemu/tests/remote_image_nmap_access.py b/qemu/tests/remote_image_nmap_access.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf5675e4e54017d2a20350eae2b381502539106 --- /dev/null +++ b/qemu/tests/remote_image_nmap_access.py @@ -0,0 +1,44 @@ +import socket + +from avocado.utils import process + +from virttest import error_context + +from provider.nbd_image_export import QemuNBDExportImage + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Create a local raw file with qemu-img + 2) Export the file in raw format with qemu-nbd + 3) Scan the port with nmap + + :param test: QEMU test object + :param params: Dictionary with the test 
parameters + :param env: Dictionary with test environment. + """ + nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) + nbd_export.export_image() + + h = socket.gethostname() + params['nbd_server_%s' % params["nbd_image_tag"]] = h if h else 'localhost' + nmap_cmd = params['nmap_cmd'].format( + localhost=params['nbd_server_%s' % params["nbd_image_tag"]]) + try: + result = process.run(nmap_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + test.fail('Failed to execute nmap, error: %s' + % result.stderr.decode()) + + nbd_export.list_exported_image( + params["nbd_image_tag"], + params.object_params(params["nbd_image_tag"]) + ) + + if params.get('msg_check'): + if params['msg_check'] not in result.stdout.decode().strip(): + test.fail('Failed to read message(%s) from output(%s)' + % (params['msg_check'], result.stderr.decode())) + finally: + nbd_export.stop_export() diff --git a/qemu/tests/remote_image_qemu_info_access.py b/qemu/tests/remote_image_qemu_info_access.py new file mode 100644 index 0000000000000000000000000000000000000000..8d1683db47e6d9ea3fa8b2b7185c32785c71ff2e --- /dev/null +++ b/qemu/tests/remote_image_qemu_info_access.py @@ -0,0 +1,37 @@ +import logging + +from virttest import qemu_storage +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Access remote image by qemu-img info + 2) Check url in output for libcurl backend + 3) Replace '_' with '%5f' in image name, + access image + 4) Check url in output for libcurl backend + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + img_params = params.object_params(params['remote_image_tag']) + image_name_list = [ + img_params['curl_path'], + img_params['curl_path'].replace(params['replace_symbol'], + params['ascii_symbol']) + ] + + for image_name in image_name_list: + img_params['curl_path'] = image_name + img_obj = qemu_storage.QemuImg(img_params, None, + params['remote_image_tag']) + + logging.info('Access image: %s' % img_obj.image_filename) + out = img_obj.info() + + if img_obj.image_filename not in out: + test.fail('Failed to get url(%s) from output(%s)' + % (img_obj.image_filename, out)) diff --git a/qemu/tests/remote_image_qemu_io_access.py b/qemu/tests/remote_image_qemu_io_access.py new file mode 100644 index 0000000000000000000000000000000000000000..462a9d8e5c28358d27b6ec3f06065e446799ec59 --- /dev/null +++ b/qemu/tests/remote_image_qemu_io_access.py @@ -0,0 +1,104 @@ +import socket + +from avocado.utils import process + +from virttest import data_dir +from virttest import storage +from virttest import qemu_storage +from virttest import utils_misc +from virttest import error_context + +from provider.nbd_image_export import QemuNBDExportImage + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Create a local file by echo command + 2) Export the file in raw format with qemu-nbd + 3) Access the exported nbd file by qemu-io + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
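+
+    The assembled command is a qemu-io run against the NBD export, roughly
+    (illustrative; the real template is the qemu_io_cmd cfg parameter):
+
+        qemu-io -f raw -c 'read 0 1k' nbd://localhost:10809/local_image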
+ """ + def _prepare(): + tag = params["local_image_tag"] + image_params = params.object_params(tag) + + if image_params.get('create_description_cmd'): + params['nbd_export_description_%s' % tag] = process.run( + image_params['create_description_cmd'], + ignore_status=True, + shell=True + ).stdout.decode().strip() + + if image_params.get('create_image_cmd'): + params['create_image_cmd_%s' % tag] = image_params['create_image_cmd'].format( + desc=params['nbd_export_description_%s' % tag], + filename=storage.get_image_filename(image_params, + data_dir.get_data_dir()) + ) + + # update nbd image's server to the local host + localhost = socket.gethostname() + params['nbd_server_%s' % params["nbd_image_tag"] + ] = localhost if localhost else 'localhost' + + def _get_tls_creds_obj(tag, params): + tls_str = '--object tls-creds-x509,id={t.aid},endpoint=client,dir={t.tls_creds}' + tls = storage.StorageAuth.auth_info_define_by_params(tag, params) + return tls_str.format(t=tls) if tls else '' + + def _get_secret_obj(tag, params): + secret_str = '--object secret,id={s.aid},data={s.data}' + secret = storage.ImageSecret.image_secret_define_by_params(tag, params) + return secret_str.format(s=secret) if secret else '' + + def _make_qemu_io_cmd(): + nbd_image = params["nbd_image_tag"] + nbd_image_params = params.object_params(nbd_image) + + nbd_image_filename = storage.get_image_filename(nbd_image_params, None) + nbd_image_format = '-f %s' % nbd_image_params['image_format'] + + tls_obj = _get_tls_creds_obj(nbd_image, nbd_image_params) + sec_obj = _get_secret_obj(nbd_image, nbd_image_params) + if tls_obj or sec_obj: + nbd_image_format = '' + nbd_image_filename = "'%s'" % qemu_storage.get_image_json( + nbd_image, nbd_image_params, None) + + qemu_io = utils_misc.get_qemu_io_binary(params) + return params['qemu_io_cmd'].format(qemu_io=qemu_io, + tls_creds=tls_obj, + secret=sec_obj, + fmt=nbd_image_format, + subcmd=params['qemu_io_subcmd'], + filename=nbd_image_filename) + + _prepare() + + nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) + nbd_export.create_image() + nbd_export.export_image() + + qemu_io_cmd = _make_qemu_io_cmd() + + try: + result = process.run(qemu_io_cmd, ignore_status=True, shell=True) + if result.exit_status != 0: + test.fail('Failed to execute qemu-io, error: %s' + % result.stderr.decode()) + + if params.get('msg_check'): + if params['msg_check'] not in result.stdout.decode().strip(): + test.fail('Failed to read message(%s) from output(%s)' + % (params['msg_check'], result.stderr.decode())) + + nbd_export.list_exported_image( + params["nbd_image_tag"], + params.object_params(params["nbd_image_tag"]) + ) + finally: + nbd_export.stop_export() diff --git a/qemu/tests/remote_image_unix_socket_access.py b/qemu/tests/remote_image_unix_socket_access.py new file mode 100644 index 0000000000000000000000000000000000000000..6d738807e1383925a3db246b4b68f52fe7d3579a --- /dev/null +++ b/qemu/tests/remote_image_unix_socket_access.py @@ -0,0 +1,62 @@ +import logging + +from virttest import data_dir +from virttest import storage +from virttest import qemu_storage +from virttest import error_context + +from provider import qemu_img_utils as img_utils + +from provider.nbd_image_export import QemuNBDExportImage + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Clone the system image1 with qemu-img + 2) Export the cloned image with qemu-nbd(type=unix) + 3) Start VM from the exported image + + :param test: QEMU test object + :param params: Dictionary with the test 
parameters
+    :param env: Dictionary with test environment.
+    """
+    def _prepare():
+        logging.info("Clone system image with qemu-img")
+        result = qemu_storage.QemuImg(
+            params, None, params['images'].split()[0]).dd(
+            output=storage.get_image_filename(
+                params.object_params(params["local_image_tag"]),
+                data_dir.get_data_dir()
+            ),
+            bs=1024*1024
+        )
+        if result.exit_status != 0:
+            test.fail('Failed to clone the system image, error: %s'
+                      % result.stderr.decode())

+        # Remove the image after test by avocado-vt
+        # params['images'] += ' %s' % params["local_image_tag"]
+
+    _prepare()
+
+    # local image to be exported
+    nbd_export = QemuNBDExportImage(params, params["local_image_tag"])
+    nbd_export.export_image()
+
+    session = None
+    logging.info("Start VM from the exported image")
+
+    try:
+        # Start VM from the nbd exported image
+        vm = img_utils.boot_vm_with_images(test, params, env,
+                                           (params["nbd_image_tag"],))
+        session = vm.wait_for_login(
+            timeout=int(params.get("login_timeout", 360)))
+        if not session:
+            test.fail('Failed to log into VM')
+    finally:
+        if session:
+            session.close()
+        vm.destroy()
+        nbd_export.stop_export()
diff --git a/qemu/tests/remote_server_disconnected.py b/qemu/tests/remote_server_disconnected.py
new file mode 100644
index 0000000000000000000000000000000000000000..039efb14ec67398e12a705611b51749bc5ffd53a
--- /dev/null
+++ b/qemu/tests/remote_server_disconnected.py
@@ -0,0 +1,72 @@
+import os
+import netaddr
+import logging
+import json
+
+from avocado.utils import process
+
+
+def run(test, params, env):
+    """
+    Check guest availability while remote storage servers get disconnected.
+
+    1) Start VM
+    2) Disconnect the local host from the remote servers one by one,
+       making sure the VM stays accessible.
+    """
+
+    def _check_hosts(hosts):
+        if len(hosts) < 2:
+            test.cancel("At least 2 remote servers are required.")
+        for h in hosts:
+            if os.path.exists(h) or netaddr.valid_ipv6(h):
+                test.cancel("Neither ipv6 nor unix domain"
+                            " socket is supported by now.")
+
+    hosts = []
+    if params.get_boolean("enable_gluster"):
+        hosts.append(params["gluster_server"])
+        hosts.extend([peer['host'] for peer in json.loads(
+            params.get('gluster_peers', '[]')) if 'host' in peer])
+
+    _check_hosts(hosts)
+    hosts.pop()    # The last server should remain accessible
+
+    disconn_cmd = params["disconn_cmd"]
+    recover_cmd = params["recover_cmd"]
+    conn_check_cmd = params["conn_check_cmd"]
+    disk_op_cmd = params["disk_op_cmd"]
+    disk_op_tm = int(params["disk_op_timeout"])
+
+    session = None
+    disconn_hosts = []
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    tm = int(params.get("login_timeout", 300))
+    session = vm.wait_for_login(timeout=tm)
+
+    try:
+        for host in hosts:
+            logging.info("Disconnect from %s" % host)
+            process.system(disconn_cmd.format(source=host),
+                           ignore_status=True, shell=True)
+            if process.system(conn_check_cmd.format(source=host),
+                              ignore_status=True, shell=True) == 0:
+                test.error("Failed to disconnect from remote server")
+            disconn_hosts.append(host)
+
+        logging.info("Do disk I/O in VM")
+        s, o = session.cmd_status_output(disk_op_cmd, timeout=disk_op_tm)
+        if s != 0:
+            test.fail("Failed to do I/O in VM: %s" % o)
+    finally:
+        for host in disconn_hosts:
+            logging.info("Recover connection to %s" % host)
+            process.system(recover_cmd.format(source=host),
+                           ignore_status=True, shell=True)
+            if process.system(conn_check_cmd.format(source=host),
+                              ignore_status=True, shell=True) != 0:
+                logging.warn("Failed to recover connection to %s" % host)
+        if session:
+            session.close()
+        vm.destroy()
diff --git
a/qemu/tests/resize_qemu_img.py b/qemu/tests/resize_qemu_img.py index eed20c41987a25fc359b924c2cab20bde30c228a..b9b1d45763e805ab8aa72d2acd2c4b5c4b93f1ba 100644 --- a/qemu/tests/resize_qemu_img.py +++ b/qemu/tests/resize_qemu_img.py @@ -12,8 +12,8 @@ def run(test, params, env): """ A 'qemu-img' resize test. - 1.create a raw/qcow2 image - 2.change the raw/qcow2 image size * n + 1.create a raw/qcow2/luks image + 2.change the raw/qcow2/luks image size * n 3.verify resize * n :param test: Qemu test object @@ -33,7 +33,7 @@ def run(test, params, env): res.append(s) return sum(res) - def _verify_resize(img_size, expected_size): + def _verify_resize_image(img_size, expected_size): """Verify the image size is as expected after resize.""" logging.info("Verify the size of %s is %s." % (img.image_filename, expected_size)) @@ -41,25 +41,43 @@ def run(test, params, env): test.fail("Got image virtual size: %s, should be: %s." % (img_size, expected_size)) - def _resize(size_changes): + def _verify_resize_disk(disk_size, expected_size): + """ + Verify the disk size is as expected after resize. + """ + logging.info("Verify the disk size of the image %s is %sG." + % (img.image_filename, expected_size)) + if disk_size != expected_size: + test.fail("Got image actual size: %sG, should be: %sG." + % (disk_size, expected_size)) + + def _resize(size_changes, preallocation): """Resize the image and verify its size.""" for idx, size in enumerate(size_changes): - logging.info("Resize the raw image %s %s." % (img.image_filename, - size)) + logging.info("Resize the raw image %s %s with preallocation %s." + % (img.image_filename, size, preallocation)) shrink = True if "-" in size else False - img.resize(size, shrink=shrink) + img.resize(size, shrink=shrink, preallocation=preallocation) + if preallocation in ["full", "falloc"]: + disk_size = json.loads(img.info(output="json"))["actual-size"] + # Set the magnitude order to GiB, allow some bytes deviation + disk_size = float( + utils_numeric.normalize_data_size(str(disk_size), "G")) + expected_disk_size = size[1] + _verify_resize_disk(int(disk_size), int(expected_disk_size)) img_size = json.loads(img.info(output="json"))["virtual-size"] expected_size = (int(utils_numeric.normalize_data_size( params["image_size_test"], "B")) + _sum_size_changes(size_changes[:idx + 1])) - _verify_resize(img_size, expected_size) + _verify_resize_image(img_size, expected_size) img_param = params.object_params('test') img = QemuImg(img_param, data_dir.get_data_dir(), 'test') size_changes = params["size_changes"].split() + preallocation = params.get("preallocation") logging.info("Create a raw image %s." % img.image_filename) img.create(img_param) - _resize(size_changes) + _resize(size_changes, preallocation) diff --git a/qemu/tests/rh_kernel_update.py b/qemu/tests/rh_kernel_update.py index 6307cf3e8273ad0dbc83d7304d9f0aebbfcd4df4..b78feb5a11c8fad091d022e133d7a9d261639bb5 100644 --- a/qemu/tests/rh_kernel_update.py +++ b/qemu/tests/rh_kernel_update.py @@ -90,9 +90,9 @@ def run(test, params, env): for pkg_name in params["kernel_pkgs"].split(): pkg_params = params.object_params(pkg_name) pkg_arch = pkg_params["pkg_arch"] - # package pattern: n-v-r.a - pkg_pattern = "%s-%s.%s." 
% (pkg_name, ver_rev, pkg_arch) - pkg_pattern = re.compile(".*/%s.*" % re.escape(pkg_pattern)) + # package pattern: n-v-r.a.rpm + pkg_pattern = "%s-%s.%s.rpm" % (pkg_name, ver_rev, pkg_arch) + pkg_pattern = re.compile(".*/%s" % re.escape(pkg_pattern)) match = pkg_pattern.search(buildinfo, re.M | re.I) if not match: test.error("Could not get the link of '%s' in buildinfo" diff --git a/qemu/tests/rng_bat.py b/qemu/tests/rng_bat.py index a29616141f7db8da5448d6f354ef2137d2b1cd21..ea5c86d67756242c85dd6668d699c3b8b1d05c56 100644 --- a/qemu/tests/rng_bat.py +++ b/qemu/tests/rng_bat.py @@ -37,6 +37,15 @@ def run(test, params, env): output = process.system_output(lsof_cmd, ignore_status=True).decode() return re.search(r"\s+%s\s+" % vm_pid, output, re.M) + def _is_rngd_running(): + """ + Check whether rngd is running + """ + output = session.cmd_output(check_rngd_service) + if 'running' not in output: + return False + return True + rng_data_rex = params.get("rng_data_rex", r".*") dev_file = params.get("filename_passthrough") timeout = float(params.get("login_timeout", 360)) @@ -94,8 +103,7 @@ def run(test, params, env): if os_type == "linux": check_rngd_service = params.get("check_rngd_service") if check_rngd_service: - output = session.cmd_output(check_rngd_service) - if 'running' not in output: + if not utils_misc.wait_for(_is_rngd_running, 30, first=5): start_rngd_service = params["start_rngd_service"] status, output = session.cmd_status_output(start_rngd_service) if status: diff --git a/qemu/tests/smbios_default_check.py b/qemu/tests/smbios_default_check.py new file mode 100644 index 0000000000000000000000000000000000000000..1173b403ce2994118412c6c1958e24eba89cfb68 --- /dev/null +++ b/qemu/tests/smbios_default_check.py @@ -0,0 +1,51 @@ +import logging +import re + +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Check default smbios strings in qemu : + 1) Boot guest with default smbios set up + 2) Verify if bios info have been emulated correctly + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
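+
+    The get_* commands are expected to query the SMBIOS tables, e.g. via
+    dmidecode on Linux or wmic on Windows (illustrative examples, the
+    actual commands come from the cfg file):
+
+        dmidecode -s system-manufacturer
+        wmic csproduct get vendor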
+ """ + + def check_info(cmd, template): + msg_log = "Check " + template + " info" + error_context.context(msg_log, logging.info) + cmd_output = session.cmd_output(cmd) + cmd_output_re = re.split('\n', cmd_output.strip('\n'))[-1].strip(' ') + template = params[template] + if not re.match(template, cmd_output_re): + return cmd_output_re + + re_template = ["System_Manufacturer", "System_SKU_Number", + "Baseboard_Manufacturer", "Baseboard_Product_Name"] + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = int(params.get("login_timeout", 360)) + session = vm.wait_for_login(timeout=timeout) + + failures = [] + check_info_cmd = [] + check_info_cmd.append(params["get_sys_manufacturer"]) + check_info_cmd.append(params["get_sys_SKUNumber"]) + check_info_cmd.append(params["get_baseboard_manufacturer"]) + check_info_cmd.append(params["get_baseboard_product_name"]) + for cmd, template in zip(check_info_cmd, re_template): + output = check_info(cmd, template) + if output: + e_msg = ("%s mismatch, out: %s" % (template, output)) + failures.append(e_msg) + session.close() + + if failures: + test.fail("Smbios default check test reported %s failures:\n%s" + % (len(failures), "\n".join(failures))) diff --git a/qemu/tests/smt_test.py b/qemu/tests/smt_test.py new file mode 100644 index 0000000000000000000000000000000000000000..94226320548c2aabde66848882fd9e476fae0e13 --- /dev/null +++ b/qemu/tests/smt_test.py @@ -0,0 +1,119 @@ +import logging +import re +import time + +from virttest import env_process +from virttest import error_context +from virttest import utils_misc +from virttest import utils_test + +from virttest.utils_test import BackgroundTest +from provider.cpu_utils import check_cpu_flags + + +@error_context.context_aware +def run(test, params, env): + """ + smt test: + 1) Check if host has topoext flag, if not, cancel test + 2) Boot guest and check cpu count and threads number in guest + 3) Run stress inside guest, and check cpu usage in guest(only for linux) + + :params test: QEMU test object. + :params params: Dictionary with the test parameters. + :params env: Dictionary with test environment. 
+ """ + def get_guest_threads(): + """ + Get guest threads number + """ + if os_type == "linux": + cmd = params["get_threads_cmd"] + output = session.cmd_output_safe(cmd) + threads = int(re.findall(r":\s*(\d+)", output)[0]) + else: + cmd = params["get_cores_cmd"] + output = session.cmd_output_safe(cmd) + cores = int(re.findall(r"=(\d+)", output)[0]) + cmd = params["get_sockets_cmd"] + output = session.cmd_output_safe(cmd) + sockets = len(re.findall(r"SocketDesignation=", output)) + threads = int(vm.cpuinfo.smp/sockets/cores) + return threads + + def heavyload_install(install_path): + """ + Install heavyload in windows guest + """ + test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path + if session.cmd_status(test_installed_cmd) != 0: + logging.warning("Could not find installed heavyload in guest, will" + " install it via winutils.iso ") + winutil_drive = utils_misc.get_winutils_vol(session) + if not winutil_drive: + test.cancel("WIN_UTILS CDROM not found.") + install_cmd = params["install_cmd"] % winutil_drive + session.cmd(install_cmd) + + def run_stress(): + """ + Run stress inside guest, return guest cpu usage + """ + error_context.context("Run stress in guest and get cpu usage", + logging.info) + if os_type == "linux": + stress_args = params["stress_args"] + stress_test = utils_test.VMStress(vm, "stress", + params, stress_args=stress_args) + try: + stress_test.load_stress_tool() + time.sleep(stress_duration / 2) + output = session.cmd_output_safe(params["get_cpu_usage_cmd"]) + utils_misc.wait_for(lambda: (stress_test.app_running is False), 30) + stress_test.unload_stress() + cpu_usage = re.findall(r":\s*(\d+.?\d+)\s*us", output) + cpu_usage = [float(x) for x in cpu_usage] + logging.info("Guest cpu usage is %s", cpu_usage) + unloaded_cpu = [x for x in cpu_usage if x < 20] + if unloaded_cpu: + test.fail("CPU(s) load percentage is less than 20%") + finally: + stress_test.clean() + else: + install_path = params["install_path"] + heavyload_install(install_path) + error_context.context("Run heavyload inside guest.", logging.info) + heavyload_bin = r'"%s\heavyload.exe" ' % install_path + heavyload_options = ["/CPU %d" % vm.cpuinfo.smp, + "/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START"] + start_cmd = heavyload_bin + " ".join(heavyload_options) + stress_tool = BackgroundTest(session.cmd, (start_cmd, + stress_duration, + stress_duration)) + stress_tool.start() + if not utils_misc.wait_for(stress_tool.is_alive, stress_duration): + test.error("Failed to start heavyload process.") + stress_tool.join(stress_duration) + + check_cpu_flags(params, "topoext", test) + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + + os_type = params["os_type"] + stress_duration = params.get_numeric("stress_duration", 60) + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + + try: + if vm.get_cpu_count() != vm.cpuinfo.smp: + test.fail("Guest cpu number is not right") + threads = get_guest_threads() + logging.info("Guest threads number is %s", threads) + if threads != params.get_numeric("expected_threads", 1): + test.fail("Guest cpu threads number is not right") + run_stress() + finally: + if session: + session.close() diff --git a/qemu/tests/softlockup.py b/qemu/tests/softlockup.py index d34d639827550170c3a38c284da11f2524264053..c3de73cc0f969932623396d1edab175de1154317 100644 --- a/qemu/tests/softlockup.py +++ b/qemu/tests/softlockup.py @@ -81,7 +81,7 @@ def run(test, params, env): logging.info("Run stress on host") # 
stress_threads = 2 * n_cpus - threads_host = 2 * cpu.online_cpus_count() + threads_host = 2 * cpu.online_count() # Run stress test on host process.run(stress_cmd % (stress_dir, threads_host), shell=True) diff --git a/qemu/tests/spapr_vty_multi_backends.py b/qemu/tests/spapr_vty_multi_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..1992b8b7e595ad71879b15d0ec2e968d1fa3a9bf --- /dev/null +++ b/qemu/tests/spapr_vty_multi_backends.py @@ -0,0 +1,74 @@ +import logging +import os +import re + +from virttest import error_context +from virttest import remote +from virttest import utils_test + + +@error_context.context_aware +def run(test, params, env): + """ + Verify the login guest with multi backends spapr-vty: + 1) Boot guest with multi spapr-vty with backend + 2) Modify the kernel cfg file to specify the backend + 3) For pty and file backend: + 3.1) Open and close chardev + 4) For unix_socket and tcp_socket: + 4.1) Login guest + 4.2) Create and delete files inside guest + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + prompt = params.get("shell_prompt") + create_delete_file = params["create_delete_file"] + vm = env.get_vm(params["main_vm"]) + vm.wait_for_login() + for serial_id in params.objects("serials"): + if serial_id != "vs1": + hvc_id = int(serial_id.replace('vs', '')) - 1 + kernel_params = "console=hvc%s,115200" % hvc_id + utils_test.update_boot_option(vm, args_added=kernel_params) + + backend = params.object_params(serial_id)["chardev_backend"] + serial_device = vm.devices.get(serial_id) + chardev_qid = serial_device.get_param("chardev") + chardev_device = vm.devices.get_by_qid(chardev_qid)[0] + + logging.info("The currently tested backend is %s." % backend) + if backend == 'unix_socket': + session = vm.wait_for_serial_login(timeout=60) + session.cmd(create_delete_file) + session.close() + elif backend == 'tcp_socket': + session = remote.remote_login(client='nc', + host=chardev_device.params['host'], + port=chardev_device.params['port'], + username=params['username'], + password=params['password'], + prompt=prompt, + timeout=240) + session.cmd(create_delete_file) + session.close() + elif backend == 'pty': + chardev_info = vm.monitor.human_monitor_cmd('info chardev') + hostfile = re.findall('%s: filename=pty:(/dev/pts/\\d)?' 
% + serial_id, chardev_info) + if not hostfile: + test.fail("Can't find the corresponding pty backend: %s" % + chardev_info) + fd_pty = os.open(hostfile[0], os.O_RDWR | os.O_NONBLOCK) + os.close(fd_pty) + elif backend == 'file': + filename = chardev_device.params['path'] + with open(filename) as f: + if 'Linux' not in f.read(): + test.fail("Guest boot fail with file backend.") + elif backend == 'null': + session = vm.wait_for_login() + session.cmd(create_delete_file) + + vm.verify_dmesg() + vm.destroy() diff --git a/qemu/tests/steal_time.py b/qemu/tests/steal_time.py new file mode 100644 index 0000000000000000000000000000000000000000..c07ba7cc61d5e65dea2a381544ea87aa0769a2e7 --- /dev/null +++ b/qemu/tests/steal_time.py @@ -0,0 +1,84 @@ +import logging +import time +import re + +from avocado.utils import process +from virttest import error_context +from virttest import utils_test + + +@error_context.context_aware +def run(test, params, env): + """ + Steal time test: + 1) Boot two guests and bind to one same cpu + 2) Run stress inside both guests + 3) Check steal time in top inside guests + 4) Check if two qemu processes have equal cpu usage + 5) Check steal time in /proc/stat in guest + 6) Repeat step 4) after 60s, compare the steal time changed in two guests + + :params test: QEMU test object. + :params params: Dictionary with the test parameters. + :params env: Dictionary with test environment. + """ + def get_stat_val(): + """ + Get steal time value in /proc/stat + """ + stat_val = [] + for session in sessions: + val = session.cmd_output(stat_cmd).split()[8] + stat_val.append(int(val)) + return stat_val + + stress_args = params["stress_args"] + stress_tests = [] + sessions = [] + vms = env.get_all_vms() + + error_context.context("Run stress in both guests", logging.info) + for vm in vms: + session = vm.wait_for_login() + sessions.append(session) + stress_test = utils_test.VMStress(vm, "stress", + params, stress_args=stress_args) + stress_test.load_stress_tool() + stress_tests.append(stress_test) + + time.sleep(10) + top_cmd = params["top_cmd"] + stat_cmd = params["stat_cmd"] + + try: + error_context.context("Check steal time in guests", logging.info) + for session in sessions: + output = session.cmd_output(top_cmd) + top_st = re.findall(r",\s*(\d+.?\d+)\s*st", output)[0] + if abs(float(top_st) - 50) > 10: + test.fail("Guest steal time is not around 50") + + error_context.context("Check two qemu process cpu usage", logging.info) + cmd = "top -n1 -b -p %s -p %s | grep qemu-kvm | awk '{print $9}'" \ + % (vms[0].get_pid(), vms[1].get_pid()) + cpu_usage = process.getoutput(cmd, shell=True).split() + logging.info("QEMU cpu usage are %s", cpu_usage) + cpu_usage = sorted([float(x) for x in cpu_usage]) + if sum(cpu_usage) < 80 or cpu_usage[0] < 40: + test.fail("Two qemu process didn't get equal cpu usage") + + error_context.context("Check steal time in /proc/stat", logging.info) + stat_val_pre = get_stat_val() + logging.info("Steal time value in /proc/stat is %s", stat_val_pre) + time.sleep(60) + stat_val_post = get_stat_val() + logging.info("After 60s, steal time value in /proc/stat is %s", + stat_val_post) + + delta = list(map(lambda x, y: y - x, stat_val_pre, stat_val_post)) + if abs(delta[0] - delta[1]) > sum(delta)/2*0.1: + test.fail("Guest steal time change in /proc/stat is not close") + + finally: + for stress_test in stress_tests: + stress_test.unload_stress() diff --git a/qemu/tests/thin_provision_check_mode.py b/qemu/tests/thin_provision_check_mode.py new file mode 100644 index 
0000000000000000000000000000000000000000..609173565aa9e14345edc32f5e24ab16c445c1b8 --- /dev/null +++ b/qemu/tests/thin_provision_check_mode.py @@ -0,0 +1,99 @@ +import os +import logging + +from avocado.utils import genio +from avocado.utils import path as utils_path +from avocado.utils import process + +from virttest import env_process +from virttest import error_context + +from virttest.utils_misc import get_linux_drive_path + + +@error_context.context_aware +def run(test, params, env): + """ + Qemu provisioning mode checking test: + 1) load scsi_debug module with lbpu=1 / lbpu=0 + 2) boot guest with scsi_debug emulated disk as extra data disk + 3) get provisioning mode of data disk in guest + 4) check provisioning mode + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + def get_host_scsi_disk(): + """ + Get scsi disk which emulated by scsi_debug module. + """ + cmd = "lsblk -S -n -p|grep scsi_debug" + status, output = process.getstatusoutput(cmd) + if status != 0: + test.fail("Can not get scsi_debug disk on host") + + scsi_disk_info = output.strip().split() + return scsi_disk_info[1], scsi_disk_info[0] + + def get_provisioning_mode(device, host_id): + """ + Get disk provisioning_mode, value usually is 'writesame_16' or 'unmap', + depends on params for scsi_debug module. + """ + device_name = os.path.basename(device) + path = "/sys/block/%s/device/scsi_disk" % device_name + path += "/%s/provisioning_mode" % host_id + return genio.read_one_line(path).strip() + + def get_guest_provisioning_mode(device): + """ + Get disk provisioning_mode in guest + """ + cmd = "lsblk -S -n %s" % device + status, output = session.cmd_status_output(cmd) + if status != 0: + test.fail("Can not find device %s in guest" % device) + + host_id = output.split()[1] + cmd = "cat /sys/bus/scsi/devices/{0}/scsi_disk/{0}/provisioning_mode".format( + host_id) + + status, output = session.cmd_status_output(cmd) + if status == 0: + return output.strip() + + test.fail("Can not get provisioning mode %s in guest" % host_id) + + utils_path.find_command("lsblk") + host_scsi_id, disk_name = get_host_scsi_disk() + provisioning_mode = get_provisioning_mode(disk_name, host_scsi_id) + logging.info("Current host provisioning_mode = '%s'", provisioning_mode) + + # prepare params to boot vm with scsi_debug disk. 
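+    # The scsi_debug disk itself is pre-created by the setup, e.g.
+    # 'modprobe scsi_debug lbpu=1' typically yields provisioning_mode=unmap
+    # (an illustrative mapping; the exact mode depends on the module
+    # parameters, see step 1 above).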
+ vm_name = params["main_vm"] + data_tag = params["data_tag"] + target_mode = params["target_mode"] + disk_serial = params["disk_serial"] + params["start_vm"] = "yes" + params["image_name_%s" % data_tag] = disk_name + + error_context.context("boot guest with disk '%s'" % disk_name, + logging.info) + # boot guest with scsi_debug disk + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + output_path = get_linux_drive_path(session, disk_serial) + + if not output_path: + test.fail("Can not get output file path in guest.") + + mode = get_guest_provisioning_mode(output_path) + error_context.context("Checking provision mode %s" % mode, logging.info) + if mode != target_mode: + test.fail("Got unexpected mode:%s", mode) diff --git a/qemu/tests/thin_provision_guest_fstrim.py b/qemu/tests/thin_provision_guest_fstrim.py new file mode 100644 index 0000000000000000000000000000000000000000..878a94a8d770199d22782841159e5fe5195a2684 --- /dev/null +++ b/qemu/tests/thin_provision_guest_fstrim.py @@ -0,0 +1,125 @@ +import logging +from avocado.utils import path as utils_path +from avocado.utils import process + +from virttest import env_process +from virttest import error_context +from virttest import guest_agent +from virttest.utils_misc import get_linux_drive_path + + +@error_context.context_aware +def run(test, params, env): + """ + Execute guest-fstrim command to guest agent for discard testing: + 1) Load scsi_debug module on host. + 2) Boot guest with the scsi_debug emulated disk as data disk. + 3) Format data disk with ext4 or xfs in guest. + 4) Check the number blocks of the scsi_debug device. + 5) Mount the disk in guest then fill data on it. + 6) Check the number blocks of the scsi_debug device. + 7) Remove data from the data disk in guest. + 8) Execute guest-fstrim command to guest agent. + 9) Check the number blocks of the scsi_debug device. it should less than + the number before execute guest-fstrim. + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + def get_scsi_debug_disk(guest_session=None): + """" + Get scsi debug disk on host or on guest which created as scsi-block. + """ + cmd = "lsblk -S -n -p|grep scsi_debug" + + if guest_session: + status, output = guest_session.cmd_status_output(cmd) + else: + status, output = process.getstatusoutput(cmd) + + if status != 0: + test.fail("Can not find scsi_debug disk") + + return output.split()[0] + + def get_guest_discard_disk(): + """ + Get discard disk on guest. + """ + if params["drive_format_%s" % data_tag] == "scsi-block": + return get_scsi_debug_disk(session) + + disk_serial = params["disk_serial"] + return get_linux_drive_path(session, disk_serial) + + def create_guest_agent_session(): + """ + Create guest agent session. + """ + guest_agent_serial_type = params["guest_agent_serial_type"] + guest_agent_name = params["guest_agent_name"] + filename = vm.get_serial_console_filename(guest_agent_name) + guest_agent_params = params.object_params(guest_agent_name) + guest_agent_params["monitor_filename"] = filename + return guest_agent.QemuAgent(vm, guest_agent_name, + guest_agent_serial_type, + guest_agent_params, + get_supported_cmds=True) + + def get_blocks(): + """ + Get numbers blocks of the scsi debug disk on host. 
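+        The map file lists the mapped ranges as comma-separated pairs,
+        e.g. "0-1023,4096-5119," (illustrative); summing the range sizes
+        gives the allocated block count.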
+ """ + cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map" + output = process.system_output(cmd).decode().split(",") + return sum([abs(eval(i)) for i in output if i != ""]) + + utils_path.find_command("lsblk") + disk_name = get_scsi_debug_disk() + + # prepare params to boot vm with scsi_debug disk. + vm_name = params["main_vm"] + data_tag = params["data_tag"] + params["start_vm"] = "yes" + params["image_name_%s" % data_tag] = disk_name + + error_context.context("Boot guest with disk '%s'" % disk_name, + logging.info) + # boot guest with scsi_debug disk + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + vm.verify_alive() + session = vm.wait_for_login() + + agent_session = create_guest_agent_session() + + disk_name = get_guest_discard_disk() + + guest_format_command = params["guest_format_command"].format(disk_name) + guest_dd_command = params["guest_dd_command"] + guest_rm_command = params["guest_rm_command"] + + error_context.context("Format disk in guest.", logging.info) + session.cmd(guest_format_command) + count = get_blocks() + logging.info("The initial blocks is %d" % count) + + error_context.context("Fill data disk in guest.", logging.info) + session.cmd(guest_dd_command, ignore_all_errors=True) + old_count = get_blocks() + error_context.context("Blocks before trim: %d" % old_count, logging.info) + + error_context.context("Remove data from disk in guest.", logging.info) + session.cmd(guest_rm_command) + + session.cmd("setenforce 0") + error_context.context("Execute guest-fstrim command.", logging.info) + agent_session.fstrim() + new_count = get_blocks() + error_context.context("Blocks after trim: %d" % new_count, logging.info) + + error_context.context("Compare blocks.", logging.info) + if new_count >= old_count: + test.fail("Got unexpected result:%s %s" % (old_count, new_count)) diff --git a/qemu/tests/throttle_parameter_test.py b/qemu/tests/throttle_parameter_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0151be0dd35e406397945b2a42ee729f5f8345e0 --- /dev/null +++ b/qemu/tests/throttle_parameter_test.py @@ -0,0 +1,49 @@ +import logging + +from virttest import error_context + +from provider.storage_benchmark import generate_instance +from provider.throttle_utils import ThrottleGroupManager, ThrottleTester, \ + ThrottleGroupsTester + + +# This decorator makes the test function aware of context strings +@error_context.context_aware +def run(test, params, env): + """ + Test throttle relevant properties feature. + + 1) Boot up guest with throttle groups. + There are two throttle groups and each have two disk + 2) Build fio operation options and expected result + according to throttle properties. + 3) Execute one disk or all disks testing on groups parallel. 
+ """ + + error_context.context("Get the main VM", logging.info) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + session = vm.wait_for_login(timeout=360) + + error_context.context("Deploy fio", logging.info) + fio = generate_instance(params, vm, 'fio') + + tgm = ThrottleGroupManager(vm) + groups = params["throttle_groups"].split() + testers = [] + for group in groups: + tgm.get_throttle_group_props(group) + images = params["throttle_group_member_%s" % group].split() + tester = ThrottleTester(test, params, vm, session, group, images) + error_context.context("Build test stuff for %s:%s" % (group, images), + logging.info) + tester.build_default_option() + tester.build_images_fio_option() + tester.set_fio(fio) + testers.append(tester) + + error_context.context("Start groups testing:%s" % groups, logging.info) + groups_tester = ThrottleGroupsTester(testers) + + groups_tester.start() diff --git a/qemu/tests/timedrift.py b/qemu/tests/timedrift.py index fe0e2f765251bb128e5dda1223470a3f1a48aaf5..4bd40bc01ea6d3bba0b8fa2d9d87bff1d5c9e96d 100644 --- a/qemu/tests/timedrift.py +++ b/qemu/tests/timedrift.py @@ -94,7 +94,7 @@ def run(test, params, env): guest_load_instances = params["guest_load_instances"] host_load_instances = params["host_load_instances"] if not guest_load_instances and not host_load_instances: - host_load_instances = cpu.total_cpus_count() + host_load_instances = cpu.total_count() guest_load_instances = vm.get_cpu_count() else: host_load_instances = int(host_load_instances) diff --git a/qemu/tests/timedrift_adjust_time.py b/qemu/tests/timedrift_adjust_time.py index 8a56a094f5ab2243cab0931e0beacbdf0d27934e..6a7719c1b8d937d2458864fb4f46a889369ddf6e 100644 --- a/qemu/tests/timedrift_adjust_time.py +++ b/qemu/tests/timedrift_adjust_time.py @@ -130,6 +130,28 @@ class TimedriftTest(object): "Guest Time: %s" % guest_timestr) return list(map(float, [epoch_host, epoch_guest])) + def get_hwtime(self, session): + """ + Get guest's hardware clock in epoch. + + :param session: VM session. 
+ """ + hwclock_time_command = self.params.get("hwclock_time_command", + "hwclock -u") + hwclock_time_filter_re = self.params.get("hwclock_time_filter_re", + r"(\d+-\d+-\d+ \d+:\d+:\d+).*") + hwclock_time_format = self.params.get("hwclock_time_format", + "%Y-%m-%d %H:%M:%S") + output = session.cmd_output_safe(hwclock_time_command) + try: + str_time = re.findall(hwclock_time_filter_re, output)[0] + guest_time = time.mktime(time.strptime(str_time, hwclock_time_format)) + except Exception as err: + logging.debug( + "(time_format, output): (%s, %s)", hwclock_time_format, output) + raise err + return guest_time + @error_context.context_aware def verify_clock_source(self, session): """ @@ -198,14 +220,28 @@ class BackwardtimeTest(TimedriftTest): while time.time() < start_time + timeout: host_epoch_time, guest_epoch_time = self.get_epoch_seconds(session) real_difference = abs(host_epoch_time - guest_epoch_time) - if abs(real_difference - expect_difference) < tolerance: - return + if self.params["os_type"] == 'linux': + expect_difference_hwclock = float(self.params["time_difference_hwclock"]) + guest_hwtime = self.get_hwtime(session) + real_difference_hw = abs(host_epoch_time - guest_hwtime) + if abs(real_difference - expect_difference) < tolerance and \ + abs(real_difference_hw - expect_difference_hwclock) < tolerance: + return + else: + if abs(real_difference - expect_difference) < tolerance: + return logging.info("Host epoch time: %s" % host_epoch_time) logging.info("Guest epoch time: %s" % guest_epoch_time) - err_msg = "Unexcept time difference between host and guest after" - err_msg += " testing.(actual difference: %s)" % real_difference - err_msg += " except difference: %s)" % expect_difference - self.test.fail(err_msg) + if self.params["os_type"] == 'linux': + logging.info("Guest hardware time: %s" % guest_hwtime) + err_msg = "Unexpected sys and hardware time difference (%s %s)\ + between host and guest after adjusting time." \ + % (real_difference, real_difference_hw) + else: + err_msg = "Unexpected time difference between host and guest after" + err_msg += " testing.(actual difference: %s)" % real_difference + err_msg += " expected difference: %s)" % expect_difference + self.test.fail(err_msg) @error_context.context_aware def check_dirft_before_adjust_time(self, session): @@ -223,12 +259,24 @@ class BackwardtimeTest(TimedriftTest): tolerance = float(self.params.get("tolerance", 6)) host_epoch_time, guest_epoch_time = self.get_epoch_seconds(session) real_difference = abs(host_epoch_time - guest_epoch_time) - if real_difference > tolerance: - logging.info("Host epoch time: %s" % host_epoch_time) - logging.info("Guest epoch time: %s" % guest_epoch_time) - err_msg = "Unexcept time difference (%s) " % real_difference - err_msg += " between host and guest before testing." 
- self.test.fail(err_msg) + if self.params["os_type"] == 'linux': + guest_hwtime = self.get_hwtime(session) + real_difference_hw = abs(host_epoch_time - guest_hwtime) + if real_difference > tolerance or real_difference_hw > tolerance: + logging.info("Host epoch time: %s" % host_epoch_time) + logging.info("Guest epoch time: %s" % guest_epoch_time) + logging.info("Guest hardware time: %s" % guest_hwtime) + err_msg = "Unexpected sys and hardware time difference (%s %s) \ + between host and guest before testing."\ + % (real_difference, real_difference_hw) + self.test.fail(err_msg) + else: + if real_difference > tolerance: + logging.info("Host epoch time: %s" % host_epoch_time) + logging.info("Guest epoch time: %s" % guest_epoch_time) + err_msg = "Unexcept time difference (%s) " % real_difference + err_msg += " between host and guest before testing." + self.test.fail(err_msg) def pre_test(self): """ @@ -243,6 +291,8 @@ class BackwardtimeTest(TimedriftTest): vm = self.get_vm(create=True) if self.params["os_type"] == 'windows': utils_time.sync_timezone_win(vm) + else: + utils_time.sync_timezone_linux(vm) session = self.get_session(vm) self.check_dirft_before_adjust_time(session) if self.params.get("read_clock_source_cmd"): diff --git a/qemu/tests/timedrift_with_multi_vms.py b/qemu/tests/timedrift_with_multi_vms.py index 7c85afa9f4717961ed9251e78cd0d25e2951bba2..95af4bc8be48345b5d70d363cc2bcfdbfa556ed9 100644 --- a/qemu/tests/timedrift_with_multi_vms.py +++ b/qemu/tests/timedrift_with_multi_vms.py @@ -52,7 +52,7 @@ def run(test, params, env): vms = params.get("vms").split() vm_obj = [] sessions = [] - host_cpu_list = cpu.cpu_online_list() + host_cpu_list = cpu.online_list() if same_cpu == "no": if len(host_cpu_list) < len(vms): test.cancel("There aren't enough physical cpus to pin all guests") diff --git a/qemu/tests/timerdevice_boot.py b/qemu/tests/timerdevice_boot.py index ef1ca7076763a06d204e2429d8df9d1a313d8b34..049456984883145bec6c7dad4b4945af2971f5e8 100644 --- a/qemu/tests/timerdevice_boot.py +++ b/qemu/tests/timerdevice_boot.py @@ -5,6 +5,7 @@ import re from avocado.utils import process from virttest import utils_test from virttest import utils_time +from virttest import env_process from virttest import funcatexit from virttest import error_context @@ -134,13 +135,25 @@ def run(test, params, env): timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd") if timerdevice_host_load_cmd: error_context.context("Add some load on host", logging.info) - process.system(timerdevice_host_load_cmd, shell=True, - ignore_bg_processes=True) + host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"] + host_cpu_cnt = int(process.system_output(host_cpu_cnt_cmd, shell=True).strip()) + if params["os_type"] == "linux": + timerdevice_host_load_cmd = timerdevice_host_load_cmd % host_cpu_cnt + process.system(timerdevice_host_load_cmd, shell=True, + ignore_bg_processes=True) + else: + timerdevice_host_load_cmd = timerdevice_host_load_cmd % int(host_cpu_cnt/2) + stress_bg = utils_test.HostStress("stress", params, + stress_args=timerdevice_host_load_cmd) + stress_bg.load_stress_tool() host_load_stop_cmd = params.get("timerdevice_host_load_stop_cmd", "pkill -f 'do X=1'") funcatexit.register(env, params["type"], _system, host_load_stop_cmd) + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params.get("main_vm")) + vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -169,6 +182,15 @@ def run(test, params, env): if params["os_type"] == "linux": verify_timedrift(session, is_hardware=True) + 
repeat_nums = params.get_numeric("repeat_nums") + if repeat_nums: + sleep_time = params["sleep_time"] + for index in range(repeat_nums): + time.sleep(int(sleep_time)) + verify_timedrift(session) + if params["os_type"] == "linux": + verify_timedrift(session, is_hardware=True) + if params.get("timerdevice_reboot_test") == "yes": sleep_time = params.get("timerdevice_sleep_time") if sleep_time: diff --git a/qemu/tests/timerdevice_check_ntp_offset.py b/qemu/tests/timerdevice_check_ntp_offset.py new file mode 100644 index 0000000000000000000000000000000000000000..acc0257c2a425450e800862f8f03a0495c3ad328 --- /dev/null +++ b/qemu/tests/timerdevice_check_ntp_offset.py @@ -0,0 +1,90 @@ +import os +import time +import logging + +from avocado.utils import process +from virttest import utils_test +from virttest import data_dir +from virttest import env_process +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Check the offset of Meinberg NTP for windows guest. + + 1) sync the host time with ntp server + 2) boot a windows guest with network + 3) install diskspd tool and Meinberg NTP + 4) run diskspd benchmark in Administrator user + 5) play a video fullscreen + 6) periodically verify "offset" output of Meinberg NTP + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + + def clean_tmp_file(): + if not session.cmd_status("dir %s" % ntp_dst_path): + session.cmd("rd /s /q %s" % ntp_dst_path) + ntp_install_path = params["ntp_install_path"] + ntp_uninstall_cmd = params["ntp_uninstall_cmd"] + if not session.cmd_status("dir %s" % ntp_install_path): + session.cmd(ntp_uninstall_cmd) + diskspd_check_cmd = params["diskspd_check_cmd"] + diskspd_end_cmd = params["diskspd_end_cmd"] + if not session.cmd_status("dir %s" % (dst_path + diskspd_name)): + if not session.cmd_status(diskspd_check_cmd): + session.cmd(diskspd_end_cmd) + session.cmd("del %s" % (dst_path + diskspd_name)) + ntp_cmd = params["ntp_cmd"] + error_context.context("Sync host system time with ntpserver", logging.info) + process.system(ntp_cmd, shell=True) + + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params.get("main_vm")) + + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_login() + + error_context.context("Install diskspd tool on guest", logging.info) + diskspd_dir = params["diskspd_dir"] + diskspd_name = params["diskspd_name"] + dst_path = params["dst_path"] + diskspd_src_path = os.path.join(data_dir.get_deps_dir(diskspd_dir)) + vm.copy_files_to(diskspd_src_path, dst_path) + + error_context.context("Install Meinberg NTP on guest", logging.info) + ntp_dir = params["ntp_dir"] + ntp_name = params["ntp_name"] + ntp_unattend_file = params["ntp_unattend_file"] + ntp_dst_path = params["ntp_dst_path"] + install_ntp_cmd = params["install_ntp_cmd"] + vm.copy_files_to(data_dir.get_deps_dir(ntp_dir), dst_path) + session.cmd("cd %s" % ntp_dst_path) + session.cmd(install_ntp_cmd % (ntp_name, ntp_unattend_file)) + + error_context.context("Run diskspd on guest", logging.info) + diskspd_run_cmd = params["diskspd_run_cmd"] + session.cmd("cd %s" % dst_path) + session.cmd(diskspd_run_cmd) + + error_context.context("Play a video on guest", logging.info) + sub_test = params["sub_test"] + utils_test.run_virt_sub_test(test, params, env, sub_test) + + error_context.context("Check offset of ntp", logging.info) + check_offset_cmd = params["check_offset_cmd"] + sleep_time = 
params["sleep_time"] + try: + for _ in range(params.get_numeric("nums")): + time.sleep(int(sleep_time)) + ntp_offset = session.cmd_output(check_offset_cmd) + ntp_offset = float(ntp_offset.strip().split("\n")[-1].split()[-2]. + strip('-+')) + if ntp_offset > 100: + test.fail("The ntp offset %s is larger than 100ms" % ntp_offset) + finally: + clean_tmp_file() diff --git a/qemu/tests/timerdevice_tsc_enable.py b/qemu/tests/timerdevice_tsc_enable.py new file mode 100644 index 0000000000000000000000000000000000000000..3a68b508cf35cddb662304dc491d83485838b0d0 --- /dev/null +++ b/qemu/tests/timerdevice_tsc_enable.py @@ -0,0 +1,42 @@ +import logging + +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + 1) Boot a guest with "-cpu host,+invtsc" or "-cpu $cpu_model,+invtsc". + 2) Check current clocksource and available clocksource and nonstop_tsc flag + in guest. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + timeout = float(params.get("login_timeout", 240)) + session = vm.wait_for_login(timeout=timeout) + + error_context.context("Check current cloclsource", logging.info) + cur_clksrc_cmd = params["cur_clksrc_cmd"] + current_clksrc = session.cmd_output_safe(cur_clksrc_cmd) + avl_clksrc_cmd = params["avl_clksrc_cmd"] + avl_clksrc = session.cmd_output_safe(avl_clksrc_cmd) + check_tsc_flag_cmd = params["check_tsc_flag_cmd"] + tsc_flag = session.cmd_status(check_tsc_flag_cmd) + expect_cur_clk = params["expect_cur_clk"] + expect_avl_clk = params["expect_avl_clk"] + expect_tsc_flag = params["expect_tsc_flag"] + + if expect_cur_clk not in current_clksrc: + test.fail("Current clocksource is %s, the expected is %s." % + (current_clksrc, expect_cur_clk)) + if tsc_flag: + test.fail("Can not get expected flag: %s." % expect_tsc_flag) + + if expect_avl_clk not in avl_clksrc: + test.fail("Available clocksources are %s, the exected are %s." + % (avl_clksrc, expect_avl_clk)) diff --git a/qemu/tests/tpm_unattended_install.py b/qemu/tests/tpm_unattended_install.py new file mode 100644 index 0000000000000000000000000000000000000000..4b86558fb8d0b27e0ba7c353b1c9a6c5a386c92c --- /dev/null +++ b/qemu/tests/tpm_unattended_install.py @@ -0,0 +1,65 @@ +import logging +import re + +from virttest import env_process +from virttest import error_context +from virttest.tests import unattended_install + + +@error_context.context_aware +def run(test, params, env): + """ + Unattended install test with virtual TPM device: + 1) Starts a VM with an appropriated setup to start an unattended OS install. + 2) Wait until the install reports to the install watcher its end. + 3) Check TPM device info inside guest. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + def search_keywords(patterns, string, flags=re.M, split_string=';'): + logging.info(string) + for pattern in patterns.split(split_string): + if not re.search(r'%s' % pattern, string, flags): + test.fail('No Found pattern "%s" from "%s".' % (pattern, string)) + if re.search(r'error', string, re.M | re.A): + test.error('Found errors from "%s".' 
% string)
+
+    unattended_install.run(test, params, env)
+
+    vm = env.get_vm(params["main_vm"])
+    if vm:
+        vm.destroy()
+
+    ovmf_vars_secboot_fd = params.get('ovmf_vars_secboot_fd')
+    if ovmf_vars_secboot_fd:
+        params['ovmf_vars_filename'] = ovmf_vars_secboot_fd
+
+    params['start_vm'] = 'yes'
+    params['cdroms'] = params.get('default_cdrom', '')
+    params['force_create_image'] = 'no'
+    params['kernel'] = ''
+    params['initrd'] = ''
+    params['kernel_params'] = ''
+    params['boot_once'] = 'c'
+
+    env_process.preprocess_vm(test, params, env, params["main_vm"])
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login()
+
+    error_context.context("Check TPM info inside guest.", logging.info)
+    for name in params.get('check_cmd_names').split():
+        if name:
+            pattern = params.get('pattern_output_%s' % name)
+            cmd = params.get('cmd_%s' % name)
+            search_keywords(pattern, session.cmd(cmd))
+
+    cmd_check_secure_boot_enabled = params.get('cmd_check_secure_boot_enabled')
+    if cmd_check_secure_boot_enabled:
+        error_context.context("Check whether secure boot is enabled inside guest.",
+                              logging.info)
+        status, output = session.cmd_status_output(cmd_check_secure_boot_enabled)
+        if status:
+            test.fail('Secure boot is not enabled, output: %s' % output)
diff --git a/qemu/tests/tpm_verify_device.py b/qemu/tests/tpm_verify_device.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d94cbaf3889a50b20c0f6ff12504db61aa859d5
--- /dev/null
+++ b/qemu/tests/tpm_verify_device.py
@@ -0,0 +1,77 @@
+import logging
+import re
+
+from avocado.utils import process
+
+from virttest import error_context
+from virttest import env_process
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Verify the TPM device info inside guest and host.
+    Steps:
+    1. Boot guest with an emulated TPM device or a passthrough device.
+    2. Check and verify TPM/vTPM device info inside guest.
+    3. Check and verify TPM/vTPM device info in the OVMF log on host.
+
+    :param test: QEMU test object.
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+
+    def search_keywords(patterns, string, flags=re.M, split_string=';'):
+        logging.info(string)
+        for pattern in patterns.split(split_string):
+            if not re.search(r'%s' % pattern, string, flags):
+                test.fail('Pattern "%s" not found in "%s".' % (pattern, string))
+        if re.search(r'error', string, re.M | re.A):
+            test.error('Found errors from "%s".' % string)
+
+    cmd_get_tpm_ver = params.get('cmd_get_tpm_version')
+    cmd_check_tpm_dev = params.get('cmd_check_tpm_device')
+    if cmd_check_tpm_dev:
+        status, output = process.getstatusoutput(cmd_check_tpm_dev)
+        if status:
+            test.cancel('No TPM device found on host, output: %s' % output)
+    if cmd_get_tpm_ver:
+        actual_tpm_ver = process.system_output(cmd_get_tpm_ver,
+                                               shell=True).decode().strip()
+        logging.info('The TPM device version is %s.' % actual_tpm_ver)
+        required_tpm_ver = params.get('required_tmp_version')
+        if actual_tpm_ver != required_tpm_ver:
+            test.cancel('Cancelling test: TPM device version %s is required, '
+                        'actual version: %s' % (required_tpm_ver, actual_tpm_ver))
+
+    params["start_vm"] = "yes"
+    env_process.preprocess_vm(test, params, env, params["main_vm"])
+
+    for _ in range(params.get_numeric('repeat_times', 1)):
+        sessions = []
+        vms = env.get_all_vms()
+        for vm in vms:
+            vm.verify_alive()
+            sessions.append(vm.wait_for_login())
+        for vm, session in zip(vms, sessions):
+            error_context.context("%s: Check TPM info inside guest." 
% vm.name,
+                                  logging.info)
+            for name in params.get('check_cmd_names').split():
+                if name:
+                    pattern = params.get('pattern_output_%s' % name)
+                    cmd = params.get('cmd_%s' % name)
+                    search_keywords(pattern, session.cmd(cmd))
+
+            reboot_method = params.get("reboot_method")
+            if reboot_method:
+                error_context.context("Reboot guest '%s'." % vm.name, logging.info)
+                vm.reboot(session, reboot_method).close()
+                continue
+
+            error_context.context("Check TPM info on host.", logging.info)
+            cmd_check_log = params.get('cmd_check_log')
+            if cmd_check_log:
+                output = process.system_output(cmd_check_log).decode()
+                pattern = params.get('pattern_check_log')
+                search_keywords(pattern, output)
+            session.close()
diff --git a/qemu/tests/tpm_with_bitlocker.py b/qemu/tests/tpm_with_bitlocker.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4833bdf1e88b9dfb3796314ca09fcb0e9e9d628
--- /dev/null
+++ b/qemu/tests/tpm_with_bitlocker.py
@@ -0,0 +1,50 @@
+import logging
+import re
+
+from virttest import error_context
+from virttest import utils_misc
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Test virtual TPM device by BitLocker inside windows guest.
+    Steps:
+    1. Boot guest with an emulated TPM device.
+    2. Install BitLocker inside guest.
+    3. Prepare the hard drive for BitLocker Drive Encryption.
+    4. Encrypt the volume and turn BitLocker protection on.
+    5. Wait until 'Percentage Encrypted' reaches 100%.
+
+    :param test: QEMU test object.
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login()
+
+    cmd_install_bitlocker = params.get('cmd_install_bitlocker')
+    if cmd_install_bitlocker:
+        error_context.context("Install BitLocker inside guest", logging.info)
+        session.cmd(cmd_install_bitlocker, 360)
+        session = vm.reboot(session, timeout=480)
+
+    error_context.context("Prepare the hard drive for BitLocker Drive "
+                          "Encryption inside guest", logging.info)
+    cmd_bdehdcfg = session.cmd_output(params.get('cmd_bdehdcfg'))
+    if re.search(r'error', cmd_bdehdcfg, re.M | re.A):
+        test.fail('Found error message.')
+
+    error_context.context("Encrypt the volume and turn BitLocker "
+                          "protection on inside guest", logging.info)
+    session.cmd(params.get('cmd_manage_bde_on'))
+    session = vm.reboot(session, timeout=480)
+
+    error_context.context("Wait until Percentage Encrypted finished",
+                          logging.info)
+    finished_keywords = params.get('finished_keywords')
+    cmd_manage_bde_status = params.get('cmd_manage_bde_status')
+    if not utils_misc.wait_for(lambda: finished_keywords in session.cmd(
+            cmd_manage_bde_status, 300), step=5, timeout=600):
+        test.fail('Failed to encrypt the volume.')
diff --git a/qemu/tests/tsc_drift.py b/qemu/tests/tsc_drift.py
index 1fe9f14190a793e42aedd4065c63996cab0796d4..74d5483e8e6f3bbbf88d39ffa61e58634b286399 100644
--- a/qemu/tests/tsc_drift.py
+++ b/qemu/tests/tsc_drift.py
@@ -58,7 +58,7 @@ def run(test, params, env):
     if not os.path.exists(tsc_cmd_guest):
         process.run("gcc %s" % tsc_freq_path)
 
-    ncpu = cpu.online_cpus_count()
+    ncpu = cpu.online_count()
 
     logging.info("Interval is %s" % interval)
     logging.info("Determine the TSC frequency in the host")
diff --git a/qemu/tests/uefi_check_log_info.py b/qemu/tests/uefi_check_log_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..35ba26350b3038b7c39d4bfcfead8f69d4e5b88e
--- /dev/null
+++ b/qemu/tests/uefi_check_log_info.py
@@ -0,0 +1,67 @@
+import re
+import os
+import logging
+
+from avocado.utils import process
+
+from virttest import utils_misc
+from virttest import env_process
+from virttest import error_context
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Verify UEFI config setting in the GUI screen:
+    1) Boot up a guest.
+    2) If boot_splash_time is not None, check splash-time in the log output
+    3) If check_info_pattern is not None, check the info in the log output
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+    """
+
+    def info_check(info):
+        """
+        Check log info
+        """
+        logs = vm.logsessions['seabios'].get_output()
+        result = re.search(info, logs, re.S)
+        return result
+
+    def create_cdroms(cdrom_test):
+        """
+        Create 'test' cdrom with one file on it
+        """
+        logging.info("creating test cdrom")
+        process.run("dd if=/dev/urandom of=test bs=10M count=1")
+        process.run("mkisofs -o %s test" % cdrom_test)
+        process.run("rm -f test")
+
+    boot_splash_time = params.get("boot_splash_time")
+    check_info_pattern = params.get("check_info_pattern")
+    timeout = int(params.get("check_timeout", 360))
+    cdrom_test = params.get("cdrom_test")
+    if cdrom_test:
+        create_cdroms(cdrom_test)
+    params["start_vm"] = "yes"
+    env_process.process(test, params, env,
+                        env_process.preprocess_image,
+                        env_process.preprocess_vm)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+
+    try:
+        if check_info_pattern:
+            expect_result = check_info_pattern
+        elif boot_splash_time:
+            splash_time_pattern = params.get("splash_time_pattern")
+            expect_result = (splash_time_pattern %
+                             (int(boot_splash_time) // 1000))
+        if not utils_misc.wait_for(lambda: info_check(expect_result), timeout):
+            test.fail("Did not get the expected result from the bios log: %s"
+                      % expect_result)
+    finally:
+        if params.get("cdroms") == "test":
+            logging.info("cleaning up temp cdrom images")
+            os.remove(cdrom_test)
diff --git a/qemu/tests/uefi_check_resolution.py b/qemu/tests/uefi_check_resolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..a01d4dffa5b11ec0da33c349d42748f5429543f9
--- /dev/null
+++ b/qemu/tests/uefi_check_resolution.py
@@ -0,0 +1,74 @@
+import re
+import random
+
+from virttest import utils_misc
+from virttest import error_context
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Verify UEFI config setting in the GUI screen:
+    1) Boot up a guest.
+    2) Set the default resolution
+    3) Change the resolution to $re1
+    4) Save it by hitting 'F10' + 'Y' or 'Commit Changes and Exit'
+    5) Exit the setup interface
+    6) Check if the resolution has been changed to $re1
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+ """ + + change_prefered = ["640 x 480", "800 x 480", "800 x 600", "832 x 624", + "960 x 640", "1024 x 600", "1024 x 768", "1152 x 864", + "1152 x 870", "1280 x 720", "1280 x 760", "1280 x 768", + "1280 x 800", "1280 x 960", "1280 x 1024", "1360 x 768", + "1366 x 768", "1400 x 1050", "1440 x 900", "1600 x 900", + "1600 x 1200", "1680 x 1050", "1920 x 1080", + "1920 x 1200", "1920 x 1440", "2000 x 2000", + "2048 x 1536", "2048 x 2048", "2560 x 1440", + "2560 x 1600"] + + def boot_check(info): + """ + boot info check + """ + logs = vm.logsessions['seabios'].get_output() + result = re.search(info, logs, re.S) + return result + + def choose_resolution(): + """ + choose resolution randomly + """ + n = random.randint(0, 29) + change_resolution_key = ["kp_enter"] + ["down"] * n + ["kp_enter"] + resolution = change_prefered[n] + check_info = "GraphicsConsole video resolution " + resolution + return change_resolution_key, check_info, resolution + + timeout = int(params.get("timeout", 360)) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + + boot_menu_hint = params["boot_menu_hint"] + enter_change_preferred = params["enter_change_preferred"].split(";") + default_resolution_key = params["default_resolution_key"].split(";") + save_change_key = params["save_change"].split(";") + esc_boot_menu_key = params["esc_boot_menu_key"].split(";") + change_resolution_key, check_info, resolution = choose_resolution() + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), + timeout, 1): + test.fail("Could not get boot menu message") + key = [] + key += enter_change_preferred + key += default_resolution_key + key += change_resolution_key + key += save_change_key + key += esc_boot_menu_key + list(map(vm.send_key, key)) + vm.reboot() + + if not boot_check(check_info): + test.fail("Change to resolution {'%s'} fail" % resolution) diff --git a/qemu/tests/uefi_check_secure_mor.py b/qemu/tests/uefi_check_secure_mor.py new file mode 100644 index 0000000000000000000000000000000000000000..f853a606204a194efc0431fb4410becc2d23c608 --- /dev/null +++ b/qemu/tests/uefi_check_secure_mor.py @@ -0,0 +1,78 @@ +import logging + +from virttest import env_process +from virttest import data_dir + + +def run(test, params, env): + """ + Please make sure the guest installed with signed driver + Verify Secure MOR control feature using Device Guard tool in Windows guest: + + 1) Boot up a guest. + 2) Check if Secure Boot is enable. + 3) Download Device Guard and copy to guest. + 4) Enable Device Guard and check the output. + 5) Reboot guest. + 5) Run Device Guard and check the output. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + + def execute_powershell_command(command, timeout=60): + status, output = session.cmd_status_output(command, timeout) + if status != 0: + test.fail("execute command fail: %s" % output) + return output + + login_timeout = int(params.get("login_timeout", 360)) + params["ovmf_vars_filename"] = 'OVMF_VARS.secboot.fd' + params["cpu_model_flags"] = ',hv-passthrough' + params["start_vm"] = 'yes' + env_process.preprocess_vm(test, params, env, params['main_vm']) + vm = env.get_vm(params["main_vm"]) + session = vm.wait_for_serial_login(timeout=login_timeout) + + check_cmd = params['check_secure_boot_enabled_cmd'] + dgreadiness_path_command = params['dgreadiness_path_command'] + executionPolicy_command = params['executionPolicy_command'] + enable_command = params['enable_command'] + ready_command = params['ready_command'] + try: + output = session.cmd_output(check_cmd) + if 'False' in output: + test.fail('Secure boot is not enabled. The actual output is %s' + % output) + + # Copy Device Guard to guest + dgreadiness_host_path = data_dir.get_deps_dir("dgreadiness") + dst_path = params["dst_path"] + logging.info("Copy Device Guuard to guest.") + s, o = session.cmd_status_output("mkdir %s" % dst_path) + if s and "already exists" not in o: + test.error("Could not create Device Guard directory in " + "VM '%s', detail: '%s'" % (vm.name, o)) + vm.copy_files_to(dgreadiness_host_path, dst_path) + + execute_powershell_command(dgreadiness_path_command) + execute_powershell_command(executionPolicy_command) + output = execute_powershell_command(enable_command) + check_enable_info = params['check_enable_info'] + if check_enable_info not in output: + test.fail("Device Guard enable failed. The actual output is %s" + % output) + + # Reboot guest and run Device Guard + session = vm.reboot(session) + execute_powershell_command(dgreadiness_path_command) + execute_powershell_command(executionPolicy_command) + output = execute_powershell_command(ready_command) + check_ready_info = params['check_ready_info'] + if check_ready_info not in output: + test.fail("Device Guard running failed. The actual output is %s" + % output) + + finally: + session.close() diff --git a/qemu/tests/unattended_install_reboot_driftfix.py b/qemu/tests/unattended_install_reboot_driftfix.py new file mode 100644 index 0000000000000000000000000000000000000000..ba48b83cb0d91ad54de3160ed1cd5fd5b3f20f90 --- /dev/null +++ b/qemu/tests/unattended_install_reboot_driftfix.py @@ -0,0 +1,29 @@ +from virttest import env_process + +from virttest.tests import unattended_install + + +def run(test, params, env): + """ + Unattended install test: + 1) Starts a VM with an appropriated setup to start an unattended OS install. + 2) Wait until the install reports to the install watcher its end. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + + params["cpu_model_flags"] = "" + unattended_install.run(test, params, env) + vm = env.get_vm(params.get("main_vm")) + vm.destroy() + + params["cdroms"] = "winutils" + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params.get("main_vm")) + vm = env.get_vm(params.get("main_vm")) + session = vm.wait_for_login() + + session = vm.reboot(session) + session.close() diff --git a/qemu/tests/usb_host.py b/qemu/tests/usb_host.py index d6849e8ee5fb9c0a23b0746f6099de4e67af61e8..66b922190e36eb2fd13a83e1fcf5f5891a9b5304 100644 --- a/qemu/tests/usb_host.py +++ b/qemu/tests/usb_host.py @@ -1,7 +1,17 @@ +import re +import os +import time import logging from avocado.utils import process + from virttest import error_context +from virttest import utils_misc +from virttest.qemu_devices import qdevices +from virttest.qemu_monitor import QMPCmdError +from virttest.utils_test import BackgroundTest + +from provider.storage_benchmark import generate_instance @error_context.context_aware @@ -13,14 +23,27 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_usb_host_dev(): + device_list = [] + for device in vm.devices: + if isinstance(device, qdevices.QDevice): + if device.get_param("driver") == "usb-host": + device_list.append(device) + return device_list + + def get_vendorid_productid(bus, addr): + out = process.getoutput("lsusb -v -s %s:%s" % (bus, addr)) + res = re.search(r"idVendor\s+0x(\w+).*idProduct\s+0x(\w+)", out, re.S) + return (res.group(1), res.group(2)) + @error_context.context_aware - def usb_dev_hotplug(): - error_context.context("Plugin usb device", logging.info) + def usb_dev_hotplug(dev): + error_context.context("Hotplug usb-host device", logging.info) session.cmd_status("dmesg -c") - vm.monitor.cmd(monitor_add) + vm.devices.simple_hotplug(dev, vm.monitor) session.cmd_status("sleep 2") session.cmd_status("udevadm settle") - messages_add = session.cmd("dmesg -c") + messages_add = session.cmd("dmesg") for line in messages_add.splitlines(): logging.debug("[dmesg add] %s" % line) if messages_add.find(match_add) == -1: @@ -28,14 +51,14 @@ def run(test, params, env): @error_context.context_aware def usb_dev_verify(): - error_context.context("Check usb device %s in guest" % device, - logging.info) + error_context.context("Check usb device in guest", logging.info) session.cmd(lsusb_cmd) @error_context.context_aware - def usb_dev_unplug(): - error_context.context("Unplug usb device", logging.info) - vm.monitor.cmd(monitor_del) + def usb_dev_unplug(dev): + error_context.context("Unplug usb-host device", logging.info) + session.cmd("dmesg -c") + vm.devices.simple_unplug(dev, vm.monitor) session.cmd_status("sleep 2") messages_del = session.cmd("dmesg -c") for line in messages_del.splitlines(): @@ -43,72 +66,127 @@ def run(test, params, env): if messages_del.find(match_del) == -1: test.fail("kernel didn't detect unplug") + def _get_usb_mount_point(): + """ Get passthrough usb stick mount point """ + dmesg_cmd = "dmesg | grep 'Attached SCSI removable disk'" + s, o = session.cmd_status_output(dmesg_cmd) + if s: + test.error("Fail to get passthrough usb stick in guest.") + dev = re.findall(r'\[(sd\w+)\]', o)[0] + mounts_cmd = "cat /proc/mounts | grep /dev/%s" % dev + s, o = session.cmd_status_output(mounts_cmd) + if s: + s, o = session.cmd_status_output('mount /dev/%s /mnt' % dev) + if s: + test.error("Fail to mount /dev/%s, output: %s" % (s, o)) + mp = "/mnt" + else: + mp = 
re.findall(r'/dev/%s\d*\s+(\S+)\s+' % dev, o)[0] + return mp + + def _usb_stick_io(mount_point, bg=False): + """ + Do I/O operations on passthrough usb stick + """ + error_context.context("Read and write on usb stick ", logging.info) + testfile = os.path.join(mount_point, 'testfile') + if bg: + iozone_cmd = params.get("iozone_cmd_bg", " -az -I -g 1g -f %s") + iozone_thread = BackgroundTest(iozone_test.run, + (iozone_cmd % testfile,)) + iozone_thread.start() + if not utils_misc.wait_for(iozone_thread.is_alive, timeout=10): + test.fail("Fail to start the iozone background test.") + time.sleep(10) + else: + iozone_cmd = params.get("iozone_cmd", + " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") + iozone_test.run(iozone_cmd % testfile) + + usb_params = {} + if params.get("usb_negative_test", "no") != "no": # Negative test. vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() + usb_reply_msg_list = params.get("usb_reply_msg").split(";") usb_host_device_list = params["usb_host_device_list"].split(",") for dev in usb_host_device_list: vid, pid = dev.split(":") - monitor_add = "device_add usb-host,bus=usbtest.0,id=usbhostdev" - monitor_add += ",vendorid=%s" % vid - monitor_add += ",productid=%s" % pid - reply = vm.monitor.cmd(monitor_add) - usb_reply_msg_list = params.get("usb_reply_msg").split(";") - negative_flag = False - for msg in usb_reply_msg_list: - if msg in reply: - negative_flag = True - break - if not negative_flag: - test.fail("Could not get expected warning" - " msg in negative test, monitor" - " returns: '%s'" % reply) - vm.reboot() + usb_params["vendorid"] = vid + usb_params["productid"] = pid + dev = qdevices.QDevice("usb-host", usb_params) + try: + vm.devices.simple_hotplug(dev, vm.monitor) + except QMPCmdError as detail: + logging.warn(detail) + for msg in usb_reply_msg_list: + if msg in detail.data['desc']: + break + else: + test.fail("Could not get expected warning" + " msg in negative test, monitor" + " returns: '%s'" % detail) + else: + test.fail("Hotplug operation in negative test" + " should not succeed.") return - device = params["usb_host_device"] - (vendorid, productid) = device.split(":") + usb_hostdev = params["usb_devices"].split()[-1] + usb_options = params.get("options") + if usb_options == "with_vendorid_productid": + vendorid = params["usbdev_option_vendorid_%s" % usb_hostdev] + productid = params["usbdev_option_productid_%s" % usb_hostdev] + usb_params["vendorid"] = "0x%s" % vendorid + usb_params["productid"] = "0x%s" % productid + elif usb_options == "with_hostbus_hostaddr": + hostbus = params["usbdev_option_hostbus_%s" % usb_hostdev] + hostaddr = params["usbdev_option_hostaddr_%s" % usb_hostdev] + usb_params["hostbus"] = hostbus + usb_params["hostaddr"] = hostaddr + (vendorid, productid) = get_vendorid_productid(hostbus, hostaddr) - # compose strings - lsusb_cmd = "lsusb -v -d %s" % device - monitor_add = "device_add usb-host,bus=usbtest.0,id=usbhostdev" - monitor_add += ",vendorid=0x%s" % vendorid - monitor_add += ",productid=0x%s" % productid - monitor_del = "device_del usbhostdev" + lsusb_cmd = "lsusb -v -d %s:%s" % (vendorid, productid) match_add = "New USB device found, " match_add += "idVendor=%s, idProduct=%s" % (vendorid, productid) match_del = "USB disconnect" - - error_context.context("Check usb device %s on host" % device, logging.info) - try: - process.system(lsusb_cmd, shell=True) - except: - test.cancel("Device %s not present on host" % device) + usb_stick = "Mass Storage" in process.getoutput(lsusb_cmd) 
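# [Editor's note] A hedged aside, not part of the patch: the hotplug path
# above goes through vm.devices.simple_hotplug(), which ultimately issues
# QMP device_add/device_del. The standalone sketch below shows the raw QMP
# exchange; the socket path, device id and vendor/product ids are
# illustrative assumptions, and QMP events that may interleave with the
# replies are ignored for brevity.
import json
import socket


def qmp(sock, reader, command, arguments=None):
    """Send one QMP command and return the parsed reply line."""
    request = {"execute": command}
    if arguments:
        request["arguments"] = arguments
    sock.sendall(json.dumps(request).encode() + b"\n")
    return json.loads(reader.readline())


with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
    # Assumes QEMU was started with: -qmp unix:/tmp/qmp.sock,server,nowait
    sock.connect("/tmp/qmp.sock")
    reader = sock.makefile()
    reader.readline()                      # consume the QMP greeting banner
    qmp(sock, reader, "qmp_capabilities")  # leave capabilities negotiation mode
    qmp(sock, reader, "device_add", {"driver": "usb-host",
                                     "id": "usbhostdev0",   # example id
                                     "vendorid": 0x0951,    # example VID
                                     "productid": 0x1666})  # example PID
    qmp(sock, reader, "device_del", {"id": "usbhostdev0"})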
error_context.context("Log into guest", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() - repeat_times = int(params.get("usb_repeat_times", "1")) - for i in range(repeat_times): - if params.get("usb_check_isobufs", "no") == "no": - error_context.context("Hotplug (iteration %i)" % (i + 1), - logging.info) - else: - # The value of isobufs could only be in '4, 8, 16' - isobufs = (2 << (i % 3 + 1)) - monitor_add = "device_add usb-host,bus=usbtest.0,id=usbhostdev" - monitor_add += ",vendorid=0x%s" % vendorid - monitor_add += ",productid=0x%s" % productid - monitor_add += ",isobufs=%d" % isobufs - error_context.context("Hotplug (iteration %i), with 'isobufs'" - " option set to %d" % ((i + 1), isobufs), - logging.info) - usb_dev_hotplug() + try: usb_dev_verify() - usb_dev_unplug() + if usb_stick: + iozone_test = None + mount_point = _get_usb_mount_point() + iozone_test = generate_instance(params, vm, 'iozone') + _usb_stick_io(mount_point) + usb_devs = get_usb_host_dev() + for dev in usb_devs: + usb_dev_unplug(dev) - session.close() + repeat_times = int(params.get("usb_repeat_times", "1")) + for i in range(repeat_times): + msg = "Hotplug (iteration %d)" % (i+1) + usb_params["id"] = "usbhostdev%s" % i + if params.get("usb_check_isobufs", "no") == "yes": + # The value of isobufs could only be in '4, 8, 16' + isobufs = (2 << (i % 3 + 1)) + usb_params["isobufs"] = isobufs + msg += ", with 'isobufs' option set to %d." % isobufs + error_context.context(msg, logging.info) + usb_dev = qdevices.QDevice("usb-host", usb_params) + usb_dev_hotplug(usb_dev) + usb_dev_verify() + if usb_stick: + mount_point = _get_usb_mount_point() + _usb_stick_io(mount_point, bg=True) + usb_dev_unplug(usb_dev) + finally: + if usb_stick and iozone_test: + iozone_test.clean() + session.close() diff --git a/qemu/tests/usb_redir.py b/qemu/tests/usb_redir.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd95053d3eb134ed59a99508743b295f3ae00ab --- /dev/null +++ b/qemu/tests/usb_redir.py @@ -0,0 +1,273 @@ +import logging +import re +import os + +from virttest import error_context +from virttest import utils_misc +from virttest import env_process +from virttest.qemu_devices import qdevices +from virttest.utils_params import Params + +from provider.storage_benchmark import generate_instance + +from avocado.utils import process + + +@error_context.context_aware +def run(test, params, env): + """ + Test usb redirection + + 1) Check host configurations + 2) Preprocess VM + 3) Start USB redirection via spice (optional) + 4) Check the boot menu list (optional) + 5) Check the redirected USB device in guest + + :param test: QEMU test object + :param params: Dictionary with test parameters + :param env: Dictionary with test environment. 
+ """ + def _host_config_check(): + status = True + err_msg = '' + if option == "with_negative_config": + out = process.getoutput("dmesg") + pattern = r"usb (\d-\d(?:.\d)?):.*idVendor=%s, idProduct=%s" + pattern = pattern % (vendorid, productid) + obj = re.search(pattern, out, re.ASCII) + if not obj: + status = False + err_msg = "Fail to get the USB device info in host dmesg" + return (status, err_msg) + error_context.context("Make USB device unconfigured", logging.info) + unconfig_value = params["usbredir_unconfigured_value"] + cmd = "echo %s > /sys/bus/usb/devices/%s/bConfigurationValue" + cmd = cmd % (unconfig_value, obj.group(1)) + logging.info(cmd) + s, o = process.getstatusoutput(cmd) + if s: + status = False + err_msg = "Fail to unconfig the USB device, output: %s" % o + return (status, err_msg) + + if backend == 'spicevmc': + gui_group = "Server with GUI" + out = process.getoutput('yum group list --installed', + allow_output_check='stdout', shell=True) + obj = re.search(r"(Installed Environment Groups:.*?)^\S", + out, re.S | re.M) + if not obj or gui_group not in obj.group(1): + gui_groupinstall_cmd = "yum groupinstall -y '%s'" % gui_group + s, o = process.getstatusoutput(gui_groupinstall_cmd, shell=True) + if s: + status = False + err_msg = "Fail to install '%s' on host, " % gui_group + err_msg += "output: %s" % o + return (status, err_msg) + virt_viewer_cmd = "rpm -q virt-viewer || yum install -y virt-viewer" + s, o = process.getstatusoutput(virt_viewer_cmd, shell=True) + if s: + status = False + err_msg = "Fail to install 'virt-viewer' on host, " + err_msg += "output: %s" % o + return (status, err_msg) + return (status, err_msg) + + def _usbredir_preprocess(): + def _generate_usb_redir_cmdline(): + extra_params = '' + _backend = 'socket' if 'socket' in backend else backend + chardev_id = usbredir_params.get("chardev_id", + "chardev_%s" % usbredirdev_name) + chardev_params = Params({'backend': _backend, 'id': chardev_id}) + if backend == 'spicevmc': + chardev_params['debug'] = usbredir_params.get('chardev_debug') + chardev_params['name'] = usbredir_params.get('chardev_name') + chardev = qdevices.CharDevice(chardev_params, chardev_id) + usbredir_dev = qdevices.QDevice('usb-redir', + aobject=usbredirdev_name) + usbredir_filter = usbredir_params.get("usbdev_option_filter") + usbredir_bootindex = usbredir_params.get("usbdev_option_bootindex") + usbredir_bus = usbredir_params.get("usb_bus") + usbredir_dev.set_param('id', 'usb-%s' % usbredirdev_name) + usbredir_dev.set_param('chardev', chardev_id) + usbredir_dev.set_param('filter', usbredir_filter) + usbredir_dev.set_param('bootindex', usbredir_bootindex) + usbredir_dev.set_param('bus', usbredir_bus) + extra_params += ' '.join([chardev.cmdline(), + usbredir_dev.cmdline()]) + return extra_params + extra_params = _generate_usb_redir_cmdline() + params["extra_params"] = extra_params + if backend == 'spicevmc': + params["paused_after_start_vm"] = "yes" + del params["spice_password"] + del params["spice_addr"] + del params["spice_image_compression"] + del params["spice_zlib_glz_wan_compression"] + del params["spice_streaming_video"] + del params["spice_agent_mouse"] + del params["spice_playback_compression"] + del params["spice_ipv4"] + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + + def _start_spice_redirection(): + def _rv_connection_check(): + rv_pid = process.getoutput("pidof %s" % rv_binary) + spice_port = vm.get_spice_var('spice_port') + cmd = 'netstat -ptn | grep 
"^tcp.*127.0.0.1:%s.*ESTABLISHED %s.*"' + cmd = cmd % (spice_port, rv_pid) + s, o = process.getstatusoutput(cmd) + if s: + return False + logging.info("netstat output:\n%s", o) + return True + status = True + err_msg = '' + rv_binary_path = utils_misc.get_binary(rv_binary, params) + spice_port = vm.get_spice_var('spice_port') + rv_args = rv_binary_path + " spice://localhost:%s " % spice_port + rv_args += "--spice-usbredir-redirect-on-connect=" + rv_args += "'-1,0x%s,0x%s,-1,1'" % (vendorid, productid) + rv_args += " > /dev/null 2>&1" + rv_thread = utils_misc.InterruptedThread(os.system, (rv_args,)) + rv_thread.start() + if not utils_misc.wait_for(_rv_connection_check, timeout, 60): + status = False + err_msg = "Fail to establish %s connection" % rv_binary + return (status, err_msg) + + def boot_check(info): + """ + boot info check + """ + return re.search(info, vm.serial_console.get_stripped_output()) + + def _usb_dev_verify(): + error_context.context("Check USB device in guest", logging.info) + if session.cmd_status(lsusb_cmd): + return False + return True + + def _kill_rv_proc(): + s, o = process.getstatusoutput("pidof %s" % rv_binary) + if not s: + process.getoutput("killall %s" % rv_binary) + + def _get_usb_mount_point(): + """ Get redirected USB stick mount point """ + dmesg_cmd = "dmesg | grep 'Attached SCSI removable disk'" + s, o = session.cmd_status_output(dmesg_cmd) + if s: + test.error("Fail to get redirected USB stick in guest.") + dev = re.findall(r'\[(sd\w+)\]', o)[0] + mounts_cmd = "cat /proc/mounts | grep /dev/%s" % dev + s, o = session.cmd_status_output(mounts_cmd) + if s: + s, o = session.cmd_status_output('mount /dev/%s /mnt' % dev) + if s: + test.error("Fail to mount /dev/%s, output: %s" % (dev, o)) + mp = "/mnt" + else: + mp = re.findall(r'/dev/%s\d*\s+(\S+)\s+' % dev, o)[0] + return mp + + def _usb_stick_io(mount_point): + """ + Do I/O operations on passthrough USB stick + """ + error_context.context("Read and write on USB stick ", logging.info) + testfile = os.path.join(mount_point, 'testfile') + iozone_cmd = params.get("iozone_cmd", + " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") + iozone_test.run(iozone_cmd % testfile) + + usbredirdev_name = params["usbredirdev_name"] + usbredir_params = params.object_params(usbredirdev_name) + backend = usbredir_params.get('chardev_backend', 'spicevmc') + if backend not in ('spicevmc', 'tcp_socket'): + test.error("Unsupported char device backend type: %s" % backend) + + option = params.get("option") + vendorid = params["usbredir_vendorid"] + productid = params["usbredir_productid"] + timeout = params.get("wait_timeout", 600) + lsusb_cmd = "lsusb -v -d %s:%s" % (vendorid, productid) + usb_stick = "Mass Storage" in process.getoutput(lsusb_cmd) + rv_binary = params.get('rv_binary', 'remote-viewer') + + error_context.context("Check host configurations", logging.info) + s, o = _host_config_check() + if not s: + test.error(o) + + if backend == 'spicevmc': + error_context.context("Preprocess VM", logging.info) + _usbredir_preprocess() + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + error_context.context("Start USB redirection via spice", logging.info) + s, o = _start_spice_redirection() + if not s: + test.error(o) + vm.resume() + + if option == "with_bootindex": + error_context.context("Check 'bootindex' option", logging.info) + boot_menu_hint = params["boot_menu_hint"] + boot_menu_key = params["boot_menu_key"] + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), + timeout, 1): + test.fail("Could not get boot menu 
message") + + # Send boot menu key in monitor. + vm.send_key(boot_menu_key) + + output = vm.serial_console.get_stripped_output() + boot_list = re.findall(r"^\d+\. (.*)\s", output, re.M) + if not boot_list: + test.fail("Could not get boot entries list") + logging.info("Got boot menu entries: '%s'", boot_list) + + bootindex = int(params["usbdev_option_bootindex_%s" % usbredirdev_name]) + if "USB" not in boot_list[bootindex]: + test.fail("'bootindex' option of usb-redir doesn't take effect") + + if usb_stick: + error_context.context("Boot from redirected USB stick", + logging.info) + boot_entry_info = params["boot_entry_info"] + vm.send_key(str(bootindex+1)) + if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), + timeout, 1): + test.fail("Could not boot from redirected USB stick") + return + + error_context.context("Login to guest", logging.info) + session = vm.wait_for_login() + + if params.get("policy") == "deny": + if _usb_dev_verify(): + error_msg = "Redirected USB device can be found in guest" + error_msg += " while policy is deny" + test.fail(error_msg) + if backend == 'spicevmc': + _kill_rv_proc() + return + + if not _usb_dev_verify(): + test.fail("Can not find the redirected USB device in guest") + + if usb_stick: + iozone_test = None + try: + mount_point = _get_usb_mount_point() + iozone_test = generate_instance(params, vm, 'iozone') + _usb_stick_io(mount_point) + finally: + if iozone_test: + iozone_test.clean() + + session.close() diff --git a/qemu/tests/usb_storage.py b/qemu/tests/usb_storage.py index 6cb79a2c963a38df60f686e11a587739c3d15e43..37c2f33b1c5344e91f7ba009ed4b5c7ee21c2063 100644 --- a/qemu/tests/usb_storage.py +++ b/qemu/tests/usb_storage.py @@ -14,12 +14,14 @@ def run(test, params, env): Test usb storage devices in the guest. 
1) Create a image file by qemu-img
-    2) Boot up a guest add this image as a usb device
-    3) Check usb device information via monitor
-    4) Check usb information by executing guest command
-    5) Check usb serial option (optional)
-    6) Check usb removable option (optional)
-    7) Check usb min_io_size/opt_io_size option (optional)
+    2) Boot up a guest
+    3) Hotplug a usb storage (optional)
+    4) Check usb storage information via monitor
+    5) Check usb information by executing guest command
+    6) Check usb serial option (optional)
+    7) Check usb removable option (optional)
+    8) Check usb min_io_size/opt_io_size option (optional)
+    9) Hotunplug the usb storage (optional)
 
     :param test: QEMU test object
     :param params: Dictionary with the test parameters
@@ -193,56 +195,82 @@ def run(test, params, env):
     vm.verify_alive()
     login_timeout = int(params.get("login_timeout", 360))
 
-    error_context.context("Check usb device information in monitor",
-                          logging.info)
-    output = str(vm.monitor.info("usb"))
-    if "Product QEMU USB MSD" not in output:
-        logging.debug(output)
-        test.fail("Could not find mass storage device")
-
-    error_context.context("Check usb device information in guest",
-                          logging.info)
-    session = _login()
-    output = session.cmd(params["chk_usb_info_cmd"])
-    # No bus specified, default using "usb.0" for "usb-storage"
-    for i in params["chk_usb_info_keyword"].split(","):
-        _verify_string(i, output, [i])
-    session.close()
-    _do_io_test_guest()
-
-    # this part is linux only
-    if params.get("check_serial_option") == "yes":
-        error_context.context("Check usb serial option", logging.info)
-        serial = str(uuid.uuid4())
-        regex_str = r'usb-storage.*?serial = "(.*?)"\s'
-        _check_serial_option(serial, regex_str, serial)
-
-        logging.info("Check this option with some illegal string")
-        logging.info("Set usb serial to a empty string")
-        # An empty string, ""
-        serial = "EMPTY_STRING"
-        regex_str = r'usb-storage.*?serial = (.*?)\s'
-        _check_serial_option(serial, regex_str, '""')
-
-        logging.info("Leave usb serial option blank")
-        serial = "NO_EQUAL_STRING"
-        regex_str = r'usb-storage.*?serial = (.*?)\s'
-        _check_serial_option(serial, regex_str, '"on"')
-
-    if params.get("check_removable_option") == "yes":
-        error_context.context("Check usb removable option", logging.info)
-        removable = "on"
-        expect_str = "Attached SCSI removable disk"
-        _check_removable_option(removable, expect_str)
-
-        removable = "off"
-        expect_str = "Attached SCSI disk"
-        _check_removable_option(removable, expect_str)
-
-    if params.get("check_io_size_option") == "yes":
-        error_context.context("Check usb min/opt io_size option", logging.info)
-        _check_io_size_option("0", "0")
-        # Guest can't recognize correct value which we set now,
-        # So comment these test temporary.
-        # _check_io_size_option("1024", "1024")
-        # _check_io_size_option("4096", "4096")
+    hotplug_unplug = (params["with_hotplug_unplug"] == "yes")
+    repeat_times = int(params.get("usb_repeat_times", "1"))
+    for rt in range(1, repeat_times+1):
+        disk_hotplugged = []
+        if hotplug_unplug:
+            error_context.context("Hotplug iteration %s." % rt, logging.info)
+            image_name = params.objects("images")[-1]
+            image_params = params.object_params(image_name)
+            devices = vm.devices.images_define_by_params(image_name,
                                                         image_params,
                                                         'disk', None,
                                                         False, None)
+            for dev in devices:
+                ret = vm.devices.simple_hotplug(dev, vm.monitor)
+                if ret[1] is False:
+                    test.fail("Failed to hotplug device '%s'. 
Output:\n%s"
+                              % (dev, ret[0]))
+            disk_hotplugged.append(devices[-1])
+
+        error_context.context("Check usb device information in monitor",
+                              logging.info)
+        output = str(vm.monitor.info("usb"))
+        if "Product QEMU USB MSD" not in output:
+            logging.debug(output)
+            test.fail("Could not find mass storage device")
+
+        error_context.context("Check usb device information in guest",
+                              logging.info)
+        session = _login()
+        output = session.cmd(params["chk_usb_info_cmd"])
+        # No bus specified, default using "usb.0" for "usb-storage"
+        for i in params["chk_usb_info_keyword"].split(","):
+            _verify_string(i, output, [i])
+        session.close()
+        _do_io_test_guest()
+
+        # this part is linux only
+        if params.get("check_serial_option") == "yes":
+            error_context.context("Check usb serial option", logging.info)
+            serial = uuid.uuid4().hex
+            regex_str = r'usb-storage.*?serial = "(.*?)"\s'
+            _check_serial_option(serial, regex_str, serial)
+
+            logging.info("Check this option with some illegal string")
+            logging.info("Set usb serial to an empty string")
+            # An empty string, ""
+            serial = "EMPTY_STRING"
+            regex_str = r'usb-storage.*?serial = (.*?)\s'
+            _check_serial_option(serial, regex_str, '""')
+
+            logging.info("Leave usb serial option blank")
+            serial = "NO_EQUAL_STRING"
+            _check_serial_option(serial, regex_str, '"on"')
+
+        if params.get("check_removable_option") == "yes":
+            error_context.context("Check usb removable option", logging.info)
+            removable = "on"
+            expect_str = "Attached SCSI removable disk"
+            _check_removable_option(removable, expect_str)
+
+            removable = "off"
+            expect_str = "Attached SCSI disk"
+            _check_removable_option(removable, expect_str)
+
+        if params.get("check_io_size_option") == "yes":
+            error_context.context("Check usb min/opt io_size option", logging.info)
+            _check_io_size_option("0", "0")
+            # NOTE: Guest can't recognize the correct value which we set now,
+            # so these tests are commented out temporarily.
+            # _check_io_size_option("1024", "1024")
+            # _check_io_size_option("4096", "4096")
+
+        if hotplug_unplug:
+            error_context.context("Hotunplug iteration %s." % rt, logging.info)
+            for dev in disk_hotplugged:
+                ret = vm.devices.simple_unplug(dev, vm.monitor)
+                if ret[1] is False:
+                    test.fail("Failed to unplug device '%s'. 
Output:\n%s"
+                              % (dev, ret[0]))
diff --git a/qemu/tests/vioinput_hotplug.py b/qemu/tests/vioinput_hotplug.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4ded727abe466c47c10a67a7093d52c4ee04a2a
--- /dev/null
+++ b/qemu/tests/vioinput_hotplug.py
@@ -0,0 +1,67 @@
+import logging
+import time
+
+from virttest import error_context
+from provider import input_tests
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Test hotplug/unplug of virtio input device
+    1) Boot up w/ one virtio input device
+    2) Unplug one virtio input device
+    3) Hotplug one virtio input device
+    4) Run basic keyboard/mouse test
+
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment
+    """
+
+    def hotplug_input_dev(vm, dev):
+        error_context.context("Hotplug %s" % dev, logging.info)
+        out, ver_out = vm.devices.simple_hotplug(dev, vm.monitor)
+        if not ver_out:
+            test.fail("No %s device in qtree after hotplug" % dev)
+        logging.info("%s is hotplugged successfully" % dev)
+
+    def unplug_input_dev(vm, dev):
+        error_context.context("Unplug %s" % dev, logging.info)
+        out, ver_out = vm.devices.simple_unplug(dev, vm.monitor)
+        if not ver_out:
+            test.fail("Still get %s in qtree after unplug" % dev)
+        logging.info("%s is unplugged successfully" % dev)
+
+    def run_subtest(sub_test):
+        """
+        Run the subtest (e.g. keyboard_test, mouse_test) when it's not None
+        :param sub_test: subtest name
+        """
+        error_context.context("Run %s subtest" % sub_test, logging.info)
+        wait_time = float(params.get("wait_time", 0.2))
+        if sub_test == "keyboard_test":
+            input_tests.keyboard_test(test, params, vm, wait_time)
+        elif sub_test == "mouse_test":
+            input_tests.mouse_test(test, params, vm, wait_time, count=1)
+
+    login_timeout = int(params.get("login_timeout", 360))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=login_timeout)
+    sub_test = params["sub_test"]
+
+    # Hotplug an input device
+    new_dev = vm.devices.input_define_by_params(
+        params, params["input_name"])[0]
+    hotplug_input_dev(vm, new_dev)
+    # For a virtio-mouse/tablet device, after the new device is added the
+    # default working device changes from the ps/2 mouse to the new mouse,
+    # so wait 5 seconds for that switch to finish.
+    time.sleep(5)
+    run_subtest(sub_test)
+    session = vm.reboot(session)
+    # Unplug the attached input device
+    unplug_input_dev(vm, new_dev)
+    session = vm.reboot(session)
+    session.close()
diff --git a/qemu/tests/vioser_in_use.py b/qemu/tests/vioser_in_use.py
index 137f7b33be6d18063c64cf5fc13c76969303c421..ac16604edd922b0e4b563b1f285cc093179a9696 100644
--- a/qemu/tests/vioser_in_use.py
+++ b/qemu/tests/vioser_in_use.py
@@ -2,6 +2,7 @@ import re
 import os
 import signal
 import logging
+import time
 
 from avocado.utils import process
 from virttest import utils_misc
@@ -45,6 +46,21 @@ def live_migration_guest(test, params, vm, session):
     vm.migrate()
 
 
+@error_context.context_aware
+def vcpu_hotplug_guest(test, params, vm, session):
+    """
+    vCPU hotplug test. 
+ """ + + maxcpus = int(params["vcpu_maxcpus"]) + current_cpus = int(params.get("smp", 2)) + for cpuid in range(current_cpus, maxcpus): + error_context.context("hot-pluging vCPU %s" % cpuid, logging.info) + vm.hotplug_vcpu(cpu_id=cpuid) + # make the cpu hotplug has slot during data transfer + time.sleep(2) + + @error_context.context_aware def kill_host_serial_pid(params, vm): """ @@ -116,10 +132,21 @@ def run(test, params, env): bg_thread = run_bg_test(test, params, vm, sender) globals().get(params["interrupt_test"])(test, params, vm, session) + + # for vcpu hotplug subtest, only check guest crash. + vcpu_hotplug = params.get_boolean("vcpu_hotplug") + if vcpu_hotplug: + error_context.context("Check if guest is alive.", logging.info) + vm.verify_kernel_crash() + session = vm.wait_for_login(timeout=timeout) + session.close() + return + if bg_thread: - bg_thread.join(timeout=timeout, suppress_exception=suppress_exception) + bg_thread.join(timeout=timeout, + suppress_exception=suppress_exception) if vm.is_alive(): kill_host_serial_pid(params, vm) - if (virtio_serial_file_transfer.transfer_data(params, vm, sender=sender) is - not True): + if (virtio_serial_file_transfer.transfer_data( + params, vm, sender=sender) is not True): test.fail("Serial data transfter test failed.") diff --git a/qemu/tests/virtio_blk_with_discard_write_zeroes.py b/qemu/tests/virtio_blk_with_discard_write_zeroes.py index 7c261f177918306e552aca777b5fbe25c59e5fb9..4298c4de18357502dd8bd74c963828f6b5ff771b 100644 --- a/qemu/tests/virtio_blk_with_discard_write_zeroes.py +++ b/qemu/tests/virtio_blk_with_discard_write_zeroes.py @@ -2,9 +2,11 @@ import ast import re import logging +from virttest import env_process from virttest import error_context from virttest import qemu_qtree from virttest import utils_misc +from virttest import virt_vm @error_context.context_aware @@ -67,14 +69,28 @@ def run(test, params, env): data_tag = params["images"].split()[1] vm = env.get_vm(params["main_vm"]) + + if params['start_vm'] == 'no': + params['start_vm'] = 'yes' + try: + env_process.preprocess_vm(test, params, env, params["main_vm"]) + except virt_vm.VMCreateError as e: + error_msg = params.get('error_msg') + if error_msg not in str(e): + test.fail('No found "%s" from the output of qemu:%s.' 
%
+                          (error_msg, str(e)))
+            return
+
 
     vm.verify_alive()
     session = vm.wait_for_login()
     data_disk = get_data_disk_by_serial(session, data_tag)
 
-    for attr_name, val in ast.literal_eval(params['attributes_checked']).items():
-        check_attribute_in_qtree(data_tag, attr_name, val)
+    if params.get('attributes_checked'):
+        for attr_name, val in ast.literal_eval(params['attributes_checked']).items():
+            check_attribute_in_qtree(data_tag, attr_name, val)
 
-    for cmd, val in ast.literal_eval(params['status_checked']).items():
-        check_status_inside_guest(session, params[cmd].format(data_disk), val)
+    if params.get('status_checked'):
+        for cmd, val in ast.literal_eval(params['status_checked']).items():
+            check_status_inside_guest(session, params[cmd].format(data_disk), val)
 
     dd_test(session, data_disk)
diff --git a/qemu/tests/virtio_chardev_trace.py b/qemu/tests/virtio_chardev_trace.py
new file mode 100644
index 0000000000000000000000000000000000000000..3be99bafeafd41da8989518508fc0d7597e903af
--- /dev/null
+++ b/qemu/tests/virtio_chardev_trace.py
@@ -0,0 +1,92 @@
+import os
+import time
+import aexpect
+
+from avocado.utils import process
+from virttest import error_context
+from virttest import env_process
+from virttest import data_dir
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    virtio-trace support testing:
+    1) Make a FIFO per CPU in the host.
+    2) Reboot the guest with a virtio-serial device, control path and data path per CPU.
+    3) Download the trace agent and compile it.
+    4) Enable ftrace in the guest.
+    5) Run the trace agent in the guest.
+    6) Open the FIFOs in the host.
+    7) Start reading trace data on order from the host
+    8) Stop reading trace data on order from the host
+    9) Repeat 7) and 8) with a different CPU
+
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment. 
+ """ + + def get_procs(): + procs = [] + for x in range(0, int(nums_cpu)): + pipefile = '/tmp/virtio-trace/trace-path-cpu{}.out'.format(x) + proc = aexpect.run_bg('cat %s' % pipefile) + procs.append(proc) + return procs + + try: + nums_cpu = int(params.get("smp", 1)) + serials = params.get("serials", '') + v_path = "/tmp/virtio-trace/" + if not os.path.isdir(v_path): + process.run("mkdir {}".format(v_path)) + for t in ["in", "out"]: + process.run("mkfifo {}agent-ctl-path.{}".format(v_path, t)) + for x in range(int(nums_cpu)): + process.run("mkfifo {}trace-path-cpu{}.{}".format(v_path, x, t)) + + enable_cmd = "echo 1 > /tmp/virtio-trace/agent-ctl-path.in" + disable_cmd = "echo 0 > /tmp/virtio-trace/agent-ctl-path.in" + for x in range(int(nums_cpu)): + serials += ' vs{} '.format(x) + params['serial_type_vs{}'.format(x)] = 'virtserialport' + params['chardev_backend_vs{}'.format(x)] = 'pipe' + params['serial_name_vs{}'.format(x)] = "trace-path-cpu{}".format(x) + params['chardev_path_vs{}'.format(x)] = "{}trace-path-cpu{}".format(v_path, x) + params['serials'] = serials + params['start_vm'] = "yes" + env_process.preprocess(test, params, env) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + status, output = session.cmd_status_output("echo 1 > /sys/kernel/debug/tracing/events/sched/enable") + if status != 0: + test.error("Enable ftrace in the guest failed as %s" % output) + + # run trace agnet in vm + vm.copy_files_to(data_dir.get_deps_dir("virtio-trace"), '/home/') + session.cmd('cd /home/virtio-trace/ && make') + session.cmd('sudo /home/virtio-trace/trace-agent &') + + # Host injects read start order to the guest via virtio-serial + process.run(enable_cmd, shell=True) + procs = get_procs() + time.sleep(10) + + # Host injects read stop order to the guest via virtio-serial + process.run(disable_cmd, shell=True) + time.sleep(10) + for index, proc in enumerate(procs): + if not proc.get_output(): + test.fail("cpu %s do not have output while it is enabled in host" % index) + proc.close() + + procs = get_procs() + time.sleep(10) + for index, proc in enumerate(procs): + if proc.get_output(): + test.fail("cpu %s still have output after disabled in host" % index) + proc.close() + finally: + process.run("rm -rf {}".format(v_path)) diff --git a/qemu/tests/virtio_console.py b/qemu/tests/virtio_console.py index 7c3d769cc6974a1fe2440913784b09b26710ff63..7b7b03323888d9f936562ac33446271d56cbe6f3 100644 --- a/qemu/tests/virtio_console.py +++ b/qemu/tests/virtio_console.py @@ -245,7 +245,7 @@ def run(test, params, env): time.sleep(0.5) # wait for SIGHUP to be emitted # Enable sigio on specific port - guest_worker.cmd("virt.async('%s', True, 0)" % (port.name), 10) + guest_worker.cmd("virt.asynchronous('%s', True, 0)" % (port.name), 10) # Test sigio when port open guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT)" % @@ -283,7 +283,7 @@ def run(test, params, env): guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) # Disable sigio on specific port - guest_worker.cmd("virt.async('%s', False, 0)" % (port.name), 10) + guest_worker.cmd("virt.asynchronous('%s', False, 0)" % (port.name), 10) virtio_test.cleanup(vm, guest_worker) def test_lseek(): diff --git a/qemu/tests/virtio_fs_multi_vms.py b/qemu/tests/virtio_fs_multi_vms.py new file mode 100644 index 0000000000000000000000000000000000000000..0df0054c90730f23c83e8ec28d6aed3c93693ba6 --- /dev/null +++ b/qemu/tests/virtio_fs_multi_vms.py @@ -0,0 +1,198 @@ +import logging +import os +import re + 
+from virttest import error_context
+from virttest import utils_disk
+from virttest import utils_misc
+from virttest import utils_test
+from virttest.utils_windows import virtio_win
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Test virtio-fs with multiple VMs and virtiofs daemons.
+    Steps:
+    1. Create shared directories on the host.
+    2. Run virtiofs daemons on the host.
+    3. Boot guests on the host with virtiofs options.
+    4. Log into guest then mount the virtiofs targets.
+    5. Generate files on the mount points inside guests.
+    6. Compare the md5 among guests if multiple virtiofs
+       daemons share the source.
+
+    :param test: QEMU test object.
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+    def get_viofs_exe(session):
+        """
+        Get viofs.exe from the virtio win iso, such as E:\viofs\2k19\amd64
+        """
+        media_type = params["virtio_win_media_type"]
+        try:
+            get_drive_letter = getattr(virtio_win, "drive_letter_%s" %
+                                       media_type)
+            get_product_dirname = getattr(virtio_win,
+                                          "product_dirname_%s" % media_type)
+            get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" %
+                                       media_type)
+        except AttributeError:
+            test.error("Unsupported virtio win media type '%s'" % media_type)
+        viowin_ltr = get_drive_letter(session)
+        if not viowin_ltr:
+            test.error("Could not find virtio-win drive in guest")
+        guest_name = get_product_dirname(session)
+        if not guest_name:
+            test.error("Could not get product dirname of the vm")
+        guest_arch = get_arch_dirname(session)
+        if not guest_arch:
+            test.error("Could not get architecture dirname of the vm")
+
+        exe_middle_path = ("{name}\\{arch}" if media_type == "iso"
+                           else "{arch}\\{name}").format(name=guest_name,
+                                                         arch=guest_arch)
+        exe_file_name = "virtiofs.exe"
+        exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"'
+        exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path)
+        exe_path = session.cmd(exe_find_cmd).strip()
+        logging.info("Found exe file '%s'", exe_path)
+        return exe_path
+
+    cmd_dd = params.get('cmd_dd')
+    cmd_md5 = params.get('cmd_md5')
+    io_timeout = params.get_numeric('io_timeout')
+    shared_fs_source_dir = params.get('shared_fs_source_dir')
+    os_type = params.get('os_type')
+
+    # cfg for windows vm
+    cmd_timeout = params.get_numeric("cmd_timeout", 120)
+    driver_name = params.get("driver_name")
+    wfsp_install_cmd = params.get("wfsp_install_cmd")
+    check_installed_cmd = params.get("check_installed_cmd")
+
+    sessions = []
+    vms = env.get_all_vms()
+    for vm in vms:
+        vm.verify_alive()
+        sessions.append(vm.wait_for_login())
+
+    mapping = {}
+    for vm, session in zip(params.objects('vms'), sessions):
+        vm_params = params.object_params(vm)
+        mapping[vm] = {'session': session, 'filesystems': []}
+
+        # check driver verifier in windows vm
+        # install winfsp tool and start virtiofs exe in windows vm
+        if os_type == "windows":
+            # Check whether the windows driver is running, and enable driver verifier
+            session = utils_test.qemu.windrv_check_running_verifier(session,
+                                                                    vm, test,
+                                                                    driver_name)
+            error_context.context("%s: Install winfsp for windows guest." % vm,
+                                  logging.info)
+            installed = session.cmd_status(check_installed_cmd) == 0
+            if installed:
+                logging.info("%s: Winfsp tool is already installed." % vm)
+            else:
+                install_cmd = utils_misc.set_winutils_letter(session,
+                                                             wfsp_install_cmd)
+                session.cmd(install_cmd, cmd_timeout)
+                if not utils_misc.wait_for(lambda: not session.cmd_status(
+                        check_installed_cmd), 60):
+                    test.error("%s: Winfsp tool is not installed." 
% vm)
+
+            error_context.context("%s: Start virtiofs service in guest." % vm,
+                                  logging.info)
+            exe_path = get_viofs_exe(session)
+            start_vfs_cmd = params["start_vfs_cmd"] % exe_path
+            session.sendline(start_vfs_cmd)
+
+            error_context.context("%s: Check if virtiofs service is started."
+                                  % vm, logging.info)
+            check_virtiofs_cmd = params["check_virtiofs_cmd"]
+
+            if not utils_misc.wait_for(lambda: re.search("virtiofs",
+                                                         session.cmd_output(
+                                                             check_virtiofs_cmd),
+                                                         re.IGNORECASE), 30):
+                test.fail("%s: Virtiofs service failed to start." % vm)
+
+        # get fs dest for vm
+        for fs in vm_params.objects('filesystems'):
+            fs_params = vm_params.object_params(fs)
+            fs_target = fs_params.get("fs_target")
+            fs_dest = fs_params.get("fs_dest")
+
+            if os_type == "linux":
+                error_context.context(
+                    "%s: Create a destination directory %s inside guest." %
+                    (vm, fs_dest), logging.info)
+                utils_misc.make_dirs(fs_dest, session)
+
+                error_context.context(
+                    "%s: Mount the virtiofs target %s to %s inside guest." %
+                    (vm, fs_target, fs_dest), logging.info)
+                utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session)
+            else:
+                virtio_fs_disk_label = fs_target
+                error_context.context("%s: Get Volume letter of virtio fs"
+                                      " target, the disk label is %s." %
+                                      (vm, virtio_fs_disk_label), logging.info)
+                vol_con = "VolumeName='%s'" % virtio_fs_disk_label
+                vol_func = utils_misc.get_win_disk_vol(session,
+                                                       condition=vol_con)
+                volume_letter = utils_misc.wait_for(lambda: vol_func,
+                                                    cmd_timeout)
+                fs_dest = "%s:" % volume_letter
+
+            guest_file = os.path.join(fs_dest, 'fs_test')
+            logging.info("%s: The guest file in shared dir is %s" %
+                         (vm, guest_file))
+            mapping[vm]['filesystems'].append({'fs_target': fs_target,
+                                               'fs_dest': fs_dest,
+                                               'guest_file': guest_file})
+
+            if cmd_dd:
+                logging.info("%s: Creating file under %s inside guest." %
+                             (vm, fs_dest))
+                session.cmd(cmd_dd % guest_file, io_timeout)
+
+            if shared_fs_source_dir:
+                continue
+
+            if os_type == "linux":
+                error_context.context("%s: Umount the virtiofs target %s." %
+                                      (vm, fs_target), logging.info)
+                utils_disk.umount(fs_target, fs_dest, 'virtiofs',
+                                  session=session)
+
+    if shared_fs_source_dir:
+        error_context.context("Compare the md5 among VMs.", logging.info)
+
+        md5_set = set()
+        for vm, info in mapping.items():
+            session = info['session']
+            for fs in info['filesystems']:
+                shared_data = fs['guest_file']
+                error_context.context("%s: Get the md5 of %s." %
+                                      (vm, shared_data), logging.info)
+                if os_type == "linux":
+                    cmd_md5_vm = cmd_md5 % shared_data
+                else:
+                    guest_file_win = shared_data.replace("/", "\\")
+                    cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win)
+
+                md5_guest = session.cmd(cmd_md5_vm,
+                                        io_timeout).strip().split()[0]
+                logging.info(md5_guest)
+                md5_set.add(md5_guest)
+
+                if os_type == "linux":
+                    error_context.context("%s: Umount the virtiofs target %s."
% + (vm, fs['fs_target']), logging.info) + utils_disk.umount(fs['fs_target'], fs['fs_dest'], + 'virtiofs', session=session) + if len(md5_set) != 1: + test.fail('The md5 values are different among VMs.') diff --git a/qemu/tests/virtio_fs_readonly.py b/qemu/tests/virtio_fs_readonly.py new file mode 100644 index 0000000000000000000000000000000000000000..c15fccc8fac25bfca3dd4969ca7e7cda0c770119 --- /dev/null +++ b/qemu/tests/virtio_fs_readonly.py @@ -0,0 +1,47 @@ +import logging + +from virttest import error_context +from virttest import utils_misc +from virttest import utils_disk + + +@error_context.context_aware +def run(test, params, env): + """ + Test virtio-fs with mounting by read-only options. + Steps: + 1. Create a shared directory for testing on the host. + 2. Run the virtiofsd daemon on the host. + 3. Boot a guest on the host. + 4. Log into guest then mount the virtiofs with option "-o ro". + 5. Generate a file on the mount point in guest. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + fs_target = params.get('fs_target') + fs_dest = params.get('fs_dest') + + vm = env.get_vm(params.get("main_vm")) + vm.verify_alive() + session = vm.wait_for_login() + + error_context.context("Create a destination directory inside guest.", + logging.info) + utils_misc.make_dirs(fs_dest, session) + + error_context.context("Mount the virtiofs target with read-only to " + "the destination directory inside guest.", logging.info) + utils_disk.mount(fs_target, fs_dest, 'virtiofs', 'ro', session=session) + + try: + error_context.context("Create file under the destination " + "directory inside guest.", logging.info) + output = session.cmd_output(params.get('cmd_create_file')) + logging.info(output) + if params.get('check_str') not in output: + test.fail('Failed to mount the virtiofs target with read-only.') + finally: + utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_misc.safe_rmdir(fs_dest, session=session) diff --git a/qemu/tests/virtio_fs_share_data.py b/qemu/tests/virtio_fs_share_data.py new file mode 100644 index 0000000000000000000000000000000000000000..02eef043a70aa37acd9cdee778d245bbfcc77072 --- /dev/null +++ b/qemu/tests/virtio_fs_share_data.py @@ -0,0 +1,206 @@ +import logging +import os +import re + +from avocado.utils import process + +from virttest import data_dir +from virttest import error_context +from virttest import utils_disk +from virttest import utils_misc +from virttest import utils_test +from virttest.remote import scp_to_remote +from virttest.utils_windows import virtio_win + +from provider.storage_benchmark import generate_instance + + +@error_context.context_aware +def run(test, params, env): + """ + Test virtio-fs by sharing the data between host and guest. + Steps: + 1. Create shared directories on the host. + 2. Run virtiofsd daemons on the host. + 3. Boot a guest on the host with virtiofs options. + 4. Log into guest then mount the virtiofs targets. + 5. Generate files or run stress on the mount points inside guest. + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + def get_viofs_exe(session): + """ + Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 + """ + media_type = params["virtio_win_media_type"] + try: + get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) + get_product_dirname = getattr(virtio_win, + "product_dirname_%s" % media_type) + get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + except AttributeError: + test.error("Not supported virtio win media type '%s'", media_type) + viowin_ltr = get_drive_letter(session) + if not viowin_ltr: + test.error("Could not find virtio-win drive in guest") + guest_name = get_product_dirname(session) + if not guest_name: + test.error("Could not get product dirname of the vm") + guest_arch = get_arch_dirname(session) + if not guest_arch: + test.error("Could not get architecture dirname of the vm") + + exe_middle_path = ("{name}\\{arch}" if media_type == "iso" + else "{arch}\\{name}").format(name=guest_name, + arch=guest_arch) + exe_file_name = "virtiofs.exe" + exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' + exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) + exe_path = session.cmd(exe_find_cmd).strip() + logging.info("Found exe file '%s'", exe_path) + return exe_path + + # data io config + cmd_dd = params.get('cmd_dd') + cmd_md5 = params.get('cmd_md5') + + # pjdfs test config + cmd_pjdfstest = params.get('cmd_pjdfstest') + cmd_unpack = params.get('cmd_unpack') + cmd_yum_deps = params.get('cmd_yum_deps') + cmd_autoreconf = params.get('cmd_autoreconf') + cmd_configure = params.get('cmd_configure') + cmd_make = params.get('cmd_make') + pjdfstest_pkg = params.get('pjdfstest_pkg') + username = params.get('username') + password = params.get('password') + port = params.get('file_transfer_port') + + # fio config + fio_options = params.get('fio_options') + io_timeout = params.get_numeric('io_timeout') + + os_type = params.get("os_type") + vm = env.get_vm(params.get("main_vm")) + vm.verify_alive() + session = vm.wait_for_login() + host_addr = vm.get_address() + + if os_type == "windows": + cmd_timeout = params.get_numeric("cmd_timeout", 120) + driver_name = params["driver_name"] + install_path = params["install_path"] + check_installed_cmd = params["check_installed_cmd"] % install_path + + # Check whether windows driver is running,and enable driver verifier + session = utils_test.qemu.windrv_check_running_verifier(session, + vm, test, + driver_name) + # install winfsp tool + error_context.context("Install winfsp for windows guest.", + logging.info) + installed = session.cmd_status(check_installed_cmd) == 0 + if installed: + logging.info("Winfsp tool is already installed.") + else: + install_cmd = utils_misc.set_winutils_letter(session, + params["install_cmd"]) + session.cmd(install_cmd, cmd_timeout) + if not utils_misc.wait_for(lambda: not session.cmd_status( + check_installed_cmd), 60): + test.error("Winfsp tool is not installed.") + + for fs in params.objects("filesystems"): + fs_params = params.object_params(fs) + fs_target = fs_params.get("fs_target") + fs_dest = fs_params.get("fs_dest") + + fs_source = fs_params.get("fs_source_dir") + base_dir = fs_params.get("fs_source_base_dir", + data_dir.get_data_dir()) + if not os.path.isabs(fs_source): + fs_source = os.path.join(base_dir, fs_source) + + host_data = os.path.join(fs_source, 'fs_test') + + if os_type == "linux": + error_context.context("Create a destination directory %s " + "inside guest." 
% fs_dest, logging.info) + utils_misc.make_dirs(fs_dest, session) + + error_context.context("Mount virtiofs target %s to %s inside" + " guest." % (fs_target, fs_dest), + logging.info) + utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session) + + else: + error_context.context("Start virtiofs service in guest.", logging.info) + exe_path = get_viofs_exe(session) + start_vfs_cmd = params["start_vfs_cmd"] % exe_path + session.sendline(start_vfs_cmd) + + error_context.context("Check if virtiofs service is started.", + logging.info) + check_virtiofs_cmd = params["check_virtiofs_cmd"] + + if not utils_misc.wait_for(lambda: re.search("virtiofs", + session.cmd_output( + check_virtiofs_cmd), + re.IGNORECASE), 30): + test.fail("Virtiofs service is failed to start.") + + virtio_fs_disk_label = fs_target + error_context.context("Get Volume letter of virtio fs target, the disk" + "lable is %s." % virtio_fs_disk_label, + logging.info) + vol_con = "VolumeName='%s'" % virtio_fs_disk_label + vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con) + volume_letter = utils_misc.wait_for(lambda: vol_func, cmd_timeout) + fs_dest = "%s:" % volume_letter + + guest_file = os.path.join(fs_dest, 'fs_test') + logging.info("The guest file in shared dir is %s" % guest_file) + + try: + if cmd_dd: + logging.info("Creating file under %s inside guest." % fs_dest) + session.cmd(cmd_dd % guest_file, io_timeout) + + if os_type == "linux": + cmd_md5_vm = cmd_md5 % guest_file + else: + guest_file_win = guest_file.replace("/", "\\") + cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) + md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + + logging.info(md5_guest) + md5_host = process.run("md5sum %s" % host_data, + io_timeout).stdout_text.strip().split()[0] + if md5_guest != md5_host: + test.fail('The md5 value of host is not same to guest.') + + if fio_options: + error_context.context("Run fio on %s." % fs_dest, logging.info) + fio = generate_instance(params, vm, 'fio') + try: + fio.run(fio_options % guest_file, io_timeout) + finally: + fio.clean() + vm.verify_dmesg() + + if cmd_pjdfstest: + error_context.context("Run pjdfstest on %s." % fs_dest, logging.info) + host_path = os.path.join(data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg) + scp_to_remote(host_addr, port, username, password, host_path, fs_dest) + session.cmd(cmd_unpack.format(fs_dest), 180) + session.cmd(cmd_yum_deps, 180) + session.cmd(cmd_autoreconf % fs_dest, 180) + session.cmd(cmd_configure.format(fs_dest), 180) + session.cmd(cmd_make % fs_dest, io_timeout) + session.cmd(cmd_pjdfstest % fs_dest, io_timeout) + finally: + if os_type == "linux": + utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_misc.safe_rmdir(fs_dest, session=session) diff --git a/qemu/tests/virtio_net_dpdk.py b/qemu/tests/virtio_net_dpdk.py new file mode 100644 index 0000000000000000000000000000000000000000..efaef459acaf9beeb89647894dda40d2d5732bcd --- /dev/null +++ b/qemu/tests/virtio_net_dpdk.py @@ -0,0 +1,346 @@ +import os +import logging +import threading +import six +import time + +from avocado.utils import process + +from virttest import error_context +from virttest import virt_vm +from virttest import remote +from virttest import data_dir +from virttest import utils_misc +from virttest import utils_test + + +def format_result(result, base="12", fbase="2"): + """ + Format the result to a fixed length string. 
+
+    :param result: result to convert
+    :param base: the length of converted string
+    :param fbase: the decimal digit for float
+    """
+    if isinstance(result, six.string_types):
+        value = "%" + base + "s"
+    elif isinstance(result, int):
+        value = "%" + base + "d"
+    elif isinstance(result, float):
+        value = "%" + base + "." + fbase + "f"
+    return value % result
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Virtio with qemu vhost backend with dpdk
+
+    1) Boot up VM and reboot VM with 1G hugepages and iommu enabled
+    2) Install dpdk related packages
+    3) Bind two nics to vfio-pci on VM
+    4) Install and start MoonGen on external host
+    5) Start testpmd on VM, collect and analyze the results
+
+    :param test: QEMU test object.
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+
+    def _pin_vm_threads(node):
+        """
+        pin guest vcpu and vhost threads to cpus of a numa node respectively
+
+        :param node: which numa node to pin
+        """
+        if node:
+            if not isinstance(node, utils_misc.NumaNode):
+                node = utils_misc.NumaNode(int(node))
+            utils_test.qemu.pin_vm_threads(vm, node)
+
+    def install_dpdk():
+        """ Install dpdk related packages"""
+
+        cmd = 'yum install -y %s' % params.get("env_pkg")
+        session.cmd(cmd, timeout=360, ignore_all_errors=True)
+        session.cmd_output('rpm -qa |grep dpdk')
+
+    def env_setup():
+        """
+        Prepare the test environment
+        1) Set 1G hugepages and iommu enabled
+        2) Copy testpmd script to guest
+
+        """
+        error_context.context("Setup env for guest")
+
+        # setup hugepages
+        session.cmd(params.get("env_hugepages_cmd"), ignore_all_errors=True)
+
+        # install dpdk related packages
+        install_dpdk()
+
+        # install python pexpect
+        session.cmd("`command -v pip pip3` install pexpect", ignore_all_errors=True)
+
+        # copy testpmd script to guest
+        testpmd_exec = params.get("testpmd_exec")
+        src = os.path.join(data_dir.get_deps_dir(),
+                           "performance/%s" % testpmd_exec)
+        dst = "/tmp/%s" % testpmd_exec
+        vm.copy_files_to(src, dst, nic_index=0)
+
+        return dst
+
+    def dpdk_devbind():
+        """
+
+        bind two nics to vfio-pci
+        return nic1 and nic2's pci
+        """
+
+        error_context.context("bind two nics to vfio-pci")
+        cmd = "modprobe vfio"
+        cmd += " && modprobe vfio-pci"
+        session.cmd(cmd, timeout=360, ignore_all_errors=True)
+        session.cmd_output("lspci|grep Eth")
+        cmd_nic_pci = "lspci |awk '/%s/ {print $1}'" % params.get("nic_driver")
+        nic_driver = params.get("nic_driver").split()
+        if len(nic_driver) > 1:
+            for i in nic_driver:
+                if i == "Virtio":
+                    nic_pci_1 = "0000:%s" % session.cmd(
+                        "lspci |awk '/%s network/ {print $1}'" % i).strip()
+                    cmd_str = "dpdk-devbind --bind=vfio-pci %s" % nic_pci_1
+                else:
+                    nic_pci_2 = "0000:%s" % session.cmd(
+                        "lspci |awk '/%s/ {print $1}'" % i).strip()
+                    cmd_str = "dpdk-devbind --bind=vfio-pci %s" % nic_pci_2
+                session.cmd_output(cmd_str)
+        session.cmd_output('dpdk-devbind --status')
+        return nic_pci_1, nic_pci_2
+
+    def install_moongen(session, ip, user, port, password):
+        """
+
+        Install MoonGen on the remote moongen host
+
+        """
+
+        # copy MoonGen.zip to remote moongen host
+        moongen_pkg = params.get("moongen_pkg")
+        local_path = os.path.join(
+            data_dir.get_deps_dir(), "performance/%s" % moongen_pkg)
+        remote.scp_to_remote(ip, shell_port, username,
+                             password, local_path, "/home")
+
+        # install moongen
+        cmd_str = "rm -rf /home/MoonGen"
+        cmd_str += " && unzip /home/%s -d /home" % params.get("moongen_pkg")
+        cmd_str += " && cd /home/MoonGen && ./build.sh"
+        if session.cmd_status(cmd_str, timeout=300) != 0:
+            test.error("Fail to install program on moongen host")
+
+        # set hugepages
+        session.cmd(params.get("generator_hugepages_cmd"), ignore_all_errors=True)
+
+        # probe vfio and vfio-pci
+        cmd_probe = "modprobe vfio; modprobe vfio-pci"
+        session.cmd_status(cmd_probe, timeout=300)
+
+        # bind nic
+        moongen_dpdk_nic = params.get("moongen_dpdk_nic").split()
+        for i in list(moongen_dpdk_nic):
+            cmd_bind = "dpdk-devbind --bind=vfio-pci %s" % i
+            if session.cmd_status(cmd_bind) != 0:
+                test.error("Fail to bind nic %s on moongen host" % i)
+
+    def result(recode, dst):
+
+        if os.path.getsize(dst) > 0:
+
+            cmd = "grep -i %s %s | tail -2 | awk -F ':' '{print $2}' | head -1"\
+                  "| awk '{print $1}'" % (recode, dst)
+            pps_results = process.system_output(cmd, shell=True)
+            power = 10**6
+            mpps_results = float(pps_results) / float(power)
+            pps_results = "%.2f" % mpps_results
+        else:
+            test.error("the content of /tmp/testpmd.log is empty")
+
+        return mpps_results
+
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+
+    try:
+        vm.wait_for_serial_login(
+            timeout=login_timeout, restart_network=True).close()
+    except virt_vm.VMIPAddressMissingError:
+        pass
+
+    # print numa information on host and pin vhost and vcpus to cpus
+    process.system_output("numactl --hardware")
+    process.system_output("numactl --show")
+    _pin_vm_threads(params.get("numa_node"))
+    error_context.context("Prepare env of vm/generator host", logging.info)
+
+    session = vm.wait_for_login(nic_index=0, timeout=login_timeout)
+
+    guest_ip = vm.wait_for_get_address(0, timeout=90)
+    macvtap_mac = vm.get_mac_address(1)
+    vfio_mac = vm.get_mac_address(2)
+
+    # get parameter from dictionary
+    category = params.get("category")
+    pkt_size = params.get("pkt_size")
+    kvm_ver_chk_cmd = params.get("kvm_ver_chk_cmd")
+    guest_ver_cmd = params["guest_ver_cmd"]
+    guest_dpdk_cmd = params["guest_dpdk_cmd"]
+    record_list = params["record_list"]
+
+    # get record_list
+    record_line = ""
+    for record in record_list.split():
+        record_line += "%s|" % format_result(record)
+
+    # setup env and bind nics to vfio-pci in guest
+
+    exec_file = env_setup()
+    nic_pci_1, nic_pci_2 = dpdk_devbind()
+
+    # setup env on moongen host
+    generator_ip = params.get("generator")
+    shell_port = params.get("shell_port_generator")
+    password = params.get("password_generator")
+    username = params.get("username_generator")
+    generator1 = remote.wait_for_login(params.get("shell_client_generator"),
+                                       generator_ip,
+                                       shell_port,
+                                       username,
+                                       password,
+                                       params.get("shell_prompt_generator"))
+    generator2 = remote.wait_for_login(params.get("shell_client_generator"),
+                                       generator_ip,
+                                       shell_port,
+                                       username,
+                                       password,
+                                       params.get("shell_prompt_generator"))
+    install_moongen(generator1, generator_ip, username, shell_port, password)
+
+    # get qemu, guest kernel, kvm version and dpdk version and write them into result
+    result_path = utils_misc.get_path(test.resultsdir, "virtio_net_dpdk.RHS")
+    result_file = open(result_path, "w")
+    kvm_ver = process.system_output(kvm_ver_chk_cmd, shell=True).decode()
+    host_ver = os.uname()[2]
+    guest_ver = session.cmd_output(guest_ver_cmd)
+    dpdk_ver = session.cmd_output(guest_dpdk_cmd)
+    result_file.write("### kvm-userspace-ver : %s" % kvm_ver)
+    result_file.write("### kvm_version : %s" % host_ver)
+    result_file.write("### guest-kernel-ver :%s" % guest_ver)
+    result_file.write("### guest-dpdk-ver :%s" % dpdk_ver)
+
+    # get result tested by each scenario
+    for pkt_cate in
category.split(): + result_file.write("Category:%s\n" % pkt_cate) + result_file.write("%s\n" % record_line.rstrip("|")) + nic1_driver = params.get("nic1_dpdk_driver") + nic2_driver = params.get("nic2_dpdk_driver") + cores = params.get("vcpu_sockets") + queues = params.get("testpmd_queues") + running_time = int(params.get("testpmd_running_time")) + size = 60 + + if pkt_cate == "rx": + error_context.context("test guest rx pps performance", + logging.info) + port = 1 + record = "Rx-pps" + mac = vm.get_mac_address(1) + if pkt_cate == "tx": + error_context.context("test guest tx pps performance", + logging.info) + port = 0 + record = "Tx-pps" + mac = vm.get_mac_address(2) + + status = launch_test(session, generator1, generator2, + mac, port, exec_file, + nic1_driver, nic2_driver, + nic_pci_1, nic_pci_2, + cores, queues, running_time) + if status is True: + error_context.context("%s test is finished" % + pkt_cate, logging.info) + else: + test.fail("test is failed, please check your command and env") + + dst = utils_misc.get_path(test.resultsdir, "testpmd.%s" % pkt_cate) + vm.copy_files_from("/tmp/testpmd.log", dst) + + pkt_cate_r = result("%s-pps" % pkt_cate, dst) + line = "%s|" % format_result(size) + line += "%s" % format_result(pkt_cate_r) + result_file.write(("%s\n" % line)) + + generator1.close() + generator2.close() + session.close() + + +@error_context.context_aware +def launch_test(session, generator1, generator2, + mac, port_id, exec_file, + nic1_driver, nic2_driver, + nic_pci_1, nic_pci_2, + cores, queues, running_time): + """ Launch MoonGen """ + + def start_moongen(generator1, mac, port_id, running_time): + + file = '/home/MoonGen/examples/udp-throughput.lua' + cmd = "pkill MoonGen ; rm -rf /tmp/throughput.log ; sleep 3" + cmd += r" && \cp %s %s.tmp" % (file, file) + tmp_file = "%s.tmp" % file + cmd += " && sed -i 's/10:11:12:13:14:15/%s/g' %s" % (mac, tmp_file) + cmd += " && cd /home/MoonGen "\ + " && ./build/MoonGen %s %s > /tmp/throughput.log &" % ( + tmp_file, port_id) + generator1.cmd_output(cmd) + + def run_moongen_up(generator2): + + cmd = 'grep "1 devices are up" /tmp/throughput.log' + if generator2.cmd_status(cmd) == 0: + return True + else: + return False + + def start_testpmd(session, exec_file, nic1_driver, nic2_driver, + nic1_pci_1, nic2_pci_2, cores, queues, running_time): + """ Start testpmd on VM """ + + cmd = "`command -v python python3` " + cmd += " %s %s %s %s %s %s %s %s > /tmp/testpmd.log" % ( + exec_file, nic1_driver, nic2_driver, + nic_pci_1, nic_pci_2, cores, queues, running_time) + session.cmd_output(cmd) + + moongen_thread = threading.Thread( + target=start_moongen, args=(generator1, mac, port_id, running_time)) + moongen_thread.start() + + if utils_misc.wait_for(lambda: run_moongen_up(generator2), 30, + text="Wait until devices is up to work"): + logging.debug("MoonGen start to work") + testpmd_thread = threading.Thread(target=start_testpmd, args=( + session, exec_file, nic1_driver, nic2_driver, + nic_pci_1, nic_pci_2, cores, queues, running_time)) + time.sleep(3) + testpmd_thread.start() + testpmd_thread.join() + moongen_thread.join() + return True + else: + return False diff --git a/qemu/tests/virtio_port_hotplug.py b/qemu/tests/virtio_port_hotplug.py index ab52edac7b6903819c746f843fa25aa178ccac56..913c721183971bb3eb7ee17bd4b0d71b6aa14145 100644 --- a/qemu/tests/virtio_port_hotplug.py +++ b/qemu/tests/virtio_port_hotplug.py @@ -2,9 +2,10 @@ import time import logging from avocado.utils import process +from virttest import utils_test from virttest import 
error_context -from qemu.tests.virtio_serial_file_transfer import transfer_data from qemu.tests.vioser_in_use import run_bg_test +from qemu.tests.virtio_serial_file_transfer import transfer_data @error_context.context_aware @@ -13,7 +14,7 @@ def run(test, params, env): Test hot unplug virtio serial devices. 1) Start guest with virtio serial device(s). - 2) Run serial data trainsfer in background(windows only) + 2) Run serial data trainsfer in background 3) Load module in guest os(linux only). 4) For each of the virtio serial ports, do following steps one by one: 4.1) Unload module in guest(linux only) @@ -28,35 +29,48 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - vm = env.get_vm(params["main_vm"]) vm.verify_alive() + os_type = params["os_type"] timeout = int(params.get("login_timeout", 360)) - if params["os_type"] == "windows": - run_bg_test(test, params, vm) - for repeat in range(int(params.get("repeat_times", 1))): - repeat += 1 + module = params.get("modprobe_module") + check_module = params.get_boolean("check_module", True) + bg_test = params.get_boolean("bg_test", True) + session = vm.wait_for_login() + if os_type == "windows": + driver_name = params["driver_name"] + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name) + if module and check_module: + error_context.context("Load module %s" % module, logging.info) + session.cmd("modprobe %s" % module) + time.sleep(1) + session.close() + + for port in params.objects("serials"): session = vm.wait_for_login(timeout=timeout) - module = params.get("modprobe_module") - if module: - error_context.context("Load module %s" % module, logging.info) - session.cmd("modprobe %s" % module) - for port in params.objects("serials"): - port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virt'): - continue - virtio_port = vm.devices.get(port) - if not virtio_port: - test.fail("Virtio Port '%s' not found" % port) - chardev_qid = virtio_port.get_param("chardev") - try: - port_chardev = vm.devices.get_by_qid(chardev_qid)[0] - except IndexError: - test.error("Failed to get device %s" % chardev_qid) - if module: + port_params = params.object_params(port) + if not port_params['serial_type'].startswith('virt'): + continue + virtio_port = vm.devices.get(port) + if not virtio_port: + test.fail("Virtio Port '%s' not found" % port) + chardev_qid = virtio_port.get_param("chardev") + try: + port_chardev = vm.devices.get_by_qid(chardev_qid)[0] + except IndexError: + test.error("Failed to get device %s" % chardev_qid) + if port_params['serial_type'] == 'virtserialport': + params['file_transfer_serial_port'] = port + if bg_test: + run_bg_test(test, params, vm) + for repeat in range(params.get_numeric("repeat_times", 1)): + repeat += 1 + if module and check_module: error_context.context("Unload module %s" % module, logging.info) session.cmd("modprobe -r %s" % module) + time.sleep(1) error_context.context("Unplug virtio port '%s' in %d tune(s)" % (port, repeat), logging.info) vm.devices.simple_unplug(virtio_port, vm.monitor) @@ -68,20 +82,21 @@ def run(test, params, env): time.sleep(0.5) vm.devices.simple_hotplug(port_chardev, vm.monitor) vm.devices.simple_hotplug(virtio_port, vm.monitor) - if module: + if module and check_module: error_context.context("Load module %s" % module, logging.info) session.cmd("modprobe %s" % module) + time.sleep(1) session.close() - host_script = params['host_script'] - check_pid_cmd 
= 'pgrep -f %s' % host_script - host_proc_pid = process.getoutput(check_pid_cmd, shell=True) - if host_proc_pid: - logging.info("Kill the first serial process on host") - result = process.system('kill -9 %s' % host_proc_pid, shell=True) - if result != 0: - logging.error("Failed to kill the first serial process on host!") - if transfer_data(params, vm) is not True: - test.fail("Serial data transfter test failed.") + host_script = params['host_script'] + check_pid_cmd = 'pgrep -f %s' + host_proc_pid = process.getoutput(check_pid_cmd % host_script, shell=True) + if host_proc_pid: + logging.info("Kill the first serial process on host") + result = process.system('kill -9 %s' % host_proc_pid, shell=True) + if result != 0: + logging.error("Failed to kill the first serial process on host!") + if transfer_data(params, vm) is not True: + test.fail("Serial data transfter test failed.") vm.reboot() vm.verify_kernel_crash() session = vm.wait_for_login(timeout=timeout) diff --git a/qemu/tests/virtio_serial_file_transfer.py b/qemu/tests/virtio_serial_file_transfer.py index 2c7518e0e178d7908ed3148722c5126904e565ba..52975eb50a7800e51bb20419e47e86ced0b8fb6a 100644 --- a/qemu/tests/virtio_serial_file_transfer.py +++ b/qemu/tests/virtio_serial_file_transfer.py @@ -171,44 +171,46 @@ def transfer_data(params, vm, host_file_name=None, guest_file_name=None, """ session = vm.wait_for_login() os_type = params["os_type"] - guest_path = params.get("guest_script_folder", "C:\\") - guest_scripts = params.get("guest_scripts", - "VirtIoChannel_guest_send_receive.py") - copy_scripts(guest_scripts, guest_path, vm) - port_name = params["file_transfer_serial_port"] - port_type, port_path = get_virtio_port_property(vm, port_name) - file_size = int(params.get("filesize", 10)) - transfer_timeout = int(params.get("transfer_timeout", 720)) - host_dir = data_dir.get_tmp_dir() - guest_dir = params.get("tmp_dir", '/var/tmp/') - host_file_size, guest_file_size, host_action, guest_action\ - = get_command_options(sender, file_size) - if not host_file_name: - host_file_name = generate_data_file(host_dir, host_file_size) - if not guest_file_name: - guest_file_name = generate_data_file( - guest_dir, guest_file_size, session) - host_script = params.get("host_script", "serial_host_send_receive.py") - host_script = os.path.join(data_dir.get_root_dir(), "shared", "deps", - "serial", host_script) - python_bin = '`command -v python python3 | head -1`' - host_cmd = ("%s %s -t %s -s %s -f %s -a %s" % - (python_bin, host_script, port_type, port_path, - host_file_name, host_action)) - guest_script = os.path.join(guest_path, params['guest_script']) - python_bin = params.get('python_bin', python_bin) - guest_cmd = ("%s %s -d %s -f %s -a %s" % - (python_bin, guest_script, - port_name, guest_file_name, guest_action)) - result = _transfer_data( - session, host_cmd, guest_cmd, transfer_timeout, sender) - if os_type == "windows": - guest_file_name = guest_file_name.replace("/", "\\") - if clean_file: - clean_cmd = params['clean_cmd'] - os.remove(host_file_name) - session.cmd('%s %s' % (clean_cmd, guest_file_name)) - session.close() + try: + guest_path = params.get("guest_script_folder", "C:\\") + guest_scripts = params.get("guest_scripts", + "VirtIoChannel_guest_send_receive.py") + copy_scripts(guest_scripts, guest_path, vm) + port_name = params["file_transfer_serial_port"] + port_type, port_path = get_virtio_port_property(vm, port_name) + file_size = int(params.get("filesize", 10)) + transfer_timeout = int(params.get("transfer_timeout", 720)) + host_dir 
= data_dir.get_tmp_dir() + guest_dir = params.get("tmp_dir", '/var/tmp/') + host_file_size, guest_file_size, host_action, guest_action \ + = get_command_options(sender, file_size) + if not host_file_name: + host_file_name = generate_data_file(host_dir, host_file_size) + if not guest_file_name: + guest_file_name = generate_data_file( + guest_dir, guest_file_size, session) + host_script = params.get("host_script", "serial_host_send_receive.py") + host_script = os.path.join(data_dir.get_root_dir(), "shared", "deps", + "serial", host_script) + python_bin = '`command -v python python3 | head -1`' + host_cmd = ("%s %s -t %s -s %s -f %s -a %s" % + (python_bin, host_script, port_type, port_path, + host_file_name, host_action)) + guest_script = os.path.join(guest_path, params['guest_script']) + python_bin = params.get('python_bin', python_bin) + guest_cmd = ("%s %s -d %s -f %s -a %s" % + (python_bin, guest_script, + port_name, guest_file_name, guest_action)) + result = _transfer_data( + session, host_cmd, guest_cmd, transfer_timeout, sender) + finally: + if os_type == "windows": + guest_file_name = guest_file_name.replace("/", "\\") + if clean_file: + clean_cmd = params['clean_cmd'] + os.remove(host_file_name) + session.cmd('%s %s' % (clean_cmd, guest_file_name)) + session.close() return result diff --git a/qemu/tests/virtio_serial_hotplug_existed_port_pci.py b/qemu/tests/virtio_serial_hotplug_existed_port_pci.py new file mode 100644 index 0000000000000000000000000000000000000000..360d55498fcf1e3c106de80b575c0d2c6097ec54 --- /dev/null +++ b/qemu/tests/virtio_serial_hotplug_existed_port_pci.py @@ -0,0 +1,41 @@ +from virttest import error_context +from virttest import utils_test +from virttest import qemu_monitor + + +@error_context.context_aware +def run(test, params, env): + """ + Add existed virtio serial port and serial bus + 1) Start guest with virtio-serial-port and virtio-serial-pci + 2) Hot-plug existed virtio-serial-port + 3) Hot plug existed virtio-serial-pci + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session = vm.wait_for_login() + if params['os_type'] == 'windows': + utils_test.qemu.windrv_check_running_verifier( + session, vm, test, 'vioser', 300) + session.close() + port = params.objects('serials')[1] + virtio_port = vm.devices.get(port) + pci_dev_id = virtio_port.params['bus'].split('.')[0] + pci_dev = vm.devices.get(pci_dev_id) + try: + virtio_port.hotplug(vm.monitor) + except qemu_monitor.QMPCmdError as e: + if 'Duplicate' not in e.data['desc']: + test.fail(e.data['desc']) + else: + test.fail('hotplugg virtserialport device should be failed') + try: + pci_dev.hotplug(vm.monitor) + except qemu_monitor.QMPCmdError as e: + if 'Duplicate' not in e.data['desc']: + test.fail(e.data['desc']) + else: + test.fail('hotplugg virtio-serial-pci device should be failed') diff --git a/qemu/tests/virtio_serial_hotplug_max_chardevs.py b/qemu/tests/virtio_serial_hotplug_max_chardevs.py new file mode 100644 index 0000000000000000000000000000000000000000..1a902c11d3a5df691780ec72c90a810858b3c5c0 --- /dev/null +++ b/qemu/tests/virtio_serial_hotplug_max_chardevs.py @@ -0,0 +1,128 @@ +import logging +import time + +from virttest import error_context +from virttest import utils_misc +from virttest import utils_test +from virttest.qemu_monitor import QMPCmdError + +from qemu.tests import driver_in_use +from qemu.tests.virtio_console import add_chardev +from qemu.tests.virtio_console import add_virtio_ports_to_vm +from qemu.tests.virtio_serial_file_transfer import transfer_data +from qemu.tests.virtio_serial_hotplug_port_pci import get_buses_and_serial_devices + + +@error_context.context_aware +def run(test, params, env): + """ + Hot-plug max chardevs on one virtio-serial-pci + + 1. Boot a guest without any device + 2. Hotplug virtio-serial-pci + 3. Hotplug 31 chardevs + 4. Hotadd 30 virtserialports attached every one chardev + 5. Transfer data between guest and host via all ports + 6. Hotplug one existed chardev + 7. Hotplug one existed virtserialport + 8. Hot-unplug virtserialport + 9. Hot-unplug chardev + + :param test: kvm test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment + """ + def run_serial_data_transfer(): + """ + Transfer data via every virtserialport. 
+ """ + for serial_port in serials: + port_params = params.object_params(serial_port) + if not port_params['serial_type'].startswith('virtserial'): + continue + logging.info("transfer data with port %s" % serial_port) + params['file_transfer_serial_port'] = serial_port + transfer_data(params, vm, sender='both') + + def run_bg_test(): + """ + Set the operation of transferring data as background + :return: return the background case thread if it's successful; + else raise error + """ + stress_thread = utils_misc.InterruptedThread(run_serial_data_transfer) + stress_thread.start() + if not utils_misc.wait_for(lambda: driver_in_use.check_bg_running( + vm, params), check_bg_timeout, 0, 1): + test.fail("Backgroud test is not alive!") + return stress_thread + + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + os_type = params["os_type"] + check_bg_timeout = float(params.get('check_bg_timeout', 120)) + num_chardev = int(params.get("numberic_chardev")) + num_serial_ports = int(params.get("virtio_serial_ports")) + sleep_time = float(params.get('sleep_time', 0.5)) + for i in range(1, num_chardev): + params["extra_chardevs"] += ' channel%d' % i + serial_name = 'port%d' % (i-1) + params['extra_serials'] = '%s %s' % (params.get('extra_serials', ''), + serial_name) + params['serial_type_%s' % serial_name] = "virtserialport" + char_devices = add_chardev(vm, params) + serials = params.objects('extra_serials') + buses, serial_devices = get_buses_and_serial_devices( + vm, params, char_devices, serials) + vm.devices.simple_hotplug(buses[0], vm.monitor) + for i in range(0, num_chardev): + vm.devices.simple_hotplug(char_devices[i], vm.monitor) + if i < num_serial_ports: + vm.devices.simple_hotplug(serial_devices[i], vm.monitor) + time.sleep(sleep_time) + for device in serial_devices: + add_virtio_ports_to_vm(vm, params, device) + if os_type == "windows": + driver_name = params["driver_name"] + session = vm.wait_for_login() + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name) + thread_transfer = run_bg_test() + + error_context.context("hotplug existed virtserialport and chardev", + logging.info) + try: + serial_devices[0].hotplug(vm.monitor) + except QMPCmdError as e: + if "Duplicate ID '%s' for device" % serial_devices[0] not in str( + e.data): + msg = ("Should fail to hotplug device %s with error Duplicate" + % serial_devices[0]) + test.fail(msg) + else: + msg = ("The device %s shoudn't be hotplugged successfully" + % serial_devices[0]) + test.fail(msg) + + try: + char_devices[0].hotplug(vm.monitor) + except QMPCmdError as e: + if "attempt to add duplicate property '%s'" % char_devices[0] \ + not in str(e.data): + msg = ("Should fail to hotplug device %s with error Duplicate" + % char_devices[0]) + test.fail(msg) + else: + msg = ("The device %s shoudn't be hotplugged successfully" + % char_devices[0]) + test.fail(msg) + + thread_transfer.join() + if not thread_transfer.is_alive(): + error_context.context("hot-unplug all virtserialport and chardev", + logging.info) + for i in range(0, num_chardev): + if i < num_serial_ports: + vm.devices.simple_unplug(serial_devices[i], vm.monitor) + vm.devices.simple_unplug(char_devices[i], vm.monitor) + vm.verify_kernel_crash() diff --git a/qemu/tests/virtio_serial_hotplug_port_pci.py b/qemu/tests/virtio_serial_hotplug_port_pci.py index bf704440fc38ef7f24076557eeb2bee24ab2d0fc..7db41c79f58f2d22fd7784e5424e83221975e200 100644 --- a/qemu/tests/virtio_serial_hotplug_port_pci.py +++ b/qemu/tests/virtio_serial_hotplug_port_pci.py @@ 
-138,7 +138,7 @@ def run(test, params, env): vm.devices.simple_unplug(serial_devices[1], vm.monitor) out = vm.devices.simple_unplug(buses[0], vm.monitor) if out[1] is False: - msg = "Still get %s in qtree after unplug" % device + msg = "Hot-unplug device %s failed" % buses[0] test.fail(msg) if interrupt_test_after_unplug: logging.info("Run %s after hot-unplug" diff --git a/qemu/tests/virtio_trace_pipenb.py b/qemu/tests/virtio_trace_pipenb.py new file mode 100644 index 0000000000000000000000000000000000000000..83981581e91f481d0c56c002459edffc6fc06724 --- /dev/null +++ b/qemu/tests/virtio_trace_pipenb.py @@ -0,0 +1,48 @@ +import time +import os +import errno + +from virttest import error_context + + +@error_context.context_aware +def run(test, params, env): + """ + Under named-pipe non-blocking testing: + 1) Create pipe named by the following + 2) Boot up a single-CPU guest with a virtio-serial device and + named-pipe chardev backend + 3) Write data to the virtio-serial port until the guest stops. + 4) check whether guest can work. + 5) Read the named-pipe file on the host. + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + timeout = float(params.get("login_timeout", 360)) + vm = env.get_vm(params["main_vm"]) + serials = params["serials"].split() + v_path = vm.get_serial_console_filename(serials[-1]) + vm.verify_alive() + session = vm.wait_for_login(timeout=timeout) + out_put = session.cmd_output("nohup cat /proc/kallsyms > /dev/virtio-ports/vs2 2>&1 &") + time.sleep(10) + if session.cmd_output("date") is None: + test.fail("Guest shouldn't be blocked and a date should output!") + guest_pid = out_put.split()[1] + pipe = os.open(v_path, os.O_RDONLY | os.O_NONBLOCK) + while True: + try: + os.read(pipe, 1) + except OSError as e: + if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK: + time.sleep(5) + break + else: + raise Exception("Read data in host failed as %s" % e) + + if not session.cmd_status("ps -p %s" % guest_pid, safe=True): + test.fail("send process in guest does not exit after all data are read out in host") + vm.verify_alive() + vm.verify_kernel_crash() diff --git a/qemu/tests/win_nics_teaming.py b/qemu/tests/win_nics_teaming.py new file mode 100644 index 0000000000000000000000000000000000000000..f211ccafcee8119cda2a76d956bb1c789b68ef17 --- /dev/null +++ b/qemu/tests/win_nics_teaming.py @@ -0,0 +1,114 @@ +import logging +import time +import os +import random + +import aexpect + +from avocado.utils import crypto, process +from virttest import utils_net +from virttest import utils_misc + + +def run(test, params, env): + """ + Nic teaming test in guest. + + 1) Start guest with four nic devices. + 2) Setup Team in guest. + 3) Execute file transfer from host to guest. + 4) Repeatedly set enable/disable interfaces by 'netsh interface set' + 5) Execute file transfer from guest to host. + + :param test: Kvm test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. 
+ """ + tmp_dir = params["tmp_dir"] + filesize = params.get_numeric("filesize") + dd_cmd = params["dd_cmd"] + delete_cmd = params["delete_cmd"] + login_timeout = params.get_numeric("login_timeout", 1200) + vm = env.get_vm(params["main_vm"]) + vm.verify_alive() + session_serial = vm.wait_for_serial_login(timeout=login_timeout) + nics = params.objects("nics") + ifnames = () + for i in range(len(nics)): + mac = vm.get_mac_address(i) + connection_id = utils_net.get_windows_nic_attribute(session_serial, + "macaddress", + mac, + "netconnectionid") + ifnames += (connection_id,) + + # get params of teaming + setup_cmd = params["setup_cmd"] + status, output = session_serial.cmd_status_output(setup_cmd % ifnames) + if status: + test.fail("Failed to setup team nic from powershell," + "status=%s, output=%s" % (status, output)) + + # prepare test data + guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) + host_path = os.path.join(test.tmpdir, "tmp-%s" % + utils_misc.generate_random_string(8)) + logging.info("Test setup: Creating %dMB file on host", filesize) + process.run(dd_cmd % host_path, shell=True) + + try: + netsh_set_cmd = "netsh interface set interface \"%s\" %s" + # transfer data + original_md5 = crypto.hash_file(host_path, algorithm="md5") + logging.info("md5 value of data original: %s" % original_md5) + logging.info("Failover test with file transfer") + transfer_thread = utils_misc.InterruptedThread( + vm.copy_files_to, (host_path, guest_path)) + transfer_thread.start() + try: + while transfer_thread.isAlive(): + for ifname in ifnames: + session_serial.cmd(netsh_set_cmd % (ifname, "disable")) + time.sleep(random.randint(1, 30)) + session_serial.cmd(netsh_set_cmd % (ifname, "enable")) + time.sleep(random.randint(1, 30)) + except aexpect.ShellProcessTerminatedError: + transfer_thread.join(suppress_exception=True) + raise + else: + transfer_thread.join() + + os.remove(host_path) + logging.info('Cleaning temp file on host') + logging.info("Failover test 2 with file transfer") + transfer_thread = utils_misc.InterruptedThread( + vm.copy_files_from, (guest_path, host_path)) + transfer_thread.start() + try: + nic_num = len(ifnames) + index = 0 + while transfer_thread.isAlive(): + index = index % nic_num + for i in range(nic_num): + session_serial.cmd(netsh_set_cmd % (ifnames[i], "enable")) + for j in range(nic_num): + if i != j: + session_serial.cmd( + netsh_set_cmd % (ifnames[j], "disable")) + time.sleep(random.randint(1, 5)) + index += 1 + except aexpect.ShellProcessTerminatedError: + transfer_thread.join(suppress_exception=True) + raise + else: + transfer_thread.join() + current_md5 = crypto.hash_file(host_path, algorithm="md5") + logging.info("md5 value of data current: %s" % current_md5) + if original_md5 != current_md5: + test.fail("File changed after transfer host -> guest " + "and guest -> host") + finally: + os.remove(host_path) + session_serial.cmd(delete_cmd % guest_path, + timeout=login_timeout, ignore_all_errors=True) + session_serial.close() diff --git a/qemu/tests/win_virtio_driver_update_test.py b/qemu/tests/win_virtio_driver_update_test.py index 03585f06f186d3bf6546f28faf897c120fa2a490..a1898b5fe140eef95d11abd3018229389b84b7e0 100644 --- a/qemu/tests/win_virtio_driver_update_test.py +++ b/qemu/tests/win_virtio_driver_update_test.py @@ -3,6 +3,7 @@ import logging from virttest import error_context from virttest import utils_misc from virttest import data_dir +from virttest import utils_test from qemu.tests import single_driver_install @@ -26,8 +27,6 @@ def 
run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - vm = env.get_vm(params["main_vm"]) - vm.verify_alive() def change_virtio_media(cdrom_virtio): """ @@ -39,13 +38,20 @@ def run(test, params, env): logging.info("Changing virtio iso image to '%s'" % virtio_iso) vm.change_media("drive_virtio", virtio_iso) + vm = env.get_vm(params["main_vm"]) + timeout = int(params.get("login_timeout", 360)) + driver = params["driver_name"] + error_context.context("Enable driver verifier in guest.", logging.info) + session = vm.wait_for_login(timeout=timeout) + session = utils_test.qemu.windrv_check_running_verifier(session, vm, + test, driver, + timeout) + session.close() if params.get("need_uninstall") != "yes": error_context.context("Downgrade virtio driver", logging.info) change_virtio_media(params["cdrom_virtio_downgrade"]) single_driver_install.run(test, params, env) - error_context.context("Reboot guest after downgrade virtio driver", - logging.info) - vm.reboot() + # vm is rebooted in single driver install function error_context.context("Upgrade virtio driver to original", logging.info) change_virtio_media(params["cdrom_virtio"]) diff --git a/qemu/tests/x86_cpu_L3_cache.py b/qemu/tests/x86_cpu_L3_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..a35340b62a883e4ff5f701dd661f27dcbf83d449 --- /dev/null +++ b/qemu/tests/x86_cpu_L3_cache.py @@ -0,0 +1,78 @@ +import re +import logging + +from virttest import env_process +from virttest import error_context +from virttest import utils_misc +from virttest import utils_qemu + + +@error_context.context_aware +def run(test, params, env): + """ + Check L3 cache present to guest + + 1. boot guest with latest machine_type, + checking L3 cache presents inside guest. + 2. Boot guest with old machine type(rhel7.3.0), + L3 cache shouldn't present inside guest. + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + + def boot_and_check_guest(machine_type, check_L3=False): + """ + Boot guest and check L3 cache inside guest + + :param machine_type: Boot guest with which machine type + :param check_L3: if L3 cache should exist on guest + """ + params['machine_type'] = machine_type + params['start_vm'] = 'yes' + vm_name = params['main_vm'] + L3_existence = 'present' if check_L3 else 'not present' + logging.info('Boot guest with machine type %s and expect L3 cache %s' + ' inside guest' % (machine_type, L3_existence)) + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + session = vm.wait_for_login() + output = session.cmd_output('lscpu') + session.close() + vm.destroy() + L3_present = 'L3 cache' in output + if check_L3 ^ L3_present: + test.fail('L3 cache should %s inside guest for machine type %s' % + (L3_existence, machine_type)) + + def check_version(latest_machine): + """ + Check if the latest supported machine type is newer than the defined + old machine type, cancel the test if not. 
+
+        :param latest_machine: The latest machine type
+        """
+        latest_ver = re.findall(r'\d+\.\d+', latest_machine)[0]
+        old_ver = re.findall(r'\d+\.\d+', old_machine)[0]
+        if float(latest_ver) <= float(old_ver):
+            test.cancel('The latest supported machine type does not'
+                        ' support this test case.')
+
+    old_machine = params['old_machine']
+    machine_type = params['machine_type']
+    qemu_bin = utils_misc.get_qemu_binary(params)
+    machine_types = utils_qemu.get_supported_machines_list(qemu_bin)
+    m_keyword = 'q35' if 'q35' in machine_type else 'i440fx'
+    for m_type in machine_types:
+        if m_keyword in m_type and m_type != m_keyword:
+            check_version(m_type)
+            boot_and_check_guest(m_type, True)
+            break
+
+    for m_type in machine_types:
+        if old_machine in m_type and m_keyword in m_type:
+            boot_and_check_guest(m_type)
+            break
+    else:
+        logging.warning('Old machine type is not supported, skip checking.')
diff --git a/qemu/tests/x86_cpu_flag_disable.py b/qemu/tests/x86_cpu_flag_disable.py
new file mode 100644
index 0000000000000000000000000000000000000000..40232e27959b7b6c700df3d67bf036f9f88eba41
--- /dev/null
+++ b/qemu/tests/x86_cpu_flag_disable.py
@@ -0,0 +1,43 @@
+import logging
+
+from virttest import env_process
+from virttest import error_context
+
+from provider.cpu_utils import check_cpu_flags
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Test cpu flags.
+    1) Check if the current flags are in the supported lists; if not, cancel test
+    2) Otherwise, boot guest with the cpu flags disabled
+    3) Check cpu flags inside guest (only for linux guest)
+    4) Check kvmclock inside guest (only for linux guest)
+
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+    """
+
+    flag = params["flags"]
+    check_host_flags = params.get_boolean("check_host_flags")
+    if check_host_flags:
+        check_cpu_flags(params, flag, test)
+
+    params["start_vm"] = "yes"
+    vm_name = params['main_vm']
+    env_process.preprocess_vm(test, params, env, vm_name)
+
+    vm = env.get_vm(vm_name)
+    error_context.context("Try to log into guest", logging.info)
+    session = vm.wait_for_login()
+    check_cpu_flags(params, '', test, session)
+
+    check_clock = params.get("check_clock")
+    vm_clock_out = session.cmd_output(check_clock).split()
+    if 'kvmclock' in vm_clock_out:
+        test.fail("kvmclock shouldn't be found inside guest")
+
+    vm.verify_kernel_crash()
+    session.close()
diff --git a/qemu/tests/x86_cpu_flag_nonstop_tsc.py b/qemu/tests/x86_cpu_flag_nonstop_tsc.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3d520b947c6df3a24c6bcd5fcde4d820658e300
--- /dev/null
+++ b/qemu/tests/x86_cpu_flag_nonstop_tsc.py
@@ -0,0 +1,48 @@
+import logging
+
+from virttest import env_process
+from virttest import error_context
+
+from provider.cpu_utils import check_cpu_flags
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Test cpu flag nonstop_tsc.
+    1) Check if the current flags are in the supported lists on host; if not, cancel test
+    2) Otherwise, boot guest with the cpu flag 'invtsc'
+    3) Check cpu flags inside guest (only for linux guest and not for RHEL6)
+    4) Check tsc inside guest (only for linux guest)
+
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+ """ + + flag = params["flags"] + check_host_flags = params.get_boolean("check_host_flags") + if check_host_flags: + check_cpu_flags(params, flag, test) + + params["start_vm"] = "yes" + vm_name = params['main_vm'] + env_process.preprocess_vm(test, params, env, vm_name) + + vm = env.get_vm(vm_name) + error_context.context("Try to log into guest", logging.info) + session = vm.wait_for_login() + if params["os_type"] == "linux": + if params['os_variant'] != 'rhel6': + check_cpu_flags(params, flag, test, session) + check_clock = params["check_clock"] + check_clock_out = session.cmd_status(check_clock) + if check_clock_out: + test.fail("tsc can't be found inside guest") + + if params.get("reboot_method"): + error_context.context("Reboot guest '%s'." % vm.name, logging.info) + session = vm.reboot(session=session) + + vm.verify_kernel_crash() + session.close() diff --git a/qemu/tests/x86_cpu_flags.py b/qemu/tests/x86_cpu_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..216752640820bb724d2e0b834637dbc31d2b84ae --- /dev/null +++ b/qemu/tests/x86_cpu_flags.py @@ -0,0 +1,47 @@ +import logging + +from virttest import error_context, env_process +from provider.cpu_utils import check_cpu_flags + + +@error_context.context_aware +def run(test, params, env): + """ + Test cpu flags. + 1) Check if current flags are in the supported lists on host, if no, cancel test + 2) Otherwise, boot guest with the cpu flags + 3) Check cpu flags inside guest(only for linux guest) + 4) Reboot guest + + :param test: QEMU test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + flags = params["flags"] + check_host_flags = params.get_boolean("check_host_flags") + if check_host_flags: + check_cpu_flags(params, flags, test) + + params["start_vm"] = "yes" + vm_name = params['main_vm'] + env_process.preprocess_vm(test, params, env, vm_name) + + vm = env.get_vm(vm_name) + error_context.context("Try to log into guest", logging.info) + session = vm.wait_for_login() + if params["os_type"] == "linux": + check_guest_cmd = params.get("check_guest_cmd") + check_cpu_flags(params, flags, test, session) + if check_guest_cmd: + expect_items = params.get("expect_items") + if expect_items: + result = session.cmd_status(check_guest_cmd % expect_items) + if result: + test.fail("'%s' can't be found inside guest" % expect_items) + + if params.get("reboot_method"): + error_context.context("Reboot guest '%s'." % vm.name, logging.info) + session = vm.reboot(session=session) + + vm.verify_kernel_crash() + session.close() diff --git a/qemu/tests/x86_cpu_model.py b/qemu/tests/x86_cpu_model.py index 304353113beae5347be82fd505cd78d46e26a8fd..8dbe1dd808ddb82b70588ad9ac728e39f7fe9ef9 100644 --- a/qemu/tests/x86_cpu_model.py +++ b/qemu/tests/x86_cpu_model.py @@ -2,8 +2,11 @@ import logging import re import json -from avocado.utils import cpu, process +from avocado.utils import cpu +from avocado.utils import process + from virttest import error_context, utils_misc, env_process +from provider.cpu_utils import check_cpu_flags @error_context.context_aware @@ -36,7 +39,7 @@ def run(test, params, env): model = params["model"] model_pattern = params["model_pattern"] flags = params["flags"] - if cpu.get_cpu_vendor_name() == 'intel': + if cpu.get_vendor() == 'intel': model_ib = "%s-IBRS" % model flag_ib = " ibpb ibrs" name_ib = ", IBRS( update)?" 
@@ -72,16 +75,27 @@ def run(test, params, env): test.fail("Guest cpu model is not right") if params["os_type"] == "linux": - error_context.context("Check cpu flags inside guest", logging.info) - cmd = params["check_flag_cmd"] - out = session.cmd_output(cmd).split() - missing = [f for f in flags.split() if f not in out] - if missing: - test.fail("Flag %s not in guest" % missing) + check_cmd = params.get("check_cmd") + if check_cmd: + vul_host = process.getoutput(params.get("vulnerabilities")).split() + vul_guest = session.cmd_output(params.get("vulnerabilities")).split() + vulnerabilities = list(set(vul_host).intersection(set(vul_guest))) + check_items = params.get("check_items").split() + expect_result = params.get("expect_result") + for item in vulnerabilities: + h_out = re.search("Vulnerable|Mitigation|Not affected", + process.getoutput(check_cmd % item))[0] + g_out = re.search("Vulnerable|Mitigation|Not affected", + session.cmd_output(check_cmd % item))[0] + if h_out != g_out: + test.fail("Guest is not equal to Host with '%s'" % item) + if item in check_items and g_out != expect_result: + test.fail("'%s' can't get '%s'" % (item, expect_result)) + check_cpu_flags(params, flags, test, session) if params.get("reboot_method"): error_context.context("Reboot guest '%s'." % vm.name, logging.info) - vm.reboot(session=session) + session = vm.reboot(session=session) vm.verify_kernel_crash() session.close() diff --git a/qemu/tests/x86_cpu_test_dies.py b/qemu/tests/x86_cpu_test_dies.py new file mode 100644 index 0000000000000000000000000000000000000000..4ccc760a32d1896e592e5f05a40a462273e3b921 --- /dev/null +++ b/qemu/tests/x86_cpu_test_dies.py @@ -0,0 +1,45 @@ +import random + +from avocado.utils import cpu + +from virttest import error_context +from virttest import env_process + + +@error_context.context_aware +def run(test, params, env): + """ + Check guest gets correct multiple vcpu dies + + 1) Boot guest with options: -smp n,dies=2x... + 2) Check cpu dies(only for Linux guest and Intel host) + + :param test: QEMU test object. + :param params: Dictionary with test parameters. + :param env: Dictionary with the test environment. + """ + vm_name = params['main_vm'] + vcpu_dies_list = [2, 4] + params['vcpu_dies'] = random.choice(vcpu_dies_list) + params['start_vm'] = 'yes' + env_process.preprocess_vm(test, params, env, vm_name) + vm = env.get_vm(vm_name) + session = vm.wait_for_login() + if params["os_type"] == "linux" and cpu.get_vendor() == 'intel': + check_die_id = params['check_die_id'] + check_die_cpus_list = params['check_die_cpus_list'] + vcpu_sockets = vm.cpuinfo.sockets + vcpu_dies = vm.cpuinfo.dies + dies_id = session.cmd_output(check_die_id).strip().split('\n') + dies_cpus_list = session.cmd_output( + check_die_cpus_list).strip().split('\n') + if len(dies_id) != int(vcpu_dies): + test.fail("die_id is not right: %d != %d" + % (len(dies_id), int(vcpu_dies))) + if len(dies_cpus_list) != int(vcpu_sockets)*int(vcpu_dies): + test.fail("die_cpus_list is not right: %d != %d" + % (len(dies_cpus_list), int(vcpu_sockets)*int(vcpu_dies))) + + vm.verify_kernel_crash() + session.close() + vm.destroy()