Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Move virtiofs managedsave cases to positive cases part #6092

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions libvirt/tests/cfg/virtual_device/filesystem_device.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,11 @@
stdio_handler = "logd"
- detach_device:
detach_device_alias = "no"
- managedsave:
only xattr_on.cache_mode_auto..one_fs
s390-virtio:
kvm_module_parameters = "hpage=1"
managedsave = "yes"
variants:
- positive_test:
status_error = "no"
Expand Down Expand Up @@ -130,9 +135,3 @@
- larger_than_1024:
queue_size = 2048
error_msg_start = "queue-size property must be 1024 or smaller"
- managedsave:
only nop
s390-virtio:
kvm_module_parameters = "hpage=1"
managedsave = "yes"
error_msg_save = "migration with virtiofs device is not supported"
179 changes: 91 additions & 88 deletions libvirt/tests/src/virtual_device/filesystem_device.py
Original file line number Diff line number Diff line change
Expand Up @@ -366,95 +366,98 @@ def check_filesystem_hotplug_with_mem_setup():
cmd = 'ps aux | grep /usr/libexec/virtiofsd'
utils_test.libvirt.check_cmd_output(cmd, content=expected_results)

if managedsave:
expected_error = error_msg_save
result = virsh.managedsave(vm_names[0], ignore_status=True, debug=True)
utils_test.libvirt.check_exit_status(result, expected_error)
else:
shared_data(vm_names, fs_devs)
if suspend_resume:
virsh.suspend(vm_names[0], debug=True, ignore_status=False)
time.sleep(30)
virsh.resume(vm_names[0], debug=True, ignore_statue=False)
elif destroy_start:
session = vm.wait_for_login(timeout=120)
# Prepare the guest test script
script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
script_content %= (fs_devs[0].source["dir"], fs_devs[0].source["dir"])
prepare_stress_script(script_path, script_content)
# Run guest stress script
stress_script_thread = threading.Thread(target=run_stress_script,
args=(session, script_path))
stress_script_thread.setDaemon(True)
stress_script_thread.start()
# Create a lot of unlink files
time.sleep(60)
virsh.destroy(vm_names[0], debug=True, ignore_status=False)
ret = virsh.start(vm_names[0], debug=True)
libvirt.check_exit_status(ret)
elif edit_start:
vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[0])
if vm.is_alive():
virsh.destroy(vm_names[0])
cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[0]
cmd_result = process.run(cmd, ignore_status=True, shell=True)
logging.debug(virsh.dumpxml(vm_names[0]))
if cmd_result.exit_status:
test.error("virt-xml edit guest failed: %s" % cmd_result)
result = virsh.start(vm_names[0], ignore_status=True, debug=True)
if error_msg_start:
expected_fails_msg.append(error_msg_start)
utils_test.libvirt.check_result(result, expected_fails=expected_fails_msg)
if not libvirt_version.version_compare(6, 10, 0):
# Because of bug #1897105, it was fixed in libvirt-6.10.0,
# before this version, need to recover the env manually.
cmd = "pkill virtiofsd"
process.run(cmd, shell=True)
if not vm.is_alive():
# Restoring vm and check if vm can start successfully
vmxml_virtio_backup.sync()
virsh.start(vm_names[0], ignore_status=False, shell=True)
elif socket_file_checking:
result = virsh.domid(vm_names[0])
domid = result.stdout.strip()
domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[0]
if result.exit_status:
test.fail("Get domid failed.")
for fs_dev in fs_devs:
shared_data(vm_names, fs_devs)
if suspend_resume:
virsh.suspend(vm_names[0], debug=True, ignore_status=False)
time.sleep(30)
virsh.resume(vm_names[0], debug=True, ignore_statue=False)
elif managedsave:
virsh.managedsave(vm_names[0], ignore_status=True, debug=True)
save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_names[0]
if not os.path.exists(save_file):
test.fail("guest is not manangedsaved")
virsh.start(vm_names[0], ignore_status=True, debug=True)
if os.path.exists(save_file):
test.fail("guest is not restored from the managedsave file")
elif destroy_start:
session = vm.wait_for_login(timeout=120)
# Prepare the guest test script
script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
script_content %= (fs_devs[0].source["dir"], fs_devs[0].source["dir"])
prepare_stress_script(script_path, script_content)
# Run guest stress script
stress_script_thread = threading.Thread(target=run_stress_script,
args=(session, script_path))
stress_script_thread.setDaemon(True)
stress_script_thread.start()
# Create a lot of unlink files
time.sleep(60)
virsh.destroy(vm_names[0], debug=True, ignore_status=False)
ret = virsh.start(vm_names[0], debug=True)
libvirt.check_exit_status(ret)
elif edit_start:
vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[0])
if vm.is_alive():
virsh.destroy(vm_names[0])
cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[0]
cmd_result = process.run(cmd, ignore_status=True, shell=True)
logging.debug(virsh.dumpxml(vm_names[0]))
if cmd_result.exit_status:
test.error("virt-xml edit guest failed: %s" % cmd_result)
result = virsh.start(vm_names[0], ignore_status=True, debug=True)
if error_msg_start:
expected_fails_msg.append(error_msg_start)
utils_test.libvirt.check_result(result, expected_fails=expected_fails_msg)
if not libvirt_version.version_compare(6, 10, 0):
# Because of bug #1897105, it was fixed in libvirt-6.10.0,
# before this version, need to recover the env manually.
cmd = "pkill virtiofsd"
process.run(cmd, shell=True)
if not vm.is_alive():
# Restoring vm and check if vm can start successfully
vmxml_virtio_backup.sync()
virsh.start(vm_names[0], ignore_status=False, shell=True)
elif socket_file_checking:
result = virsh.domid(vm_names[0])
domid = result.stdout.strip()
domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[0]
if result.exit_status:
test.fail("Get domid failed.")
for fs_dev in fs_devs:
alias = fs_dev.alias['name']
expected_pid = domain_dir + alias + '-fs.pid'
expected_sock = alias + '-fs.sock'
status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status
status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status
if not (status1 and status2):
test.fail("The socket and pid file is not as expected")
elif hotplug_unplug:
for vm in vms:
umount_fs(vm)
for fs_dev in fs_devs:
if detach_device_alias:
utils_package.package_install("lsof")
alias = fs_dev.alias['name']
expected_pid = domain_dir + alias + '-fs.pid'
expected_sock = alias + '-fs.sock'
status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status
status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status
if not (status1 and status2):
test.fail("The socket and pid file is not as expected")
elif hotplug_unplug:
for vm in vms:
umount_fs(vm)
for fs_dev in fs_devs:
if detach_device_alias:
utils_package.package_install("lsof")
alias = fs_dev.alias['name']
cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (vm.name, alias)
output = process.run(cmd).stdout_text.splitlines()
for item in output[1:]:
if stdio_handler_file:
if item.split()[0] != "virtiofsd":
test.fail("When setting stdio_handler as file, the command"
"to write log should be virtiofsd!")
else:
if item.split()[0] != "virtlogd":
test.fail("When setting stdio_handler as logd, the command"
"to write log should be virtlogd!")
ret = virsh.detach_device_alias(vm.name, alias, ignore_status=True,
debug=True, wait_for_event=True,
event_timeout=10)
else:
ret = virsh.detach_device(vm.name, fs_dev.xml, ignore_status=True,
debug=True, wait_for_event=True)
libvirt.check_exit_status(ret, status_error)
check_filesystem_in_guest(vm, fs_dev)
check_detached_xml(vm)
cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (vm.name, alias)
output = process.run(cmd).stdout_text.splitlines()
for item in output[1:]:
if stdio_handler_file:
if item.split()[0] != "virtiofsd":
test.fail("When setting stdio_handler as file, the command"
"to write log should be virtiofsd!")
else:
if item.split()[0] != "virtlogd":
test.fail("When setting stdio_handler as logd, the command"
"to write log should be virtlogd!")
ret = virsh.detach_device_alias(vm.name, alias, ignore_status=True,
debug=True, wait_for_event=True,
event_timeout=10)
else:
ret = virsh.detach_device(vm.name, fs_dev.xml, ignore_status=True,
debug=True, wait_for_event=True)
libvirt.check_exit_status(ret, status_error)
check_filesystem_in_guest(vm, fs_dev)
check_detached_xml(vm)
finally:
for vm in vms:
alias = fs_dev.alias['name']
Expand Down