path: root/results/classifier/004/KVM
Diffstat (limited to 'results/classifier/004/KVM')
-rw-r--r--  results/classifier/004/KVM/04472277   584
-rw-r--r--  results/classifier/004/KVM/26430026   173
-rw-r--r--  results/classifier/004/KVM/43643137   546
-rw-r--r--  results/classifier/004/KVM/71456293  1494
-rw-r--r--  results/classifier/004/KVM/80615920   356
5 files changed, 3153 insertions, 0 deletions
diff --git a/results/classifier/004/KVM/04472277 b/results/classifier/004/KVM/04472277
new file mode 100644
index 00000000..e3118005
--- /dev/null
+++ b/results/classifier/004/KVM/04472277
@@ -0,0 +1,584 @@
+KVM: 0.890
+device: 0.849
+network: 0.847
+graphic: 0.846
+other: 0.846
+instruction: 0.845
+assembly: 0.841
+boot: 0.831
+vnc: 0.828
+socket: 0.824
+mistranslation: 0.817
+semantic: 0.815
+
+[BUG][KVM_SET_USER_MEMORY_REGION] KVM_SET_USER_MEMORY_REGION failed
+
+Hi all,
+I started a VM in OpenStack (OpenStack uses libvirt to start the QEMU VM), but the log now shows this ERROR.
+Does anyone know what causes this?
+The ERROR log is from /var/log/libvirt/qemu/instance-0000000e.log:
+```
+2023-03-14T10:09:17.674114Z qemu-system-x86_64: kvm_set_user_memory_region: KVM_SET_USER_MEMORY_REGION failed, slot=4, start=0xfffffffffe000000, size=0x2000: Invalid argument
+kvm_set_phys_mem: error registering slot: Invalid argument
+2023-03-14 10:09:18.198+0000: shutting down, reason=crashed
+```
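+
+(Aside, for context: a minimal C sketch — not from the original report — of the ioctl
+that is failing here. struct kvm_userspace_memory_region and KVM_SET_USER_MEMORY_REGION
+are the kernel's KVM API from <linux/kvm.h>; KVM returns EINVAL for a slot it cannot
+accommodate, which is presumably what happens with start=0xfffffffffe000000.)
+```
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+int main(void)
+{
+    /* Sketch only: no error handling on the fds. */
+    int kvm = open("/dev/kvm", O_RDWR);
+    int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
+
+    struct kvm_userspace_memory_region region = {
+        .slot            = 4,
+        .guest_phys_addr = 0xfffffffffe000000ULL, /* same start as the failing log line */
+        .memory_size     = 0x2000,
+        .userspace_addr  = 0,                     /* normally a mmap'ed host buffer */
+    };
+
+    /* The call that fails with "Invalid argument" in the log above. */
+    if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) < 0)
+        perror("KVM_SET_USER_MEMORY_REGION");
+    return 0;
+}
+```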
+The xml file
+```
+root@c1c2:~# cat /etc/libvirt/qemu/instance-0000000e.xml
+<!--
+WARNING: THIS IS AN AUTO-GENERATED FILE. CHANGES TO IT ARE LIKELY TO BE
+OVERWRITTEN AND LOST. Changes to this xml configuration should be made using:
+  virsh edit instance-0000000e
+or other application using the libvirt API.
+-->
+<domain type='kvm'>
+  <name>instance-0000000e</name>
+  <uuid>ff91d2dc-69a1-43ef-abde-c9e4e9a0305b</uuid>
+  <metadata>
+    <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
+      <nova:package version="25.1.0"/>
+      <nova:name>provider-instance</nova:name>
+      <nova:creationTime>2023-03-14 10:09:13</nova:creationTime>
+      <nova:flavor name="cirros-os-dpu-test-1">
+        <nova:memory>64</nova:memory>
+        <nova:disk>1</nova:disk>
+        <nova:swap>0</nova:swap>
+        <nova:ephemeral>0</nova:ephemeral>
+        <nova:vcpus>1</nova:vcpus>
+      </nova:flavor>
+      <nova:owner>
+        <nova:user uuid="ff627ad39ed94479b9c5033bc462cf78">admin</nova:user>
+        <nova:project uuid="512866f9994f4ad8916d8539a7cdeec9">admin</nova:project>
+      </nova:owner>
+      <nova:root type="image" uuid="9e58cb69-316a-4093-9f23-c1d1bd8edffe"/>
+      <nova:ports>
+        <nova:port uuid="77c1dc00-af39-4463-bea0-12808f4bc340">
+          <nova:ip type="fixed" address="172.1.1.43" ipVersion="4"/>
+        </nova:port>
+      </nova:ports>
+    </nova:instance>
+  </metadata>
+  <memory unit='KiB'>65536</memory>
+  <currentMemory unit='KiB'>65536</currentMemory>
+  <vcpu placement='static'>1</vcpu>
+  <sysinfo type='smbios'>
+    <system>
+      <entry name='manufacturer'>OpenStack Foundation</entry>
+      <entry name='product'>OpenStack Nova</entry>
+      <entry name='version'>25.1.0</entry>
+      <entry name='serial'>ff91d2dc-69a1-43ef-abde-c9e4e9a0305b</entry>
+      <entry name='uuid'>ff91d2dc-69a1-43ef-abde-c9e4e9a0305b</entry>
+      <entry name='family'>Virtual Machine</entry>
+    </system>
+  </sysinfo>
+  <os>
+    <type arch='x86_64' machine='pc-i440fx-6.2'>hvm</type>
+    <boot dev='hd'/>
+    <smbios mode='sysinfo'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <vmcoreinfo state='on'/>
+  </features>
+  <cpu mode='host-model' check='partial'>
+    <topology sockets='1' dies='1' cores='1' threads='1'/>
+  </cpu>
+  <clock offset='utc'>
+    <timer name='pit' tickpolicy='delay'/>
+    <timer name='rtc' tickpolicy='catchup'/>
+    <timer name='hpet' present='no'/>
+  </clock>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>destroy</on_crash>
+  <devices>
+    <emulator>/usr/bin/qemu-system-x86_64</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='none'/>
+      <source file='/var/lib/nova/instances/ff91d2dc-69a1-43ef-abde-c9e4e9a0305b/disk'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </disk>
+    <controller type='usb' index='0' model='piix3-uhci'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'/>
+    <interface type='hostdev' managed='yes'>
+      <mac address='fa:16:3e:aa:d9:23'/>
+      <source>
+        <address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x5'/>
+      </source>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+    </interface>
+    <serial type='pty'>
+      <log file='/var/lib/nova/instances/ff91d2dc-69a1-43ef-abde-c9e4e9a0305b/console.log' append='off'/>
+      <target type='isa-serial' port='0'>
+        <model name='isa-serial'/>
+      </target>
+    </serial>
+    <console type='pty'>
+      <log file='/var/lib/nova/instances/ff91d2dc-69a1-43ef-abde-c9e4e9a0305b/console.log' append='off'/>
+      <target type='serial' port='0'/>
+    </console>
+    <input type='tablet' bus='usb'>
+      <address type='usb' bus='0' port='1'/>
+    </input>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>
+      <listen type='address' address='0.0.0.0'/>
+    </graphics>
+    <audio id='1' type='none'/>
+    <video>
+      <model type='virtio' heads='1' primary='yes'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <hostdev mode='subsystem' type='pci' managed='yes'>
+      <source>
+        <address domain='0x0000' bus='0x01' slot='0x00' function='0x6'/>
+      </source>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+    </hostdev>
+    <memballoon model='virtio'>
+      <stats period='10'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </memballoon>
+    <rng model='virtio'>
+      <backend model='random'>/dev/urandom</backend>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </rng>
+  </devices>
+</domain>
+```
+----
+Simon Jones
+
+This happened on Ubuntu 22.04.
+QEMU was installed via apt like this:
+apt install -y qemu qemu-kvm qemu-system
+and the QEMU version is 6.2.0.
+----
+Simon Jones
+Simon Jones <batmanustc@gmail.com> wrote on Tue, Mar 21, 2023 at 08:40:
+> ...
+
+This is full ERROR log
+2023-03-23 08:00:52.362+0000: starting up libvirt version: 8.0.0, package: 1ubuntu7.4 (Christian Ehrhardt <christian.ehrhardt@canonical.com> Tue, 22 Nov 2022 15:59:28 +0100), qemu version: 6.2.0Debian 1:6.2+dfsg-2ubuntu6.6, kernel: 5.19.0-35-generic, hostname: c1c2
+LC_ALL=C \
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin \
+HOME=/var/lib/libvirt/qemu/domain-4-instance-0000000e \
+XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-4-instance-0000000e/.local/share \
+XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-4-instance-0000000e/.cache \
+XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-4-instance-0000000e/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=instance-0000000e,debug-threads=on \
+-S \
+-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain-4-instance-0000000e/master-key.aes"}' \
+-machine pc-i440fx-6.2,usb=off,dump-guest-core=off,memory-backend=pc.ram \
+-accel kvm \
+-cpu Cooperlake,ss=on,vmx=on,pdcm=on,hypervisor=on,tsc-adjust=on,sha-ni=on,umip=on,waitpkg=on,gfni=on,vaes=on,vpclmulqdq=on,rdpid=on,movdiri=on,movdir64b=on,fsrm=on,md-clear=on,avx-vnni=on,xsaves=on,ibpb=on,ibrs=on,amd-stibp=on,amd-ssbd=on,hle=off,rtm=off,avx512f=off,avx512dq=off,avx512cd=off,avx512bw=off,avx512vl=off,avx512vnni=off,avx512-bf16=off,taa-no=off \
+-m 64 \
+-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":67108864}' \
+-overcommit mem-lock=off \
+-smp 1,sockets=1,dies=1,cores=1,threads=1 \
+-uuid ff91d2dc-69a1-43ef-abde-c9e4e9a0305b \
+-smbios 'type=1,manufacturer=OpenStack Foundation,product=OpenStack Nova,version=25.1.0,serial=ff91d2dc-69a1-43ef-abde-c9e4e9a0305b,uuid=ff91d2dc-69a1-43ef-abde-c9e4e9a0305b,family=Virtual Machine' \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=33,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc,driftfix=slew \
+-global kvm-pit.lost_tick_policy=delay \
+-no-hpet \
+-no-shutdown \
+-boot strict=on \
+-device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 \
+-blockdev '{"driver":"file","filename":"/var/lib/nova/instances/_base/8b58db82a488248e7c5e769599954adaa47a5314","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-2-format","read-only":true,"cache":{"direct":true,"no-flush":false},"driver":"raw","file":"libvirt-2-storage"}' \
+-blockdev '{"driver":"file","filename":"/var/lib/nova/instances/ff91d2dc-69a1-43ef-abde-c9e4e9a0305b/disk","node-name":"libvirt-1-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-1-format","read-only":false,"cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-1-storage","backing":"libvirt-2-format"}' \
+-device virtio-blk-pci,bus=pci.0,addr=0x3,drive=libvirt-1-format,id=virtio-disk0,bootindex=1,write-cache=on \
+-add-fd set=1,fd=34 \
+-chardev pty,id=charserial0,logfile=/dev/fdset/1,logappend=on \
+-device isa-serial,chardev=charserial0,id=serial0 \
+-device usb-tablet,id=input0,bus=usb.0,port=1 \
+-audiodev '{"id":"audio1","driver":"none"}' \
+-vnc 0.0.0.0:0,audiodev=audio1 \
+-device virtio-vga,id=video0,max_outputs=1,bus=pci.0,addr=0x2 \
+-device vfio-pci,host=0000:01:00.5,id=hostdev0,bus=pci.0,addr=0x4 \
+-device vfio-pci,host=0000:01:00.6,id=hostdev1,bus=pci.0,addr=0x5 \
+-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x6 \
+-object '{"qom-type":"rng-random","id":"objrng0","filename":"/dev/urandom"}' \
+-device virtio-rng-pci,rng=objrng0,id=rng0,bus=pci.0,addr=0x7 \
+-device vmcoreinfo \
+-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
+-msg timestamp=on
+char device redirected to /dev/pts/3 (label charserial0)
+2023-03-23T08:00:53.728550Z qemu-system-x86_64: kvm_set_user_memory_region: KVM_SET_USER_MEMORY_REGION failed, slot=4, start=0xfffffffffe000000, size=0x2000: Invalid argument
+kvm_set_phys_mem: error registering slot: Invalid argument
+2023-03-23 08:00:54.201+0000: shutting down, reason=crashed
+2023-03-23 08:54:43.468+0000: starting up libvirt version: 8.0.0, package: 1ubuntu7.4 (Christian Ehrhardt <christian.ehrhardt@canonical.com> Tue, 22 Nov 2022 15:59:28 +0100), qemu version: 6.2.0Debian 1:6.2+dfsg-2ubuntu6.6, kernel: 5.19.0-35-generic, hostname: c1c2
+LC_ALL=C \
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin \
+HOME=/var/lib/libvirt/qemu/domain-5-instance-0000000e \
+XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-5-instance-0000000e/.local/share \
+XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-5-instance-0000000e/.cache \
+XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-5-instance-0000000e/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=instance-0000000e,debug-threads=on \
+-S \
+-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain-5-instance-0000000e/master-key.aes"}' \
+-machine pc-i440fx-6.2,usb=off,dump-guest-core=off,memory-backend=pc.ram \
+-accel kvm \
+-cpu Cooperlake,ss=on,vmx=on,pdcm=on,hypervisor=on,tsc-adjust=on,sha-ni=on,umip=on,waitpkg=on,gfni=on,vaes=on,vpclmulqdq=on,rdpid=on,movdiri=on,movdir64b=on,fsrm=on,md-clear=on,avx-vnni=on,xsaves=on,ibpb=on,ibrs=on,amd-stibp=on,amd-ssbd=on,hle=off,rtm=off,avx512f=off,avx512dq=off,avx512cd=off,avx512bw=off,avx512vl=off,avx512vnni=off,avx512-bf16=off,taa-no=off \
+-m 64 \
+-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":67108864}' \
+-overcommit mem-lock=off \
+-smp 1,sockets=1,dies=1,cores=1,threads=1 \
+-uuid ff91d2dc-69a1-43ef-abde-c9e4e9a0305b \
+-smbios 'type=1,manufacturer=OpenStack Foundation,product=OpenStack Nova,version=25.1.0,serial=ff91d2dc-69a1-43ef-abde-c9e4e9a0305b,uuid=ff91d2dc-69a1-43ef-abde-c9e4e9a0305b,family=Virtual Machine' \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=33,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc,driftfix=slew \
+-global kvm-pit.lost_tick_policy=delay \
+-no-hpet \
+-no-shutdown \
+-boot strict=on \
+-device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 \
+-blockdev '{"driver":"file","filename":"/var/lib/nova/instances/_base/8b58db82a488248e7c5e769599954adaa47a5314","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-2-format","read-only":true,"cache":{"direct":true,"no-flush":false},"driver":"raw","file":"libvirt-2-storage"}' \
+-blockdev '{"driver":"file","filename":"/var/lib/nova/instances/ff91d2dc-69a1-43ef-abde-c9e4e9a0305b/disk","node-name":"libvirt-1-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-1-format","read-only":false,"cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-1-storage","backing":"libvirt-2-format"}' \
+-device virtio-blk-pci,bus=pci.0,addr=0x3,drive=libvirt-1-format,id=virtio-disk0,bootindex=1,write-cache=on \
+-add-fd set=1,fd=34 \
+-chardev pty,id=charserial0,logfile=/dev/fdset/1,logappend=on \
+-device isa-serial,chardev=charserial0,id=serial0 \
+-device usb-tablet,id=input0,bus=usb.0,port=1 \
+-audiodev '{"id":"audio1","driver":"none"}' \
+-vnc 0.0.0.0:0,audiodev=audio1 \
+-device virtio-vga,id=video0,max_outputs=1,bus=pci.0,addr=0x2 \
+-device vfio-pci,host=0000:01:00.5,id=hostdev0,bus=pci.0,addr=0x4 \
+-device vfio-pci,host=0000:01:00.6,id=hostdev1,bus=pci.0,addr=0x5 \
+-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x6 \
+-object '{"qom-type":"rng-random","id":"objrng0","filename":"/dev/urandom"}' \
+-device virtio-rng-pci,rng=objrng0,id=rng0,bus=pci.0,addr=0x7 \
+-device vmcoreinfo \
+-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
+-msg timestamp=on
+char device redirected to /dev/pts/3 (label charserial0)
+2023-03-23T08:54:44.755039Z qemu-system-x86_64: kvm_set_user_memory_region: KVM_SET_USER_MEMORY_REGION failed, slot=4, start=0xfffffffffe000000, size=0x2000: Invalid argument
+kvm_set_phys_mem: error registering slot: Invalid argument
+2023-03-23 08:54:45.230+0000: shutting down, reason=crashed
+----
+Simon Jones
+Simon Jones <batmanustc@gmail.com> wrote on Thu, Mar 23, 2023 at 05:49:
+> ...
+
diff --git a/results/classifier/004/KVM/26430026 b/results/classifier/004/KVM/26430026
new file mode 100644
index 00000000..8db2ac74
--- /dev/null
+++ b/results/classifier/004/KVM/26430026
@@ -0,0 +1,173 @@
+KVM: 0.919
+mistranslation: 0.915
+semantic: 0.904
+device: 0.904
+assembly: 0.896
+vnc: 0.893
+instruction: 0.888
+graphic: 0.862
+boot: 0.841
+socket: 0.817
+other: 0.813
+network: 0.758
+
+[BUG] cxl,i386: e820 mappings may not be correct for cxl
+
+Context included below from prior discussion
+    - `cxl create-region` would fail on inability to allocate memory
+    - traced this down to the memory region being marked RESERVED
+    - E820 map marks the CXL fixed memory window as RESERVED
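+
+(Here `cxl create-region` is the command from the ndctl/cxl-cli tooling that
+allocates a region out of the CXL fixed memory window.)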
+
+
+Re: the x86 errors, I found that the region worked with this patch. (I also
+added the SRAT patches that Davidlohr posted, but I do not think they are
+relevant.)
+
+I don't think this is correct, and setting this to E820_RAM causes the
+system to fail to boot at all, but with this change `cxl create-region`
+succeeds, which suggests our e820 mappings in the i386 machine are
+incorrect.
+
+If anyone can help, or has an idea as to what e820 should actually be
+doing with this region (or whether this is correct and something else is
+failing), please help!
+
+
+diff --git a/hw/i386/pc.c b/hw/i386/pc.c
+index 566accf7e6..a5e688a742 100644
+--- a/hw/i386/pc.c
++++ b/hw/i386/pc.c
+@@ -1077,7 +1077,7 @@ void pc_memory_init(PCMachineState *pcms,
+                 memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw,
+                                       "cxl-fixed-memory-region", fw->size);
+                 memory_region_add_subregion(system_memory, fw->base, &fw->mr);
+-                e820_add_entry(fw->base, fw->size, E820_RESERVED);
++                e820_add_entry(fw->base, fw->size, E820_NVS);
+                 cxl_fmw_base += fw->size;
+                 cxl_resv_end = cxl_fmw_base;
+             }
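+
+(For reference, a hedged aside: the e820 entry types that e820_add_entry()
+accepts, with values as defined in QEMU's include/hw/i386/e820_memory_layout.h.
+The patch above moves the fixed memory window from type 2 to type 4:)
+
+#define E820_RAM        1   /* usable RAM */
+#define E820_RESERVED   2   /* reserved, the OS must not use it */
+#define E820_ACPI       3   /* ACPI reclaimable tables */
+#define E820_NVS        4   /* ACPI NVS memory, preserved by the OS */
+#define E820_UNUSABLE   5   /* detected bad memory */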
+
+
+On Mon, Oct 10, 2022 at 05:32:42PM +0100, Jonathan Cameron wrote:
+> > but i'm not sure of what to do with this info.  We have some proof
+> > that real hardware works with this no problem, and the only difference
+> > is that the EFI/bios/firmware is setting the memory regions as `usable`
+> > or `soft reserved`, which would imply the EDK2 is the blocker here
+> > regardless of the OS driver status.
+> >
+> > But I'd seen elsewhere you had gotten some of this working, and I'm
+> > failing to get anything working at the moment.  If you have any input i
+> > would greatly appreciate the help.
+> >
+> > QEMU config:
+> >
+> > /opt/qemu-cxl2/bin/qemu-system-x86_64 \
+> > -drive file=/var/lib/libvirt/images/cxl.qcow2,format=qcow2,index=0,media=d \
+> > -m 2G,slots=4,maxmem=4G \
+> > -smp 4 \
+> > -machine type=q35,accel=kvm,cxl=on \
+> > -enable-kvm \
+> > -nographic \
+> > -device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 \
+> > -device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 \
+> > -object memory-backend-file,id=cxl-mem0,mem-path=/tmp/cxl-mem0,size=256M \
+> > -object memory-backend-file,id=lsa0,mem-path=/tmp/cxl-lsa0,size=256M \
+> > -device cxl-type3,bus=rp0,pmem=true,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 \
+> > -M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=256M
+> >
+> > I'd seen on the lists that you had seen issues with single-rp setups,
+> > but no combination of configuration I've tried (including all the ones
+> > in the docs and tests) lead to a successful region creation with
+> > `cxl create-region`
+>
+> Hmm. Let me have a play.  I've not run x86 tests for a while so
+> perhaps something is missing there.
+>
+> I'm carrying a patch to override check_last_peer() in
+> cxl_port_setup_targets() as that is wrong for some combinations,
+> but that doesn't look like it's related to what you are seeing.
+
+I'm not sure if it's relevant, but turned out I'd forgotten I'm carrying 3
+patches that aren't upstream (and one is a horrible hack).
+
+Hack:
+https://lore.kernel.org/linux-cxl/20220819094655.000005ed@huawei.com/
+Shouldn't affect a simple case like this...
+
+https://lore.kernel.org/linux-cxl/20220819093133.00006c22@huawei.com/T/#t
+(Dan's version)
+
+https://lore.kernel.org/linux-cxl/20220815154044.24733-1-Jonathan.Cameron@huawei.com/T/#t
+
+For writes to work you will currently need two rps (nothing on the second is
+fine) as we still haven't resolved if the kernel should support an HDM decoder
+on a host bridge with one port.  I think it should (Spec allows it), others
+unconvinced.
+
+Note I haven't shifted over to x86 yet so may still be something different
+from arm64.
+
+Jonathan
+
diff --git a/results/classifier/004/KVM/43643137 b/results/classifier/004/KVM/43643137
new file mode 100644
index 00000000..0ce14b08
--- /dev/null
+++ b/results/classifier/004/KVM/43643137
@@ -0,0 +1,546 @@
+KVM: 0.794
+other: 0.781
+semantic: 0.764
+device: 0.760
+instruction: 0.754
+vnc: 0.742
+assembly: 0.727
+graphic: 0.721
+network: 0.709
+socket: 0.674
+mistranslation: 0.665
+boot: 0.652
+
+[Qemu-devel] [BUG/RFC] INIT IPI lost when VM starts
+
+Hi,
+We encountered a problem where, when a domain starts, SeaBIOS fails to online a
+vCPU.
+
+After investigation, we found that the reason is in kvm-kmod: the KVM_APIC_INIT
+bit in vcpu->arch.apic->pending_events was overwritten by qemu, and thus an
+INIT IPI sent to the AP was lost. Qemu does this because libvirtd sends a
+‘query-cpus’ qmp command to qemu on VM start.
+
+In qemu, qmp_query_cpus-> cpu_synchronize_state-> kvm_cpu_synchronize_state->
+do_kvm_cpu_synchronize_state, qemu gets registers/vcpu_events from kvm-kmod and
+sets cpu->kvm_vcpu_dirty to true, and vcpu thread in qemu will call
+kvm_arch_put_registers if cpu->kvm_vcpu_dirty is true, thus pending_events is
+overwritten by qemu.
+
+I think there is no need for qemu to set cpu->kvm_vcpu_dirty to true after
+‘query-cpus’, and kvm-kmod should not clear KVM_APIC_INIT unconditionally. And
+I am not sure whether it is OK for qemu to set cpu->kvm_vcpu_dirty in
+do_kvm_cpu_synchronize_state in each caller.
+
+What’s your opinion?
+
+Let me clarify the sequence. Qemu handles the ‘query-cpus’ qmp command, and
+vcpu 1 (and vcpu 0) get registers from kvm-kmod (qmp_query_cpus->
+cpu_synchronize_state-> kvm_cpu_synchronize_state->
+do_kvm_cpu_synchronize_state-> kvm_arch_get_registers); then vcpu 0 (BSP)
+sends an INIT IPI to vcpu 1 (AP), and in kvm-kmod the KVM_APIC_INIT bit in
+vcpu 1's pending_events gets set.
+Then vcpu 1 continues running, and the vcpu 1 thread in qemu calls
+kvm_arch_put_registers-> kvm_put_vcpu_events, so the KVM_APIC_INIT bit in
+vcpu 1's pending_events gets cleared, i.e., lost.
+
+In kvm-kmod, besides pending_events, sipi_vector may also be overwritten, so I
+am not sure whether other fields/registers are in danger, i.e., ones that may
+be modified asynchronously with respect to the vcpu thread itself.
+
+BTW, using a sleep like the following can reliably reproduce this problem, if
+the VM has more than 2 vcpus and is started using libvirtd.
+
+diff --git a/target/i386/kvm.c b/target/i386/kvm.c
+index 55865db..5099290 100644
+--- a/target/i386/kvm.c
++++ b/target/i386/kvm.c
+@@ -2534,6 +2534,11 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
+             KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+     }
+
++    if (CPU(cpu)->cpu_index == 1) {
++        fprintf(stderr, "vcpu 1 sleep!!!!\n");
++        sleep(10);
++    }
++
+     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+ }
+
+
+On 2017/3/20 22:21, Herongguang (Stephen) wrote:
+> ...
+
+On 20/03/2017 15:21, Herongguang (Stephen) wrote:
+> We encountered a problem that when a domain starts, seabios failed to
+> online a vCPU.
+>
+> After investigation, we found that the reason is in kvm-kmod,
+> KVM_APIC_INIT bit in vcpu->arch.apic->pending_events was overwritten by
+> qemu, and thus an INIT IPI sent to AP was lost. Qemu does this since
+> libvirtd sends a ‘query-cpus’ qmp command to qemu on VM start.
+> [...]
+
+Hi Rongguang,
+
+sorry for the late response.
+
+Where exactly is KVM_APIC_INIT dropped?  kvm_get_mp_state does clear the
+bit, but the result of the INIT is stored in mp_state.
+
+kvm_get_vcpu_events is called after kvm_get_mp_state; it retrieves
+KVM_APIC_INIT in events.smi.latched_init and kvm_set_vcpu_events passes
+it back.  Maybe it should ignore events.smi.latched_init if not in SMM,
+but I would like to understand the exact sequence of events.
+
+Thanks,
+
+paolo
+
+On 2017/4/6 0:16, Paolo Bonzini wrote:
+> On 20/03/2017 15:21, Herongguang (Stephen) wrote:
+> > We encountered a problem that when a domain starts, seabios failed to
+> > online a vCPU.
+> > [...]
+>
+> Hi Rongguang,
+>
+> sorry for the late response.
+>
+> Where exactly is KVM_APIC_INIT dropped?  kvm_get_mp_state does clear the
+> bit, but the result of the INIT is stored in mp_state.
+
+It's dropped in KVM_SET_VCPU_EVENTS, see below.
+
+> kvm_get_vcpu_events is called after kvm_get_mp_state; it retrieves
+> KVM_APIC_INIT in events.smi.latched_init and kvm_set_vcpu_events passes
+> it back.  Maybe it should ignore events.smi.latched_init if not in SMM,
+> but I would like to understand the exact sequence of events.
+
+time0:
+vcpu1:
+qmp_query_cpus-> cpu_synchronize_state-> kvm_cpu_synchronize_state->
+do_kvm_cpu_synchronize_state (and set vcpu1's cpu->kvm_vcpu_dirty to true)->
+kvm_arch_get_registers (KVM_APIC_INIT bit in vcpu->arch.apic->pending_events
+was not set)
+
+time1:
+vcpu0:
+send INIT-SIPI to all APs-> (in vcpu 0's context) __apic_accept_irq
+(KVM_APIC_INIT bit in vcpu1's arch.apic->pending_events is set)
+
+time2:
+vcpu1:
+kvm_cpu_exec-> (if cpu->kvm_vcpu_dirty is true) kvm_arch_put_registers->
+kvm_put_vcpu_events (overwrites the KVM_APIC_INIT bit in
+vcpu->arch.apic->pending_events!)
+
+So it's a race between vcpu 1's get/put of registers and kvm/other vcpus
+changing vcpu 1's status/structure fields in the meantime. I worry that other
+fields may be overwritten as well; sipi_vector is one.
+
+also see:
+https://www.mail-archive.com/address@hidden/msg438675.html
+> Thanks,
+>
+> paolo
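+
+To make the race concrete, a hypothetical C sketch (not from this thread) of
+the userspace sequence: the events snapshot taken at time0 does not contain the
+INIT latched at time1, so writing it back at time2 loses it. struct
+kvm_vcpu_events and the KVM_GET/SET_VCPU_EVENTS ioctls are the kernel's KVM API:
+
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+/* vcpu_fd is vcpu 1's fd; simplified, no error handling */
+void query_cpus_then_resume(int vcpu_fd)
+{
+    struct kvm_vcpu_events events;
+
+    /* time0: query-cpus snapshots vcpu 1's events; no INIT pending yet */
+    ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events);
+
+    /* time1: meanwhile vcpu 0 sends INIT-SIPI and KVM latches
+     * KVM_APIC_INIT in vcpu 1's pending_events */
+
+    /* time2: writing the stale snapshot back clears the latched INIT */
+    ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
+}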
+
+Hi Paolo,
+
+What's your opinion about this patch? We found it just before finishing patches 
+for the past two days.
+
+
+Thanks,
+-Gonglei
+
+
+> -----Original Message-----
+> From: address@hidden [mailto:address@hidden] On Behalf Of Herongguang (Stephen)
+> Sent: Thursday, April 06, 2017 9:47 AM
+> To: Paolo Bonzini; address@hidden; address@hidden; address@hidden;
+> address@hidden; address@hidden; wangxin (U); Huangweidong (C)
+> Subject: Re: [BUG/RFC] INIT IPI lost when VM starts
+>
+> [...]
+
+2017-11-20 06:57+0000, Gonglei (Arei):
+> Hi Paolo,
+>
+> What's your opinion about this patch? We found it just before finishing
+> patches for the past two days.
+
+I think your case was fixed by f4ef19108608 ("KVM: X86: Fix loss of
+pending INIT due to race"), but that patch didn't fix it perfectly, so
+maybe you're hitting a similar case that happens in SMM ...
+
+> > So it's a race between vcpu1 get/put registers with kvm/other vcpus changing
+> > vcpu1's status/structure fields in the mean time, I am in worry of if there
+> > are other fields may be overwritten, sipi_vector is one.
+Fields that can be asynchronously written by other VCPUs (like SIPI,
+NMI) must not be SET if other VCPUs were not paused since the last GET.
+(Looking at the interface, we can currently lose pending SMI.)
+
+INIT is one of the restricted fields, but the API unconditionally
+couples SMM with latched INIT, which means that we can lose an INIT if
+the VCPU is in SMM mode -- do you see SMM in kvm_vcpu_events?
+
+Thanks.
+
diff --git a/results/classifier/004/KVM/71456293 b/results/classifier/004/KVM/71456293
new file mode 100644
index 00000000..4f5d04ac
--- /dev/null
+++ b/results/classifier/004/KVM/71456293
@@ -0,0 +1,1494 @@
+KVM: 0.691
+mistranslation: 0.659
+vnc: 0.625
+instruction: 0.624
+graphic: 0.603
+assembly: 0.602
+device: 0.601
+semantic: 0.600
+other: 0.598
+boot: 0.598
+socket: 0.596
+network: 0.491
+
+[Qemu-devel][bug] qemu crash when migrate vm and vm's disks
+
+When migrating a VM and the VM's disks, the target-host qemu crashes due to an invalid free.
+#0  object_unref (obj=0x1000) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/qom/object.c:920
+#1  0x0000560434d79e79 in memory_region_unref (mr=<optimized out>)
+at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:1730
+#2  flatview_destroy (view=0x560439653880) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:292
+#3  0x000056043514dfbe in call_rcu_thread (opaque=<optimized out>)
+at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/util/rcu.c:284
+#4  0x00007fbc2b36fe25 in start_thread () from /lib64/libpthread.so.0
+#5  0x00007fbc2b099bad in clone () from /lib64/libc.so.6
+Tested on base qemu-2.12.0, but it also reproduces with the latest qemu (v6.0.0-rc2).
+The following patch resolves this problem:
+https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg02272.html
+Steps to reproduce:
+(1) Create VM (virsh define)
+(2) Add 64 virtio scsi disks
+(3) Migrate the VM and the VM's disks
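+
+(For step 3, one hypothetical way to drive this is a live block migration, e.g.
+`virsh migrate --live --copy-storage-all <domain> qemu+ssh://<target>/system`,
+which migrates the disks along with the VM.)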
+
+* Yuchen (yu.chen@h3c.com) wrote:
+> When migrate vm and vm’s disks target host qemu crash due to an invalid free.
+>
+> #0  object_unref (obj=0x1000) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/qom/object.c:920
+> #1  0x0000560434d79e79 in memory_region_unref (mr=<optimized out>)
+>     at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:1730
+> #2  flatview_destroy (view=0x560439653880) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:292
+> #3  0x000056043514dfbe in call_rcu_thread (opaque=<optimized out>)
+>     at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/util/rcu.c:284
+> #4  0x00007fbc2b36fe25 in start_thread () from /lib64/libpthread.so.0
+> #5  0x00007fbc2b099bad in clone () from /lib64/libc.so.6
+>
+> test base qemu-2.12.0, but use lastest qemu(v6.0.0-rc2) also reproduce.
+
+Interesting.
+
+> As follow patch can resolve this problem:
+> https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg02272.html
+
+That's a pci/rcu change; ccing Paolo and Michael.
+
+> Steps to reproduce:
+> (1) Create VM (virsh define)
+> (2) Add 64 virtio scsi disks
+
+Is that hot adding the disks later, or are they included in the VM at
+creation?
+Can you provide a libvirt XML example?
+
+> (3) migrate vm and vm’disks
+
+What do you mean by 'and vm disks' - are you doing a block migration?
+
+Dave
+
+-- 
+Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
+
+> -----Original Message-----
+> From: Dr. David Alan Gilbert [mailto:dgilbert@redhat.com]
+> Sent: Thursday, April 8, 2021 19:27
+> To: yuchen (Cloud) <yu.chen@h3c.com>; pbonzini@redhat.com; mst@redhat.com
+> Cc: qemu-devel@nongnu.org
+> Subject: Re: [Qemu-devel][bug] qemu crash when migrate vm and vm's disks
+>
+> Is that hot adding the disks later, or are they included in the VM at
+> creation?
+> Can you provide a libvirt XML example?
+
+The disks are included in the VM at creation.
+
+VM disk XML (only the virtio scsi disks):
+  <devices>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native'/>
+      <source file='/vms/tempp/vm-os'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data1'/>
+      <target dev='sda' bus='scsi'/>
+      <address type='drive' controller='2' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data2'/>
+      <target dev='sdb' bus='scsi'/>
+      <address type='drive' controller='3' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data3'/>
+      <target dev='sdc' bus='scsi'/>
+      <address type='drive' controller='4' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data4'/>
+      <target dev='sdd' bus='scsi'/>
+      <address type='drive' controller='5' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data5'/>
+      <target dev='sde' bus='scsi'/>
+      <address type='drive' controller='6' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data6'/>
+      <target dev='sdf' bus='scsi'/>
+      <address type='drive' controller='7' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data7'/>
+      <target dev='sdg' bus='scsi'/>
+      <address type='drive' controller='8' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data8'/>
+      <target dev='sdh' bus='scsi'/>
+      <address type='drive' controller='9' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data9'/>
+      <target dev='sdi' bus='scsi'/>
+      <address type='drive' controller='10' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data10'/>
+      <target dev='sdj' bus='scsi'/>
+      <address type='drive' controller='11' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data11'/>
+      <target dev='sdk' bus='scsi'/>
+      <address type='drive' controller='12' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data12'/>
+      <target dev='sdl' bus='scsi'/>
+      <address type='drive' controller='13' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data13'/>
+      <target dev='sdm' bus='scsi'/>
+      <address type='drive' controller='14' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data14'/>
+      <target dev='sdn' bus='scsi'/>
+      <address type='drive' controller='15' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data15'/>
+      <target dev='sdo' bus='scsi'/>
+      <address type='drive' controller='16' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data16'/>
+      <target dev='sdp' bus='scsi'/>
+      <address type='drive' controller='17' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data17'/>
+      <target dev='sdq' bus='scsi'/>
+      <address type='drive' controller='18' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data18'/>
+      <target dev='sdr' bus='scsi'/>
+      <address type='drive' controller='19' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data19'/>
+      <target dev='sds' bus='scsi'/>
+      <address type='drive' controller='20' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data20'/>
+      <target dev='sdt' bus='scsi'/>
+      <address type='drive' controller='21' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data21'/>
+      <target dev='sdu' bus='scsi'/>
+      <address type='drive' controller='22' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data22'/>
+      <target dev='sdv' bus='scsi'/>
+      <address type='drive' controller='23' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data23'/>
+      <target dev='sdw' bus='scsi'/>
+      <address type='drive' controller='24' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data24'/>
+      <target dev='sdx' bus='scsi'/>
+      <address type='drive' controller='25' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data25'/>
+      <target dev='sdy' bus='scsi'/>
+      <address type='drive' controller='26' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data26'/>
+      <target dev='sdz' bus='scsi'/>
+      <address type='drive' controller='27' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data27'/>
+      <target dev='sdaa' bus='scsi'/>
+      <address type='drive' controller='28' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data28'/>
+      <target dev='sdab' bus='scsi'/>
+      <address type='drive' controller='29' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data29'/>
+      <target dev='sdac' bus='scsi'/>
+      <address type='drive' controller='30' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data30'/>
+      <target dev='sdad' bus='scsi'/>
+      <address type='drive' controller='31' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data31'/>
+      <target dev='sdae' bus='scsi'/>
+      <address type='drive' controller='32' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data32'/>
+      <target dev='sdaf' bus='scsi'/>
+      <address type='drive' controller='33' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data33'/>
+      <target dev='sdag' bus='scsi'/>
+      <address type='drive' controller='34' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data34'/>
+      <target dev='sdah' bus='scsi'/>
+      <address type='drive' controller='35' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data35'/>
+      <target dev='sdai' bus='scsi'/>
+      <address type='drive' controller='36' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data36'/>
+      <target dev='sdaj' bus='scsi'/>
+      <address type='drive' controller='37' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data37'/>
+      <target dev='sdak' bus='scsi'/>
+      <address type='drive' controller='38' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data38'/>
+      <target dev='sdal' bus='scsi'/>
+      <address type='drive' controller='39' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data39'/>
+      <target dev='sdam' bus='scsi'/>
+      <address type='drive' controller='40' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data40'/>
+      <target dev='sdan' bus='scsi'/>
+      <address type='drive' controller='41' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data41'/>
+      <target dev='sdao' bus='scsi'/>
+      <address type='drive' controller='42' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data42'/>
+      <target dev='sdap' bus='scsi'/>
+      <address type='drive' controller='43' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data43'/>
+      <target dev='sdaq' bus='scsi'/>
+      <address type='drive' controller='44' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data44'/>
+      <target dev='sdar' bus='scsi'/>
+      <address type='drive' controller='45' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data45'/>
+      <target dev='sdas' bus='scsi'/>
+      <address type='drive' controller='46' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data46'/>
+      <target dev='sdat' bus='scsi'/>
+      <address type='drive' controller='47' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data47'/>
+      <target dev='sdau' bus='scsi'/>
+      <address type='drive' controller='48' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data48'/>
+      <target dev='sdav' bus='scsi'/>
+      <address type='drive' controller='49' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data49'/>
+      <target dev='sdaw' bus='scsi'/>
+      <address type='drive' controller='50' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data50'/>
+      <target dev='sdax' bus='scsi'/>
+      <address type='drive' controller='51' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data51'/>
+      <target dev='sday' bus='scsi'/>
+      <address type='drive' controller='52' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data52'/>
+      <target dev='sdaz' bus='scsi'/>
+      <address type='drive' controller='53' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data53'/>
+      <target dev='sdba' bus='scsi'/>
+      <address type='drive' controller='54' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data54'/>
+      <target dev='sdbb' bus='scsi'/>
+      <address type='drive' controller='55' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data55'/>
+      <target dev='sdbc' bus='scsi'/>
+      <address type='drive' controller='56' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data56'/>
+      <target dev='sdbd' bus='scsi'/>
+      <address type='drive' controller='57' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data57'/>
+      <target dev='sdbe' bus='scsi'/>
+      <address type='drive' controller='58' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data58'/>
+      <target dev='sdbf' bus='scsi'/>
+      <address type='drive' controller='59' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data59'/>
+      <target dev='sdbg' bus='scsi'/>
+      <address type='drive' controller='60' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data60'/>
+      <target dev='sdbh' bus='scsi'/>
+      <address type='drive' controller='61' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data61'/>
+      <target dev='sdbi' bus='scsi'/>
+      <address type='drive' controller='62' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data62'/>
+      <target dev='sdbj' bus='scsi'/>
+      <address type='drive' controller='63' bus='0' target='0' unit='0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data63'/>
+      <target dev='sdbk' bus='scsi'/>
+      <address type='drive' controller='64' bus='0' target='0' unit='0'/>
+    </disk>
+    <controller type='scsi' index='0'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x02' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='1' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='2' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x01' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='3' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x03' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='4' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x04' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='5' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x05' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='6' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x06' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='7' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x07' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='8' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x08' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='9' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x09' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='10' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0a' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='11' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0b' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='12' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0c' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='13' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0d' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='14' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0e' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='15' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0f' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='16' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x10' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='17' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x11' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='18' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x12' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='19' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x13' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='20' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x14' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='21' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x15' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='22' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x16' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='23' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x17' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='24' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x18' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='25' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x19' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='26' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1a' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='27' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1b' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='28' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1c' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='29' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1d' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='30' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1e' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='31' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x01' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='32' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x02' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='33' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x03' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='34' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x04' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='35' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x05' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='36' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x06' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='37' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x07' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='38' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x08' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='39' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x09' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='40' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0a' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='41' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0b' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='42' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0c' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='43' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0d' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='44' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='45' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='46' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='47' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0c' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='48' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0d' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='49' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0e' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='50' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0f' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='51' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x10' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='52' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x11' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='53' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x12' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='54' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x13' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='55' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x14' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='56' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x15' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='57' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x16' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='58' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x17' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='59' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x18' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='60' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x19' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='61' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1a' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='62' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1b' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='63' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1c' 
+function='0x0'/>
+    </controller>
+    <controller type='scsi' index='64' model='virtio-scsi'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1d' 
+function='0x0'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'/>
+    <controller type='pci' index='1' model='pci-bridge'>
+      <model name='pci-bridge'/>
+      <target chassisNr='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' 
+function='0x0'/>
+    </controller>
+    <controller type='pci' index='2' model='pci-bridge'>
+      <model name='pci-bridge'/>
+      <target chassisNr='2'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1f' 
+function='0x0'/>
+    </controller>
+  </devices>
+
+vm disks xml (only virtio disks):
+  <devices>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native'/>
+      <source file='/vms/tempp/vm-os'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data2'/>
+      <target dev='vdb' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data3'/>
+      <target dev='vdc' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data4'/>
+      <target dev='vdd' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data5'/>
+      <target dev='vde' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0c' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data6'/>
+      <target dev='vdf' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0d' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data7'/>
+      <target dev='vdg' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0e' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data8'/>
+      <target dev='vdh' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0f' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data9'/>
+      <target dev='vdi' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x10' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data10'/>
+      <target dev='vdj' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x11' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data11'/>
+      <target dev='vdk' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x12' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data12'/>
+      <target dev='vdl' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x13' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data13'/>
+      <target dev='vdm' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x14' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data14'/>
+      <target dev='vdn' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x15' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data15'/>
+      <target dev='vdo' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x16' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data16'/>
+      <target dev='vdp' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x17' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data17'/>
+      <target dev='vdq' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x18' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data18'/>
+      <target dev='vdr' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x19' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data19'/>
+      <target dev='vds' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1a' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data20'/>
+      <target dev='vdt' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1b' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data21'/>
+      <target dev='vdu' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1c' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data22'/>
+      <target dev='vdv' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1d' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data23'/>
+      <target dev='vdw' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1e' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data24'/>
+      <target dev='vdx' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x01' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data25'/>
+      <target dev='vdy' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x03' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data26'/>
+      <target dev='vdz' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x04' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data27'/>
+      <target dev='vdaa' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x05' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data28'/>
+      <target dev='vdab' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x06' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data29'/>
+      <target dev='vdac' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x07' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data30'/>
+      <target dev='vdad' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x08' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data31'/>
+      <target dev='vdae' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x09' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data32'/>
+      <target dev='vdaf' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0a' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data33'/>
+      <target dev='vdag' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0b' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data34'/>
+      <target dev='vdah' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0c' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data35'/>
+      <target dev='vdai' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0d' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data36'/>
+      <target dev='vdaj' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0e' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data37'/>
+      <target dev='vdak' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x0f' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data38'/>
+      <target dev='vdal' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x10' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data39'/>
+      <target dev='vdam' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x11' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data40'/>
+      <target dev='vdan' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x12' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data41'/>
+      <target dev='vdao' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x13' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data42'/>
+      <target dev='vdap' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x14' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data43'/>
+      <target dev='vdaq' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x15' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data44'/>
+      <target dev='vdar' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x16' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data45'/>
+      <target dev='vdas' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x17' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data46'/>
+      <target dev='vdat' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x18' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data47'/>
+      <target dev='vdau' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x19' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data48'/>
+      <target dev='vdav' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1a' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data49'/>
+      <target dev='vdaw' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1b' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data50'/>
+      <target dev='vdax' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1c' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data51'/>
+      <target dev='vday' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1d' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data52'/>
+      <target dev='vdaz' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1e' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data53'/>
+      <target dev='vdba' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x01' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data54'/>
+      <target dev='vdbb' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x02' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data55'/>
+      <target dev='vdbc' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x03' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data56'/>
+      <target dev='vdbd' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x04' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data57'/>
+      <target dev='vdbe' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x05' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data58'/>
+      <target dev='vdbf' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x06' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data59'/>
+      <target dev='vdbg' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x07' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data60'/>
+      <target dev='vdbh' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x08' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data61'/>
+      <target dev='vdbi' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x09' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data62'/>
+      <target dev='vdbj' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0a' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data63'/>
+      <target dev='vdbk' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x02' slot='0x0b' 
+function='0x0'/>
+    </disk>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='directsync' io='native' 
+discard='unmap'/>
+      <source file='/vms/tempp/vm-data1'/>
+      <target dev='vdbl' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' 
+function='0x0'/>
+    </disk>
+    <controller type='pci' index='0' model='pci-root'/>
+    <controller type='pci' index='1' model='pci-bridge'>
+      <model name='pci-bridge'/>
+      <target chassisNr='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' 
+function='0x0'/>
+    </controller>
+    <controller type='pci' index='2' model='pci-bridge'>
+      <model name='pci-bridge'/>
+      <target chassisNr='2'/>
+      <address type='pci' domain='0x0000' bus='0x01' slot='0x1f' 
+function='0x0'/>
+    </controller>
+  </devices>
+
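+The 63 qcow2 backing files referenced above can be created with a one-liner
+along these lines (the 1G size is an assumption, not stated in the thread):
+
+for i in $(seq 1 63); do qemu-img create -f qcow2 /vms/tempp/vm-data$i 1G; done
+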
+> > (3) migrate vm and vm's disks
+>
+> What do you mean by 'and vm disks' - are you doing a block migration?
+
+Yes, block migration.
+In fact, it also reproduces when migrating only the domain, without block
+migration.
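+
+For reference, the plain and block migration cases differ only in the
+storage-copy flag; both can be driven with virsh along these lines (the
+destination URI is a placeholder, not from the report):
+
+virsh migrate --live <domain> qemu+ssh://DEST/system
+virsh migrate --live --copy-storage-all <domain> qemu+ssh://DEST/system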
+
+> Dave
+> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
+
diff --git a/results/classifier/004/KVM/80615920 b/results/classifier/004/KVM/80615920
new file mode 100644
index 00000000..b06a8931
--- /dev/null
+++ b/results/classifier/004/KVM/80615920
@@ -0,0 +1,356 @@
+KVM: 0.803
+mistranslation: 0.800
+other: 0.786
+vnc: 0.768
+instruction: 0.751
+boot: 0.750
+device: 0.748
+assembly: 0.747
+semantic: 0.737
+network: 0.732
+socket: 0.732
+graphic: 0.730
+
+[BUG] accel/tcg: cpu_exec_longjmp_cleanup: assertion failed: (cpu == current_cpu)
+
+It seems there is a bug in SIGALRM handling when a 486 system emulates x86_64
+code.
+
+This code:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+pthread_t thread1, thread2;
+
+// Signal handler for SIGALRM
+void alarm_handler(int sig) {
+    // Do nothing, just wake up the other thread
+}
+
+// Thread 1 function
+void* thread1_func(void* arg) {
+    // Set up the signal handler for SIGALRM
+    signal(SIGALRM, alarm_handler);
+
+    // Wait for 1 second
+    sleep(1);
+
+    // Send SIGALRM signal to thread 2
+    pthread_kill(thread2, SIGALRM);
+
+    return NULL;
+}
+
+// Thread 2 function
+void* thread2_func(void* arg) {
+    // Wait for the SIGALRM signal
+    pause();
+
+    printf("Thread 2 woke up!\n");
+
+    return NULL;
+}
+
+int main() {
+    // Create thread 1
+    if (pthread_create(&thread1, NULL, thread1_func, NULL) != 0) {
+        fprintf(stderr, "Failed to create thread 1\n");
+        return 1;
+    }
+
+    // Create thread 2
+    if (pthread_create(&thread2, NULL, thread2_func, NULL) != 0) {
+        fprintf(stderr, "Failed to create thread 2\n");
+        return 1;
+    }
+
+    // Wait for both threads to finish
+    pthread_join(thread1, NULL);
+    pthread_join(thread2, NULL);
+
+    return 0;
+}
+
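+How alarm.bin was built is not stated in the report; presumably something
+along the lines of:
+
+x86_64-linux-gnu-gcc -O2 -pthread -o alarm.bin alarm.c
+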
+
+Fails with this -strace log (there are also unsupported syscalls 334 and 435,
+but they don't seem to affect the code much):
+
+...
+736 rt_sigaction(SIGALRM,0x000000001123ec20,0x000000001123ecc0) = 0
+736 clock_nanosleep(CLOCK_REALTIME,0,{tv_sec = 1,tv_nsec = 0},{tv_sec = 
+1,tv_nsec = 0})
+736 rt_sigprocmask(SIG_BLOCK,0x00000000109fad20,0x0000000010800b38,8) = 0
+736 Unknown syscall 435
+736 
+clone(CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS|CLONE_PARENT_SETTID|
+ ...
+736 rt_sigprocmask(SIG_SETMASK,0x0000000010800b38,NULL,8)
+736 set_robust_list(0x11a419a0,0) = -1 errno=38 (Function not implemented)
+736 rt_sigprocmask(SIG_SETMASK,0x0000000011a41fb0,NULL,8) = 0
+ = 0
+736 pause(0,0,2,277186368,0,295966400)
+736 
+futex(0x000000001123f990,FUTEX_CLOCK_REALTIME|FUTEX_WAIT_BITSET,738,NULL,NULL,0)
+ = 0
+736 rt_sigprocmask(SIG_BLOCK,0x00000000109fad20,0x000000001123ee88,8) = 0
+736 getpid() = 736
+736 tgkill(736,739,SIGALRM) = 0
+ = -1 errno=4 (Interrupted system call)
+--- SIGALRM {si_signo=SIGALRM, si_code=SI_TKILL, si_pid=736, si_uid=0} ---
+0x48874a != 0x3c69e10
+736 rt_sigprocmask(SIG_SETMASK,0x000000001123ee88,NULL,8) = 0
+**
+ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: 
+(cpu == current_cpu)
+Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion 
+failed: (cpu == current_cpu)
+0x48874a != 0x3c69e10
+**
+ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: 
+(cpu == current_cpu)
+Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion 
+failed: (cpu == current_cpu)
+# 
+
+The code fails both with and without -singlestep; the command line:
+
+/usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep  /opt/x86_64/alarm.bin
+
+The source code of QEMU 8.1.1 was modified with the patch "[PATCH] qemu/timer:
+Don't use RDTSC on i486" [1], with a few ioctls added (not relevant), and
+cpu_exec_longjmp_cleanup() now prints the current pointers of cpu and
+current_cpu (the "0x48874a != 0x3c69e10" line).
+
+config.log (built as part of buildroot, basically the minimal possible
+configuration for running x86_64 on a 486):
+
+# Configured with: 
+'/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/build/qemu-8.1.1/configure'
+ '--prefix=/usr' 
+'--cross-prefix=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/i486-buildroot-linux-gnu-'
+ '--audio-drv-list=' 
+'--python=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/python3'
+ 
+'--ninja=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/ninja' 
+'--disable-alsa' '--disable-bpf' '--disable-brlapi' '--disable-bsd-user' 
+'--disable-cap-ng' '--disable-capstone' '--disable-containers' 
+'--disable-coreaudio' '--disable-curl' '--disable-curses' 
+'--disable-dbus-display' '--disable-docs' '--disable-dsound' '--disable-hvf' 
+'--disable-jack' '--disable-libiscsi' '--disable-linux-aio' 
+'--disable-linux-io-uring' '--disable-malloc-trim' '--disable-membarrier' 
+'--disable-mpath' '--disable-netmap' '--disable-opengl' '--disable-oss' 
+'--disable-pa' '--disable-rbd' '--disable-sanitizers' '--disable-selinux' 
+'--disable-sparse' '--disable-strip' '--disable-vde' '--disable-vhost-crypto' 
+'--disable-vhost-user-blk-server' '--disable-virtfs' '--disable-whpx' 
+'--disable-xen' '--disable-attr' '--disable-kvm' '--disable-vhost-net' 
+'--disable-download' '--disable-hexagon-idef-parser' '--disable-system' 
+'--enable-linux-user' '--target-list=x86_64-linux-user' '--disable-vhost-user' 
+'--disable-slirp' '--disable-sdl' '--disable-fdt' '--enable-trace-backends=nop' 
+'--disable-tools' '--disable-guest-agent' '--disable-fuse' 
+'--disable-fuse-lseek' '--disable-seccomp' '--disable-libssh' 
+'--disable-libusb' '--disable-vnc' '--disable-nettle' '--disable-numa' 
+'--disable-pipewire' '--disable-spice' '--disable-usb-redir' 
+'--disable-install-blobs'
+
+Emulation of the same x86_64 code with qemu 6.2.0 installed on another x86_64 
+native machine works fine.
+
+[1]
+https://lists.nongnu.org/archive/html/qemu-devel/2023-11/msg05387.html
+
+Best regards,
+Petr
+
+On Sat, 25 Nov 2023 at 13:09, Petr Cvek <petrcvekcz@gmail.com> wrote:
+>
+> It seems there is a bug in SIGALRM handling when 486 system emulates x86_64
+> code.
+
+486 host is pretty well out of support currently. Can you reproduce
+this on a less ancient host CPU type ?
+> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed:
+> (cpu == current_cpu)
+> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> assertion failed: (cpu == current_cpu)
+> 0x48874a != 0x3c69e10
+> **
+> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed:
+> (cpu == current_cpu)
+> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> assertion failed: (cpu == current_cpu)
+
+What compiler version do you build QEMU with? That
+assert is there because we have seen some buggy compilers
+in the past which don't correctly preserve the variable
+value as the setjmp/longjmp spec requires them to.
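+
+As a minimal illustration of that requirement (a standalone sketch, not QEMU
+code): a local variable modified between setjmp() and longjmp() has an
+indeterminate value after the jump unless it is volatile, so a compiler that
+mishandles this can make the saved pointer disagree with current_cpu:
+
+#include <setjmp.h>
+#include <stdio.h>
+
+static jmp_buf env;
+
+int main(void)
+{
+    int v = 1;              /* not volatile: may be kept in a register */
+    if (setjmp(env) == 0) {
+        v = 2;              /* modified after setjmp() */
+        longjmp(env, 1);    /* a buggy build may resurrect v == 1 here */
+    }
+    printf("v = %d\n", v);  /* C11 7.13.2.1: indeterminate without volatile */
+    return 0;
+}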
+
+thanks
+-- PMM
+
+On 27. 11. 23 at 10:37, Peter Maydell wrote:
+> On Sat, 25 Nov 2023 at 13:09, Petr Cvek <petrcvekcz@gmail.com> wrote:
+> >
+> > It seems there is a bug in SIGALRM handling when 486 system emulates x86_64
+> > code.
+>
+> 486 host is pretty well out of support currently. Can you reproduce
+> this on a less ancient host CPU type ?
+
+It seems it only fails when the code is compiled for i486. QEMU built with the
+same compiler with -march=i586 and above runs on the same physical hardware
+without a problem. All -march= variants were executed on a Ryzen 3600.
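+
+The two build variants presumably differ only in the compiler flags, along
+these lines (paths shortened; the --extra-cflags value is the relevant part):
+
+./configure --target-list=x86_64-linux-user --extra-cflags='-march=i486'
+./configure --target-list=x86_64-linux-user --extra-cflags='-march=i586'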
+
+> > ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion
+> > failed: (cpu == current_cpu)
+> > Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> > assertion failed: (cpu == current_cpu)
+> > 0x48874a != 0x3c69e10
+> > **
+> > ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion
+> > failed: (cpu == current_cpu)
+> > Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> > assertion failed: (cpu == current_cpu)
+>
+> What compiler version do you build QEMU with? That
+> assert is there because we have seen some buggy compilers
+> in the past which don't correctly preserve the variable
+> value as the setjmp/longjmp spec requires them to.
+
+The i486 and i586+ code variants were compiled with GCC 13.2.0 (more exactly,
+the slackware64 current multilib distribution).
+
+The i486 binary which runs on the real 486 was also built with GCC 13.2.0,
+installed as part of the buildroot cross-compiler (an about two-week-old git
+snapshot).
+
+> thanks
+> -- PMM
+
+best regards,
+Petr
+
+On 11/25/23 07:08, Petr Cvek wrote:
+> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed:
+> (cpu == current_cpu)
+> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> assertion failed: (cpu == current_cpu)
+> #
+>
+> The code fails either with or without -singlestep, the command line:
+>
+> /usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep /opt/x86_64/alarm.bin
+>
+> Source code of QEMU 8.1.1 was modified with patch "[PATCH] qemu/timer: Don't
+> use RDTSC on i486" [1], with added few ioctls (not relevant) and
+> cpu_exec_longjmp_cleanup() now prints current pointers of cpu and current_cpu
+> (line "0x48874a != 0x3c69e10").
+
+If you try this again with 8.2-rc2, you should not see an assertion failure.
+You should see instead
+
+QEMU internal SIGILL {code=ILLOPC, addr=0x12345678}
+
+which I think more accurately summarizes the situation of attempting RDTSC on
+hardware that does not support it.
+
+r~
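+
+(The RDTSC case is easy to demonstrate standalone; on a CPU without a TSC the
+instruction raises #UD and the process gets SIGILL. A sketch, not from the
+thread:)
+
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void)
+{
+    uint32_t lo, hi;
+    /* RDTSC traps with SIGILL on a host (or guest CPU model) without a TSC,
+       e.g. a real 486; that is the signal 8.2-rc2 now reports. */
+    __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
+    printf("tsc=%llu\n", ((unsigned long long)hi << 32) | lo);
+    return 0;
+}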
+
+On 29. 11. 23 at 15:25, Richard Henderson wrote:
+> On 11/25/23 07:08, Petr Cvek wrote:
+> > ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion
+> > failed: (cpu == current_cpu)
+> > Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup:
+> > assertion failed: (cpu == current_cpu)
+> > #
+> >
+> > The code fails either with or without -singlestep, the command line:
+> >
+> > /usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep /opt/x86_64/alarm.bin
+> >
+> > Source code of QEMU 8.1.1 was modified with patch "[PATCH] qemu/timer: Don't
+> > use RDTSC on i486" [1], with added few ioctls (not relevant) and
+> > cpu_exec_longjmp_cleanup() now prints current pointers of cpu and current_cpu
+> > (line "0x48874a != 0x3c69e10").
+>
+> If you try this again with 8.2-rc2, you should not see an assertion failure.
+> You should see instead
+>
+> QEMU internal SIGILL {code=ILLOPC, addr=0x12345678}
+>
+> which I think more accurately summarizes the situation of attempting RDTSC on
+> hardware that does not support it.
+
+Compilation of vanilla qemu v8.2.0-rc2 with -march=i486 by GCC 13.2.0 and
+running the resulting binary on a Ryzen still leads to:
+
+**
+ERROR:../accel/tcg/cpu-exec.c:533:cpu_exec_longjmp_cleanup: assertion failed:
+(cpu == current_cpu)
+Bail out! ERROR:../accel/tcg/cpu-exec.c:533:cpu_exec_longjmp_cleanup: assertion
+failed: (cpu == current_cpu)
+Aborted
+
+> r~
+
+Petr