Diffstat (limited to 'results/classifier/108/semantic')
-rw-r--r--  results/classifier/108/semantic/1038        26
-rw-r--r--  results/classifier/108/semantic/1094786     90
-rw-r--r--  results/classifier/108/semantic/1152        43
-rw-r--r--  results/classifier/108/semantic/1212        24
-rw-r--r--  results/classifier/108/semantic/124         16
-rw-r--r--  results/classifier/108/semantic/1288385    105
-rw-r--r--  results/classifier/108/semantic/1347555    128
-rw-r--r--  results/classifier/108/semantic/1370        28
-rw-r--r--  results/classifier/108/semantic/1371        34
-rw-r--r--  results/classifier/108/semantic/1372        35
-rw-r--r--  results/classifier/108/semantic/1373        35
-rw-r--r--  results/classifier/108/semantic/1374        37
-rw-r--r--  results/classifier/108/semantic/1375        34
-rw-r--r--  results/classifier/108/semantic/1405       136
-rw-r--r--  results/classifier/108/semantic/1417        20
-rw-r--r--  results/classifier/108/semantic/1497711     52
-rw-r--r--  results/classifier/108/semantic/1586229     45
-rw-r--r--  results/classifier/108/semantic/1696180     70
-rw-r--r--  results/classifier/108/semantic/1743191    492
-rw-r--r--  results/classifier/108/semantic/1809546     92
-rw-r--r--  results/classifier/108/semantic/1829964    103
-rw-r--r--  results/classifier/108/semantic/1856335   1087
-rw-r--r--  results/classifier/108/semantic/1865252     51
-rw-r--r--  results/classifier/108/semantic/1898215    100
-rw-r--r--  results/classifier/108/semantic/1905562     82
-rw-r--r--  results/classifier/108/semantic/1905979     70
-rw-r--r--  results/classifier/108/semantic/1907969    166
-rw-r--r--  results/classifier/108/semantic/1922391    142
-rw-r--r--  results/classifier/108/semantic/227         16
-rw-r--r--  results/classifier/108/semantic/237164     110
-rw-r--r--  results/classifier/108/semantic/2582        38
-rw-r--r--  results/classifier/108/semantic/714         58
-rw-r--r--  results/classifier/108/semantic/754635      85
33 files changed, 3650 insertions, 0 deletions
diff --git a/results/classifier/108/semantic/1038 b/results/classifier/108/semantic/1038
new file mode 100644
index 000000000..bc4d29ac4
--- /dev/null
+++ b/results/classifier/108/semantic/1038
@@ -0,0 +1,26 @@
+semantic: 0.944
+KVM: 0.827
+device: 0.819
+performance: 0.799
+graphic: 0.780
+PID: 0.753
+other: 0.709
+vnc: 0.617
+network: 0.606
+files: 0.526
+socket: 0.430
+boot: 0.429
+permissions: 0.407
+debug: 0.389
+
+ppc 'max' CPU model is unlike the other targets' 'max' CPU model
+Description of problem:
+On most targets the 'max' CPU model is either equivalent to 'host' (for KVM) or equivalent to all available CPU features (for TCG).
+
+On PPC64, however, this is not the case. Instead the 'max' model is an alias of the old '7400_v2.9' and simply doesn't work.
+
+This is confusing. At the very least the 'max' model alias should be deleted. Ideally a proper replacement would be introduced that matches semantics on other arches.
+Steps to reproduce:
+1. qemu-system-ppc64 -cpu max
+
+should be equivalent to '-cpu host' or should expose all TCG features.
diff --git a/results/classifier/108/semantic/1094786 b/results/classifier/108/semantic/1094786
new file mode 100644
index 000000000..519006409
--- /dev/null
+++ b/results/classifier/108/semantic/1094786
@@ -0,0 +1,90 @@
+semantic: 0.924
+debug: 0.918
+graphic: 0.915
+permissions: 0.905
+performance: 0.898
+device: 0.897
+other: 0.883
+PID: 0.878
+vnc: 0.878
+socket: 0.860
+network: 0.847
+KVM: 0.830
+boot: 0.830
+files: 0.804
+
+static build with curses fails if it requires -ltinfo
+
+On my system (amd64 Debian wheezy/sid) static ncurses build requires -ltinfo:
+$ pkg-config --libs --static ncurses
+-lncurses -ltinfo
+
+$ ../../configure --enable-curses --static
+# Actually this fails on line
+    if compile_prog "" "$curses_lib" ; then
+# with
+ERROR
+ERROR: User requested feature curses
+ERROR: configure was not able to find it
+ERROR
+# but if we add -ltinfo to this line the check succeeds
+...
+static build      yes
+...
+
+$ make
+...
+...
+  CC    i386-softmmu/hw/i386/../kvm/pci-assign.o
+  LINK  i386-softmmu/qemu-system-i386
+../os-posix.o: In function `change_process_uid':
+/home/vadim/soft/qemu/os-posix.c:205: warning: Using 'initgroups' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking # and many alike warnings
+...
+../ui/curses.o: In function `curses_cursor_position':
+/home/vadim/soft/qemu/ui/curses.c:137: undefined reference to `COLS'
+/home/vadim/soft/qemu/ui/curses.c:137: undefined reference to `LINES'
+/home/vadim/soft/qemu/ui/curses.c:138: undefined reference to `stdscr'
+/home/vadim/soft/qemu/ui/curses.c:139: undefined reference to `curs_set'
+../ui/curses.o: In function `curses_calc_pad':
+/home/vadim/soft/qemu/ui/curses.c:68: undefined reference to `stdscr'
+/home/vadim/soft/qemu/ui/curses.c:69: undefined reference to `stdscr'
+... and so on
+
+I tried to build the very minimal static qemu executable. Actual configure line I tried first was 
+../../configure --target-list=i386-softmmu --disable-sdl --disable-virtfs --disable-vnc --disable-xen --disable-brlapi --disable-bluez --disable-slirp --disable-kvm --disable-user --disable-vde --disable-vhost-net --disable-spice --disable-libiscsi --disable-smartcard --disable-usb-redir --disable-guest-agent  --audio-drv-list=  --audio-card-list= --enable-curses --static
+
+and the errors were the same.
+
+I can reproduce this issue.
+
+I tried
+
+./configure --static --target-list="x86_64-softmmu" --enable-curses
+
+I get
+
+ERROR
+ERROR: User requested feature curses
+ERROR: configure was not able to find it
+ERROR
+
+Please try qemu.git/master.
+
+If the error still occurs, please attach config.log.
+
+The problem may have to do with the way ./configure compile_prog and pkg_config interact with the --static option.  The --static option is supposed to set up LDFLAGS -static and pkg-config --static.
+
+The curses probing code tries building -lncurses, -lcurses, and finally pkg-config ncurses.  Try the following change:
+curses_list="$($pkg_config --libs ncurses 2>/dev/null):-lncurses:-lcurses"
+
+That will probe pkg-config ncurses first.
+
+I ran into the same issue on FreeBSD, and just posted my patch to the qemu-devel list.  It's the same solution stefanha describes above.
+
+(On FreeBSD we have an additional issue; we don't ship the .pc file with the ncurses port right now.  I just hacked one together to include -ltinfo in Libs.private.)
+
+
+Patch had been included here:
+http://git.qemu.org/?p=qemu.git;a=commitdiff;h=cfeda5f4b8710b6ba14
+So I think we can now mark this ticket as "Fix released".
+
diff --git a/results/classifier/108/semantic/1152 b/results/classifier/108/semantic/1152
new file mode 100644
index 000000000..e34177eb6
--- /dev/null
+++ b/results/classifier/108/semantic/1152
@@ -0,0 +1,43 @@
+semantic: 0.913
+other: 0.854
+KVM: 0.763
+graphic: 0.740
+debug: 0.727
+device: 0.624
+performance: 0.622
+boot: 0.565
+PID: 0.536
+permissions: 0.444
+vnc: 0.421
+socket: 0.384
+files: 0.327
+network: 0.323
+
+Windows crashes on resuming from sleep if hv-tlbflush is enabled
+Description of problem:
+The above steps cause my Windows VM to BSOD immediately upon waking up (even before restarting the display driver in my case).
+Steps to reproduce:
+1. Boot Windows
+2. Tell Windows to go to sleep (observe that qemu's state switches to suspended)
+3. Cause windows to wake up (e.g. using the `system_wakeup` HMP command)
+Additional information:
+Looking at the crash dumps always shows the "ATTEMPTED WRITE TO READONLY MEMORY" error, and always with this stack trace:
+
+```
+nt!KeBugCheckEx
+nt!MiRaisedIrqlFault+0x1413a6
+nt!MmAccessFault+0x4ef
+nt!KiPageFault+0x35e
+nt!MiIncreaseUsedPtesCount+0x12
+nt!MiBuildForkPte+0xc6
+nt!MiCloneVads+0x4ab
+nt!MiCloneProcessAddressSpace+0x261
+nt!MmInitializeProcessAddressSpace+0x1cb631
+nt!PspAllocateProcess+0x1d13
+nt!PspCreateProcess+0x242
+nt!NtCreateProcessEx+0x85
+nt!KiSystemServiceCopyEnd+0x25
+ntdll!NtCreateProcessEx+0x14
+```
+
+However, the process that is being created here is always `WerFault.exe`, i.e. the crash reporter. The crashing process is seemingly random. Removing `hv-tlbflush` from the command line resolves the problem. Hence, my hypothesis is that due to improper TLB flushing during wakeup, a random application on the core will crash, which spawns `WerFault.exe` which then immediately crashes again inside the kernel (also because of bad/stale TLB contents) and causes the BSOD. Perhaps one core wakes up first, requests a TLB flush, which is then *not* propagated to sleeping cores due to hv-tlbflush. Then one of those cores wakes up without the TLB flush?
diff --git a/results/classifier/108/semantic/1212 b/results/classifier/108/semantic/1212
new file mode 100644
index 000000000..e3e92b3d1
--- /dev/null
+++ b/results/classifier/108/semantic/1212
@@ -0,0 +1,24 @@
+semantic: 0.916
+device: 0.885
+PID: 0.881
+graphic: 0.865
+debug: 0.838
+network: 0.785
+vnc: 0.763
+socket: 0.711
+files: 0.687
+performance: 0.667
+boot: 0.532
+permissions: 0.432
+other: 0.075
+KVM: 0.018
+
+A NULL pointer dereference issue in elf2dmp
+Description of problem:
+SIGSEGV in get_pml4e because it doesn't handle a NULL result properly.
+Steps to reproduce:
+1. Launch qemu, run "gdb attach -p $QEMU_PID", and run "gcore" inside gdb to generate a coredump
+2. ./elf2dmp ./core.111 ./out.dmp
+3. Get a segmentation fault
+Additional information:
+![1](/uploads/39da5ed2da15b105664ee7ee05f69078/1.png)
diff --git a/results/classifier/108/semantic/124 b/results/classifier/108/semantic/124
new file mode 100644
index 000000000..0a56c5e6f
--- /dev/null
+++ b/results/classifier/108/semantic/124
@@ -0,0 +1,16 @@
+semantic: 0.952
+device: 0.844
+performance: 0.726
+debug: 0.657
+network: 0.595
+graphic: 0.461
+boot: 0.240
+vnc: 0.123
+other: 0.085
+permissions: 0.063
+socket: 0.059
+KVM: 0.053
+PID: 0.041
+files: 0.006
+
+SIGSEGV when reading ARM GIC registers through GDB stub
diff --git a/results/classifier/108/semantic/1288385 b/results/classifier/108/semantic/1288385
new file mode 100644
index 000000000..1ff17a7e7
--- /dev/null
+++ b/results/classifier/108/semantic/1288385
@@ -0,0 +1,105 @@
+semantic: 0.923
+other: 0.920
+graphic: 0.920
+permissions: 0.916
+vnc: 0.911
+debug: 0.908
+performance: 0.905
+KVM: 0.900
+boot: 0.849
+device: 0.849
+PID: 0.840
+network: 0.787
+files: 0.780
+socket: 0.734
+
+VFIO passthrough causes assertion failure
+
+Since commit 5e95494380ec I am no longer able to pass through my Nvidia GTX 770 using VFIO. Qemu terminates with:
+
+qemu-system-x86_64: hw/pci/pcie.c:240: pcie_cap_slot_hotplug_common: Assertion `((pci_dev->devfn) & 0x07) == 0' failed.
+
+Above output was generated using commit f55ea6297cc0.
+
+
+Lspci of the vga card:
+
+01:00.0 VGA compatible controller: NVIDIA Corporation GK104 [GeForce GTX 770] (rev a1)
+	Subsystem: Gigabyte Technology Co., Ltd Device 360c
+	Kernel driver in use: vfio-pci
+01:00.1 Audio device: NVIDIA Corporation GK104 HDMI Audio Controller (rev a1)
+	Subsystem: Gigabyte Technology Co., Ltd Device 360c
+	Kernel driver in use: vfio-pci
+
+
+Commandline used to start qemu:
+
+qemu-system-x86_64 -machine accel=kvm \
+        -nodefaults \
+        -name VFIO-Test \
+        -machine q35 \
+        -cpu host \
+        -smp 1 \
+        -enable-kvm \
+        -m 1024 \
+        -k de \
+        -vga none \
+        -device ioh3420,bus=pcie.0,addr=1c.0,multifunction=on,port=1,chassis=1,id=root.1 \
+        -device vfio-pci,host=01:00.0,bus=root.1,addr=00.0,multifunction=on,x-vga=on \
+        -device vfio-pci,host=01:00.1,bus=root.1,addr=00.1 \
+        -rtc base=utc \
+        -boot order=d \
+        -device ide-cd,drive=drive-cd-disk1,id=cd-disk1,unit=0 \
+        -drive file=/home/bluebird/Downloads/systemrescuecd-x86-4.0.0.iso,if=none,id=drive-cd-disk1,media=cdrom \
+        -nographic
+
+
+Full output of git bisect:
+
+5e95494380ecf83c97d28f72134ab45e0cace8f9 is the first bad commit
+commit 5e95494380ecf83c97d28f72134ab45e0cace8f9
+Author: Igor Mammedov <email address hidden>
+Date:   Wed Feb 5 16:36:52 2014 +0100
+
+    hw/pci: switch to a generic hotplug handling for PCIDevice
+    
+    make qdev_unplug()/device_set_realized() to call hotplug handler's
+    plug/unplug methods if available and remove not needed anymore
+    hot(un)plug handling from PCIDevice.
+    
+    In case if hotplug handler is not available, revert to the legacy
+    hotplug method for compatibility with not yet converted buses.
+    
+    Signed-off-by: Igor Mammedov <email address hidden>
+    Reviewed-by: Michael S. Tsirkin <email address hidden>
+    Signed-off-by: Michael S. Tsirkin <email address hidden>
+
+:040000 040000 9bdab0d75fbc9be4fe2e4274e58e0cdcd347ac7e d6d6294ea9c06e80a0fc8fcabd6345dfae5137ad M	hw
+:040000 040000 d064d75ca8b8f169c41eee2683082e8f9104e968 f2abbf9bee754ada0f49135968455fd1a69b2186 M	include
+:040000 040000 c515daff6c77f9bd2cc32873be4c5c3a1c20cbb9 c506f5587afe8f7ee129a7ca6e3ae2e5118254f9 M	tests
+
+commit 6e1f0a55a14bad1d0c8b9d29626ef4e4b2617c74
+Author: Igor Mammedov <email address hidden>
+Date:   Mon Feb 17 15:00:06 2014 +0100
+
+    PCIE: fix regression with coldplugged multifunction device
+    
+    PCIE is causing asserts each time a multifunction device is added
+    on command line (coldplug).
+    
+    This is caused by
+    commit a66e657e18cd9b70e9f57ae5512c07faf2bc508f
+        pci/pcie: convert PCIE hotplug to use hotplug-handler API
+    QEMU abort is caused by misplaced assertion, which should
+    be checked only when device is hotplugged.
+    
+    Reference to regression report:
+     http://<email address hidden>/msg216226.html
+    
+    Fixes: a66e657e18cd9b70e9f57ae5512c07faf2bc508f
+    
+    Reported-By: Nigel Kukard <email address hidden>
+    Signed-off-by: Igor Mammedov <email address hidden>
+    Reviewed-by: Michael S. Tsirkin <email address hidden>
+    Signed-off-by: Michael S. Tsirkin <email address hidden>
+
diff --git a/results/classifier/108/semantic/1347555 b/results/classifier/108/semantic/1347555
new file mode 100644
index 000000000..5703519ca
--- /dev/null
+++ b/results/classifier/108/semantic/1347555
@@ -0,0 +1,128 @@
+semantic: 0.931
+other: 0.915
+debug: 0.915
+permissions: 0.915
+device: 0.912
+performance: 0.901
+graphic: 0.881
+PID: 0.871
+files: 0.866
+socket: 0.864
+network: 0.849
+KVM: 0.840
+vnc: 0.833
+boot: 0.817
+
+qemu build failure, hxtool is a bash script, not a /bin/sh script
+
+hxtool (part of the early build process) is a bash script.  Running it with /bin/sh yields a syntax error on line 10:
+
+ 10             STEXI*|ETEXI*|SQMP*|EQMP*) flag=$(($flag^1))
+
+$(( expr )) is a bash extension, not part of /bin/sh.
+
+Note that replacing the sh on the first line of hxtool with /bin/bash does not help, because the script is run explicitly from the Makefile with sh:
+
+154         $(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@,"  GEN   $@")
+
+The fix is to change those lines to
+
+154         $(call quiet-command,bash $(SRC_PATH)/scripts/hxtool -h < $< > $@,"  GEN   $@")
+
+(there are five or so).
+
+On 07/23/2014 04:21 AM, Felix von Leitner wrote:
+> Public bug reported:
+> 
+> hxtool (part of the early build process) is a bash script.  Running it
+> with /bin/sh yields a syntax error on line 10:
+> 
+>  10             STEXI*|ETEXI*|SQMP*|EQMP*) flag=$(($flag^1))
+> 
+> $(( expr )) is a bash extension, not part of /bin/sh.
+
+Wrong.  $(( expr )) is mandated by POSIX.  What system are you on where
+/bin/sh is not POSIX?  (Solaris is the only platform where /bin/sh does
+not try to be POSIX-compliant, but who uses that for qemu?)
+
+What is the actual syntax error you are seeing?  Is this a bug in dash
+on your distribution?  I can't get dash to fail for me on Fedora:
+
+$ dash -c 'f=1; f=$(($f^1)); echo $f'
+0
+$ dash -n scripts/hxtool; echo $?
+0
+
+-- 
+Eric Blake   eblake redhat com    +1-919-301-3266
+Libvirt virtualization library http://libvirt.org
+
+
+
+I actually have bash installed as /bin/sh and /bin/bash.
+But I also have heirloom sh installed, which installs itself as /sbin/sh, and that happened to be first in my $PATH.
+
+Since the makefiles use "sh script" to run the scripts, that called the heirloom sh.
+
+http://heirloom.sourceforge.net/sh.html
+
+It is, it turns out, derived from OpenSolaris.  So there you go :-)
+
+When I delete /sbin/sh, qemu builds.
+
+On 07/23/2014 10:13 AM, Felix von Leitner wrote:
+> I actually have bash installed as /bin/sh and /bin/bash.
+> But I also have heirloom sh installed, which installs itself as /sbin/sh, and that happened to be first in my $PATH.
+> 
+> Since the makefiles use "sh script" to run the scripts, that called the
+> heirloom sh.
+> 
+> http://heirloom.sourceforge.net/sh.html
+> 
+> It is, it turns out, derived from OpenSolaris.  So there you go :-)
+> 
+> When I delete /sbin/sh, qemu builds.
+
+Then the bug is not in qemu, but in your environment.  Installing
+known-broken heirloom where it can be found first on a PATH search for
+sh is just asking for problems, not just with qemu, but with all SORTS
+of programs that expect POSIX semantics from a Linux /bin/sh.
+
+Rather than change the Makefile to invoke the script with bash, we could
+instead bend over backwards to rewrite the script in a way that works
+with non-POSIX shells (as in, flag=`expr $flag ^ 1`), but that feels
+backwards to me.  Until someone is actively worried about porting qemu
+to a true Solaris environment, rather than just an heirloom-as-/bin/sh
+Linux environment, I don't think it's worth the effort.
+
+-- 
+Eric Blake   eblake redhat com    +1-919-301-3266
+Libvirt virtualization library http://libvirt.org
+
+
+
+On 23 July 2014 17:31, Eric Blake <email address hidden> wrote:
+> Rather than change the Makefile to invoke the script with bash, we could
+> instead bend over backwards to rewrite the script in a way that works
+> with non-POSIX shells (as in, flag=`expr $flag ^ 1`), but that feels
+> backwards to me.  Until someone is actively worried about porting qemu
+> to a true Solaris environment, rather than just an heirloom-as-/bin/sh
+> Linux environment, I don't think it's worth the effort.
+
+My view on this has always been "we shouldn't assume bash,
+but we can assume POSIX shell semantics". (And also that
+we should assume /bin/sh is a POSIX shell, because it's the
+21st century, and Solaris should just get with it :-))
+
+thanks
+-- PMM
+
+
+It turns out that expr does not support ^ (at least according to the man page). :-)
+
+Still, you could do expr -$flag + 1 to do the same thing.
+
+Is the ruckus just about this one place where $(( )) is used or are there other non-Bourne-shell constructs?
+
+Closing this ticket, as it was rather a problem with the non-POSIX-compliant shell and not the QEMU build system.
+
diff --git a/results/classifier/108/semantic/1370 b/results/classifier/108/semantic/1370
new file mode 100644
index 000000000..57e5794d5
--- /dev/null
+++ b/results/classifier/108/semantic/1370
@@ -0,0 +1,28 @@
+semantic: 0.986
+graphic: 0.878
+device: 0.806
+socket: 0.681
+vnc: 0.658
+network: 0.606
+permissions: 0.580
+boot: 0.546
+performance: 0.523
+debug: 0.478
+other: 0.474
+files: 0.411
+PID: 0.269
+KVM: 0.210
+
+x86 BLSI and BLSR semantic bug
+Description of problem:
+The results of the BLSI and BLSR instructions are different from the CPU's. The value of CF is different.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("blsi rax, rbx");
+}
+```
+2. Execute and compare the result with the CPU. The value of `CF` is exactly the opposite. This problem happens with BLSR, too.
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
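+
+A minimal flag-check sketch (not from the original report; it uses GCC extended asm in default AT&T syntax rather than the Intel syntax above, and assumes a BMI1-capable CPU) that makes the expected behavior concrete. Per the Intel SDM, BLSI sets CF when the source is non-zero, and BLSR sets CF when the source is zero:
+```c
+#include <stdio.h>
+
+int main(void) {
+    unsigned long src = 0x62f34955226b2b5dUL;   /* arbitrary non-zero source */
+    unsigned long dst;
+    unsigned char cf;
+
+    /* BLSI: dst = src & -src; the SDM says CF = (src != 0), so CF should be 1. */
+    asm volatile("blsi %[s], %[d]\n\t"
+                 "setc %[c]"
+                 : [d] "=r"(dst), [c] "=q"(cf)
+                 : [s] "r"(src)
+                 : "cc");
+    printf("blsi: dst=%#lx cf=%u (expected cf=1)\n", dst, cf);
+    return 0;
+}
+```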
diff --git a/results/classifier/108/semantic/1371 b/results/classifier/108/semantic/1371
new file mode 100644
index 000000000..ab73169e2
--- /dev/null
+++ b/results/classifier/108/semantic/1371
@@ -0,0 +1,34 @@
+semantic: 0.995
+graphic: 0.824
+device: 0.665
+boot: 0.468
+vnc: 0.465
+socket: 0.452
+permissions: 0.325
+network: 0.307
+debug: 0.306
+other: 0.217
+files: 0.203
+performance: 0.197
+PID: 0.103
+KVM: 0.064
+
+x86 BLSMSK semantic bug
+Description of problem:
+The result of the BLSMSK instruction is different from the CPU's. The value of CF is different.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("mov rax, 0x65b2e276ad27c67");
+    asm("mov rbx, 0x62f34955226b2b5d");
+    asm("blsmsk eax, ebx");
+}
+```
+2. Execute and compare the result with the CPU.
+    - CPU
+        - CF = 0
+    - QEMU
+        - CF = 1
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
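+
+For reference (not from the report): the SDM gives BLSMSK the opposite CF rule from BLSI, setting CF only when the source is zero, so with the non-zero ebx above the expected CF is 0. A minimal sketch of the reference semantics in plain C:
+```c
+#include <stdio.h>
+#include <stdint.h>
+
+/* Reference model of 32-bit BLSMSK: dst = src ^ (src - 1); CF = (src == 0). */
+int main(void) {
+    uint32_t src = 0x226b2b5du;   /* low 32 bits of the report's rbx */
+    uint32_t dst = src ^ (src - 1u);
+    printf("dst=%#x expected CF=%u\n", dst, src == 0u);   /* CF = 0, as on the CPU */
+    return 0;
+}
+```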
diff --git a/results/classifier/108/semantic/1372 b/results/classifier/108/semantic/1372
new file mode 100644
index 000000000..38dc06e68
--- /dev/null
+++ b/results/classifier/108/semantic/1372
@@ -0,0 +1,35 @@
+semantic: 0.995
+graphic: 0.860
+device: 0.736
+vnc: 0.420
+boot: 0.411
+debug: 0.352
+permissions: 0.340
+socket: 0.263
+network: 0.171
+other: 0.159
+PID: 0.153
+performance: 0.140
+files: 0.125
+KVM: 0.083
+
+x86 BEXTR semantic bug
+Description of problem:
+The result of the BEXTR instruction is different from the CPU's. The value of the destination register is different. I think QEMU does not consider the operand-size limit.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("mov rax, 0x17b3693f77fb6e9");
+    asm("mov rbx, 0x8f635a775ad3b9b4");
+    asm("mov rcx, 0xb717b75da9983018");
+    asm("bextr eax, ebx, ecx");
+}
+```
+2. Execute and compare the result with the CPU.
+    - CPU
+        - RAX = 0x5a
+    - QEMU
+        - RAX = 0x635a775a
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
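+
+A reference-computation sketch (not from the report) of what 32-bit BEXTR should return per the SDM: the source is truncated to the operand size before extraction, which is exactly what QEMU appears to miss here:
+```c
+#include <stdio.h>
+#include <stdint.h>
+
+/* Reference model of 32-bit BEXTR: start = ctrl[7:0], len = ctrl[15:8]. */
+static uint32_t bextr32(uint32_t src, uint32_t ctrl) {
+    unsigned start = ctrl & 0xff;
+    unsigned len   = (ctrl >> 8) & 0xff;
+    if (start >= 32 || len == 0)
+        return 0;
+    uint64_t tmp = (uint64_t)src >> start;   /* src is already truncated to 32 bits */
+    if (len < 32)
+        tmp &= (1ULL << len) - 1;
+    return (uint32_t)tmp;
+}
+
+int main(void) {
+    /* ebx and ecx from the report: start = 0x18, len = 0x30. */
+    printf("%#x\n", bextr32(0x5ad3b9b4u, 0xa9983018u));   /* prints 0x5a */
+    return 0;
+}
+```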
diff --git a/results/classifier/108/semantic/1373 b/results/classifier/108/semantic/1373
new file mode 100644
index 000000000..0f5a7af92
--- /dev/null
+++ b/results/classifier/108/semantic/1373
@@ -0,0 +1,35 @@
+semantic: 0.996
+graphic: 0.898
+device: 0.784
+vnc: 0.746
+boot: 0.505
+socket: 0.502
+debug: 0.488
+permissions: 0.472
+performance: 0.412
+network: 0.373
+files: 0.298
+other: 0.248
+PID: 0.169
+KVM: 0.113
+
+x86 ADOX and ADCX semantic bug
+Description of problem:
+The results of the ADOX and ADCX instructions are different from the CPU's. The value of one of the EFLAGS bits is different.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("push 512; popfq;");
+    asm("mov rax, 0xffffffff84fdbf24");
+    asm("mov rbx, 0xb197d26043bec15d");
+    asm("adox eax, ebx");
+}
+```
+2. Execute and compare the result with the CPU. This problem happens with ADCX, too (with CF).
+    - CPU
+        - OF = 0
+    - QEMU
+        - OF = 1
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
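+
+A reference sketch (not from the report) of the expected OF: per the SDM, ADOX is an unsigned add that uses OF as both carry-in and carry-out, and `push 512; popfq` clears OF (bit 11), so the carry-in here is 0:
+```c
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void) {
+    /* eax and ebx from the report; OF (carry-in) cleared by popfq. */
+    uint32_t a = 0x84fdbf24u, b = 0x43bec15du;
+    unsigned of_in = 0;
+
+    uint64_t sum = (uint64_t)a + b + of_in;
+    unsigned of_out = (unsigned)(sum >> 32);   /* carry out of bit 31 */
+    printf("result=%#x expected OF=%u\n", (uint32_t)sum, of_out);   /* OF = 0 */
+    return 0;
+}
+```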
diff --git a/results/classifier/108/semantic/1374 b/results/classifier/108/semantic/1374
new file mode 100644
index 000000000..d2f8966c0
--- /dev/null
+++ b/results/classifier/108/semantic/1374
@@ -0,0 +1,37 @@
+semantic: 0.993
+graphic: 0.807
+device: 0.700
+boot: 0.382
+vnc: 0.341
+debug: 0.324
+socket: 0.314
+network: 0.272
+permissions: 0.246
+performance: 0.192
+PID: 0.155
+other: 0.093
+files: 0.078
+KVM: 0.044
+
+x86 BZHI semantic bug
+Description of problem:
+The result of the BZHI instruction is different from the CPU's. The values of the destination register and the SF bit of EFLAGS are different.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("mov rax, 0xb1aa9da2fe33fe3");
+    asm("mov rbx, 0x80000000ffffffff");
+    asm("mov rcx, 0xf3fce8829b99a5c6");
+    asm("bzhi rax, rbx, rcx");
+}
+```
+2. Execute and compare the result with the CPU.
+    - CPU
+        - RAX = 0x80000000ffffffff
+        - SF = 1
+    - QEMU
+        - RAX = 0xffffffff
+        - SF = 0
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
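+
+A reference sketch (not from the report) of the SDM semantics: 64-bit BZHI takes N = ctrl[7:0] and clears bits [63:N]; when N > 63 the source passes through unchanged and CF is set, and SF is the top bit of the result. With the report's rcx, N = 0xc6 = 198:
+```c
+#include <stdio.h>
+#include <stdint.h>
+
+/* Reference model of 64-bit BZHI: N = ctrl[7:0]; clear bits [63:N]. */
+static uint64_t bzhi64(uint64_t src, uint64_t ctrl, unsigned *sf, unsigned *cf) {
+    unsigned n = ctrl & 0xff;
+    uint64_t res = (n > 63) ? src
+                 : (n == 0) ? 0
+                 : src & (~0ULL >> (64 - n));
+    *sf = (unsigned)(res >> 63);
+    *cf = (n > 63);
+    return res;
+}
+
+int main(void) {
+    unsigned sf, cf;
+    uint64_t r = bzhi64(0x80000000ffffffffULL, 0xf3fce8829b99a5c6ULL, &sf, &cf);
+    /* Prints result=0x80000000ffffffff SF=1 CF=1, matching the CPU column above. */
+    printf("result=%#llx SF=%u CF=%u\n", (unsigned long long)r, sf, cf);
+    return 0;
+}
+```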
diff --git a/results/classifier/108/semantic/1375 b/results/classifier/108/semantic/1375
new file mode 100644
index 000000000..af9ba57bc
--- /dev/null
+++ b/results/classifier/108/semantic/1375
@@ -0,0 +1,34 @@
+semantic: 0.988
+graphic: 0.792
+device: 0.739
+other: 0.687
+performance: 0.586
+vnc: 0.520
+permissions: 0.509
+debug: 0.468
+network: 0.458
+boot: 0.413
+socket: 0.407
+PID: 0.343
+KVM: 0.247
+files: 0.241
+
+x86 SSE/SSE2/SSE3 instruction semantic bugs with NaN
+Description of problem:
+The result of SSE/SSE2/SSE3 instructions with NaN operands is different from the CPU's. The Intel manual (Volume 1, Appendix D.4.2.2) defines the behavior of such instructions with NaN operands, but I think QEMU does not implement these semantics exactly, because the byte-level result is different.
+Steps to reproduce:
+1. Compile this code
+```
+void main() {
+    asm("mov rax, 0x000000007fffffff; push rax; mov rax, 0x00000000ffffffff; push rax; movdqu XMM1, [rsp];");
+    asm("mov rax, 0x2e711de7aa46af1a; push rax; mov rax, 0x7fffffff7fffffff; push rax; movdqu XMM2, [rsp];");
+    asm("addsubps xmm1, xmm2");
+}
+```
+2. Execute and compare the result with the CPU. This problem happens with other SSE/SSE2/SSE3 instructions specified in the manual, Volume 1 Appendix D.4.2.2.
+    - CPU
+        - xmm1[3] = 0xffffffff
+    - QEMU
+        - xmm1[3] = 0x7fffffff
+Additional information:
+This bug is discovered by research conducted by KAIST SoftSec.
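+
+A self-contained harness (not from the report; assumes GCC or Clang with `-msse3`) that reproduces the same bit patterns through intrinsics and dumps every dword of the result, so the host CPU and QEMU outputs can be diffed directly without assuming any particular NaN-propagation rule:
+```c
+#include <stdio.h>
+#include <stdint.h>
+#include <immintrin.h>
+
+int main(void) {
+    /* Same bit patterns the report pushes onto the stack, low dword first. */
+    uint32_t a[4] = { 0xffffffffu, 0x00000000u, 0x7fffffffu, 0x00000000u };
+    uint32_t b[4] = { 0x7fffffffu, 0x7fffffffu, 0xaa46af1au, 0x2e711de7u };
+    __m128 x = _mm_loadu_ps((const float *)a);
+    __m128 y = _mm_loadu_ps((const float *)b);
+
+    x = _mm_addsub_ps(x, y);                 /* SSE3 ADDSUBPS */
+
+    uint32_t out[4];
+    _mm_storeu_ps((float *)out, x);
+    for (int i = 0; i < 4; i++)
+        printf("dword %d = 0x%08x\n", i, out[i]);   /* compare CPU vs QEMU */
+    return 0;
+}
+```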
diff --git a/results/classifier/108/semantic/1405 b/results/classifier/108/semantic/1405
new file mode 100644
index 000000000..cd4d96c0a
--- /dev/null
+++ b/results/classifier/108/semantic/1405
@@ -0,0 +1,136 @@
+semantic: 0.917
+device: 0.914
+PID: 0.910
+other: 0.905
+graphic: 0.903
+debug: 0.899
+permissions: 0.884
+socket: 0.878
+performance: 0.869
+vnc: 0.868
+files: 0.863
+boot: 0.847
+network: 0.821
+KVM: 0.792
+
+linux-user: calling SYS_set_thread_area and SYS_get_thread_area has incorrect results in a multithreaded environment
+Description of problem:
+
+Steps to reproduce:
+1. Compile test.out with the following command and source code:
+```
+gcc -m32 -g test.c -lpthread -o test.out
+```
+```
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <asm/ldt.h>
+
+static inline int set_thread_area( struct user_desc *ptr )
+{
+    return syscall( SYS_set_thread_area, ptr );
+}
+
+static inline int get_thread_area( struct user_desc *ptr )
+{
+    return syscall( SYS_get_thread_area, ptr );
+}
+
+static unsigned int entry_number;
+
+static void* start_routine(void* ptr) 
+{
+    struct user_desc user_desc0 = { entry_number };
+    struct user_desc user_desc1 = { entry_number };
+    struct user_desc user_desc2 = { entry_number };
+    get_thread_area(&user_desc0);
+    printf("child thread: %u\n", user_desc0.base_addr);
+
+    user_desc1.base_addr = 2;
+    user_desc1.limit     = 0xFFF;
+    user_desc1.seg_32bit = 1;
+    set_thread_area( &user_desc1 );
+
+    get_thread_area(&user_desc2);
+    printf("child thread: %u\n", user_desc2.base_addr);
+    return NULL;
+}
+
+int main(void) {
+    struct user_desc user_desc0 = { -1 }, user_desc1 = { 0 }, user_desc2 = { 0 };
+    user_desc0.seg_32bit = 1;
+    user_desc0.useable = 1;
+    set_thread_area( &user_desc0 );
+
+    entry_number = user_desc0.entry_number;
+
+    user_desc1.entry_number = entry_number;
+    user_desc1.base_addr = 1;
+    user_desc1.limit     = 0xFFF;
+    user_desc1.seg_32bit = 1;
+    set_thread_area( &user_desc1 );
+
+    pthread_t thread_id;
+    pthread_create(&thread_id, NULL, &start_routine, NULL);
+    pthread_join(thread_id, NULL);
+
+    user_desc2.entry_number = entry_number;
+    get_thread_area(&user_desc2);
+    printf("main  thread: %u\n", user_desc2.base_addr); // main  thread: 1
+    return 0;
+}
+```
+2. Correct Result:
+```
+child thread: 1
+child thread: 2
+main  thread: 1
+```
+qemu-i386 Print Result:
+```
+child thread: 1
+child thread: 2
+main  thread: 2
+```
+Additional information:
+Patch to fix the bug:
+
+https://lists.nongnu.org/archive/html/qemu-devel/2023-02/msg02203.html
+
+CPUX86State::gdt::base must have different values on different threads, but it points to the same memory.
+The value of CPUX86State::gdt::base must be copied when cloning a thread.
+
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/tls.c
+
+SYS_set_thread_area calls do_set_thread_area in the kernel, which sets the user_desc in a different memory area on each thread. tls_array is in thread-local memory.
+
+```
+static void set_tls_desc(struct task_struct *p, int idx,
+			 const struct user_desc *info, int n)
+{
+	struct thread_struct *t = &p->thread;
+	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+	int cpu;
+
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+
+	while (n-- > 0) {
+		if (LDT_empty(info) || LDT_zero(info))
+			memset(desc, 0, sizeof(*desc));
+		else
+			fill_ldt(desc, info);
+		++info;
+		++desc;
+	}
+
+	if (t == &current->thread)
+		load_TLS(t, cpu);
+
+	put_cpu();
+}
+```
diff --git a/results/classifier/108/semantic/1417 b/results/classifier/108/semantic/1417
new file mode 100644
index 000000000..dc6844841
--- /dev/null
+++ b/results/classifier/108/semantic/1417
@@ -0,0 +1,20 @@
+semantic: 0.933
+device: 0.903
+graphic: 0.852
+socket: 0.768
+network: 0.737
+PID: 0.704
+vnc: 0.699
+performance: 0.649
+debug: 0.634
+other: 0.584
+files: 0.444
+permissions: 0.390
+boot: 0.375
+KVM: 0.053
+
+QEMU fails an assertion when hitting a breakpoint that is set on a TLB-missed, 2-stage-translated AArch64 memory address
+Description of problem:
+After upgrading to QEMU v7.2.0 from v7.1.0, when hitting an instruction breakpoint on a memory address that is translated by 2 stages of translation, and is not already cached in the TLB, QEMU fails the assertion at target/arm/ptw.c:301 (`assert(fi->type != ARMFault_None);`).
+
+I believe this was introduced in f3639a64f602ea5c1436eb9c9b89f42028e3a4a8 (@rth7680), since in that commit the failure check for the return value of `get_phys_addr_lpae()` changed from checking for true (meaning failure) to checking for false (which actually means success).
diff --git a/results/classifier/108/semantic/1497711 b/results/classifier/108/semantic/1497711
new file mode 100644
index 000000000..01d60d887
--- /dev/null
+++ b/results/classifier/108/semantic/1497711
@@ -0,0 +1,52 @@
+semantic: 0.958
+graphic: 0.803
+device: 0.783
+socket: 0.754
+vnc: 0.750
+network: 0.718
+other: 0.664
+files: 0.657
+performance: 0.644
+PID: 0.598
+permissions: 0.533
+debug: 0.530
+boot: 0.466
+KVM: 0.334
+
+tests/libqos/ahci.c:745: redundant condition?
+
+[qemu/tests/libqos/ahci.c:745]: (style) Redundant condition: props.ncq. '!props.ncq || (props.ncq && props.lba48)' is equivalent to '!props.ncq || props.lba48'
+
+    g_assert(!props->ncq || (props->ncq && props->lba48));
+
+On Sun, Sep 20, 2015 at 10:08:49AM -0000, dcb wrote:
+> Public bug reported:
+> 
+> [qemu/tests/libqos/ahci.c:745]: (style) Redundant condition: props.ncq.
+> '!props.ncq || (props.ncq && props.lba48)' is equivalent to '!props.ncq
+> || props.lba48'
+> 
+>     g_assert(!props->ncq || (props->ncq && props->lba48));
+
+CCing John Snow, AHCI maintainer
+
+
+Fixed in:
+
+commit 3d937150dce20cb95cbaae99b6fd48dca4261f32
+Author: John Snow <email address hidden>
+Date:   Mon Oct 5 12:00:55 2015 -0400
+
+    qtest/ahci: fix redundant assertion
+    
+    Fixes https://bugs.launchpad.net/qemu/+bug/1497711
+    
+    (!ncq || (ncq && lba48)) is the same as
+    (!ncq || lba48).
+    
+    The intention is simply: "If a command is NCQ,
+    it must also be LBA48."
+    
+    Signed-off-by: John Snow <email address hidden>
+    Message-id: <email address hidden>
+
diff --git a/results/classifier/108/semantic/1586229 b/results/classifier/108/semantic/1586229
new file mode 100644
index 000000000..a46910f77
--- /dev/null
+++ b/results/classifier/108/semantic/1586229
@@ -0,0 +1,45 @@
+semantic: 0.911
+graphic: 0.895
+other: 0.890
+PID: 0.848
+device: 0.822
+performance: 0.812
+debug: 0.791
+permissions: 0.785
+boot: 0.747
+files: 0.644
+socket: 0.619
+network: 0.540
+vnc: 0.361
+KVM: 0.322
+
+seabios hell
+
+getting weird annoying seabios hell and not sure how to fix it.
+
+ok.
+
+there IS a SEA-BIOS. There IS a way in.
+
+-I found it by mistake. (And y'all need to move the BIOS key... it's in the wrong place.)
+
+I was trying to boot Yosemite to re-install. I mashed the key too early and it wanted to boot the hard drive.
+
+Apparently the BIOS loads AFTER the hard drive wants to boot, not BEFORE it. And it will ONLY load when booting a hard disk.
+
+..Booting hard disk... [mash F8 here but let go and wait]
+eventually it will want to load the OS and clear the screen [mash F8 again]
+
+--You're in!
+
+It's tiny, like a mini Award BIOS, but you're in!
+-Change anything HERE, though... and kiss booting a CD goodbye!
+
+I'm trying to diagnose a black screen; it seems related to seabios, not the VGA driver.
+
+-Mayhaps it wants to boot the hard disk, but in fact it's not bootable, as the installer hung (and unices often install the bootloader late in the process)?
+
+I can't boot the disc to reinstall to tell. But I have a few DOS ISOs lying around... hmmm.
+
+Sounds like you're describing problems with SeaBIOS, not with QEMU. May I suggest reporting these issues to the SeaBIOS project instead? See http://seabios.org/
+
diff --git a/results/classifier/108/semantic/1696180 b/results/classifier/108/semantic/1696180
new file mode 100644
index 000000000..9b8ff7c02
--- /dev/null
+++ b/results/classifier/108/semantic/1696180
@@ -0,0 +1,70 @@
+semantic: 0.912
+other: 0.910
+files: 0.909
+debug: 0.901
+KVM: 0.884
+graphic: 0.876
+performance: 0.871
+PID: 0.862
+device: 0.852
+permissions: 0.846
+vnc: 0.842
+boot: 0.815
+socket: 0.805
+network: 0.775
+
+Issues with qemu-img, libgfapi, and encryption at rest
+
+Hi,
+
+Encryption-at-rest has been supported for some time now.  The client is responsible for encrypting the files with the help of a master key file.  I have a properly set up environment and everything appears to be working fine, but when I use qemu-img to move a file to gluster I get the following:
+
+
+# qemu-img convert -f raw -O raw linux.iso gluster://gluster01/virt0/linux.raw
+[2017-06-06 16:52:25.489720] E [mem-pool.c:579:mem_put] (-->/lib64/libglusterfs.so.0(syncop_lookup+0x4e5) [0x7f30f7a36d35] -->/lib64/libglusterfs.so.0(+0x59f02) [0x7f30f7a32f02] -->/lib64/libglusterfs.so.0(mem_put+0x190) [0x7f30f7a24a60] ) 0-mem-pool: mem-pool ptr is NULL
+[2017-06-06 16:52:25.490778] E [mem-pool.c:579:mem_put] (-->/lib64/libglusterfs.so.0(syncop_lookup+0x4e5) [0x7f30f7a36d35] -->/lib64/libglusterfs.so.0(+0x59f02) [0x7f30f7a32f02] -->/lib64/libglusterfs.so.0(mem_put+0x190) [0x7f30f7a24a60] ) 0-mem-pool: mem-pool ptr is NULL
+[2017-06-06 16:52:25.492263] E [mem-pool.c:579:mem_put] (-->/lib64/libglusterfs.so.0(syncop_lookup+0x4e5) [0x7f30f7a36d35] -->/lib64/libglusterfs.so.0(+0x59f02) [0x7f30f7a32f02] -->/lib64/libglusterfs.so.0(mem_put+0x190) [0x7f30f7a24a60] ) 0-mem-pool: mem-pool ptr is NULL
+[2017-06-06 16:52:25.497226] E [mem-pool.c:579:mem_put] (-->/lib64/libglusterfs.so.0(syncop_create+0x44d) [0x7f30f7a3cf5d] -->/lib64/libglusterfs.so.0(+0x59f02) [0x7f30f7a32f02] -->/lib64/libglusterfs.so.0(mem_put+0x190) [0x7f30f7a24a60] ) 0-mem-pool: mem-pool ptr is NULL
+
+On and on until I get this message:
+
+[2017-06-06 17:00:03.467361] E [MSGID: 108006] [afr-common.c:4409:afr_notify] 0-virt0-replicate-0: All subvolumes are down. Going offline until atleast one of them comes back up.
+[2017-06-06 17:00:03.467442] E [MSGID: 108006] [afr-common.c:4409:afr_notify] 0-virt0-replicate-1: All subvolumes are down. Going offline until atleast one of them comes back up.
+
+I asked for help assuming it's a problem with glusterfs and was told it appears qemu-img's implementation of libgfapi doesn't call the xlator function correctly.
+
+I'm using Fedora 24 with version:
+
+qemu-img 2.6.2
+glusterfs-api-3.8.12
+
+When reporting bugs to the QEMU project, please always try with the latest release first (distros are often not shipping the latest version). So can you please try with the latest release of QEMU (currently version 2.9.0)?
+
+Just upgraded to 2.9.0 and actually I see a different issue:
+
+# qemu-img convert -O raw fedora.iso gluster://dalpinfglt04/virt0/fedora6.raw
+[2017-06-07 16:52:43.300902] C [rpc-clnt-ping.c:160:rpc_clnt_ping_timer_expired] 0-virt0-client-2: server 172.19.38.42:49152 has not responded in the last 42 seconds, disconnecting.
+[2017-06-07 17:02:44.342745] E [rpc-clnt.c:365:saved_frames_unwind] (--> /lib64/libglusterfs.so.0(_gf_log_callingfn+0x17d)[0x7f78c3e4fe6d] (--> /lib64/libgfrpc.so.0(saved_frames_unwind+0x1d1)[0x7f78c3c169a1] (--> /lib64/libgfrpc.so.0(saved_frames_destroy+0xe)[0x7f78c3c16abe] (--> /lib64/libgfrpc.so.0(rpc_clnt_connection_cleanup+0x87)[0x7f78c3c18157] (--> /lib64/libgfrpc.so.0(rpc_clnt_notify+0x288)[0x7f78c3c18c28] ))))) 0-virt0-client-2: forced unwinding frame type(GlusterFS 3.3) op(WRITE(13)) called at 2017-06-07 16:52:00.618744 (xid=0x1c)
+[2017-06-07 17:02:44.342952] E [rpc-clnt.c:365:saved_frames_unwind] (--> /lib64/libglusterfs.so.0(_gf_log_callingfn+0x17d)[0x7f78c3e4fe6d] (--> /lib64/libgfrpc.so.0(saved_frames_unwind+0x1d1)[0x7f78c3c169a1] (--> /lib64/libgfrpc.so.0(saved_frames_destroy+0xe)[0x7f78c3c16abe] (--> /lib64/libgfrpc.so.0(rpc_clnt_connection_cleanup+0x87)[0x7f78c3c18157] (--> /lib64/libgfrpc.so.0(rpc_clnt_notify+0x288)[0x7f78c3c18c28] ))))) 0-virt0-client-2: forced unwinding frame type(GF-DUMP) op(NULL(2)) called at 2017-06-07 16:52:00.618753 (xid=0x1d)
+[2017-06-07 17:02:44.343415] E [MSGID: 114031] [client-rpc-fops.c:1593:client3_3_finodelk_cbk] 0-virt0-client-2: remote operation failed [Transport endpoint is not connected]
+[2017-06-07 17:08:49.367264] C [rpc-clnt-ping.c:160:rpc_clnt_ping_timer_expired] 0-virt0-client-3: server 172.19.38.43:49152 has not responded in the last 42 seconds, disconnecting.
+[2017-06-07 17:13:29.969206] E [rpc-clnt.c:365:saved_frames_unwind] (--> /lib64/libglusterfs.so.0(_gf_log_callingfn+0x17d)[0x7f78c3e4fe6d] (--> /lib64/libgfrpc.so.0(saved_frames_unwind+0x1d1)[0x7f78c3c169a1] (--> /lib64/libgfrpc.so.0(saved_frames_destroy+0xe)[0x7f78c3c16abe] (--> /lib64/libgfrpc.so.0(rpc_clnt_connection_cleanup+0x87)[0x7f78c3c18157] (--> /lib64/libgfrpc.so.0(rpc_clnt_notify+0x288)[0x7f78c3c18c28] ))))) 0-virt0-client-3: forced unwinding frame type(GlusterFS 3.3) op(WRITE(13)) called at 2017-06-07 17:08:06.371259 (xid=0x22)
+[2017-06-07 17:13:29.969250] E [MSGID: 114031] [client-rpc-fops.c:1593:client3_3_finodelk_cbk] 0-virt0-client-3: remote operation failed [Transport endpoint is not connected]
+[2017-06-07 17:13:29.969355] E [rpc-clnt.c:365:saved_frames_unwind] (--> /lib64/libglusterfs.so.0(_gf_log_callingfn+0x17d)[0x7f78c3e4fe6d] (--> /lib64/libgfrpc.so.0(saved_frames_unwind+0x1d1)[0x7f78c3c169a1] (--> /lib64/libgfrpc.so.0(saved_frames_destroy+0xe)[0x7f78c3c16abe] (--> /lib64/libgfrpc.so.0(rpc_clnt_connection_cleanup+0x87)[0x7f78c3c18157] (--> /lib64/libgfrpc.so.0(rpc_clnt_notify+0x288)[0x7f78c3c18c28] ))))) 0-virt0-client-3: forced unwinding frame type(GF-DUMP) op(NULL(2)) called at 2017-06-07 17:08:06.371268 (xid=0x23)
+[2017-06-07 17:13:29.972665] E [MSGID: 108008] [afr-transaction.c:2619:afr_write_txn_refresh_done] 0-virt0-replicate-1: Failing FSETXATTR on gfid 86042280-9ae1-444f-8342-be4442f82111: split-brain observed. [Input/output error]
+[2017-06-07 17:13:29.977821] E [MSGID: 108008] [afr-read-txn.c:90:afr_read_txn_refresh_done] 0-virt0-replicate-1: Failing FGETXATTR on gfid 86042280-9ae1-444f-8342-be4442f82111: split-brain observed. [Input/output error]
+[2017-06-07 17:13:29.981667] E [MSGID: 114031] [client-rpc-fops.c:1593:client3_3_finodelk_cbk] 0-virt0-client-2: remote operation failed [Invalid argument]
+[2017-06-07 17:13:30.157560] E [MSGID: 108006] [afr-common.c:4781:afr_notify] 0-virt0-replicate-0: All subvolumes are down. Going offline until atleast one of them comes back up.
+[2017-06-07 17:13:30.157904] E [MSGID: 108006] [afr-common.c:4781:afr_notify] 0-virt0-replicate-1: All subvolumes are down. Going offline until atleast one of them comes back up.
+qemu-img: gluster://dalpinfglt04/virt0/fedora6.raw: error while converting raw: Could not create image: Transport endpoint is not connected
+
+The file was created but nothing was written to it.  Either way, I don't think encryption-at-rest is tested much with qemu integration.
+
+
+This is an automated cleanup. This bug report has been moved to QEMU's
+new bug tracker on gitlab.com and thus gets marked as 'expired' now.
+Please continue with the discussion here:
+
+ https://gitlab.com/qemu-project/qemu/-/issues/145
+
+
diff --git a/results/classifier/108/semantic/1743191 b/results/classifier/108/semantic/1743191
new file mode 100644
index 000000000..9c6959ee6
--- /dev/null
+++ b/results/classifier/108/semantic/1743191
@@ -0,0 +1,492 @@
+semantic: 0.930
+device: 0.915
+permissions: 0.907
+boot: 0.897
+debug: 0.895
+other: 0.893
+PID: 0.893
+performance: 0.867
+vnc: 0.864
+socket: 0.851
+graphic: 0.843
+KVM: 0.838
+network: 0.797
+files: 0.788
+
+Interacting with NetBSD serial console boot blocks no longer works
+
+The NetBSD boot blocks display a menu allowing the user to make a
+selection using the keyboard.  For example, when booting a NetBSD
+installation CD-ROM, the menu looks like this:
+
+         1. Install NetBSD
+         2. Install NetBSD (no ACPI)
+         3. Install NetBSD (no ACPI, no SMP)
+         4. Drop to boot prompt
+
+    Choose an option; RETURN for default; SPACE to stop countdown.
+    Option 1 will be chosen in 30 seconds.
+
+When booting NetBSD in a recent qemu using an emulated serial console,
+making this menu selection no longer works: when you type the selected
+number, the keyboard input is ignored, and the 30-second countdown
+continues.  In older versions of qemu, it works.
+
+To reproduce the problem, run:
+
+   wget http://ftp.netbsd.org/pub/NetBSD/NetBSD-7.1.1/amd64/installation/cdrom/boot-com.iso
+   qemu-system-x86_64 -nographic -cdrom boot-com.iso
+
+During the 30-second countdown, press 4
+
+Expected behavior: The countdown stops and you get a ">" prompt
+
+Incorrect behavior: The countdown continues
+
+There may also be some corruption of the terminal output; for example,
+"Option 1 will be chosen in 30 seconds" may be displayed as "Option 1
+will be chosen in p0 seconds".
+
+Using bisection, I have determined that the problem appeared with qemu
+commit 083fab0290f2c40d3d04f7f22eed9c8f2d5b6787, in which seabios was
+updated to 1.11 prerelease, and the problem is still there as of
+commit 7398166ddf7c6dbbc9cae6ac69bb2feda14b40ac.  The host operating
+system used for the tests was Debian 9 x86_64.
+
+Credit for discovering this bug goes to Paul Goyette.
+
+Reverting to Seabios 1.10 (version rel-1.10.3.0-gb76661dd) fixes this problem. 
+
+Steps:
+
+$ cd && mkdir seabios-test && cd seabios-test
+$ git clone -b 1.10-stable https://github.com/coreboot/seabios.git
+$ cd seabios
+$ make
+$ qemu-system-x86_64 \
+-drive if=virtio,file=/home/oc/VM/img/netbsd.image,index=0,media=disk \
+-M q35,accel=kvm -m 350M -cpu host -smp $(nproc) \
+-nic user,model=virtio-net-pci,ipv6=off \
+-nographic -bios /home/oc/seabios-test/seabios/out/bios.bin
+
+Result: 
+I can interact with NetBSD boot menu and select one of the available options.
+
+Host:
+Linux e130 4.9.0-11-amd64 #1 SMP Debian 4.9.189-3+deb9u1 (2019-09-20) x86_64 GNU/Linux
+
+QEMU emulator version 4.2.0
+
+
+
+Possibly related thread:
+"Do we need a cpu with TSC support to run SeaBIOS?"
+https://<email address hidden>/msg11726.html
+
+Workaround: add "-vga none" to the qemu command line.
+
+@kraxel-redhat,
+
+I guess "-vga none" is implicit when using -nographic? 
+
+However, for the sake of trying, I've added "-vga none" and it won't solve it for me (when using default bios).
+
+Gerd Hoffmann wrote:
+> Workaround: add "-vga none" to the qemu command line.
+
+This supposed workaround does not work for me.
+
+
+@kraxel-redhat: This issue bisects to commit d6728f301d7e6e31ba0ee2fa51ed4a24feab8860 ("add serial console support").  seabios.git/master + "[PATCH] sercon: vbe modeset is int 10h function 4f02 not 4f00" still has the issue.
+
+I'm using the following command-line:
+
+  qemu-system-x86_64 -M accel=kvm -m 1G -cpu host -cdrom ~/Downloads/boot-com.iso -nographic
+
+Ah, it's a special serial console boot iso.  I was trying the normal NetBSD-<version>-amd64.iso.
+
+So, it seems seabios sercon and bootloader are fighting over the serial line.
+
+seabios enables sercon for no-graphical guests ("-machine graphics=off", "-nographics" enables this too).
+
+So one option is to turn off seabios sercon: "qemu -nographic -machine graphics=on".
+
+The other option is to turn on seabios sercon and use the normal boot.iso (this needs the "-vga none" workaround from comment 3, or the sercon patch).
+
+On Fri, 6 Mar 2020 at 13:24, Gerd Hoffmann <email address hidden> wrote:
+> So one option is to turn off seabios sercon: "qemu -nographic -machine
+> graphics=on".
+
+This works for me, but only if I turn off "q35", therefore changing
+from a sata disk to a plain ide:
+
+qemu-system-x86_64 \
+-drive if=virtio,file=/home/oc/VM/img/netbsd.image,index=0,media=disk \
+-drive if=virtio,file=/home/oc/VM/img/newdisk2.img,index=1,media=disk \
+-m 300M -cpu host -smp $(nproc) \
+-nic user,hostfwd=tcp::6665-:22,model=virtio-net-pci,ipv6=off \
+-nographic -machine accel=kvm,graphics=on
+
+
+Just to clarify my last comment, and in absence of updates, if I launch the VM as:
+
+qemu-system-x86_64 \
+-drive if=virtio,file=/home/oc/VM/img/openbsd.image,index=0,media=disk \
+-drive if=virtio,file=/home/oc/VM/img/openbsd.image.old,index=1,media=disk \
+-M q35,accel=kvm,graphics=on -m 250M -cpu host -smp $(nproc) \
+-nic user,hostfwd=tcp::6666-:22,model=virtio-net-pci -nographic
+
+(note the -M q35,accel=kvm,graphics=on), the problem still persists.
+
+I'm still on version 4.2 and I haven't updated to 5.0 yet.
+
+The QEMU project is currently considering to move its bug tracking to
+another system. For this we need to know which bugs are still valid
+and which could be closed already. Thus we are setting older bugs to
+"Incomplete" now.
+
+If you still think this bug report here is valid, then please switch
+the state back to "New" within the next 60 days, otherwise this report
+will be marked as "Expired". Or please mark it as "Fix Released" if
+the problem has been solved with a newer version of QEMU already.
+
+Thank you and sorry for the inconvenience.
+
+This bug was fixed long ago, so long ago that I have no idea when!
+
+Please close with an appropriate status.
+
+
+On Thu, 22 Apr 2021, Thomas Huth wrote:
+
+> The QEMU project is currently considering to move its bug tracking to
+> another system. For this we need to know which bugs are still valid
+> and which could be closed already. Thus we are setting older bugs to
+> "Incomplete" now.
+>
+> If you still think this bug report here is valid, then please switch
+> the state back to "New" within the next 60 days, otherwise this report
+> will be marked as "Expired". Or please mark it as "Fix Released" if
+> the problem has been solved with a newer version of QEMU already.
+>
+> Thank you and sorry for the inconvenience.
+
++--------------------+--------------------------+-----------------------+
+| Paul Goyette       | PGP Key fingerprint:     | E-mail addresses:     |
+| (Retired)          | FA29 0E3B 35AF E8AE 6651 | <email address hidden>     |
+| Software Developer | 0786 F758 55DE 53BA 7731 | <email address hidden>   |
++--------------------+--------------------------+-----------------------+
+
+
+Paul Goyette wrote:
+> This bug was fixed long ago, so long ago that I have no idea when!
+
+No, it is not fixed, and I did actually check before I switched the
+bug state back to "new".
+
+Perhaps you are specifying "-machine graphics=on" as suggested in one
+of the comments?  If so, that's a work-around, and an ugly and
+nonintuitive one at that, not a fix.
+-- 
+Andreas Gustafsson, <email address hidden>
+
+
+On Thu, 22 Apr 2021 at 13:46, Andreas Gustafsson
+<email address hidden> wrote:
+>
+> Paul Goyette wrote:
+> > This bug was fixed long ago, so long ago that I have no idea when!
+>
+> No, it is not fixed, and I did actually check before I switched the
+> bug state back to "new".
+>
+> Perhaps you are specifying "-machine graphics=on" as suggested in one
+> of the comments?  If so, that's a work-around, and an ugly and
+> nonintuitive one at that, not a fix.
+> --
+> Andreas Gustafsson, <email address hidden>
+
+I am currently using:
+
+$ qemu-system-x86_64 --version
+QEMU emulator version 5.2.0
+
+And I have no problem selecting from menu in serial console, so I
+assume this is fixed for me. This is my command line:
+
+$ cat opt/bin/boot-netbsd-virtio
+#!/bin/sh
+qemu-system-x86_64 \
+-drive if=virtio,file=/home/oc/VM/img/netbsd.image,index=0,media=disk \
+-drive if=virtio,file=/home/oc/VM/img/netbsd.image.old,index=1,media=disk \
+-M q35,accel=kvm -m 250M -cpu host -smp $(nproc) \
+-nic user,hostfwd=tcp:127.0.0.1:5555-:22,model=virtio-net-pci,ipv6=off  \
+-daemonize -display none  -vga none \
+-serial mon:telnet:127.0.0.1:6665,server,nowait \
+-pidfile /home/oc/VM/pid/netbsd-pid -nodefaults
+
+telnet 127.0.0.1 6665
+
+
+
+-- 
+Ottavio Caruso
+
+
+On Thu, 22 Apr 2021, Ottavio Caruso wrote:
+
+> On Thu, 22 Apr 2021 at 13:46, Andreas Gustafsson
+> <email address hidden> wrote:
+>>
+>> Paul Goyette wrote:
+>>> This bug was fixed long ago, so long ago that I have no idea when!
+>>
+>> No, it is not fixed, and I did actually check before I switched the
+>> bug state back to "new".
+>>
+>> Perhaps you are specifying "-machine graphics=on" as suggested in one
+>> of the comments?  If so, that's a work-around, and an ugly and
+>> nonintuitive one at that, not a fix.
+
+Andreas is correct - I am using the suggested work-around, and the
+original bug is NOT fixed.
+
+I believe Andreas has moved the bug back to New status to reflect
+that it is not fixed.  (Whether or not it is fixed, _I_ should not
+have asked to have _his_ bug closed.  It's been so long, I almost
+believed it was my bug. :)  My apologies to Andreas and everyone
+else.)
+
+
+
++--------------------+--------------------------+-----------------------+
+| Paul Goyette       | PGP Key fingerprint:     | E-mail addresses:     |
+| (Retired)          | FA29 0E3B 35AF E8AE 6651 | <email address hidden>     |
+| Software Developer | 0786 F758 55DE 53BA 7731 | <email address hidden>   |
++--------------------+--------------------------+-----------------------+
+
+
+Ottavio Caruso wrote:
+> I am currently using:
+> 
+> $ qemu-system-x86_64 --version
+> QEMU emulator version 5.2.0
+> 
+> And I have no problem selecting from menu in serial console, so I
+> assume this is fixed for me. This is my command line:
+> 
+> $ cat opt/bin/boot-netbsd-virtio
+> #!/bin/sh
+> qemu-system-x86_64 \
+> -drive if=virtio,file=/home/oc/VM/img/netbsd.image,index=0,media=disk \
+> -drive if=virtio,file=/home/oc/VM/img/netbsd.image.old,index=1,media=disk \
+> -M q35,accel=kvm -m 250M -cpu host -smp $(nproc) \
+> -nic user,hostfwd=tcp:127.0.0.1:5555-:22,model=virtio-net-pci,ipv6=off  \
+> -daemonize -display none  -vga none \
+> -serial mon:telnet:127.0.0.1:6665,server,nowait \
+> -pidfile /home/oc/VM/pid/netbsd-pid -nodefaults
+> 
+> telnet 127.0.0.1 6665
+
+Have you tried the test case in the original bug report?
+-- 
+Andreas Gustafsson, <email address hidden>
+
+
+On Thu, 22 Apr 2021 at 18:23, Andreas Gustafsson
+<email address hidden> wrote:
+>
+> Ottavio Caruso wrote:
+> > [...]
+>
+> Have you tried the test case in the original bug report?
+> --
+> Andreas Gustafsson, <email address hidden>
+
+You're right. Using the boot-com install image, the problem persists.
+
+
+-- 
+Ottavio Caruso
+
+
+
+
+This is an automated cleanup. This bug report has been moved to QEMU's
+new bug tracker on gitlab.com and thus gets marked as 'expired' now.
+Please continue with the discussion here:
+
+ https://gitlab.com/qemu-project/qemu/-/issues/147
+
+
diff --git a/results/classifier/108/semantic/1809546 b/results/classifier/108/semantic/1809546
new file mode 100644
index 000000000..2d964b1d2
--- /dev/null
+++ b/results/classifier/108/semantic/1809546
@@ -0,0 +1,92 @@
+semantic: 0.913
+debug: 0.906
+performance: 0.875
+permissions: 0.867
+device: 0.860
+other: 0.855
+boot: 0.852
+graphic: 0.848
+network: 0.811
+PID: 0.807
+vnc: 0.735
+files: 0.732
+socket: 0.612
+KVM: 0.608
+
+Writing a byte to a pl011 SFR overwrites the whole SFR
+
+The bug is present in QEMU 2.8.1 and, if my analysis is correct, also on master.
+
+I first noticed that a PL011 UART driver, which is fine on real hardware, fails to enable the RX interrupt in the IMSC register when running in QEMU. However, the problem only comes up if the code is compiled without optimizations. I think I've narrowed it down to a minimal example that will exhibit the problem if run as a bare-metal application.
+
+Given:
+
+pl011_addr: .word 0x10009000
+
+The following snippet will be problematic:
+
+     ldr r3, pl011_addr
+     ldrb r2, [r3, #0x38]        // IMSC
+     mov r2, #0
+     orr r2, r2, #0x10           // R2 == 0x10
+     strb r2, [r3, #0x38]        // Whole word reads correctly after this
+     ldrb r2, [r3, #0x39]
+     mov r2, #0
+     strb r2, [r3, #0x39]        // Problem here! Overwrites offset 0x38 as well
+
+After the first strb instruction, which writes to 0x10009038, everything is fine. It can be seen in the QEMU monitor:
+
+(qemu) xp 0x10009038
+0000000010009038: 0x00000010
+
+After the second strb instruction, the write to 0x10009039 clears the entire word:
+
+(qemu) xp 0x10009038
+0000000010009038: 0x00000000
+
+QEMU command-line, using the vexpress-a9 which has the PL011 at 0x10009000:
+
+qemu-system-arm -S -M vexpress-a9 -m 32M -no-reboot -nographic -monitor telnet:127.0.0.1:1234,server,nowait -kernel pl011-sfr.bin -gdb tcp::2159 -serial mon:stdio
+
+Compiling the original C code with optimizations makes the driver work. It compiles down to assembly that only does a single write:
+
+    ldr r3, pl011_addr
+    mov r2, #0x10
+    str r2, [r3, #0x38]
+
+Attached is an assembly file, and a link script, that shows the problem and also includes the working code.
+
+I haven't debugged inside of QEMU itself, but it seems to me that the problem is in pl011_write in pl011.c - the function looks at which offset is being written, and then writes the entire SFR that offset falls under, which means that changing a single byte will change the whole SFR.
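+
+For illustration, a minimal sketch of the dispatch pattern I mean (hypothetical code, not the actual QEMU source):
+
+    #include <stdint.h>
+
+    static uint32_t regs[0x1000 >> 2]; /* one slot per 32-bit SFR */
+
+    static void pl011_style_write(uint32_t offset, uint64_t value, unsigned size)
+    {
+        (void)size; /* the access size and byte lane are ignored */
+        /* offset >> 2 selects the 32-bit register; 0x38 and 0x39 both map
+         * to slot 0xE, so a 1-byte store to 0x39 replaces the whole word. */
+        regs[offset >> 2] = (uint32_t)value;
+    }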
+
+
+
+Adding the link script.
+
+Yes, our PL011 implementation assumes that you only ever access the 32-bit registers with full width 32-bit word reads and writes. Don't try to do byte accesses to them. The PL011 data sheet doesn't specifically say that partial-width accesses to registers are permitted, so I think that trying to access offset 0x39 falls under the general note in section 3.1 that attempting to access reserved or unused address locations can result in unpredictable behaviour.
+
+You need to make sure you write your C code in a manner which enforces that accesses to device registers are done as single 32-bit accesses, and the compiler does not silently break them down into multiple reads and writes, or you will be in for a lot of pain trying to figure out what is going on if the compiler ever does it with registers that are write-to-clear or similar behaviour. Linux, for instance, does this by having readl() and writel() functions that end up doing inline asm of ldr/str instructions.
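+
+As a sketch of that pattern (the helper names below are my own and the 0x10 RXIM mask is taken from the snippet above; this is not Linux or QEMU source):
+
+    #include <stdint.h>
+
+    #define PL011_BASE 0x10009000u
+    #define UARTIMSC   0x38u
+
+    /* A volatile 32-bit access forces the compiler to emit a single
+     * full-width ldr/str instead of splitting it into byte accesses. */
+    static inline uint32_t mmio_read32(uintptr_t addr)
+    {
+        return *(volatile uint32_t *)addr;
+    }
+
+    static inline void mmio_write32(uintptr_t addr, uint32_t value)
+    {
+        *(volatile uint32_t *)addr = value;
+    }
+
+    static void pl011_enable_rx_irq(void)
+    {
+        uint32_t imsc = mmio_read32(PL011_BASE + UARTIMSC);
+        mmio_write32(PL011_BASE + UARTIMSC, imsc | 0x10); /* set RXIM */
+    }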
+
+
+Thanks for the response.
+
+I don't think section 3.1 applies to 8-bit accesses. That is specifically about reserved locations, and neither offset 0x38 nor 0x39 is reserved, so I think it's a matter of whether 32-bit access is required or not.
+
+From what I usually see in ARM documentation, 32-bit access is explicitly mentioned when required. For the PL011, it's mentioned for the UARTPeriphID_n registers, for instance. In many other cases access size depends on the implementation and the corresponding memory mapping of that implementation.
+
+I understand that *in practice* you should ensure single-access writes unless doing otherwise is explicitly allowed. However, in cases like the PL011 it seems ambiguous whether that is actually required, so it seems like the best choice would be to explicitly document it for the QEMU implementation. That would save some guesswork.
+
+The QEMU project is currently considering to move its bug tracking to
+another system. For this we need to know which bugs are still valid
+and which could be closed already. Thus we are setting older bugs to
+"Incomplete" now.
+
+If you still think this bug report here is valid, then please switch
+the state back to "New" within the next 60 days, otherwise this report
+will be marked as "Expired". Or please mark it as "Fix Released" if
+the problem has been solved with a newer version of QEMU already.
+
+Thank you and sorry for the inconvenience.
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1829964 b/results/classifier/108/semantic/1829964
new file mode 100644
index 000000000..1c37f5363
--- /dev/null
+++ b/results/classifier/108/semantic/1829964
@@ -0,0 +1,103 @@
+semantic: 0.922
+other: 0.913
+graphic: 0.892
+device: 0.874
+PID: 0.861
+performance: 0.859
+permissions: 0.858
+boot: 0.845
+files: 0.809
+debug: 0.789
+KVM: 0.749
+network: 0.736
+socket: 0.691
+vnc: 0.597
+
+HOST VRAM Leak when performs android-x86 window rotation with Virt-GPU
+
+I am reporting something strange: host VRAM leakage after android-x86 window rotation when running with virt-gpu (+ virgl-renderer).
+
+Please watch the video at the link below.
+
+https://www.youtube.com/watch?v=mJIbGZLWF1s&feature=youtu.be
+
+(orginal video file : https://drive.google.com/file/d/1lkdTx_8yTbSVjKXlnxnnk96fWe-w6Mxb/view?usp=sharing)
+
+I'm not sure what the problem is...
+
+Here is my test history
+--------------------------------------------------------------------------------------------------
+Installed android-x86 on an i7 desktop PC with an Intel UHD GPU - No leak.
+Installed android-x86 on an i7 desktop PC with an NVIDIA GTX-series GPU - No leak.
+Installed android-x86 on a guest machine with an emulated Skylake CPU under QEMU (+virt-gpu, virgl-renderer) - Leak
+(HOST CPU - i5, Intel UHD GPU)
+Installed android-x86 on a guest machine with an emulated Skylake CPU under QEMU (+virt-gpu, virgl-renderer) - Leak
+(HOST CPU - i7, NVIDIA GTX GPU)
+
+COMMON:
+For the NVIDIA GPU: checked VRAM using nvidia-smi
+For the Intel UHD GPU: checked shared VRAM using the free command
+
+We confirmed that the guest android-x86 system goes down when VRAM is full after performing many rotations
+-------------------------------------------------------------------------------------------
+
+Is it the virt-gpu driver's problem?
+
+I hope someone can help me...
+
+Thanks in advance!!
+
+Here are the qemu options I used:
+
+-machine type=q35,accel=kvm -cpu host --enable-kvm \
+-smp cpus=4,cores=4,threads=1 -m 4096 \
+-drive file=ctb0319.qcow2,format=qcow2,if=virtio,aio=threads \
+-device virtio-vga,virgl=on \
+-device qemu-xhci,id=xhci -device usb-mouse,bus=xhci.0 -device usb-kbd,bus=xhci.0 \
+-soundhw hda -display sdl,gl=on -netdev user,id=qemunet0,hostfwd=tcp::4000-:7000,hostfwd=tcp::5555-:5555,hostfwd=tcp::4012-:7012,hostfwd=tcp::4013-:7013 -device virtio-net,netdev=qemunet0 -boot menu=on
+
+This is the *upstream* QEMU bug tracker here. If you've got a problem with the android emulator, please report these problems to the android emulator project instead. Thanks.
+
+To Thomas Huth,
+
+This is not an android problem; it is a qemu or virt-gpu problem.
+-------------------- our test log --------------------------------------
+Running android-x86 on an i7 bare-metal desktop PC with an Intel UHD GPU - No leak.
+Running android-x86 under QEMU (+virt-gpu, virgl-renderer) - Leak
+------------------------------------------------------------------------
+
+A Linux guest also leaks after window manager rotation.
+
+Ok, sorry, got that wrong - we sometimes get bug reports about the android emulator (which is a fork of QEMU) here, and at first glance, your bug report looked like one of these misguided bug tickets, too.
+
+Anyway, please provide some more information: Which version of QEMU are you using? Which operating system are you running in QEMU?
+
+I tested many qemu & linux versions....
+
+in case of qemu,
+2.12
+3.10
+3.12
+4.0.0
+All versions I tested have the same problem....
+
+I also tested many versions of Linux:
+ubuntu 18.04 18.10
+centos 7
+fedora 18 19
+rhel
+
+Actually, it is not only a problem with window rotation; when the home launcher refreshes, VRAM usage also goes up...
+
+I think it is related to GL functions...
+
+so I'm not sure whether it is a qemu virt-gpu problem or a virt-gpu driver problem...
+
+That is why I have already reported this problem to the android-x86 devel forum and to the author of the virt-gpu driver...
+
+
+
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1856335 b/results/classifier/108/semantic/1856335
new file mode 100644
index 000000000..d18c93cbd
--- /dev/null
+++ b/results/classifier/108/semantic/1856335
@@ -0,0 +1,1087 @@
+semantic: 0.926
+permissions: 0.911
+debug: 0.904
+graphic: 0.883
+performance: 0.847
+PID: 0.838
+device: 0.825
+files: 0.806
+vnc: 0.803
+socket: 0.794
+other: 0.750
+network: 0.677
+KVM: 0.582
+boot: 0.506
+
+Cache Layout wrong on many Zen Arch CPUs
+
+AMD CPUs have one L3 cache per 2, 3 or 4 cores. Currently, TOPOEXT seems to always map the cache as if it were a 4-core-per-CCX CPU, which is incorrect and costs upwards of 30% performance (more realistically 10%) in L3-cache-layout-aware applications.
+
+Example on a 4-CCX CPU (1950X w/ 8 cores and no SMT): 
+
+  <cpu mode='custom' match='exact' check='full'>
+    <model fallback='forbid'>EPYC-IBPB</model>
+    <vendor>AMD</vendor>
+    <topology sockets='1' cores='8' threads='1'/>
+
+In Windows, coreinfo reports correctly: 
+
+****----  Unified Cache 1, Level 3,    8 MB, Assoc  16, LineSize  64
+----****  Unified Cache 6, Level 3,    8 MB, Assoc  16, LineSize  64
+
+On a 3-CCX CPU (3960X w/ 6 cores and no SMT):
+
+ <cpu mode='custom' match='exact' check='full'>
+    <model fallback='forbid'>EPYC-IBPB</model>
+    <vendor>AMD</vendor>
+    <topology sockets='1' cores='6' threads='1'/>
+
+In Windows, coreinfo reports incorrectly: 
+
+****--  Unified Cache  1, Level 3,    8 MB, Assoc  16, LineSize  64
+----**  Unified Cache  6, Level 3,    8 MB, Assoc  16, LineSize  64
+
+
+Validated against 3.0, 3.1, 4.1 and 4.2 versions of qemu-kvm. 
+
+With newer QEMU there is a fix (that does behave correctly) using the dies parameter: 
+ <qemu:arg value='cores=3,threads=1,dies=2,sockets=1'/>
+
+The problem is that the dies are exposed differently from how AMD does it natively: they are exposed to Windows as sockets, which means you can't ever have a machine with more than two CCXs (6 cores), as Windows only supports two sockets. (Should this be reported as a separate bug?)
+
+Hi,
+
+I've since confirmed that this bug also exists (as expected) on Linux guests, as well as on Zen1 EPYC 7401 CPUs, to make sure this wasn't a problem with the detection of the newer consumer platform. 
+
+Basically it seems (looking at the code with layman's eyes) that as long as you have a topology that is divisible by 4 or 8, it will always result in the wrong topology being exposed to the guest, even when the correct option could be built (12- and 24-core CPUs). It would also be great if we could support 9-core VM CPUs, as that is a reasonable use case for VMs (3 CCXs of 3 cores each, for a total of 9 cores or 18 SMT threads).
+
+Pinging the author and committer of the TopoEXT feature / EPYC cpu model as they should probably know best how to solve this issue.
+
+This is the commit I am referencing: https://git.qemu.org/?p=qemu.git;a=commitdiff;h=8f4202fb1080f86958782b1fca0bf0279f67d136
+
+Damir,
+  We normally test Linux guests here. Can you please give me the exact qemu command line? Even the SMP parameters (sockets, cores, threads, dies) will work. I will try to recreate it locally first.
+Give me an example of what works and what does not work.
+
+I have recently sent a few more patches to fix another bug. Please check if this makes any difference.
+https://patchwork.kernel.org/cover/11272063/
+https://lore.kernel<email address hidden>/
+
+This should apply cleanly on git://github.com/ehabkost/qemu.git (branch x86-next)
+
+Note: I will be on vacation until first week of Jan. Responses will be delayed.
+
+Same problem for Ryzen 9 3900X. There should be 4x L3 caches, but there are only 3.
+
+Same results with "host-passthrough" and "EPYC-IBPB". Windows doesn't recognize the correct L3 cache layout.
+
+From coreinfo.exe:
+
+Logical Processor to Cache Map:
+**----------------------  Data Cache          0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------------  Instruction Cache   0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------------  Unified Cache       0, Level 2,  512 KB, Assoc   8, LineSize  64
+********----------------  Unified Cache       1, Level 3,   16 MB, Assoc  16, LineSize  64
+--**--------------------  Data Cache          1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------------  Instruction Cache   1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------------  Unified Cache       2, Level 2,  512 KB, Assoc   8, LineSize  64
+----**------------------  Data Cache          2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------------  Instruction Cache   2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------------  Unified Cache       3, Level 2,  512 KB, Assoc   8, LineSize  64
+------**----------------  Data Cache          3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------------  Instruction Cache   3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------------  Unified Cache       4, Level 2,  512 KB, Assoc   8, LineSize  64
+--------**--------------  Data Cache          4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------------  Instruction Cache   4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------------  Unified Cache       5, Level 2,  512 KB, Assoc   8, LineSize  64
+--------********--------  Unified Cache       6, Level 3,   16 MB, Assoc  16, LineSize  64
+----------**------------  Data Cache          5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------------  Instruction Cache   5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------------  Unified Cache       7, Level 2,  512 KB, Assoc   8, LineSize  64
+------------**----------  Data Cache          6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----------  Instruction Cache   6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----------  Unified Cache       8, Level 2,  512 KB, Assoc   8, LineSize  64
+--------------**--------  Data Cache          7, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------**--------  Instruction Cache   7, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------**--------  Unified Cache       9, Level 2,  512 KB, Assoc   8, LineSize  64
+----------------**------  Data Cache          8, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**------  Instruction Cache   8, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**------  Unified Cache      10, Level 2,  512 KB, Assoc   8, LineSize  64
+----------------********  Unified Cache      11, Level 3,   16 MB, Assoc  16, LineSize  64
+------------------**----  Data Cache          9, Level 1,   32 KB, Assoc   8, LineSize  64
+------------------**----  Instruction Cache   9, Level 1,   32 KB, Assoc   8, LineSize  64
+------------------**----  Unified Cache      12, Level 2,  512 KB, Assoc   8, LineSize  64
+--------------------**--  Data Cache         10, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------------**--  Instruction Cache  10, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------------**--  Unified Cache      13, Level 2,  512 KB, Assoc   8, LineSize  64
+----------------------**  Data Cache         11, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------------**  Instruction Cache  11, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------------**  Unified Cache      14, Level 2,  512 KB, Assoc   8, LineSize  64
+
+
+AMD does not use dies; for AMD, dies is normally set to 1. You probably have to pass dies in some other way. Did you try the latest qemu v5.0? Please try it. 
+
+Qemu expects the user to configure the topology based on their requirements.
+ 
+Try replacing <qemu:arg value='cores=3,threads=1,dies=2,sockets=1'/> 
+with <qemu:arg value='cores=6,threads=1,dies=1,sockets=1'/>
+
+You can also use the numa configuration. There are multiple ways you can achieve your required configuration.
+
+
+Damir, here is an example of how to use the numa configuration.
+-smp 16,maxcpus=16,cores=16,threads=1,sockets=1 -numa node,nodeid=0,cpus=0-7 -numa node,nodeid=1,cpus=8-15
+
+This will help put all the cores within the correct L3 boundary. I strongly suggest using the latest qemu release. 
+
+It could be an issue of how the kernel presents the CPU topology. 
+
+Hardware: AMD Ryzen 3900X 12 core 24 threads (SMT)
+Host: Kernel 5.6.6, QEMU 4.2
+
+virsh capabilities | grep "cpu id"
+            <cpu id='0' socket_id='0' core_id='0' siblings='0,12'/>
+            <cpu id='1' socket_id='0' core_id='1' siblings='1,13'/>
+            <cpu id='2' socket_id='0' core_id='2' siblings='2,14'/>
+            <cpu id='3' socket_id='0' core_id='4' siblings='3,15'/>
+            <cpu id='4' socket_id='0' core_id='5' siblings='4,16'/>
+            <cpu id='5' socket_id='0' core_id='6' siblings='5,17'/>
+            <cpu id='6' socket_id='0' core_id='8' siblings='6,18'/>
+            <cpu id='7' socket_id='0' core_id='9' siblings='7,19'/>
+            <cpu id='8' socket_id='0' core_id='10' siblings='8,20'/>
+            <cpu id='9' socket_id='0' core_id='12' siblings='9,21'/>
+            <cpu id='10' socket_id='0' core_id='13' siblings='10,22'/>
+            <cpu id='11' socket_id='0' core_id='14' siblings='11,23'/>
+            <cpu id='12' socket_id='0' core_id='0' siblings='0,12'/>
+            <cpu id='13' socket_id='0' core_id='1' siblings='1,13'/>
+            <cpu id='14' socket_id='0' core_id='2' siblings='2,14'/>
+            <cpu id='15' socket_id='0' core_id='4' siblings='3,15'/>
+            <cpu id='16' socket_id='0' core_id='5' siblings='4,16'/>
+            <cpu id='17' socket_id='0' core_id='6' siblings='5,17'/>
+            <cpu id='18' socket_id='0' core_id='8' siblings='6,18'/>
+            <cpu id='19' socket_id='0' core_id='9' siblings='7,19'/>
+            <cpu id='20' socket_id='0' core_id='10' siblings='8,20'/>
+            <cpu id='21' socket_id='0' core_id='12' siblings='9,21'/>
+            <cpu id='22' socket_id='0' core_id='13' siblings='10,22'/>
+            <cpu id='23' socket_id='0' core_id='14' siblings='11,23'/>
+
+See how cpu id=3 gets core id=4, and cpu id=6 gets core id=8, etc.
+
+cat /sys/devices/system/cpu/cpu2/topology/core_id
+2
+
+cat /sys/devices/system/cpu/cpu3/topology/core_id
+4
+
+However, the association of CPU IDs to L3 caches seems to be correct:
+
+echo "Level  CPU list";for file in /sys/devices/system/cpu/cpu*/cache/index3; do echo $(cat $file/id) "    " $(cat $file/shared_cpu_list); done | sort --version-sort
+Level  CPU list
+0      0-2,12-14
+0      0-2,12-14
+0      0-2,12-14
+0      0-2,12-14
+0      0-2,12-14
+0      0-2,12-14
+1      3-5,15-17
+1      3-5,15-17
+1      3-5,15-17
+1      3-5,15-17
+1      3-5,15-17
+1      3-5,15-17
+2      6-8,18-20
+2      6-8,18-20
+2      6-8,18-20
+2      6-8,18-20
+2      6-8,18-20
+2      6-8,18-20
+3      9-11,21-23
+3      9-11,21-23
+3      9-11,21-23
+3      9-11,21-23
+3      9-11,21-23
+3      9-11,21-23
+
+There are 4 L3 caches with the correct CPU lists (6 CPUs/threads each).
+
+Is it possible that this weird CPU ID enumeration is causing the confusion?
+
+Haven't had a chance to check out QEMU 5.0, but hope to do that today.
+
+Finally installed QEMU 5.0.0.154 - still the same. QEMU doesn't recognize the L3 caches and still lists 3 L3 caches instead of 4 with 3 cores/6 threads.
+
+Here the vm.log with the qemu command line (shortened):
+
+2020-05-03 18:23:38.674+0000: starting up libvirt version: 5.10.0, qemu version: 5.0.50v5.0.0-154-g2ef486e76d-dirty, kernel: 5.4.36-1-MANJARO
+
+-machine pc-q35-4.2,accel=kvm,usb=off,vmport=off,dump-guest-core=off,kernel_irqchip=on,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \
+-cpu host,invtsc=on,hypervisor=on,topoext=on,hv-time,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,hv-vpindex,hv-synic,hv-stimer,hv-vendor-id=AuthenticAMD,hv-frequencies,hv-crash,kvm=off,host-cache-info=on,l3-cache=off \
+-m 49152 \
+-mem-prealloc \
+-mem-path /dev/hugepages/libvirt/qemu/1-win10 \
+-overcommit mem-lock=off \
+-smp 24,sockets=1,cores=12,threads=2 \
+-display none \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=34,server,nowait \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=localtime,driftfix=slew \
+-global kvm-pit.lost_tick_policy=delay \
+-no-hpet \
+-no-shutdown \
+-global ICH9-LPC.disable_s3=1 \
+-global ICH9-LPC.disable_s4=1 \
+-boot menu=off,strict=on \
+
+
+Hi Seiger,
+I am not an expert on libvirt. I mostly use the qemu command line for my tests. I was able to achieve the 3960X configuration with the following command line. 
+
+# qemu-system-x86_64 -name rhel7  -m 16384 -smp 24,cores=12,threads=2,sockets=1 -hda vdisk.qcow2 -enable-kvm -net nic -net bridge,br=virbr0,helper=/usr/libexec/qemu-bridge-helper -cpu host,+topoext -nographic -numa node,nodeid=0,cpus=0-5 -numa node,nodeid=1,cpus=6-11 -numa node,nodeid=2,cpus=12-17 -numa node,nodeid=3,cpus=18-23
+
+Basically qemu does not have all the information to build the topology for every configuration. It depends on libvirt for that information. See if this combination works for you.
+
+Hello Babu,
+
+Thanks for the reply and the QEMU command line. I will try to implement it in the XML.
+
+So essentially what you do is to define each group of cpus and associate them with a numa node:
+
+-numa node,nodeid=0,cpus=0-5 -numa node,nodeid=1,cpus=6-11 -numa node,nodeid=2,cpus=12-17 -numa node,nodeid=3,cpus=18-23
+
+Haven't tried it but that might work. Do you need QEMU 5.0 for this to work, or is 4.2 OK?
+
+Yes, Sieger. Please install 5.0; it should work fine. I am not sure about 4.2. 
+
+Hello, 
+
+I took a look today at the layouts when using the 1950X (which previously worked; and yes, admittedly, I am using Windows / coreinfo). Previously something as simple as sockets=1,cores=8,threads=1 (now also dies=1) worked, but now the topology presents as if all cores share L3 and as if each pair of cores shares L1C/L1D/L2, like SMT siblings. I would call this a serious regression. 
+
+I don't think using NUMA nodes is an OK way to solve this (especially not when, at least for 4-CCX CPUs, this worked flawlessly before), as that will make NUMA-aware applications start taking note of NUMA nodes and possibly do weird things (plus, it introduces more configuration where it was not needed before). 
+
+I upgraded to QEMU emulator version 5.0.50
+Using q35-5.1 (the latest) and the following libvirt configuration:
+
+  <memory unit="KiB">50331648</memory>
+  <currentMemory unit="KiB">50331648</currentMemory>
+  <memoryBacking>
+    <hugepages/>
+  </memoryBacking>
+  <vcpu placement="static">24</vcpu>
+  <cputune>
+    <vcpupin vcpu="0" cpuset="0"/>
+    <vcpupin vcpu="1" cpuset="12"/>
+    <vcpupin vcpu="2" cpuset="1"/>
+    <vcpupin vcpu="3" cpuset="13"/>
+    <vcpupin vcpu="4" cpuset="2"/>
+    <vcpupin vcpu="5" cpuset="14"/>
+    <vcpupin vcpu="6" cpuset="3"/>
+    <vcpupin vcpu="7" cpuset="15"/>
+    <vcpupin vcpu="8" cpuset="4"/>
+    <vcpupin vcpu="9" cpuset="16"/>
+    <vcpupin vcpu="10" cpuset="5"/>
+    <vcpupin vcpu="11" cpuset="17"/>
+    <vcpupin vcpu="12" cpuset="6"/>
+    <vcpupin vcpu="13" cpuset="18"/>
+    <vcpupin vcpu="14" cpuset="7"/>
+    <vcpupin vcpu="15" cpuset="19"/>
+    <vcpupin vcpu="16" cpuset="8"/>
+    <vcpupin vcpu="17" cpuset="20"/>
+    <vcpupin vcpu="18" cpuset="9"/>
+    <vcpupin vcpu="19" cpuset="21"/>
+    <vcpupin vcpu="20" cpuset="10"/>
+    <vcpupin vcpu="21" cpuset="22"/>
+    <vcpupin vcpu="22" cpuset="11"/>
+    <vcpupin vcpu="23" cpuset="23"/>
+  </cputune>
+  <os>
+    <type arch="x86_64" machine="pc-q35-5.1">hvm</type>
+    <loader readonly="yes" type="pflash">/usr/share/OVMF/x64/OVMF_CODE.fd</loader>
+    <nvram>/var/lib/libvirt/qemu/nvram/win10_VARS.fd</nvram>
+    <boot dev="hd"/>
+    <bootmenu enable="no"/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <hyperv>
+      <relaxed state="on"/>
+      <vapic state="on"/>
+      <spinlocks state="on" retries="8191"/>
+      <vpindex state="on"/>
+      <synic state="on"/>
+      <stimer state="on"/>
+      <vendor_id state="on" value="AuthenticAMD"/>
+      <frequencies state="on"/>
+    </hyperv>
+    <kvm>
+      <hidden state="on"/>
+    </kvm>
+    <vmport state="off"/>
+    <ioapic driver="kvm"/>
+  </features>
+  <cpu mode="host-passthrough" check="none">
+    <topology sockets="1" cores="12" threads="2"/>
+    <cache mode="passthrough"/>
+    <feature policy="require" name="invtsc"/>
+    <feature policy="require" name="hypervisor"/>
+    <feature policy="require" name="topoext"/>
+    <numa>
+      <cell id="0" cpus="0-2,12-14" memory="12582912" unit="KiB"/>
+      <cell id="1" cpus="3-5,15-17" memory="12582912" unit="KiB"/>
+      <cell id="2" cpus="6-8,18-20" memory="12582912" unit="KiB"/>
+      <cell id="3" cpus="9-11,21-23" memory="12582912" unit="KiB"/>
+    </numa>
+  </cpu>
+
+...
+
+/var/log/libvirt/qemu/win10.log:
+
+-machine pc-q35-5.1,accel=kvm,usb=off,vmport=off,dump-guest-core=off,kernel_irqchip=on,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \
+-cpu host,invtsc=on,hypervisor=on,topoext=on,hv-time,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,hv-vpindex,hv-synic,hv-stimer,hv-vendor-id=AuthenticAMD,hv-frequencies,hv-crash,kvm=off,host-cache-info=on,l3-cache=off \
+-m 49152 \
+-overcommit mem-lock=off \
+-smp 24,sockets=1,cores=12,threads=2 \
+-mem-prealloc \
+-mem-path /dev/hugepages/libvirt/qemu/3-win10 \
+-numa node,nodeid=0,cpus=0-2,cpus=12-14,mem=12288 \
+-numa node,nodeid=1,cpus=3-5,cpus=15-17,mem=12288 \
+-numa node,nodeid=2,cpus=6-8,cpus=18-20,mem=12288 \
+-numa node,nodeid=3,cpus=9-11,cpus=21-23,mem=12288 \
+...
+
+For some reason I always get l3-cache=off.
+
+CoreInfo.exe in Windows 10 then produces the following report (shortened):
+
+Logical to Physical Processor Map:
+**----------------------  Physical Processor 0 (Hyperthreaded)
+--*---------------------  Physical Processor 1
+---*--------------------  Physical Processor 2
+----**------------------  Physical Processor 3 (Hyperthreaded)
+------**----------------  Physical Processor 4 (Hyperthreaded)
+--------*---------------  Physical Processor 5
+---------*--------------  Physical Processor 6
+----------**------------  Physical Processor 7 (Hyperthreaded)
+------------**----------  Physical Processor 8 (Hyperthreaded)
+--------------*---------  Physical Processor 9
+---------------*--------  Physical Processor 10
+----------------**------  Physical Processor 11 (Hyperthreaded)
+------------------**----  Physical Processor 12 (Hyperthreaded)
+--------------------*---  Physical Processor 13
+---------------------*--  Physical Processor 14
+----------------------**  Physical Processor 15 (Hyperthreaded)
+
+Logical Processor to Socket Map:
+************************  Socket 0
+
+Logical Processor to NUMA Node Map:
+***---------***---------  NUMA Node 0
+---***---------***------  NUMA Node 1
+------***---------***---  NUMA Node 2
+---------***---------***  NUMA Node 3
+
+Approximate Cross-NUMA Node Access Cost (relative to fastest):
+     00  01  02  03
+00: 1.4 1.2 1.1 1.2
+01: 1.1 1.1 1.3 1.1
+02: 1.0 1.1 1.0 1.2
+03: 1.1 1.2 1.2 1.2
+
+Logical Processor to Cache Map:
+**----------------------  Data Cache          0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------------  Instruction Cache   0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------------  Unified Cache       0, Level 2,  512 KB, Assoc   8, LineSize  64
+***---------------------  Unified Cache       1, Level 3,   16 MB, Assoc  16, LineSize  64
+--*---------------------  Data Cache          1, Level 1,   32 KB, Assoc   8, LineSize  64
+--*---------------------  Instruction Cache   1, Level 1,   32 KB, Assoc   8, LineSize  64
+--*---------------------  Unified Cache       2, Level 2,  512 KB, Assoc   8, LineSize  64
+---*--------------------  Data Cache          2, Level 1,   32 KB, Assoc   8, LineSize  64
+---*--------------------  Instruction Cache   2, Level 1,   32 KB, Assoc   8, LineSize  64
+---*--------------------  Unified Cache       3, Level 2,  512 KB, Assoc   8, LineSize  64
+---***------------------  Unified Cache       4, Level 3,   16 MB, Assoc  16, LineSize  64
+----**------------------  Data Cache          3, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------------  Instruction Cache   3, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------------  Unified Cache       5, Level 2,  512 KB, Assoc   8, LineSize  64
+------**----------------  Data Cache          4, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------------  Instruction Cache   4, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------------  Unified Cache       6, Level 2,  512 KB, Assoc   8, LineSize  64
+------**----------------  Unified Cache       7, Level 3,   16 MB, Assoc  16, LineSize  64
+--------*---------------  Data Cache          5, Level 1,   32 KB, Assoc   8, LineSize  64
+--------*---------------  Instruction Cache   5, Level 1,   32 KB, Assoc   8, LineSize  64
+--------*---------------  Unified Cache       8, Level 2,  512 KB, Assoc   8, LineSize  64
+--------*---------------  Unified Cache       9, Level 3,   16 MB, Assoc  16, LineSize  64
+---------*--------------  Data Cache          6, Level 1,   32 KB, Assoc   8, LineSize  64
+---------*--------------  Instruction Cache   6, Level 1,   32 KB, Assoc   8, LineSize  64
+---------*--------------  Unified Cache      10, Level 2,  512 KB, Assoc   8, LineSize  64
+---------***------------  Unified Cache      11, Level 3,   16 MB, Assoc  16, LineSize  64
+----------**------------  Data Cache          7, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------------  Instruction Cache   7, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------------  Unified Cache      12, Level 2,  512 KB, Assoc   8, LineSize  64
+------------**----------  Data Cache          8, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----------  Instruction Cache   8, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----------  Unified Cache      13, Level 2,  512 KB, Assoc   8, LineSize  64
+------------***---------  Unified Cache      14, Level 3,   16 MB, Assoc  16, LineSize  64
+--------------*---------  Data Cache          9, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------*---------  Instruction Cache   9, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------*---------  Unified Cache      15, Level 2,  512 KB, Assoc   8, LineSize  64
+---------------*--------  Data Cache         10, Level 1,   32 KB, Assoc   8, LineSize  64
+---------------*--------  Instruction Cache  10, Level 1,   32 KB, Assoc   8, LineSize  64
+---------------*--------  Unified Cache      16, Level 2,  512 KB, Assoc   8, LineSize  64
+---------------*--------  Unified Cache      17, Level 3,   16 MB, Assoc  16, LineSize  64
+----------------**------  Data Cache         11, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**------  Instruction Cache  11, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**------  Unified Cache      18, Level 2,  512 KB, Assoc   8, LineSize  64
+----------------**------  Unified Cache      19, Level 3,   16 MB, Assoc  16, LineSize  64
+------------------**----  Data Cache         12, Level 1,   32 KB, Assoc   8, LineSize  64
+------------------**----  Instruction Cache  12, Level 1,   32 KB, Assoc   8, LineSize  64
+------------------**----  Unified Cache      20, Level 2,  512 KB, Assoc   8, LineSize  64
+------------------***---  Unified Cache      21, Level 3,   16 MB, Assoc  16, LineSize  64
+--------------------*---  Data Cache         13, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------------*---  Instruction Cache  13, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------------*---  Unified Cache      22, Level 2,  512 KB, Assoc   8, LineSize  64
+---------------------*--  Data Cache         14, Level 1,   32 KB, Assoc   8, LineSize  64
+---------------------*--  Instruction Cache  14, Level 1,   32 KB, Assoc   8, LineSize  64
+---------------------*--  Unified Cache      23, Level 2,  512 KB, Assoc   8, LineSize  64
+---------------------***  Unified Cache      24, Level 3,   16 MB, Assoc  16, LineSize  64
+----------------------**  Data Cache         15, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------------**  Instruction Cache  15, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------------**  Unified Cache      25, Level 2,  512 KB, Assoc   8, LineSize  64
+
+Logical Processor to Group Map:
+************************  Group 0
+
+
+The above result is even further away from the actual L3 cache configuration.
+
+So numatune doesn't produce the expected outcome.
+
+
+Same problem here on 5.0 and a 3900X (3 cores per CCX). And as stated before - declaring NUMA nodes is definitely not the right solution if the aim is to emulate the host CPU as closely as possible.
+
+The problem is that disabled cores are not taken into account. ALL Zen2 CPUs have one L3 cache group per CCX, and every CCX has 4 cores; the catch is that some cores in each CCX (1 for 6- and 12-core CPUs, 2 for the 3100) are disabled for some models, but they still use their core ids (as can be seen in the virsh capabilities | grep "cpu id" output in the comments above). Looking at target/i386/cpu.c:5529, this is not taken into account.
+
+Maybe the cleanest way to fix this is to emulate the host topology by also skipping disabled core ids in the VM? That way, die offset will actually match the real host CPU topology...
+
+A workaround for Linux VMs is to disable CPUs (setting their number/pinnings accordingly, so that e.g. every 4th (and 3rd for the 3100) core is a 'dummy' and disabled system-wide) by e.g. echo 0 > /sys/devices/system/cpu/cpu3/online
+
+No good workaround for Windows VMs exists, as far as I know - the best you can do is set affinity for specific process(es) to avoid the 'dummy' CPUs, but I am not aware of any possibility to disable specific CPUs (only limiting the overall number).
+
+Hi Jan, 
+
+The problem for me now is why every config (that I can figure out) now results in SMT on / L3 shared across all cores, which is obviously never true on Zen unless you have fewer than 4 cores; 8 cores should always result in 2 L3 caches, and so should 16 threads with 8 cores + SMT. This worked in my initial post. 
+
+The latest qemu has removed all the hard-coded configurations for AMD; it leaves everything to be customized. One way to configure it is using numa nodes. This will make sure that cpus under one numa node share the same L3. Then pin the correct host cpus to guest cpus using vcpupin. I would change -numa node,nodeid=0,cpus=0-2,cpus=12-14,mem=12288 to -numa node,nodeid=0,cpus=0-2,cpus=3-5,mem=12288. Then have vcpupin map the correct host cpu to guest cpu. Check if this works for you. Can you please post the lscpu output from the host for everybody's understanding?    
+
+
+No, creating artificial NUMA nodes is, simply put, never a good solution for CPUs that operate as a single NUMA node - which is the case for all Zen2 CPUs (except maybe EPYCs? not sure about those).
+
+You may work around the L3 issue that way, but you will hit many new bugs/problems by introducing multiple NUMA nodes, _especially_ on Windows VMs, because that OS has crappy NUMA handling and a multitude of bugs related to it - which was one of the major reasons why even Zen2 Threadrippers are now a single NUMA node (e.g. https://www.servethehome.com/wp-content/uploads/2019/11/AMD-Ryzen-Threadripper-3960X-Topology.png ).
+
+The host CPU architecture should be replicated as closely as possible on the VM and for Zen2 CPUs with 4 cores per CCX, _this already works perfectly_ - there are no problems on 3300X/3700(X)/3800X/3950X/3970X/3990X.
+
+There is, unfortunately, no way to customize/specify the "disabled" CPU cores in QEMU, and therefore no way to emulate 1 NUMA node + an L3 cache per 2/3 cores - only to pass through the cache config from the host, which is unfortunately not done correctly for CPUs with disabled cores (but again, works perfectly for CPUs with all 4 cores enabled per CCX).
+
+lscpu:
+Architecture:                    x86_64
+CPU op-mode(s):                  32-bit, 64-bit
+Byte Order:                      Little Endian
+Address sizes:                   43 bits physical, 48 bits virtual
+CPU(s):                          24
+On-line CPU(s) list:             0-23
+Thread(s) per core:              2
+Core(s) per socket:              12
+Socket(s):                       1
+NUMA node(s):                    1
+Vendor ID:                       AuthenticAMD
+CPU family:                      23
+Model:                           113
+Model name:                      AMD Ryzen 9 3900X 12-Core Processor
+Stepping:                        0
+Frequency boost:                 enabled
+CPU MHz:                         2972.127
+CPU max MHz:                     3800.0000
+CPU min MHz:                     2200.0000
+BogoMIPS:                        7602.55
+Virtualization:                  AMD-V
+L1d cache:                       384 KiB
+L1i cache:                       384 KiB
+L2 cache:                        6 MiB
+L3 cache:                        64 MiB
+NUMA node0 CPU(s):               0-23
+Vulnerability Itlb multihit:     Not affected
+Vulnerability L1tf:              Not affected
+Vulnerability Mds:               Not affected
+Vulnerability Meltdown:          Not affected
+Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
+Vulnerability Spectre v1:        Mitigation; usercopy/swapgs barriers and __user pointer sanitization
+Vulnerability Spectre v2:        Mitigation; Full AMD retpoline, IBPB conditional, STIBP conditional, RSB filling
+Vulnerability Tsx async abort:   Not affected
+Flags:                           fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonsto
+                                 p_tsc cpuid extd_apicid aperfmperf pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a mi
+                                 salignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate sme ssbd mba sev ibpb stibp vmmcall fsgsbase b
+                                 mi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru
+                                  wbnoinvd arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip rdpid overflow_recov succor smca
+
+
+But the important thing has already been posted here in previous comments - notice the skipped core ids belonging to the disabled cores:
+
+virsh capabilities | grep "cpu id":
+<cpu id='0' socket_id='0' core_id='0' siblings='0,12'/>
+<cpu id='1' socket_id='0' core_id='1' siblings='1,13'/>
+<cpu id='2' socket_id='0' core_id='2' siblings='2,14'/>
+<cpu id='3' socket_id='0' core_id='4' siblings='3,15'/>
+<cpu id='4' socket_id='0' core_id='5' siblings='4,16'/>
+<cpu id='5' socket_id='0' core_id='6' siblings='5,17'/>
+<cpu id='6' socket_id='0' core_id='8' siblings='6,18'/>
+<cpu id='7' socket_id='0' core_id='9' siblings='7,19'/>
+<cpu id='8' socket_id='0' core_id='10' siblings='8,20'/>
+<cpu id='9' socket_id='0' core_id='12' siblings='9,21'/>
+<cpu id='10' socket_id='0' core_id='13' siblings='10,22'/>
+<cpu id='11' socket_id='0' core_id='14' siblings='11,23'/>
+<cpu id='12' socket_id='0' core_id='0' siblings='0,12'/>
+<cpu id='13' socket_id='0' core_id='1' siblings='1,13'/>
+<cpu id='14' socket_id='0' core_id='2' siblings='2,14'/>
+<cpu id='15' socket_id='0' core_id='4' siblings='3,15'/>
+<cpu id='16' socket_id='0' core_id='5' siblings='4,16'/>
+<cpu id='17' socket_id='0' core_id='6' siblings='5,17'/>
+<cpu id='18' socket_id='0' core_id='8' siblings='6,18'/>
+<cpu id='19' socket_id='0' core_id='9' siblings='7,19'/>
+<cpu id='20' socket_id='0' core_id='10' siblings='8,20'/>
+<cpu id='21' socket_id='0' core_id='12' siblings='9,21'/>
+<cpu id='22' socket_id='0' core_id='13' siblings='10,22'/>
+<cpu id='23' socket_id='0' core_id='14' siblings='11,23'/>
+
+Damir:
+Hm, must be some misconfiguration, then. Here is my config for Linux VMs to utilize 3 out of the 4 CCXs. Important parts of the libvirt domain XML:
+
+  <vcpu placement="static">24</vcpu>
+  <iothreads>1</iothreads>
+  <cputune>
+    <vcpupin vcpu="0" cpuset="3"/>
+    <vcpupin vcpu="1" cpuset="15"/>
+    <vcpupin vcpu="2" cpuset="4"/>
+    <vcpupin vcpu="3" cpuset="16"/>
+    <vcpupin vcpu="4" cpuset="5"/>
+    <vcpupin vcpu="5" cpuset="17"/>
+    <vcpupin vcpu="6" cpuset="0,12"/>
+    <vcpupin vcpu="7" cpuset="0,12"/>
+    <vcpupin vcpu="8" cpuset="6"/>
+    <vcpupin vcpu="9" cpuset="18"/>
+    <vcpupin vcpu="10" cpuset="7"/>
+    <vcpupin vcpu="11" cpuset="19"/>
+    <vcpupin vcpu="12" cpuset="8"/>
+    <vcpupin vcpu="13" cpuset="20"/>
+    <vcpupin vcpu="14" cpuset="0,12"/>
+    <vcpupin vcpu="15" cpuset="0,12"/>
+    <vcpupin vcpu="16" cpuset="9"/>
+    <vcpupin vcpu="17" cpuset="21"/>
+    <vcpupin vcpu="18" cpuset="10"/>
+    <vcpupin vcpu="19" cpuset="22"/>
+    <vcpupin vcpu="20" cpuset="11"/>
+    <vcpupin vcpu="21" cpuset="23"/>
+    <vcpupin vcpu="22" cpuset="0,12"/>
+    <vcpupin vcpu="23" cpuset="0,12"/>
+    <emulatorpin cpuset="1,13"/>
+    <iothreadpin iothread="1" cpuset="2,14"/>
+  </cputune>
+  <os>
+    <type arch="x86_64" machine="pc-q35-5.0">hvm</type>
+    <loader readonly="yes" type="pflash">/usr/share/ovmf/x64/OVMF_CODE.fd</loader>
+    <nvram>/var/lib/libvirt/qemu/nvram/ccxtest-clone_VARS.fd</nvram>
+  </os>
+.
+.
+.
+  <qemu:commandline>
+    <qemu:arg value="-cpu"/>
+    <qemu:arg value="host,topoext=on,hv-time,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,host-cache-info=on,-amd-stibp"/>
+  </qemu:commandline>
+
+The CPUs with cpuset="0,12" are disabled once booted. The host-cache-info=on is the part that makes sure that the cache config is passed to the VM (but unfortunately does not take disabled cores into account, which results in incorrect config). The qemu:commandline is added because I need to add -amd-stibp, otherwise I wouldn't be able to boot. This overrides most parts in the <cpu> XML part.
+
+"The CPUs with cpuset="0,12" are disabled once booted. The host-cache-info=on is the part that makes sure that the cache config is passed to the VM (but unfortunately does not take disabled cores into account, which results in incorrect config). The qemu:commandline is added because I need to add -amd-stibp, otherwise I wouldn't be able to boot. This overrides most parts in the <cpu> XML part."
+
+Is there a XML equivalent for host-cache-info=on ?
+
+Will that work with model EPYC-IBPB as well?
+
+Sieger, I am not an expert on XML, so I don't know. Qemu probably cannot handle disabled cores. I am still trying to learn more about this problem. 
+
+With regard to Jan's comment earlier and the virsh capabilities listing the cores and siblings, also note the following lines from virsh capabilities for a 3900X CPU:
+
+    <cache>
+      <bank id='0' level='3' type='both' size='16' unit='MiB' cpus='0-2,12-14'/>
+      <bank id='1' level='3' type='both' size='16' unit='MiB' cpus='3-5,15-17'/>
+      <bank id='2' level='3' type='both' size='16' unit='MiB' cpus='6-8,18-20'/>
+      <bank id='3' level='3' type='both' size='16' unit='MiB' cpus='9-11,21-23'/>
+    </cache>
+
+virsh capabilities is perfectly able to identify the L3 cache structure and associate the right cpus. It would be ideal to just use the above output inside the libvirt domain configuration to "manually" define the L3 cache, or something to that effect on the qemu command line.
+
+Users could then decide to pin only part of the cpus, usually a multiple of 6 (in the case of the 3900X) to align with the CCX.
+
+I'm now on kernel 5.6.11 and QEMU v5.0.0.r533.gdebe78ce14-1 (from Arch Linux AUR qemu-git), running q35-5.1. I will try the host-passthrough with host-cache-info=on option Jan posted. Question - is host-cache-info=on the same as <cache mode="passthrough"/> under <cpu mode=host-passthrough...?
+
+<cache mode="passthrough"/>
+
+adds "host-cache-info=on,l3-cache=off"
+
+to the qemu -cpu args
+
+I believe l3-cache=off is useless with host-cache-info=on
+
+So <cache mode="passthrough"/> should do what you want.
+
+Thanks Jan. I had some new hardware/software issues, combined with the QEMU 5.0 issues, that made my Windows VM crash after some minutes.
+
+I totally overlooked the following:
+    <vcpupin vcpu="6" cpuset="0,12"/>
+    <vcpupin vcpu="7" cpuset="0,12"/>
+
+So I guess you posted an answer to this: https://www.reddit.com/r/VFIO/comments/erwzrg/think_i_found_a_workaround_to_get_l3_cache_shared/
+
+As it's late, I'll try tomorrow. Sorry for all the confusion but I had a real tough time with this Ryzen build.
+
+Jan, I tried your suggestion but it didn't make a difference. Here is my current setup:
+
+h/w: AMD Ryzen 9 3900X
+kernel: 5.4
+QEMU: 5.0.0-6
+Chipset selection: Q35-5.0
+
+Configuration: host-passthrough, cache enabled
+
+Use CoreInfo.exe inside Windows. The problem is this:
+
+Logical Processor to Cache Map:
+**---------------------- Data Cache 0, Level 1, 32 KB, Assoc 8, LineSize 64
+**---------------------- Instruction Cache 0, Level 1, 32 KB, Assoc 8, LineSize 64
+**---------------------- Unified Cache 0, Level 2, 512 KB, Assoc 8, LineSize 64
+********---------------- Unified Cache 1, Level 3, 16 MB, Assoc 16, LineSize 64
+
+The last line above should be as follows:
+
+******------------------ Unified Cache 0, Level 3, 16 MB, Assoc 16, LineSize 64
+
+The cache is supposed to be associated with 3 cores of 2 threads each in group 0. Yet it shows 8 (2x4) vcpus inside a cache that is associated with the next group.
+
+In total, I always get 3 L3 caches instead of 4 L3 caches for my 12 cores / 24 threads. Also see my next post.
+
+
+This is the CPU cache layout as shown by lscpu -a -e
+
+CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE    MAXMHZ    MINMHZ
+  0    0      0    0 0:0:0:0          yes 3800.0000 2200.0000
+  1    0      0    1 1:1:1:0          yes 3800.0000 2200.0000
+  2    0      0    2 2:2:2:0          yes 3800.0000 2200.0000
+  3    0      0    3 3:3:3:1          yes 3800.0000 2200.0000
+  4    0      0    4 4:4:4:1          yes 3800.0000 2200.0000
+  5    0      0    5 5:5:5:1          yes 3800.0000 2200.0000
+  6    0      0    6 6:6:6:2          yes 3800.0000 2200.0000
+  7    0      0    7 7:7:7:2          yes 3800.0000 2200.0000
+  8    0      0    8 8:8:8:2          yes 3800.0000 2200.0000
+  9    0      0    9 9:9:9:3          yes 3800.0000 2200.0000
+ 10    0      0   10 10:10:10:3       yes 3800.0000 2200.0000
+ 11    0      0   11 11:11:11:3       yes 3800.0000 2200.0000
+ 12    0      0    0 0:0:0:0          yes 3800.0000 2200.0000
+ 13    0      0    1 1:1:1:0          yes 3800.0000 2200.0000
+ 14    0      0    2 2:2:2:0          yes 3800.0000 2200.0000
+ 15    0      0    3 3:3:3:1          yes 3800.0000 2200.0000
+ 16    0      0    4 4:4:4:1          yes 3800.0000 2200.0000
+ 17    0      0    5 5:5:5:1          yes 3800.0000 2200.0000
+ 18    0      0    6 6:6:6:2          yes 3800.0000 2200.0000
+ 19    0      0    7 7:7:7:2          yes 3800.0000 2200.0000
+ 20    0      0    8 8:8:8:2          yes 3800.0000 2200.0000
+ 21    0      0    9 9:9:9:3          yes 3800.0000 2200.0000
+ 22    0      0   10 10:10:10:3       yes 3800.0000 2200.0000
+ 23    0      0   11 11:11:11:3       yes 3800.0000 2200.0000
+
+I was trying to allocate cache using the cachetune feature in libvirt, but it turns out to be either misleading or much too complicated to be usable. Here is what I tried:
+
+  <vcpu placement="static">24</vcpu>
+  <cputune>
+    <vcpupin vcpu="0" cpuset="0"/>
+    <vcpupin vcpu="1" cpuset="12"/>
+    <vcpupin vcpu="2" cpuset="1"/>
+    <vcpupin vcpu="3" cpuset="13"/>
+    <vcpupin vcpu="4" cpuset="2"/>
+    <vcpupin vcpu="5" cpuset="14"/>
+    <vcpupin vcpu="6" cpuset="3"/>
+    <vcpupin vcpu="7" cpuset="15"/>
+    <vcpupin vcpu="8" cpuset="4"/>
+    <vcpupin vcpu="9" cpuset="16"/>
+    <vcpupin vcpu="10" cpuset="5"/>
+    <vcpupin vcpu="11" cpuset="17"/>
+    <vcpupin vcpu="12" cpuset="6"/>
+    <vcpupin vcpu="13" cpuset="18"/>
+    <vcpupin vcpu="14" cpuset="7"/>
+    <vcpupin vcpu="15" cpuset="19"/>
+    <vcpupin vcpu="16" cpuset="8"/>
+    <vcpupin vcpu="17" cpuset="20"/>
+    <vcpupin vcpu="18" cpuset="9"/>
+    <vcpupin vcpu="19" cpuset="21"/>
+    <vcpupin vcpu="20" cpuset="10"/>
+    <vcpupin vcpu="21" cpuset="22"/>
+    <vcpupin vcpu="22" cpuset="11"/>
+    <vcpupin vcpu="23" cpuset="23"/>
+    <cachetune vcpus="0-2,12-14">
+      <cache id="0" level="3" type="both" size="16" unit="MiB"/>
+      <monitor level="3" vcpus="0-2,12-14"/>
+    </cachetune>
+    <cachetune vcpus="3-5,15-17">
+      <cache id="1" level="3" type="both" size="16" unit="MiB"/>
+      <monitor level="3" vcpus="3-5,15-17"/>
+    </cachetune>
+    <cachetune vcpus="6-8,18-20">
+      <cache id="2" level="3" type="both" size="16" unit="MiB"/>
+      <monitor level="3" vcpus="6-8,18-20"/>
+    </cachetune>
+    <cachetune vcpus="9-11,21-23">
+      <cache id="3" level="3" type="both" size="16" unit="MiB"/>
+      <monitor level="3" vcpus="9-11,21-23"/>
+    </cachetune>
+  </cputune>
+
+Unfortunately it gives the following error when I try to start the VM:
+
+Error starting domain: internal error: Missing or inconsistent resctrl info for memory bandwidth allocation
+
+I have resctrl mounted like this:
+
+mount -t resctrl resctrl /sys/fs/resctrl
+
+This error leads to the following description of how to allocate memory bandwidth: https://software.intel.com/content/www/us/en/develop/articles/use-intel-resource-director-technology-to-allocate-memory-bandwidth.html
+
+I think this is over the top and perhaps I'm trying the wrong approach. All I can say is that every suggestion I've seen and tried so far has led me to one conclusion: QEMU does NOT support the L3 cache layout of the new ZEN 2 arch CPUs such as the Ryzen 9 3900X.
+
+h-sieger,
+that is a misunderstanding; read my comment carefully again:
+"A workaround for Linux VMs is to disable CPUs (and setting their number/pinnings accordingly, e.g. every 4th (and 3rd for 3100) core is going to be 'dummy' and disabled system-wide) by e.g. echo 0 > /sys/devices/system/cpu/cpu3/online
+
+No good workaround for Windows VMs exists, as far as I know - the best you can do is setting affinity to specific process(es) and avoid the 'dummy' CPUs, but I am not aware of any possibility to disable specific CPUs (only limiting the overall number)."
+
+I do NOT have a fix - only a very ugly workaround for Linux guests only - I cannot fix the cache layout, but on Linux, I can get around that by adding dummy CPUs that I then disable in the guest during startup, so they are not used - effectively making sure that only the correct 6 vCPUs / 3 cores are used. On Windows, you cannot do that, AFAIK.
+
+Thanks for clarifying, Jan.
+
+In the meantime I tried a number of so-called solutions published on Reddit and other places, none of which seems to work.
+
+So if I understand it correctly, there is currently no solution to the incorrect L3 cache layout for Zen architecture CPUs - at best a workaround for Linux guests.
+
+I hope somebody is looking into that.
+
+The problem is caused by the fact that, with Ryzen CPUs with disabled cores, the APIC IDs are not sequential on the host - for the cache topology to be configured properly, there has to be a 'hole' in the APIC ID and core ID numbering (I have added the full cpuid output for my 3900X). Unfortunately, adding holes to the numbering is the only way to achieve what is needed for 3 cores per CCX, as the CPUID Fn8000_001D_EAX NumSharingCache parameter rounds to powers of two (for the Ryzen 3100 with 2 cores per CCX, lowering NumSharingCache should also work, correctly setting the L3 cache cores with their IDs still being sequential).
+
+A small hack in x86_apicid_from_topo_ids() in include/hw/i386/topology.h can introduce a correct numbering (at least if you do not have EPYC set as your CPU; otherwise the _epyc variants of the functions are used). But fixing this properly will probably require some thought - maybe introduce the ability to assign APIC IDs directly somehow? Or the ability to specify the 'holes' somehow in the -smp param, or maybe -cpu host,topoext=on should do this automatically? I don't know...
+
+E.g. for 3-cores-per-CCX CPUs, to fix this, at include/hw/i386/topology.h:220 change:
+
+(topo_ids->core_id << apicid_core_offset(topo_info)) |
+
+to
+
+((topo_ids->core_id + (topo_ids->core_id / 3)) << apicid_core_offset(topo_info)) |
+
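+To illustrate what that arithmetic does, here is a small self-contained C sketch (illustration only, not QEMU source; the 1-bit SMT offset below is an assumption for 2 threads per core):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        const unsigned core_offset = 1; /* core bits start above the 1 SMT bit */
+        for (unsigned core_id = 0; core_id < 9; core_id++) {
+            /* core_id / 3 inserts a hole after every 3rd core, mimicking a
+             * 3-cores-per-CCX part whose 4th core slot is fused off. */
+            unsigned apicid = (core_id + core_id / 3) << core_offset;
+            printf("core %u -> APIC ID %2u (CCX %u)\n", core_id, apicid, core_id / 3);
+        }
+        return 0;
+    }
+
+With the hole, each CCX occupies its own aligned block of 8 APIC IDs (4 core slots x 2 threads), which is what the power-of-two NumSharingCache rounding requires.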
+
+The cache topology is now correct (-cpu host,topoext=on,hv-time,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,host-cache-info=on -smp 18,sockets=1,dies=1,cores=9,threads=2), even in Windows:
+
+Logical Processor to Cache Map:
+**----------------  Data Cache          0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------  Instruction Cache   0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------  Unified Cache       0, Level 2,  512 KB, Assoc   8, LineSize  64
+******------------  Unified Cache       1, Level 3,   16 MB, Assoc  16, LineSize  64
+--**--------------  Data Cache          1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------  Instruction Cache   1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------  Unified Cache       2, Level 2,  512 KB, Assoc   8, LineSize  64
+----**------------  Data Cache          2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------  Instruction Cache   2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------  Unified Cache       3, Level 2,  512 KB, Assoc   8, LineSize  64
+------**----------  Data Cache          3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------  Instruction Cache   3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------  Unified Cache       4, Level 2,  512 KB, Assoc   8, LineSize  64
+------******------  Unified Cache       5, Level 3,   16 MB, Assoc  16, LineSize  64
+--------**--------  Data Cache          4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------  Instruction Cache   4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------  Unified Cache       6, Level 2,  512 KB, Assoc   8, LineSize  64
+----------**------  Data Cache          5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------  Instruction Cache   5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------  Unified Cache       7, Level 2,  512 KB, Assoc   8, LineSize  64
+------------**----  Data Cache          6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----  Instruction Cache   6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----  Unified Cache       8, Level 2,  512 KB, Assoc   8, LineSize  64
+------------******  Unified Cache       9, Level 3,   16 MB, Assoc  16, LineSize  64
+
+
+
+@Jan: this coreinfo output looks good.
+
+I finally managed to get the core/cache alignment right, I believe:
+
+  <vcpu placement="static" current="24">32</vcpu>
+  <vcpus>
+    <vcpu id="0" enabled="yes" hotpluggable="no"/>
+    <vcpu id="1" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="2" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="3" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="4" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="5" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="6" enabled="no" hotpluggable="yes"/>
+    <vcpu id="7" enabled="no" hotpluggable="yes"/>
+    <vcpu id="8" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="9" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="10" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="11" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="12" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="13" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="14" enabled="no" hotpluggable="yes"/>
+    <vcpu id="15" enabled="no" hotpluggable="yes"/>
+    <vcpu id="16" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="17" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="18" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="19" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="20" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="21" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="22" enabled="no" hotpluggable="yes"/>
+    <vcpu id="23" enabled="no" hotpluggable="yes"/>
+    <vcpu id="24" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="25" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="26" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="27" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="28" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="29" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="30" enabled="no" hotpluggable="yes"/>
+    <vcpu id="31" enabled="no" hotpluggable="yes"/>
+  </vcpus>
+  <cputune>
+    <vcpupin vcpu="0" cpuset="0"/>
+    <vcpupin vcpu="1" cpuset="12"/>
+    <vcpupin vcpu="2" cpuset="1"/>
+    <vcpupin vcpu="3" cpuset="13"/>
+    <vcpupin vcpu="4" cpuset="2"/>
+    <vcpupin vcpu="5" cpuset="14"/>
+    <vcpupin vcpu="8" cpuset="3"/>
+    <vcpupin vcpu="9" cpuset="15"/>
+    <vcpupin vcpu="10" cpuset="4"/>
+    <vcpupin vcpu="11" cpuset="16"/>
+    <vcpupin vcpu="12" cpuset="5"/>
+    <vcpupin vcpu="13" cpuset="17"/>
+    <vcpupin vcpu="16" cpuset="6"/>
+    <vcpupin vcpu="17" cpuset="18"/>
+    <vcpupin vcpu="18" cpuset="7"/>
+    <vcpupin vcpu="19" cpuset="19"/>
+    <vcpupin vcpu="20" cpuset="8"/>
+    <vcpupin vcpu="21" cpuset="20"/>
+    <vcpupin vcpu="24" cpuset="9"/>
+    <vcpupin vcpu="25" cpuset="21"/>
+    <vcpupin vcpu="26" cpuset="10"/>
+    <vcpupin vcpu="27" cpuset="22"/>
+    <vcpupin vcpu="28" cpuset="11"/>
+    <vcpupin vcpu="29" cpuset="23"/>
+  </cputune>
+
+...
+  <cpu mode="host-passthrough" check="none">
+    <topology sockets="1" dies="1" cores="16" threads="2"/>
+    <cache mode="passthrough"/>
+
+
+The Windows Coreinfo output is this:
+
+Logical to Physical Processor Map:
+**----------------  Physical Processor 0 (Hyperthreaded)
+--**--------------  Physical Processor 1 (Hyperthreaded)
+----**------------  Physical Processor 2 (Hyperthreaded)
+------**----------  Physical Processor 3 (Hyperthreaded)
+--------**--------  Physical Processor 4 (Hyperthreaded)
+----------**------  Physical Processor 5 (Hyperthreaded)
+------------**----  Physical Processor 6 (Hyperthreaded)
+--------------**--  Physical Processor 7 (Hyperthreaded)
+----------------**  Physical Processor 8 (Hyperthreaded)
+
+Logical Processor to Socket Map:
+******************  Socket 0
+
+Logical Processor to NUMA Node Map:
+******************  NUMA Node 0
+
+No NUMA nodes.
+
+Logical Processor to Cache Map:
+**----------------  Data Cache          0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------  Instruction Cache   0, Level 1,   32 KB, Assoc   8, LineSize  64
+**----------------  Unified Cache       0, Level 2,  512 KB, Assoc   8, LineSize  64
+******------------  Unified Cache       1, Level 3,   16 MB, Assoc  16, LineSize  64
+--**--------------  Data Cache          1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------  Instruction Cache   1, Level 1,   32 KB, Assoc   8, LineSize  64
+--**--------------  Unified Cache       2, Level 2,  512 KB, Assoc   8, LineSize  64
+----**------------  Data Cache          2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------  Instruction Cache   2, Level 1,   32 KB, Assoc   8, LineSize  64
+----**------------  Unified Cache       3, Level 2,  512 KB, Assoc   8, LineSize  64
+------**----------  Data Cache          3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------  Instruction Cache   3, Level 1,   32 KB, Assoc   8, LineSize  64
+------**----------  Unified Cache       4, Level 2,  512 KB, Assoc   8, LineSize  64
+------******------  Unified Cache       5, Level 3,   16 MB, Assoc  16, LineSize  64
+--------**--------  Data Cache          4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------  Instruction Cache   4, Level 1,   32 KB, Assoc   8, LineSize  64
+--------**--------  Unified Cache       6, Level 2,  512 KB, Assoc   8, LineSize  64
+----------**------  Data Cache          5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------  Instruction Cache   5, Level 1,   32 KB, Assoc   8, LineSize  64
+----------**------  Unified Cache       7, Level 2,  512 KB, Assoc   8, LineSize  64
+------------**----  Data Cache          6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----  Instruction Cache   6, Level 1,   32 KB, Assoc   8, LineSize  64
+------------**----  Unified Cache       8, Level 2,  512 KB, Assoc   8, LineSize  64
+------------******  Unified Cache       9, Level 3,   16 MB, Assoc  16, LineSize  64
+--------------**--  Data Cache          7, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------**--  Instruction Cache   7, Level 1,   32 KB, Assoc   8, LineSize  64
+--------------**--  Unified Cache      10, Level 2,  512 KB, Assoc   8, LineSize  64
+----------------**  Data Cache          8, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**  Instruction Cache   8, Level 1,   32 KB, Assoc   8, LineSize  64
+----------------**  Unified Cache      11, Level 2,  512 KB, Assoc   8, LineSize  64
+
+Logical Processor to Group Map:
+******************  Group 0
+
+
+Haven't been able to test if it performs as expected. Need to do that.
+
+Of course it would be great if QEMU were patched to recognize the correct CCX alignment, as I'm not sure whether there is a penalty for this weird setup, and if so, how big it is.
+
+Yep, I read the Reddit thread, had no idea this was possible.
+
+Still, both solutions are ugly workarounds and it would be nice to fix this properly. But at least I don't have to patch and compile QEMU on my own anymore.
+
+h-sieger,
+Your XML gave me very significant performance gains.
+Is there any way to do this with more than 24 assigned cores?
+
+
+@sanjaybmd
+
+I'm glad to read that it worked for you. In fact, since I posted the XML I didn't have the time to do benchmarking, now my motherboard is dead and I have to wait for repair/replacement.
+
+Do you have any data to quantify the performance gain?
+
+As to the number of cores, you will notice that my 3900X has only 12 physical cores, that is 24 threads. Yet I assigned 32 vcpus in total. 8 of them are disabled. This is to align the vcpus to the actual CCX topology of 3 cores per CCX.
+
+QEMU assumes the number of cores per CCX is a multiple of 2, e.g. 2, 4, etc. So I assign 4 cores = 8 vcpus and disable 2 vcpus to simulate the actual 3-core topology.
+
+If your CPU has more cores, you could scale it up. Be aware that the 3950X should not have this issue as it has 4 cores per CCX, if I remember correctly.
+
+Note: I took this idea from a Reddit post (see link somewhere above).
+
+h-sieger, 
+I did some testing with geekbench 5:
+
+baseline multicore score = 12733
+https://browser.geekbench.com/v5/cpu/3069626
+
+score with <cache="passthrough"> option = 12775
+https://browser.geekbench.com/v5/cpu/3069415
+
+best score with your xml above = 16960
+https://browser.geekbench.com/v5/cpu/3066003
+
+I'm running a 3960X and it has 3 cores per CCX, so your XML above works well. I'm just now learning about all this, so I'm still trying to figure out how to modify your XML to assign more cores. Anyway, I'm getting better performance out of my Windows 10 VM now assigning 24 vcpus as opposed to the 32 that I was assigning before!
+By the way, I tried to email you directly because I'm not sure this is an appropriate discussion for this bug report, but I could not create an account on your website (the captcha was malfunctioning). Hope you can fix that soon.
+
+
+Sanjay,
+
+You can just increase the number of vcpus, such as:
+
+<vcpu placement="static" current="48">64</vcpu>
+
+then continue to define the vcpus:
+
+    <vcpu id="32" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="33" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="34" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="35" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="36" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="37" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="38" enabled="no" hotpluggable="yes"/>
+    <vcpu id="39" enabled="no" hotpluggable="yes"/>
+    <vcpu id="40" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="41" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="42" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="43" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="44" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="45" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="46" enabled="no" hotpluggable="yes"/>
+    <vcpu id="47" enabled="no" hotpluggable="yes"/>
+    <vcpu id="48" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="49" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="50" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="51" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="52" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="53" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="54" enabled="no" hotpluggable="yes"/>
+    <vcpu id="55" enabled="no" hotpluggable="yes"/>
+    <vcpu id="56" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="57" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="58" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="59" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="60" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="61" enabled="yes" hotpluggable="yes"/>
+    <vcpu id="62" enabled="no" hotpluggable="yes"/>
+    <vcpu id="63" enabled="no" hotpluggable="yes"/>
+
+(6x enabled=yes, then 2x enabled=no.)
+
+You will get more vcpu ids than you have threads, but since you disable 16 out of 64, you will have 48 active.
+
+vcpupin should continue as follows:
+
+    <vcpupin vcpu="32" cpuset="24"/>
+    <vcpupin vcpu="33" cpuset="36"/>
+    <vcpupin vcpu="34" cpuset="25"/>
+    <vcpupin vcpu="35" cpuset="37"/>
+    <vcpupin vcpu="36" cpuset="26"/>
+    <vcpupin vcpu="37" cpuset="38"/>
+    <vcpupin vcpu="40" cpuset="27"/>
+    <vcpupin vcpu="41" cpuset="39"/>
+    <vcpupin vcpu="42" cpuset="28"/>
+    <vcpupin vcpu="43" cpuset="40"/>
+    <vcpupin vcpu="44" cpuset="29"/>
+    <vcpupin vcpu="45" cpuset="41"/>
+    <vcpupin vcpu="48" cpuset="30"/>
+    <vcpupin vcpu="49" cpuset="42"/>
+    <vcpupin vcpu="50" cpuset="31"/>
+    <vcpupin vcpu="51" cpuset="43"/>
+    <vcpupin vcpu="52" cpuset="32"/>
+    <vcpupin vcpu="53" cpuset="44"/>
+    <vcpupin vcpu="56" cpuset="33"/>
+    <vcpupin vcpu="57" cpuset="45"/>
+    <vcpupin vcpu="58" cpuset="34"/>
+    <vcpupin vcpu="59" cpuset="46"/>
+    <vcpupin vcpu="60" cpuset="35"/>
+    <vcpupin vcpu="61" cpuset="47"/>
+
+This is if you pin all vcpus to the VM, which may not be the best thing to do. The maximum number of vcpus you can pin on a Threadripper 3960X is 48.
+
+The QEMU project is currently considering to move its bug tracking to
+another system. For this we need to know which bugs are still valid
+and which could be closed already. Thus we are setting older bugs to
+"Incomplete" now.
+
+If you still think this bug report here is valid, then please switch
+the state back to "New" within the next 60 days, otherwise this report
+will be marked as "Expired". Or please mark it as "Fix Released" if
+the problem has been solved with a newer version of QEMU already.
+
+Thank you and sorry for the inconvenience.
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1865252 b/results/classifier/108/semantic/1865252
new file mode 100644
index 000000000..2d501a5e3
--- /dev/null
+++ b/results/classifier/108/semantic/1865252
@@ -0,0 +1,51 @@
+semantic: 0.951
+PID: 0.884
+other: 0.876
+boot: 0.867
+permissions: 0.850
+device: 0.846
+files: 0.831
+network: 0.823
+vnc: 0.816
+socket: 0.809
+graphic: 0.787
+performance: 0.787
+KVM: 0.712
+debug: 0.676
+
+QEMU Windows Portable Version (with HAXM accelerator and QEMU GUI)
+
+Please consider providing a QEMU Windows portable [1] [2] [3] version on official qemu.org.
+
+Reasons:
+
+* This would improve usability, the out of the box user experience of laymen (non-technical) users.
+* Linux distributions could add the QEMU Windows portable version to their installer / live ISO images (and the DVD's autorun.inf). Users who are still running on the Windows platform would then have an easy path to try out a Linux distribution by running it inside QEMU. I've seen that done some years ago: I was running Windows, opened the DVD drive in Windows Explorer, double-clicked, and QEMU (shipped with the ISO) booted the ISO.
+
+Ideally, the QEMU Windows portable version would be bundled with:
+
+* the [QEMU HAXM accelerator] by default. Related ticket: [5]
+* a QEMU GUI by default. Related ticket: [6]
+
+
+[1] When I say "Windows Portable" I mean "USB portable". [4]
+
+[2] A compressed archive (zip or similar) which after extraction can be executed without further installation / setup required. As far as I know, [https://portableapps.com portableapps.com] is the most popular project of that kind.
+
+[3] QEMU might already be portable or mostly portable. See:
+
+* https://portableapps.com/search/node/QEMU
+* https://www.google.com/search?hl=en&q=site%3Aportableapps.com%20QEMU%20portable
+* https://www.portablefreeware.com/?id=640
+* https://willhaley.com/blog/simple-portable-linux-qemu-vm-usb/
+
+But I am not sure the above projects are still maintained. It would certainly be better if the official qemu.org provided a QEMU Windows portable version.
+
+[4] Or more generally: "can be run from any external storage medium on any Windows [10] computer".
+
+[5] https://bugs.launchpad.net/qemu/+bug/1864955
+
+[6] https://bugs.launchpad.net/qemu/+bug/1865248
+
+QEMU, like most open source projects, relies on contributors who have motivation, skills and available time to work on implementing particular features. They naturally tend to focus on features that result in the greatest benefit to their own use cases. I'm sorry, but as far as I know there is currently nobody working on such a topic, and opening a ticket like this won't make it happen without some new contributor to step up to do the job. Thus I'm closing this ticket now. Feel free to re-open if you know someone who could contribute this feature.
+
diff --git a/results/classifier/108/semantic/1898215 b/results/classifier/108/semantic/1898215
new file mode 100644
index 000000000..2de49f9d8
--- /dev/null
+++ b/results/classifier/108/semantic/1898215
@@ -0,0 +1,100 @@
+semantic: 0.924
+graphic: 0.915
+permissions: 0.903
+other: 0.895
+debug: 0.892
+PID: 0.854
+device: 0.849
+files: 0.823
+boot: 0.805
+network: 0.804
+performance: 0.788
+socket: 0.783
+vnc: 0.760
+KVM: 0.747
+
+[git][archlinux]Build process is busted in spice-display.c
+
+Linux distribution: Archlinux. Crash log added is based on a build from scratch.
+
+Gcc version: 10.2.0
+
+Configure options used:
+
+configure \
+    --prefix=/usr \
+    --sysconfdir=/etc \
+    --localstatedir=/var \
+    --libexecdir=/usr/lib/qemu \
+    --extra-ldflags="$LDFLAGS" \
+    --smbd=/usr/bin/smbd \
+    --enable-modules \
+    --enable-sdl \
+    --disable-werror \
+    --enable-slirp=system \
+    --enable-xfsctl \
+    --audio-drv-list="pa alsa sdl"
+
+Crash log:
+
+../ui/spice-display.c: In function 'interface_client_monitors_config':
+../ui/spice-display.c:682:25: error: 'VD_AGENT_CONFIG_MONITORS_FLAG_PHYSICAL_SIZE' undeclared (first use in this function); did you mean 'VD_AGENT_CONFIG_MONITORS_FLAG_USE_POS'?
+  682 |         if (mc->flags & VD_AGENT_CONFIG_MONITORS_FLAG_PHYSICAL_SIZE) {
+      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+      |                         VD_AGENT_CONFIG_MONITORS_FLAG_USE_POS
+../ui/spice-display.c:682:25: note: each undeclared identifier is reported only once for each function it appears in
+../ui/spice-display.c:683:13: error: unknown type name 'VDAgentMonitorMM'
+  683 |             VDAgentMonitorMM *mm = (void *)&mc->monitors[mc->num_of_monitors];
+      |             ^~~~~~~~~~~~~~~~
+../ui/spice-display.c:684:37: error: request for member 'width' in something not a structure or union
+  684 |             info.width_mm = mm[head].width;
+      |                                     ^
+../ui/spice-display.c:685:38: error: request for member 'height' in something not a structure or union
+  685 |             info.height_mm = mm[head].height;
+      |                                      ^
+make: *** [Makefile.ninja:2031: libcommon.fa.p/ui_spice-display.c.o] Error 1
+make: *** Waiting for unfinished jobs....
+
+Full build log with make V=1.
+
+This is a bug in the spice-server meson build system:
+https://gitlab.freedesktop.org/spice/spice/-/commit/37fd91a51f52cdc1b55d3ce41e6ce6db348b986c
+
+Most likely they will end up bumping the version to 0.15, so we may want to update the condition in qemu.
+
+Already reported to Arch:
+
+https://bugs.archlinux.org/task/68061
+
+The QEMU project is currently moving its bug tracking to another system.
+For this we need to know which bugs are still valid and which could be
+closed already. Thus we are setting the bug state to "Incomplete" now.
+
+If the bug has already been fixed in the latest upstream version of QEMU,
+then please close this ticket as "Fix released".
+
+If it is not fixed yet and you think that this bug report here is still
+valid, then you have two options:
+
+1) If you already have an account on gitlab.com, please open a new ticket
+for this problem in our new tracker here:
+
+    https://gitlab.com/qemu-project/qemu/-/issues
+
+and then close this ticket here on Launchpad (or let it expire auto-
+matically after 60 days). Please mention the URL of this bug ticket on
+Launchpad in the new ticket on GitLab.
+
+2) If you don't have an account on gitlab.com and don't intend to get
+one, but still would like to keep this ticket opened, then please switch
+the state back to "New" or "Confirmed" within the next 60 days (other-
+wise it will get closed as "Expired"). We will then eventually migrate
+the ticket automatically to the new system (but you won't be the reporter
+of the bug in the new system and thus you won't get notified on changes
+anymore).
+
+Thank you and sorry for the inconvenience.
+
+
+Fix released
+
diff --git a/results/classifier/108/semantic/1905562 b/results/classifier/108/semantic/1905562
new file mode 100644
index 000000000..5b3ccaf2a
--- /dev/null
+++ b/results/classifier/108/semantic/1905562
@@ -0,0 +1,82 @@
+semantic: 0.939
+graphic: 0.918
+other: 0.907
+permissions: 0.881
+debug: 0.854
+performance: 0.852
+socket: 0.816
+files: 0.816
+device: 0.802
+vnc: 0.784
+KVM: 0.768
+network: 0.760
+PID: 0.724
+boot: 0.673
+
+Guest seems suspended after host freed memory for it using oom-killer
+
+Host: qemu 5.1.0, linux 5.5.13
+Guest: Windows 7 64-bit
+
+This guest ran a memory intensive process and triggered the oom-killer on the host.  Luckily, it killed chromium.  My understanding is that this should mean qemu continues running unharmed.  But the spice connection shows the guest system clock is stuck at the exact time the oom-killer was triggered.  The guest is completely unresponsive.
+
+I can telnet to the qemu monitor.  "info status" shows "running".  But, multiple times running "info registers -a" and saving the output to text files shows the registers are 100% unchanged, so it's not really running.
+
+On the host, top shows around 4% CPU usage by qemu.  strace shows that about 1,000 times a second these 6 lines repeat:
+
+0.000698 ioctl(18, KVM_IRQ_LINE_STATUS, 0x7fff1f030c10) = 0 <0.000010>
+0.000034 ioctl(18, KVM_IRQ_LINE_STATUS, 0x7fff1f030c60) = 0 <0.000009>
+0.000031 ioctl(18, KVM_IRQ_LINE_STATUS, 0x7fff1f030c20) = 0 <0.000007>
+0.000028 ioctl(18, KVM_IRQ_LINE_STATUS, 0x7fff1f030c70) = 0 <0.000007>
+0.000030 ppoll([{fd=4, events=POLLIN}, {fd=6, events=POLLIN}, {fd=7, events=POLLIN}, {fd=8, events=POLLIN}, {fd=9, events=POLLIN}, {fd=11, events         =POLLIN}, {fd=16, events=POLLIN}, {fd=32, events=POLLIN}, {fd=34, events=POLLIN}, {fd=39, events=POLLIN}, {fd=40, events=POLLIN}, {fd=41, events=POLLI         N}, {fd=42, events=POLLIN}, {fd=43, events=POLLIN}, {fd=44, events=POLLIN}, {fd=45, events=POLLIN}], 16, {tv_sec=0, tv_nsec=0}, NULL, 8) = 0 (Timeout)          <0.000009>
+0.000043 ppoll([{fd=4, events=POLLIN}, {fd=6, events=POLLIN}, {fd=7, events=POLLIN}, {fd=8, events=POLLIN}, {fd=9, events=POLLIN}, {fd=11, events         =POLLIN}, {fd=16, events=POLLIN}, {fd=32, events=POLLIN}, {fd=34, events=POLLIN}, {fd=39, events=POLLIN}, {fd=40, events=POLLIN}, {fd=41, events=POLLI         N}, {fd=42, events=POLLIN}, {fd=43, events=POLLIN}, {fd=44, events=POLLIN}, {fd=45, events=POLLIN}], 16, {tv_sec=0, tv_nsec=769662}, NULL, 8) = 0 (Tim         eout) <0.000788>
+
+In the monitor, "info irq" shows IRQ 0 is increasing about 1,000 times a second.  IRQ 0 seems to be for the system clock, and 1,000 times a second seems to be the frequency a windows 7 guest might have the clock at.
+
+Those fd's are for: (9) [eventfd]; [signalfd], type=STREAM, 4 x the spice socket file, and "TCP localhost:ftnmtp->localhost:36566 (ESTABLISHED)".
+
+Because the guest's registers aren't changing, it seems to me that the monitor thinks the VM is running, but it's actually effectively in a paused state.  I think all the strace activity shown above must be generated by the host side.  Perhaps it's repeatedly trying to inject a new clock into the guest and to communicate with it on the various eventfd's, the spice socket, etc.  So I'm thinking the strace doesn't give any information about the real reason why the VM is acting as if it's paused.
+
+I've checked "info block", and there's nothing showing that a device is paused, or that there's any issues with them.  (Can't remember what term can be there, but a paused/blocked/etc block device I think caused a VM to act like this for me in the past.)
+
+
+Is there something I can provide to help fix the bug here?
+
+Is there something I can do, to try to get the VM running again?  (I sadly have unsaved work in it.)
+
+
+
+Am I correct to expect the VM to continue successfully after the oom-killer freed up memory?  The journalctl output does show a call trace which includes "vmx_vmexit", and I'm not sure what that function is for, but it looks a little worrisome.
+
+The QEMU project is currently moving its bug tracking to another system.
+For this we need to know which bugs are still valid and which could be
+closed already. Thus we are setting the bug state to "Incomplete" now.
+
+If the bug has already been fixed in the latest upstream version of QEMU,
+then please close this ticket as "Fix released".
+
+If it is not fixed yet and you think that this bug report here is still
+valid, then you have two options:
+
+1) If you already have an account on gitlab.com, please open a new ticket
+for this problem in our new tracker here:
+
+    https://gitlab.com/qemu-project/qemu/-/issues
+
+and then close this ticket here on Launchpad (or let it expire auto-
+matically after 60 days). Please mention the URL of this bug ticket on
+Launchpad in the new ticket on GitLab.
+
+2) If you don't have an account on gitlab.com and don't intend to get
+one, but still would like to keep this ticket opened, then please switch
+the state back to "New" or "Confirmed" within the next 60 days (other-
+wise it will get closed as "Expired"). We will then eventually migrate
+the ticket automatically to the new system (but you won't be the reporter
+of the bug in the new system and thus you won't get notified on changes
+anymore).
+
+Thank you and sorry for the inconvenience.
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1905979 b/results/classifier/108/semantic/1905979
new file mode 100644
index 000000000..fdbdf40c3
--- /dev/null
+++ b/results/classifier/108/semantic/1905979
@@ -0,0 +1,70 @@
+semantic: 0.910
+permissions: 0.900
+other: 0.869
+performance: 0.868
+device: 0.867
+graphic: 0.854
+network: 0.834
+debug: 0.820
+PID: 0.812
+vnc: 0.795
+files: 0.788
+socket: 0.778
+KVM: 0.775
+boot: 0.765
+
+Check if F_OFD_SETLK is supported may give wrong result
+
+In util/osdep.c there is a function qemu_probe_lock_ops() to check if file locks F_OFD_SETLK and F_OFD_GETLK (of the style "Open file description locks (non-POSIX)") are supported.
+
+This test is done by trying a lock operation on the file /dev/null.
+
+This test can get a wrong result.
+
+The result (probably) reflects whether the operating system *in general* supports these locks. However, it does not guarantee that the file system where the lock is really wanted (for instance, in the caller raw_check_lock_bytes() in block/file-posix.c) supports these locks.
+
+(In theory it could even be that /dev/null, being a device special file, does not support the lock type while a plain file would.)
+
+This is in particular relevant for disk images which are stored on a shared file system (my particular use case is the Quobyte file system, which appears not to support these locks).
+
+The code mentioned above is present in the master branch (I checked commit ea8208249d1082eae0444934efb3b59cd3183f05), but also, for example, on stable-2.11 (commit 0982a56a551556c704dc15752dabf57b4be1c640).
+
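+A minimal sketch of what a more targeted probe could look like (a sketch only: probe_ofd_lock() and the idea of probing the destination file instead of /dev/null are illustrative, not QEMU's actual API):
+
+#define _GNU_SOURCE          /* for F_OFD_GETLK on glibc */
+#include <fcntl.h>
+#include <stdbool.h>
+#include <string.h>
+
+/* Probe OFD lock support on the file we actually want to lock,
+ * rather than on /dev/null. */
+static bool probe_ofd_lock(int fd)
+{
+#ifdef F_OFD_GETLK
+    struct flock fl;
+
+    memset(&fl, 0, sizeof(fl));
+    fl.l_whence = SEEK_SET;   /* query a write lock on byte 0 */
+    fl.l_start  = 0;
+    fl.l_len    = 1;
+    fl.l_type   = F_WRLCK;
+    return fcntl(fd, F_OFD_GETLK, &fl) == 0;
+#else
+    return false;
+#endif
+}
+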
+This is rather serious, since it causes VMs to crash:
+
+Unexpected error in raw_check_lock_bytes() at /build/qemu-PKI6mj/qemu-4.2/block/file-posix.c:796:
+Failed to get "write" lock
+2020-11-23 11:32:27.810+0000: shutting down, reason=crashed
+
+when openstack attempts to create a snapshot.
+
+In this thread, it is pointed out that support for OFD is provided by the generic VFS layer in the kernel, so there should never be a situation where one filesystem supports OFD and another does not support OFD:
+
+  https://lists.gnu.org/archive/html/qemu-devel/2020-11/msg05264.html
+
+Can you say what filesystem you are using that exhibits the lack of OFD support, and what kernel version?
+
+Interesting. Thanks for the link.
+
+The file system we are using is the Quobyte file system (2.24.1) (https://www.quobyte.com/), which works via FUSE. 
+We've had problems with OFD locks with this file system in the past, so my first thought, seeing the error in comment #1, was that those would be to blame.
+
+But if the OFD locks are not really handled by the file system, I'm not sure how that explains the OFD lock issues we had in the past. I don't suppose this changed in the last year or so. Just now I made a little test program (basically copying qemu_lock_fd_test() and qemu_probe_lock_ops() from qemu) to double-check, and indeed right now it seems that the OFD locks *are* working on the Quobyte file system. Or at least qemu_lock_fd_test() doesn't return an error.
+
+So now I'm back to square one on diagnosing the observed error. It occurred in an installation of Openstack Ussuri installed on Ubuntu 18.04 Bionic using the Ubuntu Cloud Archive for packaging. The Cloud Archive has backports of the latest Qemu to earlier Ubuntu versions. The exact qemu version was http://ubuntu-cloud.archive.canonical.com/ubuntu/pool/main/q/qemu/qemu_4.2-3ubuntu6.7~cloud0_amd64.deb . 
+
+Annoyingly I have not been able to locate the git repo from which the Ubuntu Cloud Archive creates its packages (containing the patches and build changes for backports); all I can find is version 4.2-3ubuntu6.7 (without ~cloud0) which is for Ubuntu 20.04 Focal. 
+
+For now we're working around it by downgrading Qemu to the normal Bionic version (2.11+dfsg-1ubuntu7.33)
+
+You wouldn't happen to know where the Ubuntu Cloud Archive stores exact files it creates its packages from? (I have already asked on stackoverflow without success so far:  https://stackoverflow.com/questions/65146846/from-which-git-repos-does-the-ubuntu-cloud-archive-compile-its-packages)
+
+
+
+Look in the same directory as that .deb link above - the files ending in orig.tar.gz (upstream source) and the files ending in debian.tar.xz (downstream modifications)
+
+The kernel version is Linux hostname 4.15.0-124-generic #127-Ubuntu SMP Fri Nov 6 10:54:43 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
+
+That is indeed the source and patches, but I wanted to follow their git repo for easier maintenance. Surely they must have one.
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1907969 b/results/classifier/108/semantic/1907969
new file mode 100644
index 000000000..77e6a74b7
--- /dev/null
+++ b/results/classifier/108/semantic/1907969
@@ -0,0 +1,166 @@
+semantic: 0.912
+PID: 0.907
+graphic: 0.905
+permissions: 0.904
+debug: 0.902
+other: 0.876
+performance: 0.854
+device: 0.841
+network: 0.838
+files: 0.824
+KVM: 0.813
+vnc: 0.806
+boot: 0.801
+socket: 0.794
+
+linux-user/i386: Segfault when mixing threads and signals
+
+Given the following C program, qemu-i386 will reliably segfault when executing it.
+The problem is only noticeable if the program is statically linked against musl's libc and, as stated
+in the title, it only manifests when targeting i386.
+
+Removing the pthread calls or the second raise() prevents the segfault.
+
+The crash is in some part of the TCG-generated code, right when it tries to perform a
+%gs-relative access.
+
+If you want a quick way of cross-compiling this binary:
+
+* Download a copy of the Zig compiler from https://ziglang.org/download/
+* Compile it with
+  `zig cc -target i386-linux-musl <C-FILE> -o <OUT>`
+
+```
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <asm/prctl.h>
+#include <sys/syscall.h>
+
+void sig_func(int sig)
+{
+    write(1, "hi!\n", strlen("hi!\n"));
+}
+
+void func(void *p) { }
+
+typedef void *(*F)(void *);
+
+int main()
+{
+    pthread_t tid;
+
+    struct sigaction action;
+    action.sa_flags = 0;
+    action.sa_handler = sig_func;
+
+    if (sigaction(SIGUSR1, &action, NULL) == -1) {
+        return 1;
+    }
+
+    // This works.
+    raise(SIGUSR1);
+
+    pthread_create(&tid, NULL, (F)func, NULL);
+    pthread_join(tid, NULL);
+
+    // This makes qemu segfault.
+    raise(SIGUSR1);
+}
+```
+
+
+
+I finally understand where the problem is.
+
+Qemu's user-mode emulation maps guest threads to native ones by spawning a new native one
+and running a forked copy of the CPUX86State in parallel with the main thread.
+
+This works fine for pretty much every architecture but i386, where the GDT/LDT comes into
+play: the two descriptor tables are shared among all the threads, mimicking the real hw
+behaviour, but since no host task-switching is being performed, the TLS entries in the GDT
+become stale.
+
+Raising a signal makes Qemu reload the GS segment from the GDT; that's why removing that
+line makes the problem disappear.
+
+The problem is also confined to musl libc because of an interesting implementation choice.
+Once a thread dies Glibc adds the now unused stack to a queue in order to reuse it later,
+while musl frees it right away when it's not needed anymore and, as a consequence, makes
+Qemu segfault.
+
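+For context, here is a minimal sketch of how an i386 thread's TLS lands in the GDT
+in the first place (illustrative only; install_tls() is a hypothetical helper, while
+struct user_desc and SYS_set_thread_area are the real Linux i386 interfaces):
+
+```
+#define _GNU_SOURCE
+#include <asm/ldt.h>        /* struct user_desc */
+#include <sys/syscall.h>
+#include <unistd.h>
+
+/* Install a TLS block into a GDT slot and return the %gs selector.
+ * If the emulator lets this GDT entry go stale, every %gs-relative
+ * access in the guest faults - which is exactly the crash above. */
+static int install_tls(void *tls_block, unsigned int size)
+{
+    struct user_desc desc = {
+        .entry_number = -1,                  /* let the kernel pick a slot */
+        .base_addr    = (unsigned long)tls_block,
+        .limit        = size,
+        .seg_32bit    = 1,
+        .useable      = 1,
+    };
+
+    if (syscall(SYS_set_thread_area, &desc) != 0) {
+        return -1;
+    }
+    return desc.entry_number * 8 + 3;        /* selector with RPL 3 */
+}
+```
+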
+As luck would have it, after spending too much time debugging this, I found somebody else had
+already stumbled across this problem and written a patch.
+
+https://<email address hidden>/mbox
+
+Too bad the patch flew under the radar...
+
+Le 16/12/2020 à 09:59, The Lemon Man a écrit :
+> I finally understand where the problem is.
+> 
+> Qemu's user-mode emulation maps guest threads to native ones by spawning a new native one
+> and running a forked copy of the CPUX86State in parallel with the main thread.
+> 
+> This works fine for pretty much every architecture but i386 where the GDT/LDT comes into
+> play: the two descriptor tables are shared among all the threads, mimicking the real hw
+> behaviour, but since no host task-switching is being performed the TLS entry in the GDT
+> become stale.
+> 
+> Raising a signal makes Qemu reload the GS segment from the GDT, that's why removing that
+> line makes the problem disappear.
+> 
+> The problem is also confined to musl libc because of an interesting implementation choice.
+> Once a thread dies Glibc adds the now unused stack to a queue in order to reuse it later,
+> while musl frees it right away when it's not needed anymore and, as a consequence, makes
+> Qemu segfault.
+> 
+> As luck has it, after spending too much time debugging this, I found somebody else already
+> stumbled across this problem and wrote a patch. 
+> 
+> https://<email address hidden>/mbox
+> 
+> Too bad the patch flew under the radar...
+> 
+
+Could you add a Reviewed-by and/or a Tested-by to the patch on the ML?
+
+Thanks,
+Laurent
+
+
+The QEMU project is currently moving its bug tracking to another system.
+For this we need to know which bugs are still valid and which could be
+closed already. Thus we are setting the bug state to "Incomplete" now.
+
+If the bug has already been fixed in the latest upstream version of QEMU,
+then please close this ticket as "Fix released".
+
+If it is not fixed yet and you think that this bug report here is still
+valid, then you have two options:
+
+1) If you already have an account on gitlab.com, please open a new ticket
+for this problem in our new tracker here:
+
+    https://gitlab.com/qemu-project/qemu/-/issues
+
+and then close this ticket here on Launchpad (or let it expire auto-
+matically after 60 days). Please mention the URL of this bug ticket on
+Launchpad in the new ticket on GitLab.
+
+2) If you don't have an account on gitlab.com and don't intend to get
+one, but still would like to keep this ticket opened, then please switch
+the state back to "New" or "Confirmed" within the next 60 days (other-
+wise it will get closed as "Expired"). We will then eventually migrate
+the ticket automatically to the new system (but you won't be the reporter
+of the bug in the new system and thus you won't get notified on changes
+anymore).
+
+Thank you and sorry for the inconvenience.
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+
diff --git a/results/classifier/108/semantic/1922391 b/results/classifier/108/semantic/1922391
new file mode 100644
index 000000000..434670e40
--- /dev/null
+++ b/results/classifier/108/semantic/1922391
@@ -0,0 +1,142 @@
+semantic: 0.911
+graphic: 0.878
+permissions: 0.870
+PID: 0.869
+device: 0.838
+other: 0.837
+debug: 0.829
+performance: 0.814
+boot: 0.796
+vnc: 0.794
+network: 0.784
+socket: 0.761
+files: 0.660
+KVM: 0.603
+
+qemu-system-ppc assertion "!mr->container" failed
+
+Hi,
+
+I'm trying to run the NetBSD/macppc 8.2 installer (which is 32-bit ppc) in qemu-system-ppc
+version 5.2.0, and I'm hitting this assertion failure quite a bit into the "unpacking sets" 
+part of the installation procedure, unpacking from the install iso image.
+
+Qemu is run on a NetBSD/amd64 9.1 host system.  The stack backtrace from the core file is
+
+Program terminated with signal SIGABRT, Aborted.
+#0  0x000078859a36791a in _lwp_kill () from /usr/lib/libc.so.12
+[Current thread is 1 (process 1)]
+(gdb) where
+#0  0x000078859a36791a in _lwp_kill () from /usr/lib/libc.so.12
+#1  0x000078859a3671ca in abort () from /usr/lib/libc.so.12
+#2  0x000078859a2a8507 in __assert13 () from /usr/lib/libc.so.12
+#3  0x000000015a3c19c0 in memory_region_finalize ()
+#4  0x000000015a3fef1c in object_unref ()
+#5  0x000000015a3feee6 in object_unref ()
+#6  0x000000015a374154 in address_space_unmap ()
+#7  0x000000015a276551 in pmac_ide_atapi_transfer_cb ()
+#8  0x000000015a150a59 in dma_blk_cb ()
+#9  0x000000015a46a1c7 in blk_aio_complete ()
+#10 0x000000015a5a617d in coroutine_trampoline ()
+#11 0x000078859a264150 in ?? () from /usr/lib/libc.so.12
+Backtrace stopped: Cannot access memory at address 0x7884894ff000
+(gdb) 
+
+I start qemu with this small script:
+
+---
+#!/bin/sh
+
+MEM=3g
+qemu-system-ppc \
+        -M mac99,via=pmu \
+        -m $MEM  \
+        -nographic \
+        -drive id=hda,format=raw,file=disk.img \
+        -L pc-bios \
+        -netdev user,id=net0,hostfwd=tcp::2223-:22,ipv6=off \
+        -net nic,model=rtl8139,netdev=net0 \
+        -boot d \
+        -cdrom NetBSD-8.2-macppc.iso
+---
+
+and boot the install kernel with "boot cd:ofwboot.xcf".  If someone wants
+to replicate this I can provide more detailed instructions to repeat the
+procedure I used to start the install.
+
+Any hints about what more to look for?
+
+Regards,
+
+- Håvard
+
+Hmm,
+
+it seems I need to retract this bug.  It turns out that the 32-bit macppc port
+of NetBSD only supports a maximum of 2GB of memory.  As a NetBSD developer put it:
+
+> The physical memory map on G4 Macs doesn't have room for more than 2G of RAM.
+
+So, I've set the status of this bug report to "Invalid", as that seemed to be the
+best fit.
+
+Regards,
+
+- Håvard
+
+
+If the machine cannot support more than 2GB, QEMU should report an error when the user tries to assign too much memory, not crash and leave the user to figure it out.
+Setting the bug status to confirmed.
+
+Proposed fix:
+https://lists.gnu.org/archive/html/qemu-devel/2021-04/msg00570.html
+
+On 4/7/21 3:11 PM, Mark Cave-Ayland wrote:
+> On 06/04/2021 09:48, Philippe Mathieu-Daudé wrote:
+> 
+>> On Mac99 and newer machines, the Uninorth PCI host bridge maps
+>> the PCI hole region at 2GiB, so the RAM area beside 2GiB is not
+>> accessible by the CPU. Restrict the memory to 2GiB to avoid
+>> problems such the one reported in the buglink.
+>>
+>> Buglink: https://bugs.launchpad.net/qemu/+bug/1922391
+>> Reported-by: Håvard Eidnes <email address hidden>
+>> Signed-off-by: Philippe Mathieu-Daudé <email address hidden>
+>> ---
+>>   hw/ppc/mac_newworld.c | 4 ++++
+>>   1 file changed, 4 insertions(+)
+>>
+>> diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
+>> index 21759628466..d88b38e9258 100644
+>> --- a/hw/ppc/mac_newworld.c
+>> +++ b/hw/ppc/mac_newworld.c
+>> @@ -157,6 +157,10 @@ static void ppc_core99_init(MachineState *machine)
+>>       }
+>>         /* allocate RAM */
+>> +    if (machine->ram_size > 2 * GiB) {
+>> +        error_report("RAM size more than 2 GiB is not supported");
+>> +        exit(1);
+>> +    }
+>>       memory_region_add_subregion(get_system_memory(), 0, machine->ram);
+>>         /* allocate and load firmware ROM */
+> 
+> I think the patch is correct, however I'm fairly sure that the default
+> g3beige machine also has the PCI hole located at 0x80000000 so the same
+> problem exists there too.
+> 
+> Also are you keen to get this merged for 6.0? It doesn't seem to solve a
+> security issue/release blocker and I'm sure the current behaviour has
+> been like this for a long time...
+
+No problem. I wanted to revisit this bug anyway. I realized during the
+night that while this patch makes QEMU exit cleanly, it hides the bug,
+which is likely in TYPE_MACIO_IDE (I haven't tried Håvard's full reproducer).
+
+Regards,
+
+Phil.
+
+
+Philippe's fix has been merged here:
+https://gitlab.com/qemu-project/qemu/-/commit/03b3542ac93cb196bf6a6
+
diff --git a/results/classifier/108/semantic/227 b/results/classifier/108/semantic/227
new file mode 100644
index 000000000..4e1bffecf
--- /dev/null
+++ b/results/classifier/108/semantic/227
@@ -0,0 +1,16 @@
+semantic: 0.934
+other: 0.729
+graphic: 0.386
+device: 0.329
+files: 0.183
+vnc: 0.163
+socket: 0.136
+performance: 0.133
+permissions: 0.072
+PID: 0.069
+boot: 0.060
+KVM: 0.054
+network: 0.040
+debug: 0.028
+
+meson: incomplete 'make help'
diff --git a/results/classifier/108/semantic/237164 b/results/classifier/108/semantic/237164
new file mode 100644
index 000000000..1c8bc48fd
--- /dev/null
+++ b/results/classifier/108/semantic/237164
@@ -0,0 +1,110 @@
+semantic: 0.922
+debug: 0.897
+permissions: 0.894
+other: 0.887
+PID: 0.882
+performance: 0.873
+graphic: 0.856
+device: 0.855
+socket: 0.824
+files: 0.821
+network: 0.801
+KVM: 0.785
+boot: 0.735
+vnc: 0.734
+
+kvm needs to correctly simulate a proper monitor
+
+Binary package hint: xorg
+
+With xserver-xorg-video-cirrus 1.2.1, there should no longer be any need for special handling of kvm in dexconf.
+See also: bug 193323.
+
+Quote from Bryce:
+>Possibly with this fix, some portion of the kvm-specific changes to
+>dexconf could be dropped.
+>
+>If anyone is interested in assisting with this, please file a new bug assigned to me, attach a minimal xorg.conf that has been adequately tested.  Here are >the current kvm-specific things dexconf is doing:
+>a) hardcoding the driver to cirrus
+>b) specifying the depth
+>c) setting the HorizSync and VertRefresh
+>d) specifying the available resolutions
+>
+>In theory, none of these four things should be necessary, but I suspect
+>this bug fix only addresses b and maybe c.  Please test if these can be
+>removed and if so, file a bug and I can take care of dropping them in
+>dexconf.  Thanks ahead of time.
+
+Considering this is a follow-up bug to #193323, it should certainly be marked as 'Confirmed', since it is a genuine issue.
+
+
+Since I've compared the qemu and kvm sources to find out why kvm works and qemu doesn't (d'oh *g*), here are my results:
+a) not too sure if this is addressed with the update, or if this was a problem in the first place.
+b) dexconf sets the depth to 24, which the driver now also does if it finds the corresponding cirrus card
+c) haven't seen any implementation difference between qemu/kvm, so it should work
+d) same as for c).
+
+To make FAUmachine (which however has a different cirrus implementation than qemu) work with the old cirrus driver, the only thing that was needed in the first place was to set the default depth to 24bpp.
+
+However, I suggest keeping this bug in the state "New" until someone has in fact tested that kvm works with a plain xorg.conf.
+
+bryce: none of the quirks you are mentioning are actually working in qemu. The relevant part of dexconf that detects kvm is in line 271:
+
+QEMU_KVM=$(grep "QEMU Virtual CPU" /proc/cpuinfo || true)
+if [ -n "$QEMU_KVM" ]; then
+    DEVICE_DRIVER="cirrus"
+fi
+
+Only kvm reports that in /proc/cpuinfo. qemu reports "Pentium II (Katmai)", which is the very reason why the hardy live cd works in kvm but not in qemu.
+
+TBH, I'd suggest just stripping the kvm quirks out of dexconf in intrepid right now and seeing if a daily live CD comes up. I'm pretty confident that it does.
+
+Okay, I've stripped those all out of dexconf and repackaged xorg accordingly.  Could you please test and verify it works ok?
+
+http://people.ubuntu.com/~bryce/Testing/xorg/
+
+as asked on irc: can you provide a .deb for x11-common there as well? (iirc dexconf is in there... at least it's not in the .debs you put up there ;))
+
+Just tested kvm with the hardy CD, installing xserver-xorg-video-cirrus from intrepid, then x11-common, and rerunning dexconf.
+gdm comes up; however, it then uses a smaller resolution by default.
+
+I'll attach xorg.conf (as supplied by the dexconf run), and Xorg.0.log (from the start with the new driver/new xorg.log) in a minute.
+
+
+
+
+
+what's the status of this? The kvm environment (still) doesn't seem to autoconfigure too well, which is why the Modes and HorizSync/VertRefresh are hardcoded.
+
+I just tested this, and GNOME comes up just fine without xorg.conf; however, the screen resolution is a sad little 800x600.  It's 1024x768 with xorg.conf.
+
+:-Dustin
+
+kirkland confirmed that kvm still does not work properly without these quirks, so they cannot be dropped at this time.  Feel free to reopen the xorg task if this situation changes, but moving the issue to kvm for now.
+
+Hi,
+
+to fix the kvm issue, kvm needs to simulate a monitor attached to the cirrus card, together with an EDID eeprom delivering the correct data for monitor modes. The simulated cirrus card should provide this via register sr8. A sample implementation can be found at www.faumachine.org (cvs checkout, see node-pc/simulator/chip_cirrus_gd5446.c -- based on qemu -- and lib/sig_i2c_bus.c, and finally node-pc/monitor.c for the EDID contents) for details on how to do it.
+
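+As a rough illustration of the framing involved, here is a minimal sketch of building a skeleton EDID 1.x block (only the fixed 8-byte header, the version/revision bytes and the checksum rule are from the VESA EDID spec; a real implementation would fill in actual vendor and mode data):
+
+#include <stdint.h>
+#include <string.h>
+
+/* Fill buf[128] with a skeleton EDID block: magic header plus a
+ * checksum byte chosen so all 128 bytes sum to 0 modulo 256. */
+static void edid_skeleton(uint8_t buf[128])
+{
+    static const uint8_t header[8] =
+        { 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00 };
+    unsigned int sum = 0;
+    int i;
+
+    memset(buf, 0, 128);
+    memcpy(buf, header, sizeof(header));
+    buf[18] = 1;    /* EDID structure version 1 ... */
+    buf[19] = 3;    /* ... revision 3 */
+    for (i = 0; i < 127; i++) {
+        sum += buf[i];
+    }
+    buf[127] = (uint8_t)(-sum);  /* makes the byte sum 0 mod 256 */
+}
+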
+Feel free to ask if anything is unclear.
+
+Cheers,
+     Stefan.
+
+Hey Stefan-
+
+There was actually some discussion upstream among KVM and Xorg developers.  I think they determined that this was a 'won't fix' situation, but I need to check that.  Let me track that down...
+
+
+:-Dustin
+
+As a workaround, the driver itself can force the resolution to a certain degree.  This is covered in bug #349331
+
+Isn't the issue here that the emulated card has too little video memory, forcing 800x600 when the driver selects the default 24bpp depth?
+
+This is an issue with some very old real hardware too.
+
+I guess X could account for that, but due to its architecture every driver would likely need a separate check for this condition (S3, cirrus, and any other driver that could possibly be used with such a low-memory card).
+
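+To make the arithmetic concrete (with illustrative numbers; the exact emulated VRAM size depends on the QEMU version): a 1024x768 mode at the default 24bpp depth, stored as 32 bits per pixel, needs 1024 * 768 * 4 = 3 MiB of framebuffer, while 800x600 needs only about 1.9 MiB, so a card with 2 MiB of video memory can manage 800x600 but not 1024x768 at that depth.
+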
+Since cirrus is not the preferred graphics card in QEMU anymore, and there hasn't been any update on this within the last four years, I think nobody will take care of this ticket anymore, so I'm setting the status to "Won't fix" now.
+
diff --git a/results/classifier/108/semantic/2582 b/results/classifier/108/semantic/2582
new file mode 100644
index 000000000..fa38da58d
--- /dev/null
+++ b/results/classifier/108/semantic/2582
@@ -0,0 +1,38 @@
+semantic: 0.962
+device: 0.784
+graphic: 0.771
+PID: 0.707
+permissions: 0.687
+vnc: 0.674
+KVM: 0.625
+debug: 0.546
+network: 0.506
+performance: 0.504
+socket: 0.498
+files: 0.355
+boot: 0.327
+other: 0.076
+
+CR4.VMX leaks from L1 into L2 on Intel VMX
+Description of problem:
+In a nested virtualization setting, `savevm` can cause CR4 bits to leak from L1 into L2. This causes general-protection faults in certain guests.
+
+The L2 guest executes this code:
+
+```
+mov rax, cr4  ; Get CR4​
+mov rcx, rax  ; Remember the old value​
+btc rax, 7    ; Toggle CR4.PGE​
+mov cr4, rax  ; #GP! <- Shouldn't happen!​
+mov cr4, rcx  ; Restore old value
+```
+
+If the guest code is interrupted at the right time (e.g. via `savevm`), QEMU marks CR4 dirty while the guest is executing L2 code. Due to really complicated KVM semantics, this results in L1 CR4 bits (VMXE) leaking into the L2 guest, and the L2 dies with a #GP:
+
+Instead of the expected CR4 value, the L2 guest reads a value with VMXE set. When it tries to write this back into CR4, this triggers the general protection fault.
+Steps to reproduce:
+This is only an issue on **Intel** systems.
+
+Additional information:
+See also this discussion where we discussed a (flawed) approach to fixing this in KVM: https://lore.kernel.org/lkml/Zh6WlOB8CS-By3DQ@google.com/t/
diff --git a/results/classifier/108/semantic/714 b/results/classifier/108/semantic/714
new file mode 100644
index 000000000..2a08ec0b8
--- /dev/null
+++ b/results/classifier/108/semantic/714
@@ -0,0 +1,58 @@
+semantic: 0.914
+graphic: 0.775
+performance: 0.751
+device: 0.697
+vnc: 0.667
+permissions: 0.639
+socket: 0.476
+network: 0.387
+PID: 0.370
+debug: 0.349
+files: 0.302
+boot: 0.277
+other: 0.232
+KVM: 0.188
+
+Command line arguments are not passed correctly with user-space semihosting
+Description of problem:
+The emulated process always receives a value of 1 for `argc`, with `argv[0]` returning seemingly random characters (in the Ubuntu-packaged qemu 5.2) that nevertheless correlate with the command-line input (output below from a master build of qemu 6.1):
+```
+$ qemu-arm -cpu cortex-m7 ./a.out 123 test
+argc: 1
+argv: 
+ - @@@
+
+$ qemu-arm -cpu cortex-m7 ./a.out 
+argc: 1
+argv:
+ [0] @
+```
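+
+For reference, the command line is supposed to reach the program via the semihosting SYS_GET_CMDLINE (0x15) call, which the rdimon startup code issues under the hood. A minimal sketch of issuing it by hand (assuming the Thumb/M-profile `bkpt 0xab` semihosting trap; `get_cmdline()` is a hypothetical helper, not part of the toolchain):
+
+```
+/* Ask the semihosting host for the command line. Returns 0 on success;
+ * the host writes the full command line into block.buf. */
+static int get_cmdline(char *buf, int len) {
+    struct { char *buf; int len; } block = { buf, len };
+    register int op asm("r0") = 0x15;   /* SYS_GET_CMDLINE */
+    register void *arg asm("r1") = &block;
+    asm volatile("bkpt 0xab" : "+r"(op) : "r"(arg) : "memory");
+    return op;                          /* r0: 0 == success */
+}
+```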
+Steps to reproduce:
+1. Compile the following program with [ARM embedded toolchain](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm/downloads):
+```cpp
+#include <iostream>
+
+int main(int argc, char* argv[]) {
+	std::cout << "argc: " << argc << "\n";
+	std::cout << "argv: \n";
+
+	for (int i = 0; i < argc; i++)
+		std::cout << " [" << i << "] " << argv[i] << "\n";
+	return 0;
+}
+```
+
+```
+$ $CXX --version
+arm-none-eabi-g++ (GNU Arm Embedded Toolchain 10-2020-q4-major) 10.2.1 20201103 (release)
+Copyright (C) 2020 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+$ $CXX main.cpp --specs=rdimon.specs -mcpu=cortex-m7
+```
+
+2. Run in user-space (semihosted):
+```
+$ qemu-arm -cpu cortex-m7 ./a.out 
+```
diff --git a/results/classifier/108/semantic/754635 b/results/classifier/108/semantic/754635
new file mode 100644
index 000000000..df155efcf
--- /dev/null
+++ b/results/classifier/108/semantic/754635
@@ -0,0 +1,85 @@
+semantic: 0.926
+graphic: 0.919
+device: 0.919
+debug: 0.918
+other: 0.907
+socket: 0.876
+PID: 0.873
+performance: 0.854
+permissions: 0.821
+files: 0.817
+network: 0.802
+boot: 0.750
+KVM: 0.737
+vnc: 0.726
+
+-d option outputs wrong info about sections
+
+For example, after running ./qemu-i386 -d in_asm /bin/ls from the 0.14.0 release, I received this qemu.log file:
+$ cat /tmp/qemu.log | grep -A7 guest
+Relocating guest address space from 0x08048000 to 0x8048000
+guest_base  0x0
+start    end      size     prot
+00048000-0005f000 00017000 r-x
+0005f000-00069000 0000a000 rw-
+00040000-00041000 00001000 ---
+00041000-00041800 00000800 rw-
+00041800-0005d800 0001c000 r-x
+0005d800-0005f800 00002000 rw-
+
+But the same command with the 0.12.5 release outputs this:
+$ cat /tmp/qemu.log | grep -A7 guest
+guest_base  0x0
+start    end      size     prot
+00f38000-00f39000 00001000 ---
+08048000-0805f000 00017000 r-x
+0805f000-08061000 00002000 rw-
+40000000-40080000 00080000 rw-
+40080000-40081000 00001000 ---
+40081000-4009d000 0001c000 r-x
+
+It looks correct.
+I saw similar differences with qemu-microblaze as well.
+
+After comparing the 0.12.5 and 0.14.0 releases I found this difference in exec.c:
+in 0.12.5:
+end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
+
+in 0.14.0:
+int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
+
+V_L1_SHIFT in my case is 10, but 32 - L1_BITS is 22
+
+I made this change:
+$ diff -up qemu-0.14.0/exec.c exec.c
+--- qemu-0.14.0/exec.c	2011-04-08 17:26:00.524464002 +0400
++++ exec.c	2011-04-08 17:26:09.800464003 +0400
+@@ -2340,7 +2340,7 @@ int walk_memory_regions(void *priv, walk
+     data.prot = 0;
+ 
+     for (i = 0; i < V_L1_SIZE; i++) {
+-        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
++        int rc = walk_memory_regions_1(&data, (abi_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
+                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
+         if (rc != 0) {
+             return rc;
+
+After this, the output looks correct.
+
+I don't know the code base well, and I think a more general correction may be needed.
+Host system: linux i386
+
+Hi,
+
+Thanks for reporting this issue, and for the investigation. I don't really understand the rationale for the change, so I can't help much.
+
+This change appears to be from 5cd2c5b6ad75c46d40118ac67c0c09d4e7930a65. I think input from Richard Henderson (the author of the change) would be very useful.
+
+Brad
+
+
+Looking through old bug tickets... is this still an issue with the latest version of QEMU? Or could we close this ticket nowadays?
+
+
+[Expired for QEMU because there has been no activity for 60 days.]
+