other: 0.954
vnc: 0.937
KVM: 0.924
permissions: 0.915
performance: 0.913
semantic: 0.911
graphic: 0.897
debug: 0.890
device: 0.888
PID: 0.866
files: 0.858
socket: 0.853
network: 0.834
boot: 0.810

sorecvfrom freezes guest

QEMU release 3.1.0; the guest is running Windows 10.

The apparent deadlock happens in prepare_mmio_access() when the guest tries to read MMIO address 0xFEBC00C0. That address belongs to the "Intel(R) PRO/1000 MT Network Connection" device, as shown in the Windows 10 Device Manager. A quick analysis points at this code in prepare_mmio_access():

static bool prepare_mmio_access(MemoryRegion *mr)
{
    /* Check whether this thread already holds the global (iothread) lock. */
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    /* If we do not hold it and the region requires global locking, take it now. */
    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
...

qemu_mutex_iothread_locked() and qemu_mutex_lock_iothread() do not form a single atomic operation, so the global mutex can be taken by the main loop (util/main-loop.c, line 236) after qemu_mutex_iothread_locked() has returned but before qemu_mutex_lock_iothread() acquires it.

GDB backtrace analysis follows:

(gdb) thread 9
[Switching to thread 9 (Thread 0x7fffe6b7c700 (LWP 14405))]
#0  __lll_lock_wait () at ../sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:103
103	../sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: No such file or directory.
(gdb) bt
#0  __lll_lock_wait () at ../sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:103
#1  0x00007ffff748c714 in __GI___pthread_mutex_lock (mutex=0x555556518340 <qemu_global_mutex>) at ../nptl/pthread_mutex_lock.c:80
#2  0x0000555555d8ae7b in qemu_mutex_lock_impl (mutex=0x555556518340 <qemu_global_mutex>, 
    file=0x555555e7a298 "/home/vic/Projects/tc-qemu/exec.c", line=3197) at util/qemu-thread-posix.c:66
#3  0x000055555584cf66 in qemu_mutex_lock_iothread_impl (file=0x555555e7a298 "/home/vic/Projects/tc-qemu/exec.c", line=3197)
    at /home/vic/Projects/tc-qemu/cpus.c:1849
#4  0x0000555555804ccc in prepare_mmio_access (mr=0x5555575b9000) at /home/vic/Projects/tc-qemu/exec.c:3197
#5  0x0000555555804f90 in flatview_read_continue (fv=0x7fffd86f2390, addr=4273733824, attrs=..., buf=0x7ffff7fcc028 "ۜ/o", len=4, 
    addr1=192, l=4, mr=0x5555575b9000) at /home/vic/Projects/tc-qemu/exec.c:3292
#6  0x0000555555805136 in flatview_read (fv=0x7fffd86f2390, addr=4273733824, attrs=..., buf=0x7ffff7fcc028 "ۜ/o", len=4)
    at /home/vic/Projects/tc-qemu/exec.c:3332
#7  0x00005555558051aa in address_space_read_full (as=0x555556517bc0 <address_space_memory>, addr=4273733824, attrs=..., 
    buf=0x7ffff7fcc028 "ۜ/o", len=4) at /home/vic/Projects/tc-qemu/exec.c:3345
#8  0x0000555555805281 in address_space_rw (as=0x555556517bc0 <address_space_memory>, addr=4273733824, attrs=..., 
    buf=0x7ffff7fcc028 "ۜ/o", len=4, is_write=false) at /home/vic/Projects/tc-qemu/exec.c:3375
#9  0x0000555555886199 in kvm_cpu_exec (cpu=0x5555566d1800) at /home/vic/Projects/tc-qemu/accel/kvm/kvm-all.c:2031
#10 0x000055555584c001 in qemu_kvm_cpu_thread_fn (arg=0x5555566d1800) at /home/vic/Projects/tc-qemu/cpus.c:1281
#11 0x0000555555d8b9f7 in qemu_thread_start (args=0x5555566f3c00) at util/qemu-thread-posix.c:498
#12 0x00007ffff7489fa3 in start_thread (arg=<optimized out>) at pthread_create.c:486
#13 0x00007ffff73ba80f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
(gdb) f 2
#2  0x0000555555d8ae7b in qemu_mutex_lock_impl (mutex=0x555556518340 <qemu_global_mutex>, 
    file=0x555555e7a298 "/home/vic/Projects/tc-qemu/exec.c", line=3197) at util/qemu-thread-posix.c:66
66	    err = pthread_mutex_lock(&mutex->lock);
(gdb) p mutex
$1 = (QemuMutex *) 0x555556518340 <qemu_global_mutex>
(gdb) p *mutex
$2 = {lock = pthread_mutex_t = {Type = Normal, Status = Acquired, possibly with waiters, Owner ID = 14393, Robust = No, Shared = No, 
    Protocol = None}, file = 0x555555fc2a64 "util/main-loop.c", line = 236, initialized = true}
(gdb) thread 1
[Switching to thread 1 (Thread 0x7ffff495cc00 (LWP 14393))]
#0  0x00007ffff7493832 in __libc_recvfrom (fd=147, buf=0x7fffd81ce0cc, len=1500, flags=0, addr=..., addrlen=0x7fffffffd9a8)
    at ../sysdeps/unix/sysv/linux/recvfrom.c:27
27	../sysdeps/unix/sysv/linux/recvfrom.c: No such file or directory.
(gdb) bt
#0  0x00007ffff7493832 in __libc_recvfrom (fd=147, buf=0x7fffd81ce0cc, len=1500, flags=0, addr=..., addrlen=0x7fffffffd9a8)
    at ../sysdeps/unix/sysv/linux/recvfrom.c:27
#1  0x0000555555c18bf3 in sorecvfrom (so=0x7fffd818fe60) at slirp/socket.c:584
#2  0x0000555555c146f9 in slirp_pollfds_poll (pollfds=0x555556655470, select_error=0) at slirp/slirp.c:753
#3  0x0000555555d8675b in main_loop_wait (nonblocking=0) at util/main-loop.c:498
#4  0x00005555559d2194 in main_loop () at vl.c:1893
#5  0x00005555559d9d93 in main (argc=16, argv=0x7fffffffdff8, envp=0x7fffffffe080) at vl.c:4692

To fix the issue, I think we need something like a qemu_mutex_trylock_iothread() call that combines qemu_mutex_iothread_locked() and qemu_mutex_lock_iothread() into a single atomic operation. Any comments?
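For illustration, here is the shape such a helper could take if it sat next to the existing lock helpers in cpus.c. This is only a sketch of the proposal, not existing QEMU API; qemu_mutex_trylock(), qemu_global_mutex and the per-thread iothread_locked flag are the pieces already used by qemu_mutex_lock_iothread_impl():

/* Hypothetical helper: check "do I hold the BQL?" and acquire it in one
 * call, so a caller can never be preempted between the two steps. */
bool qemu_mutex_trylock_iothread(void)
{
    if (qemu_mutex_iothread_locked()) {
        return false;                /* this thread already holds the BQL */
    }
    if (qemu_mutex_trylock(&qemu_global_mutex) == 0) {
        iothread_locked = true;      /* mirror qemu_mutex_lock_iothread() */
        return true;
    }
    return false;                    /* BQL busy; the caller decides how to wait */
}

Whether "already held" should be reported differently from "busy" is exactly the kind of detail that would need review on the list.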

Found out that the guest freeze is due to a blocking UDP read call rather than a deadlock. I have rephrased the bug description and sent a patch to qemu-devel.

Fix committed in slirp/src/socket.c
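I am not reproducing the committed change here, but the direction is to make sure the datagram read can never sleep while the main loop holds the global mutex. A minimal sketch of that idea, using the variables visible in the sorecvfrom() frame above (an illustration only, not the committed patch):

/* Inside sorecvfrom() in slirp/socket.c: request a non-blocking read, so
 * that a datagram which select() announced but which never materializes
 * cannot stall the main loop while it holds the global mutex. */
ssize_t n = recvfrom(so->s, buf, len, MSG_DONTWAIT,
                     (struct sockaddr *)&addr, &addrlen);
if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
    return;    /* nothing to read after all; retry on the next poll */
}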