path: root/results/classifier/zero-shot/016/x86
Diffstat (limited to 'results/classifier/zero-shot/016/x86')
-rw-r--r--  results/classifier/zero-shot/016/x86/17743720   798
-rw-r--r--  results/classifier/zero-shot/016/x86/28596630   140
-rw-r--r--  results/classifier/zero-shot/016/x86/55367348   559
-rw-r--r--  results/classifier/zero-shot/016/x86/57756589  1448
-rw-r--r--  results/classifier/zero-shot/016/x86/92957605   445
-rw-r--r--  results/classifier/zero-shot/016/x86/99674399   175
6 files changed, 3565 insertions, 0 deletions
diff --git a/results/classifier/zero-shot/016/x86/17743720 b/results/classifier/zero-shot/016/x86/17743720
new file mode 100644
index 000000000..0997166aa
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/17743720
@@ -0,0 +1,798 @@
+x86: 0.908
+hypervisor: 0.758
+virtual: 0.630
+debug: 0.447
+operating system: 0.100
+files: 0.081
+performance: 0.076
+PID: 0.049
+i386: 0.039
+TCG: 0.028
+assembly: 0.026
+register: 0.025
+user-level: 0.024
+VMM: 0.009
+semantic: 0.006
+kernel: 0.006
+ppc: 0.004
+device: 0.003
+KVM: 0.003
+architecture: 0.002
+alpha: 0.001
+graphic: 0.001
+network: 0.001
+arm: 0.001
+socket: 0.001
+permissions: 0.001
+risc-v: 0.001
+boot: 0.001
+mistranslation: 0.001
+peripherals: 0.001
+vnc: 0.000
+
+[Qemu-devel] [BUG] living migrate vm pause forever
+
+Sometimes a live-migrated VM pauses forever and the migration job stops, but
+only with very small probability; I can't reproduce it.
+qemu waits on a semaphore for libvirt to send "migrate continue", while libvirt
+waits on a semaphore for qemu to report that the VM has paused.
+
+The stack traces follow:
+qemu:
+Thread 6 (Thread 0x7f50445f3700 (LWP 18120)):
+#0  0x00007f504b84d670 in sem_wait () from /lib/x86_64-linux-gnu/libpthread.so.0
+#1  0x00005574eda1e164 in qemu_sem_wait (sem=sem@entry=0x5574ef6930e0) at 
+qemu-2.12/util/qemu-thread-posix.c:322
+#2  0x00005574ed8dd72e in migration_maybe_pause (s=0x5574ef692f50, 
+current_active_state=0x7f50445f2ae4, new_state=10)
+    at qemu-2.12/migration/migration.c:2106
+#3  0x00005574ed8df51a in migration_completion (s=0x5574ef692f50) at 
+qemu-2.12/migration/migration.c:2137
+#4  migration_iteration_run (s=0x5574ef692f50) at 
+qemu-2.12/migration/migration.c:2311
+#5  migration_thread (opaque=0x5574ef692f50) 
+at qemu-2.12/migration/migration.c:2415
+#6  0x00007f504b847184 in start_thread () from 
+/lib/x86_64-linux-gnu/libpthread.so.0
+#7  0x00007f504b574bed in clone () from /lib/x86_64-linux-gnu/libc.so.6
+
+libvirt:
+Thread 95 (Thread 0x7fdb82ffd700 (LWP 28775)):
+#0  0x00007fdd177dc404 in pthread_cond_wait@@GLIBC_2.3.2 () from 
+/lib/x86_64-linux-gnu/libpthread.so.0
+#1  0x00007fdd198c3b07 in virCondWait (c=0x7fdbc4003000, m=0x7fdbc4002f30) at 
+../../../src/util/virthread.c:252
+#2  0x00007fdd198f36d2 in virDomainObjWait (vm=0x7fdbc4002f20) at 
+../../../src/conf/domain_conf.c:3303
+#3  0x00007fdd09ffaa44 in qemuMigrationRun (driver=0x7fdd000037b0, 
+vm=0x7fdbc4002f20, persist_xml=0x0,
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss 
+</hostname>\n  
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+flags=777,
+    resource=0, spec=0x7fdb82ffc670, dconn=0x0, graphicsuri=0x0, 
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990, 
+migParams=0x7fdb82ffc900)
+    at ../../../src/qemu/qemu_migration.c:3937
+#4  0x00007fdd09ffb26a in doNativeMigrate (driver=0x7fdd000037b0, 
+vm=0x7fdbc4002f20, persist_xml=0x0, uri=0x7fdb780073a0 
+"tcp://172.16.202.17:49152",
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss</hostname>\n 
+ <hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+flags=777,
+    resource=0, dconn=0x0, graphicsuri=0x0, nmigrate_disks=0, 
+migrate_disks=0x0, compression=0x7fdb78007990, migParams=0x7fdb82ffc900)
+    at ../../../src/qemu/qemu_migration.c:4118
+#5  0x00007fdd09ffd808 in qemuMigrationPerformPhase (driver=0x7fdd000037b0, 
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, persist_xml=0x0,
+    uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0, 
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990, 
+migParams=0x7fdb82ffc900,
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss</hostname>\n 
+ <hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+flags=777,
+    resource=0) at ../../../src/qemu/qemu_migration.c:5030
+#6  0x00007fdd09ffdbb5 in qemuMigrationPerform (driver=0x7fdd000037b0, 
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, xmlin=0x0, persist_xml=0x0, 
+dconnuri=0x0,
+    uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0, 
+listenAddress=0x0, nmigrate_disks=0, migrate_disks=0x0, nbdPort=0, 
+compression=0x7fdb78007990,
+    migParams=0x7fdb82ffc900,
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss</hostname>\n 
+ <hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+flags=777,
+    dname=0x0, resource=0, v3proto=true) at 
+../../../src/qemu/qemu_migration.c:5124
+#7  0x00007fdd0a054725 in qemuDomainMigratePerform3 (dom=0x7fdb78007b00, 
+xmlin=0x0,
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss</hostname>\n 
+ <hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+dconnuri=0x0,
+    uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777, dname=0x0, 
+resource=0) at ../../../src/qemu/qemu_driver.c:12996
+#8  0x00007fdd199ad0f0 in virDomainMigratePerform3 (domain=0x7fdb78007b00, 
+xmlin=0x0,
+    cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n  
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss</hostname>\n 
+ <hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"..., 
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8, 
+dconnuri=0x0,
+    uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777, dname=0x0, 
+bandwidth=0) at ../../../src/libvirt-domain.c:4698
+#9  0x000055d13923a939 in remoteDispatchDomainMigratePerform3 
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620, 
+rerr=0x7fdb82ffcbc0,
+    args=0x7fdb7800b220, ret=0x7fdb78021e90) at ../../../daemon/remote.c:4528
+#10 0x000055d13921a043 in remoteDispatchDomainMigratePerform3Helper 
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620, 
+rerr=0x7fdb82ffcbc0,
+    args=0x7fdb7800b220, ret=0x7fdb78021e90) at 
+../../../daemon/remote_dispatch.h:7944
+#11 0x00007fdd19a260b4 in virNetServerProgramDispatchCall (prog=0x55d13af98b50, 
+server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620)
+    at ../../../src/rpc/virnetserverprogram.c:436
+#12 0x00007fdd19a25c17 in virNetServerProgramDispatch (prog=0x55d13af98b50, 
+server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620)
+    at ../../../src/rpc/virnetserverprogram.c:307
+#13 0x000055d13925933b in virNetServerProcessMsg (srv=0x55d13af90e60, 
+client=0x55d13b0156f0, prog=0x55d13af98b50, msg=0x55d13afbf620)
+    at ../../../src/rpc/virnetserver.c:148
+
+* Yuchen (address@hidden) wrote:
+>
+Sometimes a live-migrated VM pauses forever and the migration job stops, but
+>
+only with very small probability; I can't reproduce it.
+>
+qemu waits on a semaphore for libvirt to send "migrate continue", while libvirt
+>
+waits on a semaphore for qemu to report that the VM has paused.
+Hi,
+  I've copied in Jiri Denemark from libvirt.
+Can you confirm exactly which qemu and libvirt versions you're using,
+please?
+
+>
+The stack traces follow:
+>
+qemu:
+>
+Thread 6 (Thread 0x7f50445f3700 (LWP 18120)):
+>
+#0  0x00007f504b84d670 in sem_wait () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#1  0x00005574eda1e164 in qemu_sem_wait (sem=sem@entry=0x5574ef6930e0) at
+>
+qemu-2.12/util/qemu-thread-posix.c:322
+>
+#2  0x00005574ed8dd72e in migration_maybe_pause (s=0x5574ef692f50,
+>
+current_active_state=0x7f50445f2ae4, new_state=10)
+>
+at qemu-2.12/migration/migration.c:2106
+>
+#3  0x00005574ed8df51a in migration_completion (s=0x5574ef692f50) at
+>
+qemu-2.12/migration/migration.c:2137
+>
+#4  migration_iteration_run (s=0x5574ef692f50) at
+>
+qemu-2.12/migration/migration.c:2311
+>
+#5  migration_thread (opaque=0x5574ef692f50)
+>
+at qemu-2.12/migration/migration.c:2415
+>
+#6  0x00007f504b847184 in start_thread () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#7  0x00007f504b574bed in clone () from /lib/x86_64-linux-gnu/libc.so.6
+In migration_maybe_pause we have:
+
+    migrate_set_state(&s->state, *current_active_state,
+                      MIGRATION_STATUS_PRE_SWITCHOVER);
+    qemu_sem_wait(&s->pause_sem);
+    migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
+                      new_state);
+
+the line numbers don't match my 2.12.0 checkout, so I guess it's stuck at
+that qemu_sem_wait.
+
+QEMU must have sent the switch to PRE_SWITCHOVER and that should have
+sent an event to libvirt, and libvirt should notice that - I'm
+not sure how to tell whether libvirt has seen that event yet or not?
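+
+One way to check from outside (a sketch, assuming the pause-before-switchover
+support from QEMU 2.11+; the exact field names vary by version) is to query
+the migration status over QMP and, if it really is parked in pre-switchover,
+nudge it forward manually:
+
+    -> { "execute": "query-migrate" }
+    <- { "return": { "status": "pre-switchover" } }
+
+    -> { "execute": "migrate-continue",
+         "arguments": { "state": "pre-switchover" } }
+    <- { "return": {} }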
+
+Dave
+
+>
+libvirt:
+>
+Thread 95 (Thread 0x7fdb82ffd700 (LWP 28775)):
+>
+#0  0x00007fdd177dc404 in pthread_cond_wait@@GLIBC_2.3.2 () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#1  0x00007fdd198c3b07 in virCondWait (c=0x7fdbc4003000, m=0x7fdbc4002f30) at
+>
+../../../src/util/virthread.c:252
+>
+#2  0x00007fdd198f36d2 in virDomainObjWait (vm=0x7fdbc4002f20) at
+>
+../../../src/conf/domain_conf.c:3303
+>
+#3  0x00007fdd09ffaa44 in qemuMigrationRun (driver=0x7fdd000037b0,
+>
+vm=0x7fdbc4002f20, persist_xml=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss
+>
+</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+resource=0, spec=0x7fdb82ffc670, dconn=0x0, graphicsuri=0x0,
+>
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900)
+>
+at ../../../src/qemu/qemu_migration.c:3937
+>
+#4  0x00007fdd09ffb26a in doNativeMigrate (driver=0x7fdd000037b0,
+>
+vm=0x7fdbc4002f20, persist_xml=0x0, uri=0x7fdb780073a0
+>
+"tcp://172.16.202.17:49152",
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+resource=0, dconn=0x0, graphicsuri=0x0, nmigrate_disks=0,
+>
+migrate_disks=0x0, compression=0x7fdb78007990, migParams=0x7fdb82ffc900)
+>
+at ../../../src/qemu/qemu_migration.c:4118
+>
+#5  0x00007fdd09ffd808 in qemuMigrationPerformPhase (driver=0x7fdd000037b0,
+>
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, persist_xml=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0,
+>
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+resource=0) at ../../../src/qemu/qemu_migration.c:5030
+>
+#6  0x00007fdd09ffdbb5 in qemuMigrationPerform (driver=0x7fdd000037b0,
+>
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, xmlin=0x0, persist_xml=0x0,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0,
+>
+listenAddress=0x0, nmigrate_disks=0, migrate_disks=0x0, nbdPort=0,
+>
+compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+dname=0x0, resource=0, v3proto=true) at
+>
+../../../src/qemu/qemu_migration.c:5124
+>
+#7  0x00007fdd0a054725 in qemuDomainMigratePerform3 (dom=0x7fdb78007b00,
+>
+xmlin=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777, dname=0x0,
+>
+resource=0) at ../../../src/qemu/qemu_driver.c:12996
+>
+#8  0x00007fdd199ad0f0 in virDomainMigratePerform3 (domain=0x7fdb78007b00,
+>
+xmlin=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777, dname=0x0,
+>
+bandwidth=0) at ../../../src/libvirt-domain.c:4698
+>
+#9  0x000055d13923a939 in remoteDispatchDomainMigratePerform3
+>
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620,
+>
+rerr=0x7fdb82ffcbc0,
+>
+args=0x7fdb7800b220, ret=0x7fdb78021e90) at ../../../daemon/remote.c:4528
+>
+#10 0x000055d13921a043 in remoteDispatchDomainMigratePerform3Helper
+>
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620,
+>
+rerr=0x7fdb82ffcbc0,
+>
+args=0x7fdb7800b220, ret=0x7fdb78021e90) at
+>
+../../../daemon/remote_dispatch.h:7944
+>
+#11 0x00007fdd19a260b4 in virNetServerProgramDispatchCall
+>
+(prog=0x55d13af98b50, server=0x55d13af90e60, client=0x55d13b0156f0,
+>
+msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserverprogram.c:436
+>
+#12 0x00007fdd19a25c17 in virNetServerProgramDispatch (prog=0x55d13af98b50,
+>
+server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserverprogram.c:307
+>
+#13 0x000055d13925933b in virNetServerProcessMsg (srv=0x55d13af90e60,
+>
+client=0x55d13b0156f0, prog=0x55d13af98b50, msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserver.c:148
+--
+Dr. David Alan Gilbert / address@hidden / Manchester, UK
+
+In migration_maybe_pause we have:
+
+    migrate_set_state(&s->state, *current_active_state,
+                      MIGRATION_STATUS_PRE_SWITCHOVER);
+    qemu_sem_wait(&s->pause_sem);
+    migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
+                      new_state);
+
+the line numbers don't match my 2.12.0 checkout, so I guess it's stuck at
+that qemu_sem_wait.
+
+QEMU must have sent the switch to PRE_SWITCHOVER and that should have sent an 
+event to libvirt, and libvirt should notice that - I'm not sure how to tell 
+whether libvirt has seen that event yet or not?
+
+
+Thank you for your attention.
+Yes, you are right, QEMU waits on the semaphore at this place.
+I use qemu-2.12.1 and libvirt-4.0.0.
+Because I added some debug code, the line numbers don't match upstream qemu.
+
+-----Original Message-----
+From: Dr. David Alan Gilbert [mailto:address@hidden]
+Sent: 2019-08-21 19:13
+To: yuchen (Cloud) <address@hidden>; address@hidden
+Cc: address@hidden
+Subject: Re: [Qemu-devel] [BUG] living migrate vm pause forever
+
+* Yuchen (address@hidden) wrote:
+>
+Sometimes a live-migrated VM pauses forever and the migration job stops, but
+>
+only with very small probability; I can't reproduce it.
+>
+qemu waits on a semaphore for libvirt to send "migrate continue", while libvirt
+>
+waits on a semaphore for qemu to report that the VM has paused.
+Hi,
+  I've copied in Jiri Denemark from libvirt.
+Can you confirm exactly which qemu and libvirt versions you're using, please?
+
+>
+The stack traces follow:
+>
+qemu:
+>
+Thread 6 (Thread 0x7f50445f3700 (LWP 18120)):
+>
+#0  0x00007f504b84d670 in sem_wait () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#1  0x00005574eda1e164 in qemu_sem_wait (sem=sem@entry=0x5574ef6930e0)
+>
+at qemu-2.12/util/qemu-thread-posix.c:322
+>
+#2  0x00005574ed8dd72e in migration_maybe_pause (s=0x5574ef692f50,
+>
+current_active_state=0x7f50445f2ae4, new_state=10)
+>
+at qemu-2.12/migration/migration.c:2106
+>
+#3  0x00005574ed8df51a in migration_completion (s=0x5574ef692f50) at
+>
+qemu-2.12/migration/migration.c:2137
+>
+#4  migration_iteration_run (s=0x5574ef692f50) at
+>
+qemu-2.12/migration/migration.c:2311
+>
+#5  migration_thread (opaque=0x5574ef692f50)
+>
+at qemu-2.12/migration/migration.c:2415
+>
+#6  0x00007f504b847184 in start_thread () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#7  0x00007f504b574bed in clone () from
+>
+/lib/x86_64-linux-gnu/libc.so.6
+In migration_maybe_pause we have:
+
+    migrate_set_state(&s->state, *current_active_state,
+                      MIGRATION_STATUS_PRE_SWITCHOVER);
+    qemu_sem_wait(&s->pause_sem);
+    migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
+                      new_state);
+
+the line numbers don't match my 2.12.0 checkout, so I guess it's stuck at
+that qemu_sem_wait.
+
+QEMU must have sent the switch to PRE_SWITCHOVER and that should have sent an 
+event to libvirt, and libvirt should notice that - I'm not sure how to tell 
+whether libvirt has seen that event yet or not?
+
+Dave
+
+>
+libvirt:
+>
+Thread 95 (Thread 0x7fdb82ffd700 (LWP 28775)):
+>
+#0  0x00007fdd177dc404 in pthread_cond_wait@@GLIBC_2.3.2 () from
+>
+/lib/x86_64-linux-gnu/libpthread.so.0
+>
+#1  0x00007fdd198c3b07 in virCondWait (c=0x7fdbc4003000,
+>
+m=0x7fdbc4002f30) at ../../../src/util/virthread.c:252
+>
+#2  0x00007fdd198f36d2 in virDomainObjWait (vm=0x7fdbc4002f20) at
+>
+../../../src/conf/domain_conf.c:3303
+>
+#3  0x00007fdd09ffaa44 in qemuMigrationRun (driver=0x7fdd000037b0,
+>
+vm=0x7fdbc4002f20, persist_xml=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n  <hostname>mss
+>
+</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+resource=0, spec=0x7fdb82ffc670, dconn=0x0, graphicsuri=0x0,
+>
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900)
+>
+at ../../../src/qemu/qemu_migration.c:3937
+>
+#4  0x00007fdd09ffb26a in doNativeMigrate (driver=0x7fdd000037b0,
+>
+vm=0x7fdbc4002f20, persist_xml=0x0, uri=0x7fdb780073a0
+>
+"tcp://172.16.202.17:49152",
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n
+>
+<name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0,
+>
+cookieoutlen=0x7fdb82ffcac8, flags=777,
+>
+resource=0, dconn=0x0, graphicsuri=0x0, nmigrate_disks=0,
+>
+migrate_disks=0x0, compression=0x7fdb78007990, migParams=0x7fdb82ffc900)
+>
+at ../../../src/qemu/qemu_migration.c:4118
+>
+#5  0x00007fdd09ffd808 in qemuMigrationPerformPhase (driver=0x7fdd000037b0,
+>
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, persist_xml=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0,
+>
+nmigrate_disks=0, migrate_disks=0x0, compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+resource=0) at ../../../src/qemu/qemu_migration.c:5030
+>
+#6  0x00007fdd09ffdbb5 in qemuMigrationPerform (driver=0x7fdd000037b0,
+>
+conn=0x7fdb500205d0, vm=0x7fdbc4002f20, xmlin=0x0, persist_xml=0x0,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", graphicsuri=0x0,
+>
+listenAddress=0x0, nmigrate_disks=0, migrate_disks=0x0, nbdPort=0,
+>
+compression=0x7fdb78007990,
+>
+migParams=0x7fdb82ffc900,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+flags=777,
+>
+dname=0x0, resource=0, v3proto=true) at
+>
+../../../src/qemu/qemu_migration.c:5124
+>
+#7  0x00007fdd0a054725 in qemuDomainMigratePerform3 (dom=0x7fdb78007b00,
+>
+xmlin=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777,
+>
+dname=0x0, resource=0) at ../../../src/qemu/qemu_driver.c:12996
+>
+#8  0x00007fdd199ad0f0 in virDomainMigratePerform3 (domain=0x7fdb78007b00,
+>
+xmlin=0x0,
+>
+cookiein=0x7fdb780084e0 "<qemu-migration>\n  <name>mss-pl_652</name>\n
+>
+<uuid>1f2b2334-451e-424b-822a-ea10452abb38</uuid>\n
+>
+<hostname>mss</hostname>\n
+>
+<hostuuid>334e344a-4130-4336-5534-323544543642</hostuuid>\n</qemu-migra"...,
+>
+cookieinlen=207, cookieout=0x7fdb82ffcad0, cookieoutlen=0x7fdb82ffcac8,
+>
+dconnuri=0x0,
+>
+uri=0x7fdb780073a0 "tcp://172.16.202.17:49152", flags=777,
+>
+dname=0x0, bandwidth=0) at ../../../src/libvirt-domain.c:4698
+>
+#9  0x000055d13923a939 in remoteDispatchDomainMigratePerform3
+>
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620,
+>
+rerr=0x7fdb82ffcbc0,
+>
+args=0x7fdb7800b220, ret=0x7fdb78021e90) at
+>
+../../../daemon/remote.c:4528
+>
+#10 0x000055d13921a043 in remoteDispatchDomainMigratePerform3Helper
+>
+(server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620,
+>
+rerr=0x7fdb82ffcbc0,
+>
+args=0x7fdb7800b220, ret=0x7fdb78021e90) at
+>
+../../../daemon/remote_dispatch.h:7944
+>
+#11 0x00007fdd19a260b4 in virNetServerProgramDispatchCall
+>
+(prog=0x55d13af98b50, server=0x55d13af90e60, client=0x55d13b0156f0,
+>
+msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserverprogram.c:436
+>
+#12 0x00007fdd19a25c17 in virNetServerProgramDispatch (prog=0x55d13af98b50,
+>
+server=0x55d13af90e60, client=0x55d13b0156f0, msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserverprogram.c:307
+>
+#13 0x000055d13925933b in virNetServerProcessMsg (srv=0x55d13af90e60,
+>
+client=0x55d13b0156f0, prog=0x55d13af98b50, msg=0x55d13afbf620)
+>
+at ../../../src/rpc/virnetserver.c:148
+--
+Dr. David Alan Gilbert / address@hidden / Manchester, UK
+
diff --git a/results/classifier/zero-shot/016/x86/28596630 b/results/classifier/zero-shot/016/x86/28596630
new file mode 100644
index 000000000..4f1cca73b
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/28596630
@@ -0,0 +1,140 @@
+x86: 0.818
+hypervisor: 0.566
+i386: 0.555
+debug: 0.492
+files: 0.270
+operating system: 0.165
+user-level: 0.127
+TCG: 0.124
+register: 0.058
+risc-v: 0.046
+ppc: 0.046
+virtual: 0.044
+arm: 0.042
+VMM: 0.037
+PID: 0.035
+socket: 0.033
+device: 0.026
+network: 0.020
+vnc: 0.016
+alpha: 0.015
+performance: 0.015
+kernel: 0.013
+boot: 0.013
+assembly: 0.011
+peripherals: 0.009
+semantic: 0.009
+architecture: 0.006
+KVM: 0.005
+permissions: 0.004
+mistranslation: 0.003
+graphic: 0.002
+
+[Qemu-devel] [BUG] [low severity] a strange appearance of message involving slirp while doing "empty" make
+
+Folks,
+
+If the qemu tree is already fully built and "make" is attempted, for 3.1 the
+outcome is:
+
+$ make
+        CHK version_gen.h
+$
+
+For 4.0-rc0, the outcome seems to be different:
+
+$ make
+make[1]: Entering directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+make[1]: Nothing to be done for 'all'.
+make[1]: Leaving directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+        CHK version_gen.h
+$
+
+Not sure how significant that is, but I'm reporting it just in case.
+
+Yours,
+Aleksandar
+
+On 20/03/2019 22.08, Aleksandar Markovic wrote:
+>
+Folks,
+>
+>
+If the qemu tree is already fully built and "make" is attempted, for 3.1 the
+>
+outcome is:
+>
+>
+$ make
+>
+CHK version_gen.h
+>
+$
+>
+>
+For 4.0-rc0, the outcome seems to be different:
+>
+>
+$ make
+>
+make[1]: Entering directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+>
+make[1]: Nothing to be done for 'all'.
+>
+make[1]: Leaving directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+>
+CHK version_gen.h
+>
+$
+>
+>
+Not sure how significant that is, but I'm reporting it just in case.
+It's likely because slirp is currently being reworked to become a
+separate project, so the makefiles have been changed a little bit. I
+guess the message will go away again once slirp has become a stand-alone
+library.
+
+ Thomas
+
+On Fri, 22 Mar 2019 at 04:59, Thomas Huth <address@hidden> wrote:
+>
+On 20/03/2019 22.08, Aleksandar Markovic wrote:
+>
+> $ make
+>
+> make[1]: Entering directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+>
+> make[1]: Nothing to be done for 'all'.
+>
+> make[1]: Leaving directory '/home/build/malta-mips64r6/qemu-4.0/slirp'
+>
+>       CHK version_gen.h
+>
+> $
+>
+>
+>
+> Not sure how significant that is, but I'm reporting it just in case.
+>
+>
+It's likely because slirp is currently being reworked to become a
+>
+separate project, so the makefiles have been changed a little bit. I
+>
+guess the message will go away again once slirp has become a stand-alone
+>
+library.
+Well, we'll still need to ship slirp for the foreseeable future...
+
+I think the cause of this is that the rule in the Makefile for
+calling the slirp Makefile is not passing it $(SUBDIR_MAKEFLAGS)
+like all the other recursive make invocations. If we do that
+then we'll suppress the entering/leaving messages for
+non-verbose builds. (Some tweaking will be needed as
+it looks like the slirp makefile has picked an incompatible
+meaning for $BUILD_DIR, which the SUBDIR_MAKEFLAGS will
+also be passing to it.)
+
+thanks
+-- PMM
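+
+For reference, a sketch of what passing the flags could look like in the
+top-level Makefile (the rule name, prerequisites, and BUILD_DIR handling here
+are assumptions; as noted above, slirp's own use of BUILD_DIR would need
+tweaking first):
+
+    slirp/all: config-host.h
+            $(call quiet-command, $(MAKE) $(SUBDIR_MAKEFLAGS) \
+                    -C $(SRC_PATH)/slirp V="$(V)", "MAKE", "slirp")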
+
diff --git a/results/classifier/zero-shot/016/x86/55367348 b/results/classifier/zero-shot/016/x86/55367348
new file mode 100644
index 000000000..0367eb524
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/55367348
@@ -0,0 +1,559 @@
+x86: 0.960
+operating system: 0.814
+debug: 0.157
+kernel: 0.112
+files: 0.062
+register: 0.048
+virtual: 0.029
+alpha: 0.017
+TCG: 0.017
+PID: 0.013
+risc-v: 0.008
+VMM: 0.008
+hypervisor: 0.008
+semantic: 0.007
+socket: 0.007
+user-level: 0.006
+network: 0.006
+device: 0.005
+KVM: 0.004
+ppc: 0.002
+boot: 0.002
+performance: 0.002
+permissions: 0.002
+vnc: 0.002
+graphic: 0.001
+architecture: 0.001
+peripherals: 0.001
+mistranslation: 0.001
+i386: 0.001
+assembly: 0.001
+arm: 0.001
+
+[Qemu-devel] [Bug] Docs build fails at interop.rst
+
+https://paste.fedoraproject.org/paste/kOPx4jhtUli---TmxSLrlw
+running python3-sphinx-2.0.1-1.fc31.noarch on Fedora release 31
+(Rawhide)
+
+uname -a
+Linux iouring 5.1.0-0.rc6.git3.1.fc31.x86_64 #1 SMP Thu Apr 25 14:25:32
+UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
+
+Reverting commit 90edef80a0852cf8a3d2668898ee40e8970e431
+allows for the build to occur
+
+Regards
+Aarushi Mehta
+
+On 5/20/19 7:30 AM, Aarushi Mehta wrote:
+>
+https://paste.fedoraproject.org/paste/kOPx4jhtUli---TmxSLrlw
+>
+running python3-sphinx-2.0.1-1.fc31.noarch on Fedora release 31
+>
+(Rawhide)
+>
+>
+uname -a
+>
+Linux iouring 5.1.0-0.rc6.git3.1.fc31.x86_64 #1 SMP Thu Apr 25 14:25:32
+>
+UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
+>
+>
+Reverting commit 90edef80a0852cf8a3d2668898ee40e8970e431
+>
+allows for the build to occur
+>
+>
+Regards
+>
+Aarushi Mehta
+>
+>
+Ah, dang. The blocks aren't strictly conforming json, but the version I
+tested this under didn't seem to care. Your version is much newer. (I
+was using 1.7 as provided by Fedora 29.)
+
+For now, try reverting 9e5b6cb87db66dfb606604fe6cf40e5ddf1ef0e7 instead,
+which should at least turn off the "warnings as errors" option, but I
+don't think that reverting -n will turn off this warning.
+
+I'll try to get ahold of this newer version and see if I can't fix it
+more appropriately.
+
+--js
+
+On 5/20/19 12:37 PM, John Snow wrote:
+>
+>
+>
+On 5/20/19 7:30 AM, Aarushi Mehta wrote:
+>
+>
+https://paste.fedoraproject.org/paste/kOPx4jhtUli---TmxSLrlw
+>
+> running python3-sphinx-2.0.1-1.fc31.noarch on Fedora release 31
+>
+> (Rawhide)
+>
+>
+>
+> uname -a
+>
+> Linux iouring 5.1.0-0.rc6.git3.1.fc31.x86_64 #1 SMP Thu Apr 25 14:25:32
+>
+> UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
+>
+>
+>
+> Reverting commit 90edef80a0852cf8a3d2668898ee40e8970e431
+>
+> allows for the build to occur
+>
+>
+>
+> Regards
+>
+> Aarushi Mehta
+>
+>
+>
+>
+>
+>
+Ah, dang. The blocks aren't strictly conforming json, but the version I
+>
+tested this under didn't seem to care. Your version is much newer. (I
+>
+was using 1.7 as provided by Fedora 29.)
+>
+>
+For now, try reverting 9e5b6cb87db66dfb606604fe6cf40e5ddf1ef0e7 instead,
+>
+which should at least turn off the "warnings as errors" option, but I
+>
+don't think that reverting -n will turn off this warning.
+>
+>
+I'll try to get ahold of this newer version and see if I can't fix it
+>
+more appropriately.
+>
+>
+--js
+>
+...Sigh, okay.
+
+So, I am still not actually sure what changed from pygments 2.2 and
+sphinx 1.7 to pygments 2.4 and sphinx 2.0.1, but it appears as if Sphinx
+by default always tries to add a filter to the pygments lexer that
+raises an error on highlighting failure, instead of the default behavior
+which is to just highlight those errors in the output. There is no
+option to Sphinx that I am aware of to retain this lexing behavior.
+(Effectively, it's strict or nothing.)
+
+This approach, apparently, is broken in Sphinx 1.7/Pygments 2.2, so the
+build works with our malformed json.
+
+There are a few options:
+
+1. Update conf.py to ignore these warnings (and all future lexing
+errors), and settle for the fact that there will be no QMP highlighting
+wherever we use the directionality indicators ('->', '<-').
+
+2. Update bitmaps.rst to remove the directionality indicators.
+
+3. Update bitmaps.rst to format the QMP blocks as raw text instead of JSON.
+
+4. Update bitmaps.rst to remove the "json" specification from the code
+block. This will cause sphinx to "guess" the formatting, and the
+pygments guesser will decide it's Python3.
+
+This will parse well enough, but will mis-highlight 'true' and 'false'
+which are not python keywords. This approach may break in the future if
+the Python3 lexer is upgraded to be stricter (because '->' and '<-' are
+still invalid), and leaves us at the mercy of both the guesser and the
+lexer.
+
+I'm not actually sure what I dislike the least; I think I dislike #1 the
+most. #4 gets us most of what we want but is perhaps porcelain.
+
+I suspect if we attempt to move more of our documentation to ReST and
+Sphinx that we will need to answer for ourselves how we intend to
+document QMP code flow examples.
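+
+(For context, the kind of block in question in bitmaps.rst looks roughly like
+the following; it is the leading '->'/'<-' markers that the strict JSON lexer
+rejects:)
+
+    .. code-block:: json
+
+       -> { "execute": "block-dirty-bitmap-add",
+            "arguments": { "node": "drive0", "name": "bitmap0" } }
+       <- { "return": {} }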
+
+--js
+
+On Mon, May 20, 2019 at 05:25:28PM -0400, John Snow wrote:
+>
+>
+>
+On 5/20/19 12:37 PM, John Snow wrote:
+>
+>
+>
+>
+>
+> On 5/20/19 7:30 AM, Aarushi Mehta wrote:
+>
+>>
+https://paste.fedoraproject.org/paste/kOPx4jhtUli---TmxSLrlw
+>
+>> running python3-sphinx-2.0.1-1.fc31.noarch on Fedora release 31
+>
+>> (Rawhide)
+>
+>>
+>
+>> uname -a
+>
+>> Linux iouring 5.1.0-0.rc6.git3.1.fc31.x86_64 #1 SMP Thu Apr 25 14:25:32
+>
+>> UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
+>
+>>
+>
+>> Reverting commit 90edef80a0852cf8a3d2668898ee40e8970e431
+>
+>> allows for the build to occur
+>
+>>
+>
+>> Regards
+>
+>> Aarushi Mehta
+>
+>>
+>
+>>
+>
+>
+>
+> Ah, dang. The blocks aren't strictly conforming json, but the version I
+>
+> tested this under didn't seem to care. Your version is much newer. (I
+>
+> was using 1.7 as provided by Fedora 29.)
+>
+>
+>
+> For now, try reverting 9e5b6cb87db66dfb606604fe6cf40e5ddf1ef0e7 instead,
+>
+> which should at least turn off the "warnings as errors" option, but I
+>
+> don't think that reverting -n will turn off this warning.
+>
+>
+>
+> I'll try to get ahold of this newer version and see if I can't fix it
+>
+> more appropriately.
+>
+>
+>
+> --js
+>
+>
+>
+>
+...Sigh, okay.
+>
+>
+So, I am still not actually sure what changed from pygments 2.2 and
+>
+sphinx 1.7 to pygments 2.4 and sphinx 2.0.1, but it appears as if Sphinx
+>
+by default always tries to add a filter to the pygments lexer that
+>
+raises an error on highlighting failure, instead of the default behavior
+>
+which is to just highlight those errors in the output. There is no
+>
+option to Sphinx that I am aware of to retain this lexing behavior.
+>
+(Effectively, it's strict or nothing.)
+>
+>
+This approach, apparently, is broken in Sphinx 1.7/Pygments 2.2, so the
+>
+build works with our malformed json.
+>
+>
+There are a few options:
+>
+>
+1. Update conf.py to ignore these warnings (and all future lexing
+>
+errors), and settle for the fact that there will be no QMP highlighting
+>
+wherever we use the directionality indicators ('->', '<-').
+>
+>
+2. Update bitmaps.rst to remove the directionality indicators.
+>
+>
+3. Update bitmaps.rst to format the QMP blocks as raw text instead of JSON.
+>
+>
+4. Update bitmaps.rst to remove the "json" specification from the code
+>
+block. This will cause sphinx to "guess" the formatting, and the
+>
+pygments guesser will decide it's Python3.
+>
+>
+This will parse well enough, but will mis-highlight 'true' and 'false'
+>
+which are not python keywords. This approach may break in the future if
+>
+the Python3 lexer is upgraded to be stricter (because '->' and '<-' are
+>
+still invalid), and leaves us at the mercy of both the guesser and the
+>
+lexer.
+>
+>
+I'm not actually sure what I dislike the least; I think I dislike #1 the
+>
+most. #4 gets us most of what we want but is perhaps porcelain.
+>
+>
+I suspect if we attempt to move more of our documentation to ReST and
+>
+Sphinx that we will need to answer for ourselves how we intend to
+>
+document QMP code flow examples.
+Writing a custom lexer that handles "<-" and "->" was simple (see below).
+
+Now, is it possible to convince Sphinx to register and use a custom lexer?
+
+$ cat > /tmp/lexer.py <<EOF
+from pygments.lexer import RegexLexer, DelegatingLexer
+from pygments.lexers.data import JsonLexer
+import re
+from pygments.token import *
+
+class QMPExampleMarkersLexer(RegexLexer):
+    tokens = {
+        'root': [
+            (r' *-> *', Generic.Prompt),
+            (r' *<- *', Generic.Output),
+        ]
+    }
+
+class QMPExampleLexer(DelegatingLexer):
+    def __init__(self, **options):
+        super(QMPExampleLexer, self).__init__(JsonLexer, 
+QMPExampleMarkersLexer, Error, **options)
+EOF
+$ pygmentize -l /tmp/lexer.py:QMPExampleLexer -x -f html <<EOF
+    -> {
+         "execute": "drive-backup",
+         "arguments": {
+           "device": "drive0",
+           "bitmap": "bitmap0",
+           "target": "drive0.inc0.qcow2",
+           "format": "qcow2",
+           "sync": "incremental",
+           "mode": "existing"
+         }
+       }
+
+    <- { "return": {} }
+EOF
+<div class="highlight"><pre><span></span><span class="gp">    -&gt; 
+</span><span class="p">{</span>
+         <span class="nt">&quot;execute&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;drive-backup&quot;</span><span class="p">,</span>
+         <span class="nt">&quot;arguments&quot;</span><span class="p">:</span> 
+<span class="p">{</span>
+           <span class="nt">&quot;device&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;drive0&quot;</span><span class="p">,</span>
+           <span class="nt">&quot;bitmap&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;bitmap0&quot;</span><span class="p">,</span>
+           <span class="nt">&quot;target&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;drive0.inc0.qcow2&quot;</span><span class="p">,</span>
+           <span class="nt">&quot;format&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;qcow2&quot;</span><span class="p">,</span>
+           <span class="nt">&quot;sync&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;incremental&quot;</span><span class="p">,</span>
+           <span class="nt">&quot;mode&quot;</span><span class="p">:</span> 
+<span class="s2">&quot;existing&quot;</span>
+         <span class="p">}</span>
+       <span class="p">}</span>
+
+<span class="go">    &lt;- </span><span class="p">{</span> <span 
+class="nt">&quot;return&quot;</span><span class="p">:</span> <span 
+class="p">{}</span> <span class="p">}</span>
+</pre></div>
+$ 
+
+
+-- 
+Eduardo
+
+On 5/20/19 7:04 PM, Eduardo Habkost wrote:
+>
+On Mon, May 20, 2019 at 05:25:28PM -0400, John Snow wrote:
+>
+>
+>
+>
+>
+> On 5/20/19 12:37 PM, John Snow wrote:
+>
+>>
+>
+>>
+>
+>> On 5/20/19 7:30 AM, Aarushi Mehta wrote:
+>
+>>>
+https://paste.fedoraproject.org/paste/kOPx4jhtUli---TmxSLrlw
+>
+>>> running python3-sphinx-2.0.1-1.fc31.noarch on Fedora release 31
+>
+>>> (Rawhide)
+>
+>>>
+>
+>>> uname -a
+>
+>>> Linux iouring 5.1.0-0.rc6.git3.1.fc31.x86_64 #1 SMP Thu Apr 25 14:25:32
+>
+>>> UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
+>
+>>>
+>
+>>> Reverting commit 90edef80a0852cf8a3d2668898ee40e8970e431
+>
+>>> allows for the build to occur
+>
+>>>
+>
+>>> Regards
+>
+>>> Aarushi Mehta
+>
+>>>
+>
+>>>
+>
+>>
+>
+>> Ah, dang. The blocks aren't strictly conforming json, but the version I
+>
+>> tested this under didn't seem to care. Your version is much newer. (I
+>
+>> was using 1.7 as provided by Fedora 29.)
+>
+>>
+>
+>> For now, try reverting 9e5b6cb87db66dfb606604fe6cf40e5ddf1ef0e7 instead,
+>
+>> which should at least turn off the "warnings as errors" option, but I
+>
+>> don't think that reverting -n will turn off this warning.
+>
+>>
+>
+>> I'll try to get ahold of this newer version and see if I can't fix it
+>
+>> more appropriately.
+>
+>>
+>
+>> --js
+>
+>>
+>
+>
+>
+> ...Sigh, okay.
+>
+>
+>
+> So, I am still not actually sure what changed from pygments 2.2 and
+>
+> sphinx 1.7 to pygments 2.4 and sphinx 2.0.1, but it appears as if Sphinx
+>
+> by default always tries to add a filter to the pygments lexer that
+>
+> raises an error on highlighting failure, instead of the default behavior
+>
+> which is to just highlight those errors in the output. There is no
+>
+> option to Sphinx that I am aware of to retain this lexing behavior.
+>
+> (Effectively, it's strict or nothing.)
+>
+>
+>
+> This approach, apparently, is broken in Sphinx 1.7/Pygments 2.2, so the
+>
+> build works with our malformed json.
+>
+>
+>
+> There are a few options:
+>
+>
+>
+> 1. Update conf.py to ignore these warnings (and all future lexing
+>
+> errors), and settle for the fact that there will be no QMP highlighting
+>
+> wherever we use the directionality indicators ('->', '<-').
+>
+>
+>
+> 2. Update bitmaps.rst to remove the directionality indicators.
+>
+>
+>
+> 3. Update bitmaps.rst to format the QMP blocks as raw text instead of JSON.
+>
+>
+>
+> 4. Update bitmaps.rst to remove the "json" specification from the code
+>
+> block. This will cause sphinx to "guess" the formatting, and the
+>
+> pygments guesser will decide it's Python3.
+>
+>
+>
+> This will parse well enough, but will mis-highlight 'true' and 'false'
+>
+> which are not python keywords. This approach may break in the future if
+>
+> the Python3 lexer is upgraded to be stricter (because '->' and '<-' are
+>
+> still invalid), and leaves us at the mercy of both the guesser and the
+>
+> lexer.
+>
+>
+>
+> I'm not actually sure what I dislike the least; I think I dislike #1 the
+>
+> most. #4 gets us most of what we want but is perhaps porcelain.
+>
+>
+>
+> I suspect if we attempt to move more of our documentation to ReST and
+>
+> Sphinx that we will need to answer for ourselves how we intend to
+>
+> document QMP code flow examples.
+>
+>
+Writing a custom lexer that handles "<-" and "->" was simple (see below).
+>
+>
+Now, is it possible to convince Sphinx to register and use a custom lexer?
+>
+Spoilers, yes, and I've sent a patch to list. Thanks for your help!
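+
+For the record, wiring a custom lexer into Sphinx boils down to something like
+this in docs/conf.py (a sketch assuming the lexer above is saved as
+qmp_lexer.py somewhere importable from the config; the patch on the list may
+differ in detail):
+
+    from qmp_lexer import QMPExampleLexer
+
+    def setup(app):
+        # Let ".. code-block:: QMP" blocks use the custom lexer.
+        app.add_lexer('QMP', QMPExampleLexer())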
+
diff --git a/results/classifier/zero-shot/016/x86/57756589 b/results/classifier/zero-shot/016/x86/57756589
new file mode 100644
index 000000000..c79fbe69f
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/57756589
@@ -0,0 +1,1448 @@
+x86: 0.867
+virtual: 0.827
+debug: 0.667
+hypervisor: 0.624
+vnc: 0.451
+socket: 0.358
+operating system: 0.243
+register: 0.146
+boot: 0.101
+KVM: 0.100
+kernel: 0.096
+files: 0.092
+PID: 0.083
+network: 0.075
+device: 0.070
+performance: 0.042
+TCG: 0.018
+ppc: 0.015
+VMM: 0.014
+architecture: 0.012
+permissions: 0.008
+semantic: 0.008
+assembly: 0.007
+peripherals: 0.007
+graphic: 0.004
+user-level: 0.004
+risc-v: 0.002
+mistranslation: 0.001
+alpha: 0.001
+i386: 0.000
+arm: 0.000
+
+[Qemu-devel] Re: Re: Re: [BUG] COLO failover hang
+
+Almost like the wiki setup, but with a panic on the Primary Node.
+
+
+
+
+Steps:
+
+1. Primary Node:
+
+x86_64-softmmu/qemu-system-x86_64 -enable-kvm -boot c -m 2048 -smp 2 -qmp stdio 
+-vnc :7 -name primary -cpu qemu64,+kvmclock -device piix3-usb-uhci -usb 
+-usbdevice tablet\
+
+  -drive 
+if=virtio,id=colo-disk0,driver=quorum,read-pattern=fifo,vote-threshold=1,
+
+   
+children.0.file.filename=/mnt/sdd/pure_IMG/linux/redhat/rhel_6.5_64_2U_ide,children.0.driver=qcow2
+ -S \
+
+  -netdev 
+tap,id=hn1,vhost=off,script=/etc/qemu-ifup2,downscript=/etc/qemu-ifdown2 \
+
+  -device e1000,id=e1,netdev=hn1,mac=52:a4:00:12:78:67 \
+
+  -netdev 
+tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown \
+
+  -device e1000,id=e0,netdev=hn0,mac=52:a4:00:12:78:66 \
+
+  -chardev socket,id=mirror0,host=9.61.1.8,port=9003,server,nowait -chardev 
+socket,id=compare1,host=9.61.1.8,port=9004,server,nowait \
+
+  -chardev socket,id=compare0,host=9.61.1.8,port=9001,server,nowait -chardev 
+socket,id=compare0-0,host=9.61.1.8,port=9001 \
+
+  -chardev socket,id=compare_out,host=9.61.1.8,port=9005,server,nowait \
+
+  -chardev socket,id=compare_out0,host=9.61.1.8,port=9005 \
+
+  -object filter-mirror,id=m0,netdev=hn0,queue=tx,outdev=mirror0 \
+
+  -object filter-redirector,netdev=hn0,id=redire0,queue=rx,indev=compare_out 
+-object filter-redirector,netdev=hn0,id=redire1,queue=rx,outdev=compare0 \
+
+  -object 
+colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0
+
+2. Secondary Node:
+
+x86_64-softmmu/qemu-system-x86_64 -boot c -m 2048 -smp 2 -qmp stdio -vnc :7 
+-name secondary -enable-kvm -cpu qemu64,+kvmclock -device piix3-usb-uhci -usb 
+-usbdevice tablet\
+
+  -drive 
+if=none,id=colo-disk0,file.filename=/mnt/sdd/pure_IMG/linux/redhat/rhel_6.5_64_2U_ide,driver=qcow2,node-name=node0
+ \
+
+  -drive 
+if=virtio,id=active-disk0,driver=replication,mode=secondary,file.driver=qcow2,top-id=active-disk0,file.file.filename=/mnt/ramfstest/active_disk.img,file.backing.driver=qcow2,file.backing.file.filename=/mnt/ramfstest/hidden_disk.img,file.backing.backing=colo-disk0
+  \
+
+   -netdev 
+tap,id=hn1,vhost=off,script=/etc/qemu-ifup2,downscript=/etc/qemu-ifdown2 \
+
+  -device e1000,id=e1,netdev=hn1,mac=52:a4:00:12:78:67 \
+
+  -netdev 
+tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown \
+
+  -device e1000,netdev=hn0,mac=52:a4:00:12:78:66 -chardev 
+socket,id=red0,host=9.61.1.8,port=9003 \
+
+  -chardev socket,id=red1,host=9.61.1.8,port=9004 \
+
+  -object filter-redirector,id=f1,netdev=hn0,queue=tx,indev=red0 \
+
+  -object filter-redirector,id=f2,netdev=hn0,queue=rx,outdev=red1 \
+
+  -object filter-rewriter,id=rew0,netdev=hn0,queue=all -incoming tcp:0:8888
+
+3. Secondary Node:
+
+{'execute':'qmp_capabilities'}
+
+{ 'execute': 'nbd-server-start',
+
+  'arguments': {'addr': {'type': 'inet', 'data': {'host': '9.61.1.7', 'port': 
+'8889'} } }
+
+}
+
+{'execute': 'nbd-server-add', 'arguments': {'device': 'colo-disk0', 'writable': 
+true } }
+
+4. Primary Node:
+
+{'execute':'qmp_capabilities'}
+
+
+{ 'execute': 'human-monitor-command',
+
+  'arguments': {'command-line': 'drive_add -n buddy 
+driver=replication,mode=primary,file.driver=nbd,file.host=9.61.1.7,file.port=8889,file.export=colo-disk0,node-name=node0'}}
+
+{ 'execute':'x-blockdev-change', 'arguments':{'parent': 'colo-disk0', 'node': 
+'node0' } }
+
+{ 'execute': 'migrate-set-capabilities',
+
+      'arguments': {'capabilities': [ {'capability': 'x-colo', 'state': true } 
+] } }
+
+{ 'execute': 'migrate', 'arguments': {'uri': 'tcp:9.61.1.7:8888' } }
+
+
+
+
+Then you can see two running VMs; whenever you make changes to the PVM, the
+SVM will be synced.
+
+
+
+
+5. Primary Node:
+
+echo c > /proc/sysrq-trigger
+
+
+
+
+6. Secondary Node:
+
+{ 'execute': 'nbd-server-stop' }
+
+{ "execute": "x-colo-lost-heartbeat" }
+
+
+
+
+Then you can see the Secondary Node qemu hang at recvmsg.
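+
+(To confirm where it is stuck, attaching gdb and dumping all thread backtraces
+works; a sketch, assuming the qemu-system-x86_64 process from the command
+lines above:)
+
+    gdb --batch -p $(pidof qemu-system-x86_64) -ex 'thread apply all bt'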
+
+-----Original Mail-----
+
+From: address@hidden
+To: Wang Guang 10165992 address@hidden
+Cc: address@hidden address@hidden
+Date: 2017-03-21 16:27
+Subject: Re: [Qemu-devel] Re: Re: [BUG] COLO failover hang
+
+
+
+
+
+Hi,
+
+On 2017/3/21 16:10, address@hidden wrote:
+> Thank you.
+>
+> I have tested it already.
+>
+> When the Primary Node panics, the Secondary Node qemu hangs at the same place.
+>
+> According to
+http://wiki.qemu-project.org/Features/COLO
+, killing the Primary Node qemu
+will not produce the problem, but a Primary Node panic can.
+>
+> I think it is because the channel does not support
+QIO_CHANNEL_FEATURE_SHUTDOWN.
+>
+>
+
+Yes, you are right: when we do failover for the primary/secondary VM, we shut
+down the related fd in case something is stuck reading/writing it.
+
+It seems that you didn't follow the above instructions exactly when doing the
+test. Could you share your test procedure, especially the commands used in the
+test?
+
+Thanks,
+Hailiang
+
+> When doing failover, channel_shutdown could not shut down the channel,
+>
+>
+> so the colo_process_incoming_thread will hang at recvmsg.
+>
+>
+> I tested a patch:
+>
+>
+> diff --git a/migration/socket.c b/migration/socket.c
+> index 13966f1..d65a0ea 100644
+> --- a/migration/socket.c
+> +++ b/migration/socket.c
+> @@ -147,8 +147,9 @@ static gboolean socket_accept_incoming_migration(QIOChannel *ioc,
+>      }
+>
+>      trace_migration_socket_incoming_accepted();
+>
+>      qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming");
+> +    qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
+>      migration_channel_process_incoming(migrate_get_current(),
+>                                         QIO_CHANNEL(sioc));
+>      object_unref(OBJECT(sioc));
+>
+>
+>
+>
+> My test will not hang any more.
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+>
+> 原始邮件
+>
+>
+>
+> 发件人: address@hidden
+> 收件人:王广10165992 address@hidden
+> 抄送人: address@hidden address@hidden
+> 日 期 :2017年03月21日 15:58
+> 主 题 :Re: [Qemu-devel]  答复: Re:  [BUG]COLO failover hang
+>
+>
+>
+>
+>
+> Hi,Wang.
+>
+> You can test this branch:
+>
+>
+https://github.com/coloft/qemu/tree/colo-v5.1-developing-COLO-frame-v21-with-shared-disk
+>
+> and please follow wiki ensure your own configuration correctly.
+>
+>
+http://wiki.qemu-project.org/Features/COLO
+>
+>
+> Thanks
+>
+> Zhang Chen
+>
+>
+> On 03/21/2017 03:27 PM, address@hidden wrote:
+> >
+> > hi.
+> >
+> > > I tested the git qemu master; it has the same problem.
+> >
+> > (gdb) bt
+> >
+> > #0  qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880,
+> > niov=1, fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461
+> >
+> > #1  0x00007f658e4aa0c2 in qio_channel_read
+> > (address@hidden, address@hidden "",
+> > address@hidden, address@hidden) at io/channel.c:114
+> >
+> > #2  0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>,
+> > buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at
+> > migration/qemu-file-channel.c:78
+> >
+> > #3  0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at
+> > migration/qemu-file.c:295
+> >
+> > #4  0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden,
+> > address@hidden) at migration/qemu-file.c:555
+> >
+> > #5  0x00007f658e3ea34b in qemu_get_byte (address@hidden) at
+> > migration/qemu-file.c:568
+> >
+> > #6  0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at
+> > migration/qemu-file.c:648
+> >
+> > #7  0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800,
+> > address@hidden) at migration/colo.c:244
+> >
+> > #8  0x00007f658e3e681e in colo_receive_check_message (f=<optimized
+> > out>, address@hidden,
+> > address@hidden)
+> >
+> >     at migration/colo.c:264
+> >
+> > #9  0x00007f658e3e740e in colo_process_incoming_thread
+> > (opaque=0x7f658eb30360 <mis_current.31286>) at migration/colo.c:577
+> >
+> > #10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0
+> >
+> > #11 0x00007f65881983ed in clone () from /lib64/libc.so.6
+> >
+> > (gdb) p ioc->name
+> >
+> > $2 = 0x7f658ff7d5c0 "migration-socket-incoming"
+> >
+> > (gdb) p ioc->features        Do not support QIO_CHANNEL_FEATURE_SHUTDOWN
+> >
+> > $3 = 0
+> >
+> >
+> > (gdb) bt
+> >
+> > #0  socket_accept_incoming_migration (ioc=0x7fdcceeafa90,
+> > condition=G_IO_IN, opaque=0x7fdcceeafa90) at migration/socket.c:137
+> >
+> > #1  0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at
+> > gmain.c:3054
+> >
+> > #2  g_main_context_dispatch (context=<optimized out>,
+> > address@hidden) at gmain.c:3630
+> >
+> > #3  0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213
+> >
+> > #4  os_host_main_loop_wait (timeout=<optimized out>) at
+> > util/main-loop.c:258
+> >
+> > #5  main_loop_wait (address@hidden) at
+> > util/main-loop.c:506
+> >
+> > #6  0x00007fdccb526187 in main_loop () at vl.c:1898
+> >
+> > #7  main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
+> > out>) at vl.c:4709
+> >
+> > (gdb) p ioc->features
+> >
+> > $1 = 6
+> >
+> > (gdb) p ioc->name
+> >
+> > $2 = 0x7fdcce1b1ab0 "migration-socket-listener"
+> >
+> >
+> > Maybe socket_accept_incoming_migration should
+> > call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?
+> >
+> >
+> > thank you.
+> >
+> >
+> >
+> >
+> >
+> > 原始邮件
+> > address@hidden
+> > address@hidden
+> > address@hidden@huawei.com>
+> > *日 期 :*2017年03月16日 14:46
+> > *主 题 :**Re: [Qemu-devel] COLO failover hang*
+> >
+> >
+> >
+> >
+> > On 03/15/2017 05:06 PM, wangguang wrote:
+> > > I am testing the QEMU COLO feature described here [QEMU
+> > > Wiki](
+http://wiki.qemu-project.org/Features/COLO
+).
+> > >
+> > > When the Primary Node panics, the Secondary Node qemu hangs,
+> > > stuck at recvmsg in qio_channel_socket_readv.
+> > > And I run { 'execute': 'nbd-server-stop' } and { "execute":
+> > > "x-colo-lost-heartbeat" } in the Secondary VM's
+> > > monitor; the Secondary Node qemu still hangs at recvmsg.
+> > >
+> > > I found that COLO in qemu is not complete yet.
+> > > Does COLO have a development plan?
+> >
+> > Yes, we are developing it. You can see some of the patches we are pushing.
+> >
+> > > Has anyone ever run it successfully? Any help is appreciated!
+> >
+> > Our internal version can run it successfully;
+> > for failover details you can ask Zhanghailiang for help.
+> > Next time if you have questions about COLO,
+> > please cc me and zhanghailiang address@hidden
+> >
+> >
+> > Thanks
+> > Zhang Chen
+> >
+> >
+> > >
+> > >
+> > >
+> > > centos7.2+qemu2.7.50
+> > > (gdb) bt
+> > > #0  0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0
+> > > #1  0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized out>,
+> > > iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, errp=0x0) at
+> > > io/channel-socket.c:497
+> > > #2  0x00007f3e03329472 in qio_channel_read (address@hidden,
+> > > address@hidden "", address@hidden,
+> > > address@hidden) at io/channel.c:97
+> > > #3  0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>,
+> > > buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at
+> > > migration/qemu-file-channel.c:78
+> > > #4  0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at
+> > > migration/qemu-file.c:257
+> > > #5  0x00007f3e03274a41 in qemu_peek_byte (address@hidden,
+> > > address@hidden) at migration/qemu-file.c:510
+> > > #6  0x00007f3e03274aab in qemu_get_byte (address@hidden) at
+> > > migration/qemu-file.c:523
+> > > #7  0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at
+> > > migration/qemu-file.c:603
+> > > #8  0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00,
+> > > address@hidden) at migration/colo.c:215
+> > > #9  0x00007f3e0327250d in colo_wait_handle_message (errp=0x7f3d62bfaa48,
+> > > checkpoint_request=<synthetic pointer>, f=<optimized out>) at
+> > > migration/colo.c:546
+> > > #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at
+> > > migration/colo.c:649
+> > > #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0
+> > > #12 0x00007f3dfc9c03ed in clone () from /lib64/libc.so.6
+> > >
+> > >
+> > >
+> > >
+> > >
+> > > --
+> > > View this message in context:
+http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html
+> > > Sent from the Developer mailing list archive at Nabble.com.
+> > >
+> > >
+> > >
+> > >
+> >
+> > --
+> > Thanks
+> > Zhang Chen
+> >
+> >
+> >
+> >
+> >
+>
+
+diff --git a/migration/socket.c b/migration/socket.c
+index 13966f1..d65a0ea 100644
+--- a/migration/socket.c
++++ b/migration/socket.c
+@@ -147,8 +147,9 @@ static gboolean socket_accept_incoming_migration(QIOChannel *ioc,
+     }
+ 
+     trace_migration_socket_incoming_accepted();
+ 
+     qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming");
++    qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
+     migration_channel_process_incoming(migrate_get_current(),
+                                        QIO_CHANNEL(sioc));
+     object_unref(OBJECT(sioc));
+
+Is this patch OK?
+
+I have tested it; the test does not hang any more.
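+
+For context, a minimal sketch of why the feature bit matters, assuming the
+qemu-2.7-era migration/qemu-file-channel.c layout (the exact code may differ):
+the QEMUFile shutdown hook refuses to act on a channel that lacks
+QIO_CHANNEL_FEATURE_SHUTDOWN, so qemu_file_shutdown() becomes a no-op on the
+accepted socket and the blocked recvmsg() is never interrupted.
+
+static int channel_shutdown(void *opaque, bool rd, bool wr)
+{
+    QIOChannel *ioc = QIO_CHANNEL(opaque);
+
+    /* Without the SHUTDOWN feature bit the accepted migration socket
+     * ends up here and the COLO thread stays blocked in recvmsg(). */
+    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
+        return -ENOSYS;
+    }
+
+    if (qio_channel_shutdown(ioc,
+                             rd && wr ? QIO_CHANNEL_SHUTDOWN_BOTH
+                                      : (rd ? QIO_CHANNEL_SHUTDOWN_READ
+                                            : QIO_CHANNEL_SHUTDOWN_WRITE),
+                             NULL) < 0) {
+        return -EIO;
+    }
+    return 0;
+}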
+
+Original Mail
+
+From: address@hidden
+To: address@hidden address@hidden
+Cc: address@hidden address@hidden address@hidden
+Date: 2017-03-22 09:11
+Subject: Re: [Qemu-devel] Reply: Re: Reply: Re: [BUG] COLO failover hang
+
+
+
+
+
+On 2017/3/21 19:56, Dr. David Alan Gilbert wrote:
+> * Hailiang Zhang (address@hidden) wrote:
+>> Hi,
+>>
+>> Thanks for reporting this. I confirmed it in my test, and it is a bug.
+>>
+>> We tried to call qemu_file_shutdown() to shut down the related fd in case
+>> the COLO thread/incoming thread gets stuck in read/write() during failover,
+>> but it didn't take effect, because every fd used by COLO (and migration)
+>> has been wrapped in a qio channel, and the shutdown API will not be called
+>> if we didn't call qio_channel_set_feature(QIO_CHANNEL(sioc),
+>> QIO_CHANNEL_FEATURE_SHUTDOWN).
+>>
+>> Cc: Dr. David Alan Gilbert address@hidden
+>>
+>> I suspect migration cancel has the same problem: it may get stuck in write()
+>> if we try to cancel migration.
+>>
+>> void fd_start_outgoing_migration(MigrationState *s, const char *fdname,
+>>                                  Error **errp)
+>> {
+>>     qio_channel_set_name(QIO_CHANNEL(ioc), "migration-fd-outgoing");
+>>     migration_channel_connect(s, ioc, NULL);
+>>     ... ...
+>>
+>> We didn't call qio_channel_set_feature(QIO_CHANNEL(sioc),
+>> QIO_CHANNEL_FEATURE_SHUTDOWN) above, and then:
+>>
+>> migrate_fd_cancel()
+>> {
+>>     ... ...
+>>     if (s->state == MIGRATION_STATUS_CANCELLING && f) {
+>>         qemu_file_shutdown(f);  /* --> This will not take effect. No? */
+>>     }
+>> }
+>
+> (cc'd in Daniel Berrange).
+> I see that we call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)
+> at the top of qio_channel_socket_new, so I think that's safe, isn't it?
+>
+
+Hmm, you are right, this problem only exists for the migration incoming fd,
+thanks.
+
+> Dave
+>
+>> Thanks,
+>> Hailiang
+>>
+>> On 2017/3/21 16:10, address@hidden wrote:
+>>> Thank you.
+>>>
+>>> I have tested it already.
+>>>
+>>> When the Primary Node panics, the Secondary Node qemu hangs at the same place.
+>>>
+>>> According to http://wiki.qemu-project.org/Features/COLO, killing the
+>>> Primary Node qemu does not reproduce the problem, but a Primary Node
+>>> panic does.
+>>>
+>>> I think it is because the channel does not support
+>>> QIO_CHANNEL_FEATURE_SHUTDOWN.
+>>>
+>>> When failover happens, channel_shutdown() cannot shut down the channel,
+>>> so colo_process_incoming_thread will hang at recvmsg().
+>>>
+>>> I tested a patch:
+>>>
+>>> diff --git a/migration/socket.c b/migration/socket.c
+>>> index 13966f1..d65a0ea 100644
+>>> --- a/migration/socket.c
+>>> +++ b/migration/socket.c
+>>> @@ -147,8 +147,9 @@ static gboolean socket_accept_incoming_migration(QIOChannel *ioc,
+>>>      }
+>>>
+>>>      trace_migration_socket_incoming_accepted();
+>>>
+>>>      qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming");
+>>> +    qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
+>>>      migration_channel_process_incoming(migrate_get_current(),
+>>>                                         QIO_CHANNEL(sioc));
+>>>      object_unref(OBJECT(sioc));
+>>>
+>>> With this, my test does not hang any more.
+>>>
+>>> Original Mail
+>>>
+>>> From: address@hidden
+>>> To: 王广10165992 address@hidden
+>>> Cc: address@hidden address@hidden
+>>> Date: 2017-03-21 15:58
+>>> Subject: Re: [Qemu-devel] Reply: Re: [BUG] COLO failover hang
+>>>
+>>>
+>>>
+>>>
+>>>
+>>> Hi, Wang.
+>>>
+>>> You can test this branch:
+>>>
+>>>
+https://github.com/coloft/qemu/tree/colo-v5.1-developing-COLO-frame-v21-with-shared-disk
+>>>
+>>> and please follow the wiki to make sure your configuration is correct.
+>>>
+>>>
+http://wiki.qemu-project.org/Features/COLO
+>>>
+>>>
+>>> Thanks
+>>>
+>>> Zhang Chen
+>>>
+>>>
+>>> On 03/21/2017 03:27 PM, address@hidden wrote:
+>>> >
+>>> > hi.
+>>> >
+>>> > I tested git qemu master; it has the same problem.
+>>> >
+>>> > (gdb) bt
+>>> >
+>>> > #0  qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880,
+>>> > niov=1, fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461
+>>> >
+>>> > #1  0x00007f658e4aa0c2 in qio_channel_read
+>>> > (address@hidden, address@hidden "",
+>>> > address@hidden, address@hidden) at io/channel.c:114
+>>> >
+>>> > #2  0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>,
+>>> > buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at
+>>> > migration/qemu-file-channel.c:78
+>>> >
+>>> > #3  0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at
+>>> > migration/qemu-file.c:295
+>>> >
+>>> > #4  0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden,
+>>> > address@hidden) at migration/qemu-file.c:555
+>>> >
+>>> > #5  0x00007f658e3ea34b in qemu_get_byte (address@hidden) at
+>>> > migration/qemu-file.c:568
+>>> >
+>>> > #6  0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at
+>>> > migration/qemu-file.c:648
+>>> >
+>>> > #7  0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800,
+>>> > address@hidden) at migration/colo.c:244
+>>> >
+>>> > #8  0x00007f658e3e681e in colo_receive_check_message (f=<optimized
+>>> > out>, address@hidden,
+>>> > address@hidden)
+>>> >
+>>> >     at migration/colo.c:264
+>>> >
+>>> > #9  0x00007f658e3e740e in colo_process_incoming_thread
+>>> > (opaque=0x7f658eb30360 <mis_current.31286>) at migration/colo.c:577
+>>> >
+>>> > #10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0
+>>> >
+>>> > #11 0x00007f65881983ed in clone () from /lib64/libc.so.6
+>>> >
+>>> > (gdb) p ioc->name
+>>> >
+>>> > $2 = 0x7f658ff7d5c0 "migration-socket-incoming"
+>>> >
+>>> > (gdb) p ioc->features        <- QIO_CHANNEL_FEATURE_SHUTDOWN not supported
+>>> >
+>>> > $3 = 0
+>>> >
+>>> >
+>>> > (gdb) bt
+>>> >
+>>> > #0  socket_accept_incoming_migration (ioc=0x7fdcceeafa90,
+>>> > condition=G_IO_IN, opaque=0x7fdcceeafa90) at migration/socket.c:137
+>>> >
+>>> > #1  0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at
+>>> > gmain.c:3054
+>>> >
+>>> > #2  g_main_context_dispatch (context=<optimized out>,
+>>> > address@hidden) at gmain.c:3630
+>>> >
+>>> > #3  0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213
+>>> >
+>>> > #4  os_host_main_loop_wait (timeout=<optimized out>) at
+>>> > util/main-loop.c:258
+>>> >
+>>> > #5  main_loop_wait (address@hidden) at
+>>> > util/main-loop.c:506
+>>> >
+>>> > #6  0x00007fdccb526187 in main_loop () at vl.c:1898
+>>> >
+>>> > #7  main (argc=<optimized out>, argv=<optimized out>, envp=<optimized
+>>> > out>) at vl.c:4709
+>>> >
+>>> > (gdb) p ioc->features
+>>> >
+>>> > $1 = 6
+>>> >
+>>> > (gdb) p ioc->name
+>>> >
+>>> > $2 = 0x7fdcce1b1ab0 "migration-socket-listener"
+>>> >
+>>> >
+>>> > Maybe socket_accept_incoming_migration should
+>>> > call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?
+>>> >
+>>> >
+>>> > Thank you.
+>>> >
+>>> >
+>>> >
+>>> >
+>>> >
+>>> > Original Mail
+>>> > address@hidden
+>>> > address@hidden
+>>> > address@hidden@huawei.com>
+>>> > *Date:* 2017-03-16 14:46
+>>> > *Subject:* *Re: [Qemu-devel] COLO failover hang*
+>>> >
+>>> >
+>>> >
+>>> >
+>>> > On 03/15/2017 05:06 PM, wangguang wrote:
+>>> > > I am testing the QEMU COLO feature described here: [QEMU
+>>> > > Wiki](http://wiki.qemu-project.org/Features/COLO).
+>>> > >
+>>> > > When the Primary Node panics, the Secondary Node qemu hangs.
+>>> > > It hangs at recvmsg() in qio_channel_socket_readv.
+>>> > > And when I run { 'execute': 'nbd-server-stop' } and { "execute":
+>>> > > "x-colo-lost-heartbeat" } in the Secondary VM's
+>>> > > monitor, the Secondary Node qemu still hangs at recvmsg().
+>>> > >
+>>> > > I found that COLO in qemu is not complete yet.
+>>> > > Is there a development plan for COLO?
+>>> >
+>>> > Yes, we are developing it. You can see some of the patches we are pushing.
+>>> >
+>>> > > Has anyone ever run it successfully? Any help is appreciated!
+>>> >
+>>> > Our internal version can run it successfully;
+>>> > for failover details you can ask Zhang Hailiang.
+>>> > Next time if you have questions about COLO,
+>>> > please cc me and zhanghailiang address@hidden
+>>> >
+>>> >
+>>> > Thanks
+>>> > Zhang Chen
+>>> >
+>>> >
+>>> > >
+>>> > >
+>>> > >
+>>> > > centos7.2+qemu2.7.50
+>>> > > (gdb) bt
+>>> > > #0  0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0
+>>> > > #1  0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized out>,
+>>> > > iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, errp=0x0)
+>>> > > at io/channel-socket.c:497
+>>> > > #2  0x00007f3e03329472 in qio_channel_read (address@hidden,
+>>> > > address@hidden "", address@hidden,
+>>> > > address@hidden) at io/channel.c:97
+>>> > > #3  0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>,
+>>> > > buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at
+>>> > > migration/qemu-file-channel.c:78
+>>> > > #4  0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at
+>>> > > migration/qemu-file.c:257
+>>> > > #5  0x00007f3e03274a41 in qemu_peek_byte (address@hidden,
+>>> > > address@hidden) at migration/qemu-file.c:510
+>>> > > #6  0x00007f3e03274aab in qemu_get_byte (address@hidden) at
+>>> > > migration/qemu-file.c:523
+>>> > > #7  0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at
+>>> > > migration/qemu-file.c:603
+>>> > > #8  0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00,
+>>> > > address@hidden) at migration/colo.c:215
+>>> > > #9  0x00007f3e0327250d in colo_wait_handle_message (errp=0x7f3d62bfaa48,
+>>> > > checkpoint_request=<synthetic pointer>, f=<optimized out>) at
+>>> > > migration/colo.c:546
+>>> > > #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at
+>>> > > migration/colo.c:649
+>>> > > #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0
+>>> > > #12 0x00007f3dfc9c03ed in clone () from /lib64/libc.so.6
+>>> > >
+>>> > >
+>>> > >
+>>> > >
+>>> > >
+>>> > > --
+>>> > > View this message in context:
+http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html
+>>> > > Sent from the Developer mailing list archive at Nabble.com.
+>>> > >
+>>> > >
+>>> > >
+>>> > >
+>>> >
+>>> > --
+>>> > Thanks
+>>> > Zhang Chen
+>>> >
+>>> >
+>>> >
+>>> >
+>>> >
+>>>
+>>
+> --
+> Dr. David Alan Gilbert / address@hidden / Manchester, UK
+>
+> .
+>
+
+Hi,
+
+On 2017/3/22 9:42, address@hidden wrote:
+diff --git a/migration/socket.c b/migration/socket.c
+index 13966f1..d65a0ea 100644
+--- a/migration/socket.c
++++ b/migration/socket.c
+@@ -147,8 +147,9 @@ static gboolean socket_accept_incoming_migration(QIOChannel *ioc,
+     }
+ 
+     trace_migration_socket_incoming_accepted();
+ 
+     qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming");
++    qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
+     migration_channel_process_incoming(migrate_get_current(),
+                                        QIO_CHANNEL(sioc));
+     object_unref(OBJECT(sioc));
+
+Is this patch ok?
+
+Yes, I think this works, but a better way may be to call
+qio_channel_set_feature() in qio_channel_socket_accept(), since we didn't set
+the SHUTDOWN feature for the accepted socket fd. Or fix it like this:
+
+diff --git a/io/channel-socket.c b/io/channel-socket.c
+index f546c68..ce6894c 100644
+--- a/io/channel-socket.c
++++ b/io/channel-socket.c
+@@ -330,9 +330,8 @@ qio_channel_socket_accept(QIOChannelSocket *ioc,
+                           Error **errp)
+ {
+     QIOChannelSocket *cioc;
+-
+-    cioc = QIO_CHANNEL_SOCKET(object_new(TYPE_QIO_CHANNEL_SOCKET));
+-    cioc->fd = -1;
++
++    cioc = qio_channel_socket_new();
+     cioc->remoteAddrLen = sizeof(ioc->remoteAddr);
+     cioc->localAddrLen = sizeof(ioc->localAddr);
+
+
+Thanks,
+Hailiang
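+
+The other option mentioned above, sketched for illustration (hypothetical
+placement inside qio_channel_socket_accept(), keeping the object_new() call):
+
+    cioc = QIO_CHANNEL_SOCKET(object_new(TYPE_QIO_CHANNEL_SOCKET));
+    cioc->fd = -1;
+    /* Mark the accepted channel as supporting shutdown(2), which
+     * qio_channel_socket_new() would otherwise do for us. */
+    qio_channel_set_feature(QIO_CHANNEL(cioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
+
+Reusing qio_channel_socket_new(), as in the patch above, is the smaller
+change, since the constructor already sets the feature bit.
+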
+I have tested it; the test does not hang any more.
+
+
diff --git a/results/classifier/zero-shot/016/x86/92957605 b/results/classifier/zero-shot/016/x86/92957605
new file mode 100644
index 000000000..ca0e14db3
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/92957605
@@ -0,0 +1,445 @@
+x86: 0.807
+debug: 0.518
+TCG: 0.317
+i386: 0.087
+files: 0.084
+PID: 0.043
+virtual: 0.037
+operating system: 0.036
+hypervisor: 0.031
+register: 0.026
+device: 0.024
+network: 0.021
+ppc: 0.021
+VMM: 0.017
+arm: 0.015
+socket: 0.013
+boot: 0.013
+alpha: 0.012
+semantic: 0.012
+risc-v: 0.011
+vnc: 0.007
+user-level: 0.006
+kernel: 0.006
+assembly: 0.006
+peripherals: 0.005
+performance: 0.004
+architecture: 0.004
+graphic: 0.003
+KVM: 0.002
+permissions: 0.001
+mistranslation: 0.001
+
+[Qemu-devel] Fwd:  [BUG] Failed to compile using gcc7.1
+
+Hi all,
+I encountered the same problem on gcc 7.1.1 and found Qu's mail in
+this list from google search.
+
+I temporarily fixed it by specifying the string length in the snprintf
+directives. Hope this is helpful to other people who encounter the same
+problem.
+
+@@ -1,9 +1,7 @@
+---
+--- a/block/blkdebug.c
+-                 "blkdebug:%s:%s", s->config_file ?: "",
+--- a/block/blkverify.c
+-                 "blkverify:%s:%s",
+--- a/hw/usb/bus.c
+-        snprintf(downstream->path, sizeof(downstream->path), "%s.%d",
+-        snprintf(downstream->path, sizeof(downstream->path), "%d", portnr);
+--
++++ b/block/blkdebug.c
++                 "blkdebug:%.2037s:%.2037s", s->config_file ?: "",
++++ b/block/blkverify.c
++                 "blkverify:%.2038s:%.2038s",
++++ b/hw/usb/bus.c
++        snprintf(downstream->path, sizeof(downstream->path), "%.12s.%d",
++        snprintf(downstream->path, sizeof(downstream->path), "%.12d", portnr);
+
+Tsung-en Hsiao
+
+> Qu Wenruo wrote:
+>
+> Hi all,
+>
+> After upgrading gcc from 6.3.1 to 7.1.1, qemu can't be compiled with gcc.
+>
+> The error is:
+>
+> ------
+>   CC      block/blkdebug.o
+> block/blkdebug.c: In function 'blkdebug_refresh_filename':
+> block/blkdebug.c:693:31: error: '%s' directive output may be truncated
+> writing up to 4095 bytes into a region of size 4086
+> [-Werror=format-truncation=]
+>
+>                  "blkdebug:%s:%s", s->config_file ?: "",
+>                               ^~
+> In file included from /usr/include/stdio.h:939:0,
+>                 from /home/adam/qemu/include/qemu/osdep.h:68,
+>                 from block/blkdebug.c:25:
+>
+> /usr/include/bits/stdio2.h:64:10: note: '__builtin___snprintf_chk' output 11
+> or more bytes (assuming 4106) into a destination of size 4096
+>
+>   return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1,
+>          ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+>        __bos (__s), __fmt, __va_arg_pack ());
+>        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+> cc1: all warnings being treated as errors
+> make: *** [/home/adam/qemu/rules.mak:69: block/blkdebug.o] Error 1
+> ------
+>
+> It seems that gcc 7 introduces stricter checks for printf.
+>
+> If using clang, although there are some extra warnings, it can at least pass
+> the compile.
+>
+> Thanks,
+> Qu
+
+Hi Tsung-en,
+
+On 06/11/2017 04:08 PM, Tsung-en Hsiao wrote:
+Hi all,
+I encountered the same problem on gcc 7.1.1 and found Qu's mail in
+this list from google search.
+
+I temporarily fixed it by specifying the string length in the snprintf
+directives. Hope this is helpful to other people who encounter the same
+problem.
+Thank you for sharing this.
+@@ -1,9 +1,7 @@
+---
+--- a/block/blkdebug.c
+-                 "blkdebug:%s:%s", s->config_file ?: "",
+--- a/block/blkverify.c
+-                 "blkverify:%s:%s",
+--- a/hw/usb/bus.c
+-        snprintf(downstream->path, sizeof(downstream->path), "%s.%d",
+-        snprintf(downstream->path, sizeof(downstream->path), "%d", portnr);
+--
++++ b/block/blkdebug.c
++                 "blkdebug:%.2037s:%.2037s", s->config_file ?: "",
+It is a rather funny way to silence this warning :) Truncating the
+filename until it fits.
+However I don't think it is the correct way since there is indeed an
+overflow of bs->exact_filename.
+Apparently exact_filename from "block/block_int.h" is defined to hold a
+pathname:
+char exact_filename[PATH_MAX];
+but is used for more than that (for example in blkdebug.c it might use
+until 10+2*PATH_MAX chars).
+I suppose it started as a buffer to hold a pathname then more block
+drivers were added and this buffer ended up being used differently.
+If it is a multi-purpose buffer one safer option might be to declare it
+as a GString* and use g_string_printf().
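+
+A minimal sketch of that idea, assuming bs->exact_filename were changed to a
+GString * (hypothetical; field and variable names as used in
+blkdebug_refresh_filename()):
+
+    /* g_string_printf() grows the buffer as needed, so nothing truncates. */
+    g_string_printf(bs->exact_filename, "blkdebug:%s:%s",
+                    s->config_file ?: "", bs->file->bs->exact_filename);
+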
+I CC'ed the block folks to have their feedback.
+
+Regards,
+
+Phil.
++++ b/block/blkverify.c
++                 "blkverify:%.2038s:%.2038s",
++++ b/hw/usb/bus.c
++        snprintf(downstream->path, sizeof(downstream->path), "%.12s.%d",
++        snprintf(downstream->path, sizeof(downstream->path), "%.12d", portnr);
+
+Tsung-en Hsiao
+Qu Wenruo Wrote:
+
+Hi all,
+
+After upgrading gcc from 6.3.1 to 7.1.1, qemu can't be compiled with gcc.
+
+The error is:
+
+------
+ CC      block/blkdebug.o
+block/blkdebug.c: In function 'blkdebug_refresh_filename':
+
+block/blkdebug.c:693:31: error: '%s' directive output may be truncated writing 
+up to 4095 bytes into a region of size 4086 [-Werror=format-truncation=]
+
+                 "blkdebug:%s:%s", s->config_file ?: "",
+                              ^~
+In file included from /usr/include/stdio.h:939:0,
+                from /home/adam/qemu/include/qemu/osdep.h:68,
+                from block/blkdebug.c:25:
+
+/usr/include/bits/stdio2.h:64:10: note: '__builtin___snprintf_chk' output 11 or 
+more bytes (assuming 4106) into a destination of size 4096
+
+  return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1,
+         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       __bos (__s), __fmt, __va_arg_pack ());
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+cc1: all warnings being treated as errors
+make: *** [/home/adam/qemu/rules.mak:69: block/blkdebug.o] Error 1
+------
+
+It seems that gcc 7 introduces stricter checks for printf.
+
+If using clang, although there are some extra warnings, it can at least pass
+the compile.
+
+Thanks,
+Qu
+
+On 2017-06-12 05:19, Philippe Mathieu-Daudé wrote:
+> Hi Tsung-en,
+>
+> On 06/11/2017 04:08 PM, Tsung-en Hsiao wrote:
+>> Hi all,
+>> I encountered the same problem on gcc 7.1.1 and found Qu's mail in
+>> this list from google search.
+>>
+>> I temporarily fixed it by specifying the string length in the snprintf
+>> directives. Hope this is helpful to other people who encounter the same
+>> problem.
+>
+> Thank you for sharing this.
+>
+>> @@ -1,9 +1,7 @@
+>> ---
+>> --- a/block/blkdebug.c
+>> -                 "blkdebug:%s:%s", s->config_file ?: "",
+>> --- a/block/blkverify.c
+>> -                 "blkverify:%s:%s",
+>> --- a/hw/usb/bus.c
+>> -        snprintf(downstream->path, sizeof(downstream->path), "%s.%d",
+>> -        snprintf(downstream->path, sizeof(downstream->path), "%d",
+>> portnr);
+>> --
+>> +++ b/block/blkdebug.c
+>> +                 "blkdebug:%.2037s:%.2037s", s->config_file ?: "",
+>
+> It is a rather funny way to silence this warning :) Truncating the
+> filename until it fits.
+>
+> However I don't think it is the correct way since there is indeed an
+> overflow of bs->exact_filename.
+>
+> Apparently exact_filename from "block/block_int.h" is defined to hold a
+> pathname:
+> char exact_filename[PATH_MAX];
+>
+> but is used for more than that (for example in blkdebug.c it might use
+> until 10+2*PATH_MAX chars).
+In any case, truncating the filenames will do just as much as truncating
+the result: You'll get an unusable filename.
+
+> I suppose it started as a buffer to hold a pathname, then more block
+> drivers were added and this buffer ended up being used differently.
+>
+> If it is a multi-purpose buffer one safer option might be to declare it
+> as a GString* and use g_string_printf().
+What it is supposed to be now is just an information string we can print
+to the user, because strings are nicer than JSON objects. There are some
+commands that take a filename for identifying a block node, but I dream
+we can get rid of them in 3.0...
+
+The right solution is to remove it altogether and have a
+"char *bdrv_filename(BlockDriverState *bs)" function (which generates
+the filename every time it's called). I've been working on this for some
+years now, actually, but it was never pressing enough to get it finished
+(so I never had enough time).
+
+What we can do in the meantime is to not generate a plain filename if it
+won't fit into bs->exact_filename.
+
+(The easiest way to do this probably would be to truncate
+bs->exact_filename back to an empty string if snprintf() returns a value
+greater than or equal to the length of bs->exact_filename.)
+
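+A sketch of that easiest way, assuming blkdebug_refresh_filename()'s locals
+(illustrative only, not a tested patch):
+
+    int ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
+                       "blkdebug:%s:%s", s->config_file ?: "",
+                       bs->file->bs->exact_filename);
+    if (ret < 0 || ret >= (int)sizeof(bs->exact_filename)) {
+        /* The plain filename would not fit: generate none at all. */
+        bs->exact_filename[0] = '\0';
+    }
+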
+What to do about hw/usb/bus.c I don't know (I guess the best solution
+would be to ignore the warning, but I don't suppose that is going to work).
+
+Max
+
+> I CC'ed the block folks to have their feedback.
+>
+> Regards,
+>
+> Phil.
+>
+>> +++ b/block/blkverify.c
+>> +                 "blkverify:%.2038s:%.2038s",
+>> +++ b/hw/usb/bus.c
+>> +        snprintf(downstream->path, sizeof(downstream->path), "%.12s.%d",
+>> +        snprintf(downstream->path, sizeof(downstream->path), "%.12d",
+>> portnr);
+>>
+>> Tsung-en Hsiao
+>
+>> Qu Wenruo wrote:
+>>
+>> Hi all,
+>>
+>> After upgrading gcc from 6.3.1 to 7.1.1, qemu can't be compiled with
+>> gcc.
+>>
+>> The error is:
+>>
+>> ------
+>>  CC      block/blkdebug.o
+>> block/blkdebug.c: In function 'blkdebug_refresh_filename':
+>>
+>> block/blkdebug.c:693:31: error: '%s' directive output may be
+>> truncated writing up to 4095 bytes into a region of size 4086
+>> [-Werror=format-truncation=]
+>>
+>>                  "blkdebug:%s:%s", s->config_file ?: "",
+>>                               ^~
+>> In file included from /usr/include/stdio.h:939:0,
+>>                 from /home/adam/qemu/include/qemu/osdep.h:68,
+>>                 from block/blkdebug.c:25:
+>>
+>> /usr/include/bits/stdio2.h:64:10: note: '__builtin___snprintf_chk'
+>> output 11 or more bytes (assuming 4106) into a destination of size 4096
+>>
+>>   return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1,
+>>          ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+>>        __bos (__s), __fmt, __va_arg_pack ());
+>>        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+>> cc1: all warnings being treated as errors
+>> make: *** [/home/adam/qemu/rules.mak:69: block/blkdebug.o] Error 1
+>> ------
+>>
+>> It seems that gcc 7 introduces stricter checks for printf.
+>>
+>> If using clang, although there are some extra warnings, it can at
+>> least pass the compile.
+>>
+>> Thanks,
+>> Qu
+>
+
diff --git a/results/classifier/zero-shot/016/x86/99674399 b/results/classifier/zero-shot/016/x86/99674399
new file mode 100644
index 000000000..ac24cb491
--- /dev/null
+++ b/results/classifier/zero-shot/016/x86/99674399
@@ -0,0 +1,175 @@
+x86: 0.986
+i386: 0.928
+debug: 0.850
+kernel: 0.321
+operating system: 0.243
+KVM: 0.215
+files: 0.132
+TCG: 0.114
+hypervisor: 0.102
+performance: 0.099
+assembly: 0.080
+virtual: 0.062
+PID: 0.059
+register: 0.048
+semantic: 0.029
+risc-v: 0.017
+boot: 0.016
+user-level: 0.014
+socket: 0.011
+architecture: 0.010
+VMM: 0.010
+device: 0.010
+vnc: 0.007
+alpha: 0.004
+graphic: 0.003
+ppc: 0.002
+network: 0.002
+peripherals: 0.002
+permissions: 0.001
+arm: 0.001
+mistranslation: 0.000
+
+[BUG] qemu crashes on assertion in cpu_asidx_from_attrs when cpu is in smm mode
+
+Hi all!
+
+First, I see this issue:
+https://gitlab.com/qemu-project/qemu/-/issues/1198
+where some kvm/hardware failure leads to a guest crash, and finally to this
+assertion:
+
+   cpu_asidx_from_attrs: Assertion `ret < cpu->num_ases && ret >= 0' failed.
+
+But in the ticket the talk is about the guest crash and fixing the kernel, not
+about the final QEMU assertion (which definitely shows that something should be
+fixed in QEMU code too).
+
+
+We've faced the same stack once:
+
+(gdb) bt
+#0  raise () from /lib/x86_64-linux-gnu/libc.so.6
+#1  abort () from /lib/x86_64-linux-gnu/libc.so.6
+#2  ?? () from /lib/x86_64-linux-gnu/libc.so.6
+#3  __assert_fail () from /lib/x86_64-linux-gnu/libc.so.6
+#4  cpu_asidx_from_attrs  at ../hw/core/cpu-sysemu.c:76
+#5  cpu_memory_rw_debug  at ../softmmu/physmem.c:3529
+#6  x86_cpu_dump_state  at ../target/i386/cpu-dump.c:560
+#7  kvm_cpu_exec  at ../accel/kvm/kvm-all.c:3000
+#8  kvm_vcpu_thread_fn  at ../accel/kvm/kvm-accel-ops.c:51
+#9  qemu_thread_start  at ../util/qemu-thread-posix.c:505
+#10 start_thread () from /lib/x86_64-linux-gnu/libpthread.so.0
+#11 clone () from /lib/x86_64-linux-gnu/libc.so.6
+
+
+And here is what I see:
+
+static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+    return !!attrs.secure;
+}
+
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
+{
+    int ret = 0;
+
+    if (cpu->cc->sysemu_ops->asidx_from_attrs) {
+        ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
+        assert(ret < cpu->num_ases && ret >= 0);         <<<<<<<<<<<<<<<<<
+    }
+    return ret;
+}
+
+(gdb) p cpu->num_ases
+$3 = 1
+
+(gdb) fr 5
+#5  0x00005578c8814ba3 in cpu_memory_rw_debug (cpu=c...
+(gdb) p attrs
+$6 = {unspecified = 0, secure = 1, user = 0, memory = 0, requester_id = 0, 
+byte_swap = 0, target_tlb_bit0 = 0, target_tlb_bit1 = 0, target_tlb_bit2 = 0}
+
+So .secure is 1 and therefore ret is 1; at the same time num_ases is also 1,
+so the assertion fails.
+
+
+
+Where does .secure come from?
+
+static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
+{
+    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
+}
+
+OK, it means we are in SMM mode.
+
+
+
+On the other hand, num_ases seems to always be 1 for x86 under KVM (only the
+TCG path sets 2):
+
+vsementsov@vsementsov-lin:~/work/src/qemu/yc-7.2$ git grep 'num_ases = '
+cpu.c:    cpu->num_ases = 0;
+softmmu/cpus.c:        cpu->num_ases = 1;
+target/arm/cpu.c:        cs->num_ases = 3 + has_secure;
+target/arm/cpu.c:        cs->num_ases = 1 + has_secure;
+target/i386/tcg/sysemu/tcg-cpu.c:    cs->num_ases = 2;
+
+
+So something is inconsistent between cpu->num_ases and x86_asidx_from_attrs(),
+which may return 1 (an out-of-range address-space index here) in SMM mode.
+
+
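+Purely as an illustration (not a proposed upstream patch), one defensive
+variant of cpu_asidx_from_attrs() that would keep the debug path alive on
+KVM x86 in SMM, where the attrs-derived index 1 has no backing AddressSpace:
+
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
+{
+    int ret = 0;
+
+    if (cpu->cc->sysemu_ops->asidx_from_attrs) {
+        ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
+        if (ret < 0 || ret >= cpu->num_ases) {
+            ret = 0;  /* fall back to address space 0 instead of asserting */
+        }
+    }
+    return ret;
+}
+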
+The stack starts in:
+#7  0x00005578c882f539 in kvm_cpu_exec (cpu=cpu@entry=0x5578ca2eb340) at
+../accel/kvm/kvm-all.c:3000
+    if (ret < 0) {
+        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
+        vm_stop(RUN_STATE_INTERNAL_ERROR);
+    }
+
+So there was some kvm error, and we decided to call cpu_dump_state(), and it
+crashes. cpu_dump_state() is also called from hmp_info_registers, so I can
+reproduce the crash with a tiny patch to master (as only the CPU_DUMP_CODE path
+calls cpu_memory_rw_debug(), as in kvm_cpu_exec()):
+
+diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c
+index ff01cf9d8d..dcf0189048 100644
+--- a/monitor/hmp-cmds-target.c
++++ b/monitor/hmp-cmds-target.c
+@@ -116,7 +116,7 @@ void hmp_info_registers(Monitor *mon, const QDict *qdict)
+         }
+
+         monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index);
+-        cpu_dump_state(cs, NULL, CPU_DUMP_FPU);
++        cpu_dump_state(cs, NULL, CPU_DUMP_CODE);
+     }
+ }
+
+
+Then run
+
+yes "info registers" | ./build/qemu-system-x86_64 -accel kvm -monitor stdio \
+   -global driver=cfi.pflash01,property=secure,value=on \
+   -blockdev "{'driver': 'file', 'filename': 
+'/usr/share/OVMF/OVMF_CODE_4M.secboot.fd', 'node-name': 'ovmf-code', 'read-only': 
+true}" \
+   -blockdev "{'driver': 'file', 'filename': '/usr/share/OVMF/OVMF_VARS_4M.fd', 
+'node-name': 'ovmf-vars', 'read-only': true}" \
+   -machine q35,smm=on,pflash0=ovmf-code,pflash1=ovmf-vars -m 2G -nodefaults
+
+And after some time (less than 20 seconds for me) it leads to
+
+qemu-system-x86_64: ../hw/core/cpu-sysemu.c:76: cpu_asidx_from_attrs: Assertion `ret < 
+cpu->num_ases && ret >= 0' failed.
+Aborted (core dumped)
+
+
+I've no idea how to correctly fix this bug, but I hope that my reproducer and 
+investigation will help a bit.
+
+--
+Best regards,
+Vladimir
+