-rw-r--r--  Makefile | 2
-rw-r--r--  accel/tcg/translate-all.c | 6
-rw-r--r-- [-rwxr-xr-x]  block/blkreplay.c | 0
-rw-r--r--  block/vmdk.c | 21
-rw-r--r--  docs/COLO-FT.txt | 2
-rw-r--r--  docs/interop/vhost-user.txt | 4
-rw-r--r--  docs/replay.txt | 2
-rw-r--r--  hmp.c | 13
-rw-r--r--  hw/core/machine.c | 3
-rw-r--r--  hw/core/qdev.c | 2
-rw-r--r--  include/hw/qdev-core.h | 5
-rw-r--r--  include/qemu/qht.h | 26
-rw-r--r--  linux-user/Makefile.objs | 2
-rw-r--r--  linux-user/elfload.c | 10
-rw-r--r--  linux-user/fd-trans.c | 1409
-rw-r--r--  linux-user/fd-trans.h | 97
-rw-r--r--  linux-user/syscall.c | 1519
-rw-r--r--  linux-user/syscall_defs.h | 5
-rw-r--r--  migration/migration.c | 17
-rw-r--r--  migration/ram.c | 133
-rw-r--r--  migration/ram.h | 1
-rw-r--r--  migration/rdma.c | 2
-rw-r--r--  migration/savevm.c | 7
-rw-r--r--  nbd/server.c | 84
-rw-r--r--  net/net.c | 4
-rw-r--r--  net/slirp.c | 2
-rw-r--r-- [-rwxr-xr-x]  pc-bios/hppa-firmware.img | bin 215936 -> 215936 bytes
-rw-r--r-- [-rwxr-xr-x]  pc-bios/palcode-clipper | bin 152680 -> 152680 bytes
-rw-r--r-- [-rwxr-xr-x]  pc-bios/u-boot-sam460-20100605.bin | bin 524288 -> 524288 bytes
-rw-r--r-- [-rwxr-xr-x]  pc-bios/u-boot.e500 | bin 388672 -> 388672 bytes
-rw-r--r--  qapi/block-core.json | 2
-rw-r--r--  qapi/migration.json | 26
-rw-r--r--  qemu-deprecated.texi | 17
-rw-r--r--  qemu-seccomp.c | 19
-rw-r--r-- [-rwxr-xr-x]  replay/replay-char.c | 0
-rw-r--r--  tcg/i386/tcg-target.inc.c | 4
-rw-r--r--  tests/Makefile.include | 22
-rw-r--r--  tests/migration-test.c | 36
-rw-r--r--  tests/migration/Makefile | 35
-rw-r--r--  tests/migration/i386/Makefile | 22
-rw-r--r--  tests/migration/i386/a-b-bootblock.S (renamed from tests/migration/x86-a-b-bootblock.s) | 4
-rw-r--r--  tests/migration/i386/a-b-bootblock.h (renamed from tests/migration/x86-a-b-bootblock.h) | 8
-rw-r--r--  tests/migration/migration-test.h | 21
-rwxr-xr-x  tests/migration/rebuild-x86-bootblock.sh | 33
-rw-r--r--  tests/qht-bench.c | 26
-rw-r--r--  tests/test-qht.c | 93
-rwxr-xr-x  tests/vm/basevm.py | 3
-rw-r--r--  util/aio-posix.c | 88
-rw-r--r--  util/memfd.c | 1
-rw-r--r--  util/qht.c | 138
-rw-r--r--  util/qsp.c | 11
-rw-r--r--  util/trace-events | 4
-rw-r--r--  vl.c | 5
53 files changed, 2302 insertions, 1694 deletions
diff --git a/Makefile b/Makefile
index 7bb6675f4a..3730092817 100644
--- a/Makefile
+++ b/Makefile
@@ -980,7 +980,7 @@ txt: qemu-doc.txt docs/interop/qemu-qmp-ref.txt docs/interop/qemu-ga-ref.txt
 
 qemu-doc.html qemu-doc.info qemu-doc.pdf qemu-doc.txt: \
 	qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
-	qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
+	qemu-deprecated.texi qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
 	qemu-monitor-info.texi docs/qemu-block-drivers.texi \
 	docs/qemu-cpu-models.texi
 
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 898c3bb3d1..9ffbbc2fbd 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1282,8 +1282,7 @@ void tb_flush(CPUState *cpu)
  */
 #ifdef CONFIG_USER_ONLY
 
-static void
-do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
+static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
 {
     TranslationBlock *tb = p;
     target_ulong addr = *(target_ulong *)userp;
@@ -1304,8 +1303,7 @@ static void tb_invalidate_check(target_ulong address)
     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
 }
 
-static void
-do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
+static void do_tb_page_check(void *p, uint32_t hash, void *userp)
 {
     TranslationBlock *tb = p;
     int flags1, flags2;
diff --git a/block/blkreplay.c b/block/blkreplay.c
index b5d9efdeca..b5d9efdeca 100755..100644
--- a/block/blkreplay.c
+++ b/block/blkreplay.c
diff --git a/block/vmdk.c b/block/vmdk.c
index a9d0084e36..2c9e86d98f 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1698,6 +1698,27 @@ static int coroutine_fn
 vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                            uint64_t bytes, QEMUIOVector *qiov)
 {
+    if (bytes == 0) {
+        /* The caller signals EOF by writing zero bytes.
+         * When we receive it, align the end of file to a sector boundary. */
+        BDRVVmdkState *s = bs->opaque;
+        int i, ret;
+        int64_t length;
+
+        for (i = 0; i < s->num_extents; i++) {
+            length = bdrv_getlength(s->extents[i].file->bs);
+            if (length < 0) {
+                return length;
+            }
+            length = QEMU_ALIGN_UP(length, BDRV_SECTOR_SIZE);
+            ret = bdrv_truncate(s->extents[i].file, length,
+                                PREALLOC_MODE_OFF, NULL);
+            if (ret < 0) {
+                return ret;
+            }
+        }
+        return 0;
+    }
     return vmdk_co_pwritev(bs, offset, bytes, qiov, 0);
 }
 
diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt
index d7c7dcda8f..70cfb9ce7d 100644
--- a/docs/COLO-FT.txt
+++ b/docs/COLO-FT.txt
@@ -104,7 +104,7 @@ Primary side.
 COLO Proxy:
 Delivers packets to Primary and Seconday, and then compare the responses from
 both side. Then decide whether to start a checkpoint according to some rules.
-Please refer to docs/colo-proxy.txt for more informations.
+Please refer to docs/colo-proxy.txt for more information.
 
 Note:
 HeartBeat has not been implemented yet, so you need to trigger failover process
diff --git a/docs/interop/vhost-user.txt b/docs/interop/vhost-user.txt
index f59667f498..c2194711d9 100644
--- a/docs/interop/vhost-user.txt
+++ b/docs/interop/vhost-user.txt
@@ -666,12 +666,12 @@ Master message types
       Equivalent ioctl: VHOST_SET_VRING_ENDIAN
       Master payload: vring state description
 
-      Set the endianess of a VQ for legacy devices. Little-endian is indicated
+      Set the endianness of a VQ for legacy devices. Little-endian is indicated
       with state.num set to 0 and big-endian is indicated with state.num set
       to 1. Other values are invalid.
       This request should be sent only when VHOST_USER_PROTOCOL_F_CROSS_ENDIAN
       has been negotiated.
-      Backends that negotiated this feature should handle both endianesses
+      Backends that negotiated this feature should handle both endiannesses
       and expect this message once (per VQ) during device configuration
       (ie. before the master starts the VQ).
 
diff --git a/docs/replay.txt b/docs/replay.txt
index 2e21e9ccb0..3497585f5a 100644
--- a/docs/replay.txt
+++ b/docs/replay.txt
@@ -320,7 +320,7 @@ Here is the list of events that are written into the log:
    async event id from the following list:
      - REPLAY_ASYNC_EVENT_BH. Bottom-half callback. This event synchronizes
        callbacks that affect virtual machine state, but normally called
-       asyncronously.
+       asynchronously.
        Argument: 8-byte operation id.
      - REPLAY_ASYNC_EVENT_INPUT. Input device event. Contains
        parameters of keyboard and mouse input operations
diff --git a/hmp.c b/hmp.c
index 3a9f797677..61ef120423 100644
--- a/hmp.c
+++ b/hmp.c
@@ -271,6 +271,19 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->xbzrle_cache->overflow);
     }
 
+    if (info->has_compression) {
+        monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
+                       info->compression->pages);
+        monitor_printf(mon, "compression busy: %" PRIu64 "\n",
+                       info->compression->busy);
+        monitor_printf(mon, "compression busy rate: %0.2f\n",
+                       info->compression->busy_rate);
+        monitor_printf(mon, "compressed size: %" PRIu64 "\n",
+                       info->compression->compressed_size);
+        monitor_printf(mon, "compression rate: %0.2f\n",
+                       info->compression->compression_rate);
+    }
+
     if (info->has_cpu_throttle_percentage) {
         monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
                        info->cpu_throttle_percentage);
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 6b68e1218f..1987557833 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -324,6 +324,9 @@ static void machine_set_enforce_config_section(Object *obj, bool value,
 {
     MachineState *ms = MACHINE(obj);
 
+    warn_report("enforce-config-section is deprecated, please use "
+                "-global migration.send-configuration=on|off instead");
+
     ms->enforce_config_section = value;
 }
 
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 36b788a66b..046d8f1f76 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -643,7 +643,7 @@ static void qdev_get_legacy_property(Object *obj, Visitor *v,
  * the string depends on the property type.  Legacy properties are only
  * needed for "info qtree".
  *
- * Do not use this is new code!  QOM Properties added through this interface
+ * Do not use this in new code!  QOM Properties added through this interface
  * will be given names in the "legacy" namespace.
  */
 static void qdev_property_add_legacy(DeviceState *dev, Property *prop,
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index f1fd0f8736..a24d0dd566 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -51,8 +51,9 @@ struct VMStateDescription;
  * Devices are constructed in two stages,
  * 1) object instantiation via object_initialize() and
  * 2) device realization via #DeviceState:realized property.
- * The former may not fail (it might assert or exit), the latter may return
- * error information to the caller and must be re-entrant.
+ * The former may not fail (and must not abort or exit, since it is called
+ * during device introspection already), and the latter may return error
+ * information to the caller and must be re-entrant.
  * Trivial field initializations should go into #TypeInfo.instance_init.
  * Operations depending on @props static properties should go into @realize.
  * After successful realization, setting static properties will fail.
diff --git a/include/qemu/qht.h b/include/qemu/qht.h
index c9a11cc29a..758c7ac6c8 100644
--- a/include/qemu/qht.h
+++ b/include/qemu/qht.h
@@ -43,7 +43,8 @@ struct qht_stats {
 };
 
 typedef bool (*qht_lookup_func_t)(const void *obj, const void *userp);
-typedef void (*qht_iter_func_t)(struct qht *ht, void *p, uint32_t h, void *up);
+typedef void (*qht_iter_func_t)(void *p, uint32_t h, void *up);
+typedef bool (*qht_iter_bool_func_t)(void *p, uint32_t h, void *up);
 
 #define QHT_MODE_AUTO_RESIZE 0x1 /* auto-resize when heavily loaded */
 #define QHT_MODE_RAW_MUTEXES 0x2 /* bypass the profiler (QSP) */
@@ -103,7 +104,7 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing);
  * Returns the corresponding pointer when a match is found.
  * Returns NULL otherwise.
  */
-void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                         qht_lookup_func_t func);
 
 /**
@@ -114,7 +115,7 @@ void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
  *
  * Calls qht_lookup_custom() using @ht's default comparison function.
  */
-void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash);
+void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash);
 
 /**
  * qht_remove - remove a pointer from the hash table
@@ -179,10 +180,27 @@ bool qht_resize(struct qht *ht, size_t n_elems);
  *
  * Each time it is called, user-provided @func is passed a pointer-hash pair,
  * plus @userp.
+ *
+ * Note: @ht cannot be accessed from @func
+ * See also: qht_iter_remove()
  */
 void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp);
 
 /**
+ * qht_iter_remove - Iterate over a QHT, optionally removing entries
+ * @ht: QHT to be iterated over
+ * @func: function to be called for each entry in QHT
+ * @userp: additional pointer to be passed to @func
+ *
+ * Each time it is called, user-provided @func is passed a pointer-hash pair,
+ * plus @userp. If @func returns true, the pointer-hash pair is removed.
+ *
+ * Note: @ht cannot be accessed from @func
+ * See also: qht_iter()
+ */
+void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp);
+
+/**
  * qht_statistics_init - Gather statistics from a QHT
  * @ht: QHT to gather statistics from
  * @stats: pointer to a &struct qht_stats to be filled in
@@ -193,7 +211,7 @@ void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp);
  * When done with @stats, pass the struct to qht_statistics_destroy().
  * Failing to do this will leak memory.
  */
-void qht_statistics_init(struct qht *ht, struct qht_stats *stats);
+void qht_statistics_init(const struct qht *ht, struct qht_stats *stats);
 
 /**
  * qht_statistics_destroy - Destroy a &struct qht_stats
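
As a usage illustration for the new iterator variant, here is a minimal sketch
(not part of the patch; "struct entry", "entry_is_dead" and "prune_dead_entries"
are made-up names): a caller that previously deleted entries from inside a
qht_iter() callback can instead return true from a qht_iter_bool_func_t and let
qht_iter_remove() perform the removal, which keeps the "do not access @ht from
@func" rule intact.

    #include "qemu/osdep.h"
    #include "qemu/qht.h"

    struct entry {
        int refcount;
    };

    /* Returning true asks qht_iter_remove() to drop this pointer-hash pair.
     * The table itself must not be accessed from inside the callback. */
    static bool entry_is_dead(void *p, uint32_t hash, void *userp)
    {
        struct entry *e = p;

        return e->refcount == 0;
    }

    /* With a struct qht *ht that is already initialized and populated: */
    void prune_dead_entries(struct qht *ht)
    {
        qht_iter_remove(ht, entry_is_dead, NULL);
    }
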
diff --git a/linux-user/Makefile.objs b/linux-user/Makefile.objs
index b5dfb71f25..769b8d8336 100644
--- a/linux-user/Makefile.objs
+++ b/linux-user/Makefile.objs
@@ -1,7 +1,7 @@
 obj-y = main.o syscall.o strace.o mmap.o signal.o \
 	elfload.o linuxload.o uaccess.o uname.o \
 	safe-syscall.o $(TARGET_ABI_DIR)/signal.o \
-        $(TARGET_ABI_DIR)/cpu_loop.o exit.o
+        $(TARGET_ABI_DIR)/cpu_loop.o exit.o fd-trans.o
 
 obj-$(TARGET_HAS_BFLT) += flatload.o
 obj-$(TARGET_I386) += vm86.o
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index e97c4cde49..10bca65b99 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1439,7 +1439,10 @@ struct exec
 #define QMAGIC 0314
 
 /* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
+#define TARGET_ELF_EXEC_PAGESIZE \
+        (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
+         TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
+#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
 #define TARGET_ELF_PAGESTART(_v) ((_v) & \
                                  ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
@@ -2281,7 +2284,7 @@ static void load_elf_image(const char *image_name, int image_fd,
     for (i = 0; i < ehdr->e_phnum; i++) {
         struct elf_phdr *eppnt = phdr + i;
         if (eppnt->p_type == PT_LOAD) {
-            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
+            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
             int elf_prot = 0;
 
             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
@@ -2291,8 +2294,9 @@ static void load_elf_image(const char *image_name, int image_fd,
             vaddr = load_bias + eppnt->p_vaddr;
             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
+            vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
 
-            error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+            error = target_mmap(vaddr_ps, vaddr_len,
                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
                                 image_fd, eppnt->p_offset - vaddr_po);
             if (error == -1) {
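
The new TARGET_ELF_PAGELENGTH macro rounds the file-mapping length up to the
effective ELF page size, which is now the larger of the host and target page
sizes whenever the segment's p_align is compatible with the host page size. A
small standalone sketch with made-up sizes (4 KiB target pages, 64 KiB host
pages; stand-in ROUND_UP for power-of-two sizes; not part of the patch) shows
the effect on the length handed to target_mmap():

    #include <stdio.h>

    #define ROUND_UP(n, d)  (((n) + (d) - 1) & ~((unsigned long)(d) - 1))
    #define TARGET_PAGE     0x1000UL    /* assumed target page size */
    #define HOST_PAGE       0x10000UL   /* assumed host page size */

    int main(void)
    {
        unsigned long filesz = 0x2345;  /* p_filesz + page offset, for example */
        unsigned long pagesize = HOST_PAGE > TARGET_PAGE ? HOST_PAGE : TARGET_PAGE;

        /* The old code mapped filesz bytes; the patch maps the rounded length. */
        printf("mmap length: old=%#lx new=%#lx\n",
               filesz, ROUND_UP(filesz, pagesize));
        return 0;
    }
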
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
new file mode 100644
index 0000000000..216b9f0614
--- /dev/null
+++ b/linux-user/fd-trans.c
@@ -0,0 +1,1409 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include <sys/signalfd.h>
+#include <linux/unistd.h>
+#include <linux/audit.h>
+#ifdef CONFIG_INOTIFY
+#include <sys/inotify.h>
+#endif
+#include <linux/netlink.h>
+#ifdef CONFIG_RTNETLINK
+#include <linux/rtnetlink.h>
+#include <linux/if_bridge.h>
+#endif
+#include "qemu.h"
+#include "fd-trans.h"
+
+enum {
+    QEMU_IFLA_BR_UNSPEC,
+    QEMU_IFLA_BR_FORWARD_DELAY,
+    QEMU_IFLA_BR_HELLO_TIME,
+    QEMU_IFLA_BR_MAX_AGE,
+    QEMU_IFLA_BR_AGEING_TIME,
+    QEMU_IFLA_BR_STP_STATE,
+    QEMU_IFLA_BR_PRIORITY,
+    QEMU_IFLA_BR_VLAN_FILTERING,
+    QEMU_IFLA_BR_VLAN_PROTOCOL,
+    QEMU_IFLA_BR_GROUP_FWD_MASK,
+    QEMU_IFLA_BR_ROOT_ID,
+    QEMU_IFLA_BR_BRIDGE_ID,
+    QEMU_IFLA_BR_ROOT_PORT,
+    QEMU_IFLA_BR_ROOT_PATH_COST,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+    QEMU_IFLA_BR_HELLO_TIMER,
+    QEMU_IFLA_BR_TCN_TIMER,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+    QEMU_IFLA_BR_GC_TIMER,
+    QEMU_IFLA_BR_GROUP_ADDR,
+    QEMU_IFLA_BR_FDB_FLUSH,
+    QEMU_IFLA_BR_MCAST_ROUTER,
+    QEMU_IFLA_BR_MCAST_SNOOPING,
+    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
+    QEMU_IFLA_BR_MCAST_QUERIER,
+    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
+    QEMU_IFLA_BR_MCAST_HASH_MAX,
+    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
+    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+    QEMU_IFLA_BR_NF_CALL_IPTABLES,
+    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
+    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
+    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
+    QEMU_IFLA_BR_PAD,
+    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
+    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
+    QEMU_IFLA_BR_MCAST_IGMP_VERSION,
+    QEMU_IFLA_BR_MCAST_MLD_VERSION,
+    QEMU___IFLA_BR_MAX,
+};
+
+enum {
+    QEMU_IFLA_UNSPEC,
+    QEMU_IFLA_ADDRESS,
+    QEMU_IFLA_BROADCAST,
+    QEMU_IFLA_IFNAME,
+    QEMU_IFLA_MTU,
+    QEMU_IFLA_LINK,
+    QEMU_IFLA_QDISC,
+    QEMU_IFLA_STATS,
+    QEMU_IFLA_COST,
+    QEMU_IFLA_PRIORITY,
+    QEMU_IFLA_MASTER,
+    QEMU_IFLA_WIRELESS,
+    QEMU_IFLA_PROTINFO,
+    QEMU_IFLA_TXQLEN,
+    QEMU_IFLA_MAP,
+    QEMU_IFLA_WEIGHT,
+    QEMU_IFLA_OPERSTATE,
+    QEMU_IFLA_LINKMODE,
+    QEMU_IFLA_LINKINFO,
+    QEMU_IFLA_NET_NS_PID,
+    QEMU_IFLA_IFALIAS,
+    QEMU_IFLA_NUM_VF,
+    QEMU_IFLA_VFINFO_LIST,
+    QEMU_IFLA_STATS64,
+    QEMU_IFLA_VF_PORTS,
+    QEMU_IFLA_PORT_SELF,
+    QEMU_IFLA_AF_SPEC,
+    QEMU_IFLA_GROUP,
+    QEMU_IFLA_NET_NS_FD,
+    QEMU_IFLA_EXT_MASK,
+    QEMU_IFLA_PROMISCUITY,
+    QEMU_IFLA_NUM_TX_QUEUES,
+    QEMU_IFLA_NUM_RX_QUEUES,
+    QEMU_IFLA_CARRIER,
+    QEMU_IFLA_PHYS_PORT_ID,
+    QEMU_IFLA_CARRIER_CHANGES,
+    QEMU_IFLA_PHYS_SWITCH_ID,
+    QEMU_IFLA_LINK_NETNSID,
+    QEMU_IFLA_PHYS_PORT_NAME,
+    QEMU_IFLA_PROTO_DOWN,
+    QEMU_IFLA_GSO_MAX_SEGS,
+    QEMU_IFLA_GSO_MAX_SIZE,
+    QEMU_IFLA_PAD,
+    QEMU_IFLA_XDP,
+    QEMU_IFLA_EVENT,
+    QEMU_IFLA_NEW_NETNSID,
+    QEMU_IFLA_IF_NETNSID,
+    QEMU_IFLA_CARRIER_UP_COUNT,
+    QEMU_IFLA_CARRIER_DOWN_COUNT,
+    QEMU_IFLA_NEW_IFINDEX,
+    QEMU___IFLA_MAX
+};
+
+enum {
+    QEMU_IFLA_BRPORT_UNSPEC,
+    QEMU_IFLA_BRPORT_STATE,
+    QEMU_IFLA_BRPORT_PRIORITY,
+    QEMU_IFLA_BRPORT_COST,
+    QEMU_IFLA_BRPORT_MODE,
+    QEMU_IFLA_BRPORT_GUARD,
+    QEMU_IFLA_BRPORT_PROTECT,
+    QEMU_IFLA_BRPORT_FAST_LEAVE,
+    QEMU_IFLA_BRPORT_LEARNING,
+    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
+    QEMU_IFLA_BRPORT_PROXYARP,
+    QEMU_IFLA_BRPORT_LEARNING_SYNC,
+    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
+    QEMU_IFLA_BRPORT_ROOT_ID,
+    QEMU_IFLA_BRPORT_BRIDGE_ID,
+    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
+    QEMU_IFLA_BRPORT_DESIGNATED_COST,
+    QEMU_IFLA_BRPORT_ID,
+    QEMU_IFLA_BRPORT_NO,
+    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+    QEMU_IFLA_BRPORT_CONFIG_PENDING,
+    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
+    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
+    QEMU_IFLA_BRPORT_HOLD_TIMER,
+    QEMU_IFLA_BRPORT_FLUSH,
+    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
+    QEMU_IFLA_BRPORT_PAD,
+    QEMU_IFLA_BRPORT_MCAST_FLOOD,
+    QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
+    QEMU_IFLA_BRPORT_VLAN_TUNNEL,
+    QEMU_IFLA_BRPORT_BCAST_FLOOD,
+    QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
+    QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
+    QEMU___IFLA_BRPORT_MAX
+};
+
+enum {
+    QEMU_IFLA_TUN_UNSPEC,
+    QEMU_IFLA_TUN_OWNER,
+    QEMU_IFLA_TUN_GROUP,
+    QEMU_IFLA_TUN_TYPE,
+    QEMU_IFLA_TUN_PI,
+    QEMU_IFLA_TUN_VNET_HDR,
+    QEMU_IFLA_TUN_PERSIST,
+    QEMU_IFLA_TUN_MULTI_QUEUE,
+    QEMU_IFLA_TUN_NUM_QUEUES,
+    QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
+    QEMU___IFLA_TUN_MAX,
+};
+
+enum {
+    QEMU_IFLA_INFO_UNSPEC,
+    QEMU_IFLA_INFO_KIND,
+    QEMU_IFLA_INFO_DATA,
+    QEMU_IFLA_INFO_XSTATS,
+    QEMU_IFLA_INFO_SLAVE_KIND,
+    QEMU_IFLA_INFO_SLAVE_DATA,
+    QEMU___IFLA_INFO_MAX,
+};
+
+enum {
+    QEMU_IFLA_INET_UNSPEC,
+    QEMU_IFLA_INET_CONF,
+    QEMU___IFLA_INET_MAX,
+};
+
+enum {
+    QEMU_IFLA_INET6_UNSPEC,
+    QEMU_IFLA_INET6_FLAGS,
+    QEMU_IFLA_INET6_CONF,
+    QEMU_IFLA_INET6_STATS,
+    QEMU_IFLA_INET6_MCAST,
+    QEMU_IFLA_INET6_CACHEINFO,
+    QEMU_IFLA_INET6_ICMP6STATS,
+    QEMU_IFLA_INET6_TOKEN,
+    QEMU_IFLA_INET6_ADDR_GEN_MODE,
+    QEMU___IFLA_INET6_MAX
+};
+
+enum {
+    QEMU_IFLA_XDP_UNSPEC,
+    QEMU_IFLA_XDP_FD,
+    QEMU_IFLA_XDP_ATTACHED,
+    QEMU_IFLA_XDP_FLAGS,
+    QEMU_IFLA_XDP_PROG_ID,
+    QEMU___IFLA_XDP_MAX,
+};
+
+enum {
+    QEMU_RTA_UNSPEC,
+    QEMU_RTA_DST,
+    QEMU_RTA_SRC,
+    QEMU_RTA_IIF,
+    QEMU_RTA_OIF,
+    QEMU_RTA_GATEWAY,
+    QEMU_RTA_PRIORITY,
+    QEMU_RTA_PREFSRC,
+    QEMU_RTA_METRICS,
+    QEMU_RTA_MULTIPATH,
+    QEMU_RTA_PROTOINFO, /* no longer used */
+    QEMU_RTA_FLOW,
+    QEMU_RTA_CACHEINFO,
+    QEMU_RTA_SESSION, /* no longer used */
+    QEMU_RTA_MP_ALGO, /* no longer used */
+    QEMU_RTA_TABLE,
+    QEMU_RTA_MARK,
+    QEMU_RTA_MFC_STATS,
+    QEMU_RTA_VIA,
+    QEMU_RTA_NEWDST,
+    QEMU_RTA_PREF,
+    QEMU_RTA_ENCAP_TYPE,
+    QEMU_RTA_ENCAP,
+    QEMU_RTA_EXPIRES,
+    QEMU_RTA_PAD,
+    QEMU_RTA_UID,
+    QEMU_RTA_TTL_PROPAGATE,
+    QEMU_RTA_IP_PROTO,
+    QEMU_RTA_SPORT,
+    QEMU_RTA_DPORT,
+    QEMU___RTA_MAX
+};
+
+TargetFdTrans **target_fd_trans;
+unsigned int target_fd_max;
+
+static void tswap_nlmsghdr(struct nlmsghdr *nlh)
+{
+    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
+    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
+    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
+    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
+    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
+}
+
+static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
+                                              size_t len,
+                                              abi_long (*host_to_target_nlmsg)
+                                                       (struct nlmsghdr *))
+{
+    uint32_t nlmsg_len;
+    abi_long ret;
+
+    while (len > sizeof(struct nlmsghdr)) {
+
+        nlmsg_len = nlh->nlmsg_len;
+        if (nlmsg_len < sizeof(struct nlmsghdr) ||
+            nlmsg_len > len) {
+            break;
+        }
+
+        switch (nlh->nlmsg_type) {
+        case NLMSG_DONE:
+            tswap_nlmsghdr(nlh);
+            return 0;
+        case NLMSG_NOOP:
+            break;
+        case NLMSG_ERROR:
+        {
+            struct nlmsgerr *e = NLMSG_DATA(nlh);
+            e->error = tswap32(e->error);
+            tswap_nlmsghdr(&e->msg);
+            tswap_nlmsghdr(nlh);
+            return 0;
+        }
+        default:
+            ret = host_to_target_nlmsg(nlh);
+            if (ret < 0) {
+                tswap_nlmsghdr(nlh);
+                return ret;
+            }
+            break;
+        }
+        tswap_nlmsghdr(nlh);
+        len -= NLMSG_ALIGN(nlmsg_len);
+        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
+    }
+    return 0;
+}
+
+static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
+                                              size_t len,
+                                              abi_long (*target_to_host_nlmsg)
+                                                       (struct nlmsghdr *))
+{
+    int ret;
+
+    while (len > sizeof(struct nlmsghdr)) {
+        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
+            tswap32(nlh->nlmsg_len) > len) {
+            break;
+        }
+        tswap_nlmsghdr(nlh);
+        switch (nlh->nlmsg_type) {
+        case NLMSG_DONE:
+            return 0;
+        case NLMSG_NOOP:
+            break;
+        case NLMSG_ERROR:
+        {
+            struct nlmsgerr *e = NLMSG_DATA(nlh);
+            e->error = tswap32(e->error);
+            tswap_nlmsghdr(&e->msg);
+            return 0;
+        }
+        default:
+            ret = target_to_host_nlmsg(nlh);
+            if (ret < 0) {
+                return ret;
+            }
+        }
+        len -= NLMSG_ALIGN(nlh->nlmsg_len);
+        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
+    }
+    return 0;
+}
+
+#ifdef CONFIG_RTNETLINK
+static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
+                                               size_t len, void *context,
+                                               abi_long (*host_to_target_nlattr)
+                                                        (struct nlattr *,
+                                                         void *context))
+{
+    unsigned short nla_len;
+    abi_long ret;
+
+    while (len > sizeof(struct nlattr)) {
+        nla_len = nlattr->nla_len;
+        if (nla_len < sizeof(struct nlattr) ||
+            nla_len > len) {
+            break;
+        }
+        ret = host_to_target_nlattr(nlattr, context);
+        nlattr->nla_len = tswap16(nlattr->nla_len);
+        nlattr->nla_type = tswap16(nlattr->nla_type);
+        if (ret < 0) {
+            return ret;
+        }
+        len -= NLA_ALIGN(nla_len);
+        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
+    }
+    return 0;
+}
+
+static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
+                                               size_t len,
+                                               abi_long (*host_to_target_rtattr)
+                                                        (struct rtattr *))
+{
+    unsigned short rta_len;
+    abi_long ret;
+
+    while (len > sizeof(struct rtattr)) {
+        rta_len = rtattr->rta_len;
+        if (rta_len < sizeof(struct rtattr) ||
+            rta_len > len) {
+            break;
+        }
+        ret = host_to_target_rtattr(rtattr);
+        rtattr->rta_len = tswap16(rtattr->rta_len);
+        rtattr->rta_type = tswap16(rtattr->rta_type);
+        if (ret < 0) {
+            return ret;
+        }
+        len -= RTA_ALIGN(rta_len);
+        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
+    }
+    return 0;
+}
+
+#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
+
+static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
+                                                  void *context)
+{
+    uint16_t *u16;
+    uint32_t *u32;
+    uint64_t *u64;
+
+    switch (nlattr->nla_type) {
+    /* no data */
+    case QEMU_IFLA_BR_FDB_FLUSH:
+        break;
+    /* binary */
+    case QEMU_IFLA_BR_GROUP_ADDR:
+        break;
+    /* uint8_t */
+    case QEMU_IFLA_BR_VLAN_FILTERING:
+    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
+    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
+    case QEMU_IFLA_BR_MCAST_ROUTER:
+    case QEMU_IFLA_BR_MCAST_SNOOPING:
+    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
+    case QEMU_IFLA_BR_MCAST_QUERIER:
+    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
+    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
+    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
+    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
+    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
+    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
+    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
+        break;
+    /* uint16_t */
+    case QEMU_IFLA_BR_PRIORITY:
+    case QEMU_IFLA_BR_VLAN_PROTOCOL:
+    case QEMU_IFLA_BR_GROUP_FWD_MASK:
+    case QEMU_IFLA_BR_ROOT_PORT:
+    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
+        u16 = NLA_DATA(nlattr);
+        *u16 = tswap16(*u16);
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_BR_FORWARD_DELAY:
+    case QEMU_IFLA_BR_HELLO_TIME:
+    case QEMU_IFLA_BR_MAX_AGE:
+    case QEMU_IFLA_BR_AGEING_TIME:
+    case QEMU_IFLA_BR_STP_STATE:
+    case QEMU_IFLA_BR_ROOT_PATH_COST:
+    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
+    case QEMU_IFLA_BR_MCAST_HASH_MAX:
+    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
+    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
+        u32 = NLA_DATA(nlattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* uint64_t */
+    case QEMU_IFLA_BR_HELLO_TIMER:
+    case QEMU_IFLA_BR_TCN_TIMER:
+    case QEMU_IFLA_BR_GC_TIMER:
+    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
+    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
+    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
+    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
+    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
+    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
+    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
+        u64 = NLA_DATA(nlattr);
+        *u64 = tswap64(*u64);
+        break;
+    /* ifla_bridge_id: uint8_t[] */
+    case QEMU_IFLA_BR_ROOT_ID:
+    case QEMU_IFLA_BR_BRIDGE_ID:
+        break;
+    default:
+        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
+                                                        void *context)
+{
+    uint16_t *u16;
+    uint32_t *u32;
+    uint64_t *u64;
+
+    switch (nlattr->nla_type) {
+    /* uint8_t */
+    case QEMU_IFLA_BRPORT_STATE:
+    case QEMU_IFLA_BRPORT_MODE:
+    case QEMU_IFLA_BRPORT_GUARD:
+    case QEMU_IFLA_BRPORT_PROTECT:
+    case QEMU_IFLA_BRPORT_FAST_LEAVE:
+    case QEMU_IFLA_BRPORT_LEARNING:
+    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
+    case QEMU_IFLA_BRPORT_PROXYARP:
+    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
+    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
+    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
+    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
+    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
+    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
+    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
+    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
+    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
+    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
+        break;
+    /* uint16_t */
+    case QEMU_IFLA_BRPORT_PRIORITY:
+    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
+    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
+    case QEMU_IFLA_BRPORT_ID:
+    case QEMU_IFLA_BRPORT_NO:
+    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
+        u16 = NLA_DATA(nlattr);
+        *u16 = tswap16(*u16);
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_BRPORT_COST:
+        u32 = NLA_DATA(nlattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* uint64_t */
+    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
+    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
+    case QEMU_IFLA_BRPORT_HOLD_TIMER:
+        u64 = NLA_DATA(nlattr);
+        *u64 = tswap64(*u64);
+        break;
+    /* ifla_bridge_id: uint8_t[] */
+    case QEMU_IFLA_BRPORT_ROOT_ID:
+    case QEMU_IFLA_BRPORT_BRIDGE_ID:
+        break;
+    default:
+        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
+                                                  void *context)
+{
+    uint32_t *u32;
+
+    switch (nlattr->nla_type) {
+    /* uint8_t */
+    case QEMU_IFLA_TUN_TYPE:
+    case QEMU_IFLA_TUN_PI:
+    case QEMU_IFLA_TUN_VNET_HDR:
+    case QEMU_IFLA_TUN_PERSIST:
+    case QEMU_IFLA_TUN_MULTI_QUEUE:
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_TUN_NUM_QUEUES:
+    case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
+    case QEMU_IFLA_TUN_OWNER:
+    case QEMU_IFLA_TUN_GROUP:
+        u32 = NLA_DATA(nlattr);
+        *u32 = tswap32(*u32);
+        break;
+    default:
+        gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
+        break;
+    }
+    return 0;
+}
+
+struct linkinfo_context {
+    int len;
+    char *name;
+    int slave_len;
+    char *slave_name;
+};
+
+static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
+                                                    void *context)
+{
+    struct linkinfo_context *li_context = context;
+
+    switch (nlattr->nla_type) {
+    /* string */
+    case QEMU_IFLA_INFO_KIND:
+        li_context->name = NLA_DATA(nlattr);
+        li_context->len = nlattr->nla_len - NLA_HDRLEN;
+        break;
+    case QEMU_IFLA_INFO_SLAVE_KIND:
+        li_context->slave_name = NLA_DATA(nlattr);
+        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
+        break;
+    /* stats */
+    case QEMU_IFLA_INFO_XSTATS:
+        /* FIXME: only used by CAN */
+        break;
+    /* nested */
+    case QEMU_IFLA_INFO_DATA:
+        if (strncmp(li_context->name, "bridge",
+                    li_context->len) == 0) {
+            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+                                                  nlattr->nla_len,
+                                                  NULL,
+                                             host_to_target_data_bridge_nlattr);
+        } else if (strncmp(li_context->name, "tun",
+                    li_context->len) == 0) {
+            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+                                                  nlattr->nla_len,
+                                                  NULL,
+                                                host_to_target_data_tun_nlattr);
+        } else {
+            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
+        }
+        break;
+    case QEMU_IFLA_INFO_SLAVE_DATA:
+        if (strncmp(li_context->slave_name, "bridge",
+                    li_context->slave_len) == 0) {
+            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+                                                  nlattr->nla_len,
+                                                  NULL,
+                                       host_to_target_slave_data_bridge_nlattr);
+        } else {
+            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
+                     li_context->slave_name);
+        }
+        break;
+    default:
+        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
+        break;
+    }
+
+    return 0;
+}
+
+static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
+                                                void *context)
+{
+    uint32_t *u32;
+    int i;
+
+    switch (nlattr->nla_type) {
+    case QEMU_IFLA_INET_CONF:
+        u32 = NLA_DATA(nlattr);
+        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
+             i++) {
+            u32[i] = tswap32(u32[i]);
+        }
+        break;
+    default:
+        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
+                                                void *context)
+{
+    uint32_t *u32;
+    uint64_t *u64;
+    struct ifla_cacheinfo *ci;
+    int i;
+
+    switch (nlattr->nla_type) {
+    /* binaries */
+    case QEMU_IFLA_INET6_TOKEN:
+        break;
+    /* uint8_t */
+    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_INET6_FLAGS:
+        u32 = NLA_DATA(nlattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* uint32_t[] */
+    case QEMU_IFLA_INET6_CONF:
+        u32 = NLA_DATA(nlattr);
+        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
+             i++) {
+            u32[i] = tswap32(u32[i]);
+        }
+        break;
+    /* ifla_cacheinfo */
+    case QEMU_IFLA_INET6_CACHEINFO:
+        ci = NLA_DATA(nlattr);
+        ci->max_reasm_len = tswap32(ci->max_reasm_len);
+        ci->tstamp = tswap32(ci->tstamp);
+        ci->reachable_time = tswap32(ci->reachable_time);
+        ci->retrans_time = tswap32(ci->retrans_time);
+        break;
+    /* uint64_t[] */
+    case QEMU_IFLA_INET6_STATS:
+    case QEMU_IFLA_INET6_ICMP6STATS:
+        u64 = NLA_DATA(nlattr);
+        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
+             i++) {
+            u64[i] = tswap64(u64[i]);
+        }
+        break;
+    default:
+        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
+                                                    void *context)
+{
+    switch (nlattr->nla_type) {
+    case AF_INET:
+        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+                                              NULL,
+                                             host_to_target_data_inet_nlattr);
+    case AF_INET6:
+        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+                                              NULL,
+                                             host_to_target_data_inet6_nlattr);
+    default:
+        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
+                                               void *context)
+{
+    uint32_t *u32;
+
+    switch (nlattr->nla_type) {
+    /* uint8_t */
+    case QEMU_IFLA_XDP_ATTACHED:
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_XDP_PROG_ID:
+        u32 = NLA_DATA(nlattr);
+        *u32 = tswap32(*u32);
+        break;
+    default:
+        gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
+{
+    uint32_t *u32;
+    struct rtnl_link_stats *st;
+    struct rtnl_link_stats64 *st64;
+    struct rtnl_link_ifmap *map;
+    struct linkinfo_context li_context;
+
+    switch (rtattr->rta_type) {
+    /* binary stream */
+    case QEMU_IFLA_ADDRESS:
+    case QEMU_IFLA_BROADCAST:
+    /* string */
+    case QEMU_IFLA_IFNAME:
+    case QEMU_IFLA_QDISC:
+        break;
+    /* uint8_t */
+    case QEMU_IFLA_OPERSTATE:
+    case QEMU_IFLA_LINKMODE:
+    case QEMU_IFLA_CARRIER:
+    case QEMU_IFLA_PROTO_DOWN:
+        break;
+    /* uint32_t */
+    case QEMU_IFLA_MTU:
+    case QEMU_IFLA_LINK:
+    case QEMU_IFLA_WEIGHT:
+    case QEMU_IFLA_TXQLEN:
+    case QEMU_IFLA_CARRIER_CHANGES:
+    case QEMU_IFLA_NUM_RX_QUEUES:
+    case QEMU_IFLA_NUM_TX_QUEUES:
+    case QEMU_IFLA_PROMISCUITY:
+    case QEMU_IFLA_EXT_MASK:
+    case QEMU_IFLA_LINK_NETNSID:
+    case QEMU_IFLA_GROUP:
+    case QEMU_IFLA_MASTER:
+    case QEMU_IFLA_NUM_VF:
+    case QEMU_IFLA_GSO_MAX_SEGS:
+    case QEMU_IFLA_GSO_MAX_SIZE:
+    case QEMU_IFLA_CARRIER_UP_COUNT:
+    case QEMU_IFLA_CARRIER_DOWN_COUNT:
+        u32 = RTA_DATA(rtattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* struct rtnl_link_stats */
+    case QEMU_IFLA_STATS:
+        st = RTA_DATA(rtattr);
+        st->rx_packets = tswap32(st->rx_packets);
+        st->tx_packets = tswap32(st->tx_packets);
+        st->rx_bytes = tswap32(st->rx_bytes);
+        st->tx_bytes = tswap32(st->tx_bytes);
+        st->rx_errors = tswap32(st->rx_errors);
+        st->tx_errors = tswap32(st->tx_errors);
+        st->rx_dropped = tswap32(st->rx_dropped);
+        st->tx_dropped = tswap32(st->tx_dropped);
+        st->multicast = tswap32(st->multicast);
+        st->collisions = tswap32(st->collisions);
+
+        /* detailed rx_errors: */
+        st->rx_length_errors = tswap32(st->rx_length_errors);
+        st->rx_over_errors = tswap32(st->rx_over_errors);
+        st->rx_crc_errors = tswap32(st->rx_crc_errors);
+        st->rx_frame_errors = tswap32(st->rx_frame_errors);
+        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
+        st->rx_missed_errors = tswap32(st->rx_missed_errors);
+
+        /* detailed tx_errors */
+        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
+        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
+        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
+        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
+        st->tx_window_errors = tswap32(st->tx_window_errors);
+
+        /* for cslip etc */
+        st->rx_compressed = tswap32(st->rx_compressed);
+        st->tx_compressed = tswap32(st->tx_compressed);
+        break;
+    /* struct rtnl_link_stats64 */
+    case QEMU_IFLA_STATS64:
+        st64 = RTA_DATA(rtattr);
+        st64->rx_packets = tswap64(st64->rx_packets);
+        st64->tx_packets = tswap64(st64->tx_packets);
+        st64->rx_bytes = tswap64(st64->rx_bytes);
+        st64->tx_bytes = tswap64(st64->tx_bytes);
+        st64->rx_errors = tswap64(st64->rx_errors);
+        st64->tx_errors = tswap64(st64->tx_errors);
+        st64->rx_dropped = tswap64(st64->rx_dropped);
+        st64->tx_dropped = tswap64(st64->tx_dropped);
+        st64->multicast = tswap64(st64->multicast);
+        st64->collisions = tswap64(st64->collisions);
+
+        /* detailed rx_errors: */
+        st64->rx_length_errors = tswap64(st64->rx_length_errors);
+        st64->rx_over_errors = tswap64(st64->rx_over_errors);
+        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
+        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
+        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
+        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
+
+        /* detailed tx_errors */
+        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
+        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
+        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
+        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
+        st64->tx_window_errors = tswap64(st64->tx_window_errors);
+
+        /* for cslip etc */
+        st64->rx_compressed = tswap64(st64->rx_compressed);
+        st64->tx_compressed = tswap64(st64->tx_compressed);
+        break;
+    /* struct rtnl_link_ifmap */
+    case QEMU_IFLA_MAP:
+        map = RTA_DATA(rtattr);
+        map->mem_start = tswap64(map->mem_start);
+        map->mem_end = tswap64(map->mem_end);
+        map->base_addr = tswap64(map->base_addr);
+        map->irq = tswap16(map->irq);
+        break;
+    /* nested */
+    case QEMU_IFLA_LINKINFO:
+        memset(&li_context, 0, sizeof(li_context));
+        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+                                              &li_context,
+                                           host_to_target_data_linkinfo_nlattr);
+    case QEMU_IFLA_AF_SPEC:
+        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+                                              NULL,
+                                             host_to_target_data_spec_nlattr);
+    case QEMU_IFLA_XDP:
+        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+                                              NULL,
+                                                host_to_target_data_xdp_nlattr);
+    default:
+        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
+{
+    uint32_t *u32;
+    struct ifa_cacheinfo *ci;
+
+    switch (rtattr->rta_type) {
+    /* binary: depends on family type */
+    case IFA_ADDRESS:
+    case IFA_LOCAL:
+        break;
+    /* string */
+    case IFA_LABEL:
+        break;
+    /* u32 */
+    case IFA_FLAGS:
+    case IFA_BROADCAST:
+        u32 = RTA_DATA(rtattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* struct ifa_cacheinfo */
+    case IFA_CACHEINFO:
+        ci = RTA_DATA(rtattr);
+        ci->ifa_prefered = tswap32(ci->ifa_prefered);
+        ci->ifa_valid = tswap32(ci->ifa_valid);
+        ci->cstamp = tswap32(ci->cstamp);
+        ci->tstamp = tswap32(ci->tstamp);
+        break;
+    default:
+        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
+{
+    uint32_t *u32;
+    struct rta_cacheinfo *ci;
+
+    switch (rtattr->rta_type) {
+    /* binary: depends on family type */
+    case QEMU_RTA_GATEWAY:
+    case QEMU_RTA_DST:
+    case QEMU_RTA_PREFSRC:
+        break;
+    /* u8 */
+    case QEMU_RTA_PREF:
+        break;
+    /* u32 */
+    case QEMU_RTA_PRIORITY:
+    case QEMU_RTA_TABLE:
+    case QEMU_RTA_OIF:
+        u32 = RTA_DATA(rtattr);
+        *u32 = tswap32(*u32);
+        break;
+    /* struct rta_cacheinfo */
+    case QEMU_RTA_CACHEINFO:
+        ci = RTA_DATA(rtattr);
+        ci->rta_clntref = tswap32(ci->rta_clntref);
+        ci->rta_lastuse = tswap32(ci->rta_lastuse);
+        ci->rta_expires = tswap32(ci->rta_expires);
+        ci->rta_error = tswap32(ci->rta_error);
+        ci->rta_used = tswap32(ci->rta_used);
+#if defined(RTNETLINK_HAVE_PEERINFO)
+        ci->rta_id = tswap32(ci->rta_id);
+        ci->rta_ts = tswap32(ci->rta_ts);
+        ci->rta_tsage = tswap32(ci->rta_tsage);
+#endif
+        break;
+    default:
+        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
+                                         uint32_t rtattr_len)
+{
+    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+                                          host_to_target_data_link_rtattr);
+}
+
+static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
+                                         uint32_t rtattr_len)
+{
+    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+                                          host_to_target_data_addr_rtattr);
+}
+
+static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
+                                         uint32_t rtattr_len)
+{
+    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+                                          host_to_target_data_route_rtattr);
+}
+
+static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
+{
+    uint32_t nlmsg_len;
+    struct ifinfomsg *ifi;
+    struct ifaddrmsg *ifa;
+    struct rtmsg *rtm;
+
+    nlmsg_len = nlh->nlmsg_len;
+    switch (nlh->nlmsg_type) {
+    case RTM_NEWLINK:
+    case RTM_DELLINK:
+    case RTM_GETLINK:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+            ifi = NLMSG_DATA(nlh);
+            ifi->ifi_type = tswap16(ifi->ifi_type);
+            ifi->ifi_index = tswap32(ifi->ifi_index);
+            ifi->ifi_flags = tswap32(ifi->ifi_flags);
+            ifi->ifi_change = tswap32(ifi->ifi_change);
+            host_to_target_link_rtattr(IFLA_RTA(ifi),
+                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
+        }
+        break;
+    case RTM_NEWADDR:
+    case RTM_DELADDR:
+    case RTM_GETADDR:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+            ifa = NLMSG_DATA(nlh);
+            ifa->ifa_index = tswap32(ifa->ifa_index);
+            host_to_target_addr_rtattr(IFA_RTA(ifa),
+                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
+        }
+        break;
+    case RTM_NEWROUTE:
+    case RTM_DELROUTE:
+    case RTM_GETROUTE:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+            rtm = NLMSG_DATA(nlh);
+            rtm->rtm_flags = tswap32(rtm->rtm_flags);
+            host_to_target_route_rtattr(RTM_RTA(rtm),
+                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
+        }
+        break;
+    default:
+        return -TARGET_EINVAL;
+    }
+    return 0;
+}
+
+static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
+                                                  size_t len)
+{
+    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
+}
+
+static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
+                                               size_t len,
+                                               abi_long (*target_to_host_rtattr)
+                                                        (struct rtattr *))
+{
+    abi_long ret;
+
+    while (len >= sizeof(struct rtattr)) {
+        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
+            tswap16(rtattr->rta_len) > len) {
+            break;
+        }
+        rtattr->rta_len = tswap16(rtattr->rta_len);
+        rtattr->rta_type = tswap16(rtattr->rta_type);
+        ret = target_to_host_rtattr(rtattr);
+        if (ret < 0) {
+            return ret;
+        }
+        len -= RTA_ALIGN(rtattr->rta_len);
+        rtattr = (struct rtattr *)(((char *)rtattr) +
+                 RTA_ALIGN(rtattr->rta_len));
+    }
+    return 0;
+}
+
+static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
+{
+    switch (rtattr->rta_type) {
+    default:
+        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
+{
+    switch (rtattr->rta_type) {
+    /* binary: depends on family type */
+    case IFA_LOCAL:
+    case IFA_ADDRESS:
+        break;
+    default:
+        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
+{
+    uint32_t *u32;
+    switch (rtattr->rta_type) {
+    /* binary: depends on family type */
+    case QEMU_RTA_DST:
+    case QEMU_RTA_SRC:
+    case QEMU_RTA_GATEWAY:
+        break;
+    /* u32 */
+    case QEMU_RTA_PRIORITY:
+    case QEMU_RTA_OIF:
+        u32 = RTA_DATA(rtattr);
+        *u32 = tswap32(*u32);
+        break;
+    default:
+        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
+        break;
+    }
+    return 0;
+}
+
+static void target_to_host_link_rtattr(struct rtattr *rtattr,
+                                       uint32_t rtattr_len)
+{
+    target_to_host_for_each_rtattr(rtattr, rtattr_len,
+                                   target_to_host_data_link_rtattr);
+}
+
+static void target_to_host_addr_rtattr(struct rtattr *rtattr,
+                                     uint32_t rtattr_len)
+{
+    target_to_host_for_each_rtattr(rtattr, rtattr_len,
+                                   target_to_host_data_addr_rtattr);
+}
+
+static void target_to_host_route_rtattr(struct rtattr *rtattr,
+                                     uint32_t rtattr_len)
+{
+    target_to_host_for_each_rtattr(rtattr, rtattr_len,
+                                   target_to_host_data_route_rtattr);
+}
+
+static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
+{
+    struct ifinfomsg *ifi;
+    struct ifaddrmsg *ifa;
+    struct rtmsg *rtm;
+
+    switch (nlh->nlmsg_type) {
+    case RTM_GETLINK:
+        break;
+    case RTM_NEWLINK:
+    case RTM_DELLINK:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+            ifi = NLMSG_DATA(nlh);
+            ifi->ifi_type = tswap16(ifi->ifi_type);
+            ifi->ifi_index = tswap32(ifi->ifi_index);
+            ifi->ifi_flags = tswap32(ifi->ifi_flags);
+            ifi->ifi_change = tswap32(ifi->ifi_change);
+            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
+                                       NLMSG_LENGTH(sizeof(*ifi)));
+        }
+        break;
+    case RTM_GETADDR:
+    case RTM_NEWADDR:
+    case RTM_DELADDR:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+            ifa = NLMSG_DATA(nlh);
+            ifa->ifa_index = tswap32(ifa->ifa_index);
+            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
+                                       NLMSG_LENGTH(sizeof(*ifa)));
+        }
+        break;
+    case RTM_GETROUTE:
+        break;
+    case RTM_NEWROUTE:
+    case RTM_DELROUTE:
+        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+            rtm = NLMSG_DATA(nlh);
+            rtm->rtm_flags = tswap32(rtm->rtm_flags);
+            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
+                                        NLMSG_LENGTH(sizeof(*rtm)));
+        }
+        break;
+    default:
+        return -TARGET_EOPNOTSUPP;
+    }
+    return 0;
+}
+
+static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
+{
+    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
+}
+#endif /* CONFIG_RTNETLINK */
+
+static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
+{
+    switch (nlh->nlmsg_type) {
+    default:
+        gemu_log("Unknown host audit message type %d\n",
+                 nlh->nlmsg_type);
+        return -TARGET_EINVAL;
+    }
+    return 0;
+}
+
+static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
+                                                  size_t len)
+{
+    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
+}
+
+static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
+{
+    switch (nlh->nlmsg_type) {
+    case AUDIT_USER:
+    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
+    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
+        break;
+    default:
+        gemu_log("Unknown target audit message type %d\n",
+                 nlh->nlmsg_type);
+        return -TARGET_EINVAL;
+    }
+
+    return 0;
+}
+
+static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
+{
+    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
+}
+
+static abi_long packet_target_to_host_sockaddr(void *host_addr,
+                                               abi_ulong target_addr,
+                                               socklen_t len)
+{
+    struct sockaddr *addr = host_addr;
+    struct target_sockaddr *target_saddr;
+
+    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
+    if (!target_saddr) {
+        return -TARGET_EFAULT;
+    }
+
+    memcpy(addr, target_saddr, len);
+    addr->sa_family = tswap16(target_saddr->sa_family);
+    /* spkt_protocol is big-endian */
+
+    unlock_user(target_saddr, target_addr, 0);
+    return 0;
+}
+
+TargetFdTrans target_packet_trans = {
+    .target_to_host_addr = packet_target_to_host_sockaddr,
+};
+
+#ifdef CONFIG_RTNETLINK
+static abi_long netlink_route_target_to_host(void *buf, size_t len)
+{
+    abi_long ret;
+
+    ret = target_to_host_nlmsg_route(buf, len);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return len;
+}
+
+static abi_long netlink_route_host_to_target(void *buf, size_t len)
+{
+    abi_long ret;
+
+    ret = host_to_target_nlmsg_route(buf, len);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return len;
+}
+
+TargetFdTrans target_netlink_route_trans = {
+    .target_to_host_data = netlink_route_target_to_host,
+    .host_to_target_data = netlink_route_host_to_target,
+};
+#endif /* CONFIG_RTNETLINK */
+
+static abi_long netlink_audit_target_to_host(void *buf, size_t len)
+{
+    abi_long ret;
+
+    ret = target_to_host_nlmsg_audit(buf, len);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return len;
+}
+
+static abi_long netlink_audit_host_to_target(void *buf, size_t len)
+{
+    abi_long ret;
+
+    ret = host_to_target_nlmsg_audit(buf, len);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return len;
+}
+
+TargetFdTrans target_netlink_audit_trans = {
+    .target_to_host_data = netlink_audit_target_to_host,
+    .host_to_target_data = netlink_audit_host_to_target,
+};
+
+/* signalfd siginfo conversion */
+
+static void
+host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
+                                const struct signalfd_siginfo *info)
+{
+    int sig = host_to_target_signal(info->ssi_signo);
+
+    /* linux/signalfd.h defines a ssi_addr_lsb
+     * not defined in sys/signalfd.h but used by some kernels
+     */
+
+#ifdef BUS_MCEERR_AO
+    if (tinfo->ssi_signo == SIGBUS &&
+        (tinfo->ssi_code == BUS_MCEERR_AR ||
+         tinfo->ssi_code == BUS_MCEERR_AO)) {
+        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
+        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
+        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
+    }
+#endif
+
+    tinfo->ssi_signo = tswap32(sig);
+    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
+    tinfo->ssi_code = tswap32(info->ssi_code);
+    tinfo->ssi_pid = tswap32(info->ssi_pid);
+    tinfo->ssi_uid = tswap32(info->ssi_uid);
+    tinfo->ssi_fd = tswap32(info->ssi_fd);
+    tinfo->ssi_tid = tswap32(info->ssi_tid);
+    tinfo->ssi_band = tswap32(info->ssi_band);
+    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
+    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
+    tinfo->ssi_status = tswap32(info->ssi_status);
+    tinfo->ssi_int = tswap32(info->ssi_int);
+    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
+    tinfo->ssi_utime = tswap64(info->ssi_utime);
+    tinfo->ssi_stime = tswap64(info->ssi_stime);
+    tinfo->ssi_addr = tswap64(info->ssi_addr);
+}
+
+static abi_long host_to_target_data_signalfd(void *buf, size_t len)
+{
+    int i;
+
+    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
+        host_to_target_signalfd_siginfo(buf + i, buf + i);
+    }
+
+    return len;
+}
+
+TargetFdTrans target_signalfd_trans = {
+    .host_to_target_data = host_to_target_data_signalfd,
+};
+
+static abi_long swap_data_eventfd(void *buf, size_t len)
+{
+    uint64_t *counter = buf;
+    int i;
+
+    if (len < sizeof(uint64_t)) {
+        return -EINVAL;
+    }
+
+    for (i = 0; i < len; i += sizeof(uint64_t)) {
+        *counter = tswap64(*counter);
+        counter++;
+    }
+
+    return len;
+}
+
+TargetFdTrans target_eventfd_trans = {
+    .host_to_target_data = swap_data_eventfd,
+    .target_to_host_data = swap_data_eventfd,
+};
+
+#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
+    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
+     defined(__NR_inotify_init1))
+static abi_long host_to_target_data_inotify(void *buf, size_t len)
+{
+    struct inotify_event *ev;
+    int i;
+    uint32_t name_len;
+
+    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
+        ev = (struct inotify_event *)((char *)buf + i);
+        name_len = ev->len;
+
+        ev->wd = tswap32(ev->wd);
+        ev->mask = tswap32(ev->mask);
+        ev->cookie = tswap32(ev->cookie);
+        ev->len = tswap32(name_len);
+    }
+
+    return len;
+}
+
+TargetFdTrans target_inotify_trans = {
+    .host_to_target_data = host_to_target_data_inotify,
+};
+#endif
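
For reference, a translator only takes effect once it is attached to a file
descriptor at creation time.  The sketch below is illustrative rather than
part of this patch: it assumes QEMU's usual linux-user helpers (abi_long,
get_errno) and <sys/eventfd.h> are in scope, and shows how an eventfd created
on behalf of the guest would pick up target_eventfd_trans via the registration
helpers declared in fd-trans.h below.

    /* Illustrative sketch only -- not part of the patch. */
    static abi_long do_eventfd_sketch(unsigned int count, int host_flags)
    {
        abi_long ret = get_errno(eventfd(count, host_flags));

        if (ret >= 0) {
            /* From now on, reads and writes on this fd get their 64-bit
             * counter byte-swapped by swap_data_eventfd(). */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
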
diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h
new file mode 100644
index 0000000000..a3fcdaabc7
--- /dev/null
+++ b/linux-user/fd-trans.h
@@ -0,0 +1,97 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef FD_TRANS_H
+#define FD_TRANS_H
+
+typedef abi_long (*TargetFdDataFunc)(void *, size_t);
+typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
+typedef struct TargetFdTrans {
+    TargetFdDataFunc host_to_target_data;
+    TargetFdDataFunc target_to_host_data;
+    TargetFdAddrFunc target_to_host_addr;
+} TargetFdTrans;
+
+extern TargetFdTrans **target_fd_trans;
+
+extern unsigned int target_fd_max;
+
+static inline TargetFdDataFunc fd_trans_target_to_host_data(int fd)
+{
+    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+        return target_fd_trans[fd]->target_to_host_data;
+    }
+    return NULL;
+}
+
+static inline TargetFdDataFunc fd_trans_host_to_target_data(int fd)
+{
+    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+        return target_fd_trans[fd]->host_to_target_data;
+    }
+    return NULL;
+}
+
+static inline TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
+{
+    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+        return target_fd_trans[fd]->target_to_host_addr;
+    }
+    return NULL;
+}
+
+static inline void fd_trans_register(int fd, TargetFdTrans *trans)
+{
+    unsigned int oldmax;
+
+    if (fd >= target_fd_max) {
+        oldmax = target_fd_max;
+        target_fd_max = ((fd >> 6) + 1) << 6; /* by slices of 64 entries */
+        target_fd_trans = g_renew(TargetFdTrans *,
+                                  target_fd_trans, target_fd_max);
+        memset((void *)(target_fd_trans + oldmax), 0,
+               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
+    }
+    target_fd_trans[fd] = trans;
+}
+
+static inline void fd_trans_unregister(int fd)
+{
+    if (fd >= 0 && fd < target_fd_max) {
+        target_fd_trans[fd] = NULL;
+    }
+}
+
+static inline void fd_trans_dup(int oldfd, int newfd)
+{
+    fd_trans_unregister(newfd);
+    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
+        fd_trans_register(newfd, target_fd_trans[oldfd]);
+    }
+}
+
+extern TargetFdTrans target_packet_trans;
+#ifdef CONFIG_RTNETLINK
+extern TargetFdTrans target_netlink_route_trans;
+#endif
+extern TargetFdTrans target_netlink_audit_trans;
+extern TargetFdTrans target_signalfd_trans;
+extern TargetFdTrans target_eventfd_trans;
+#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
+    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
+     defined(__NR_inotify_init1))
+extern TargetFdTrans target_inotify_trans;
+#endif
+#endif
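
For orientation, the inline lookup helpers above are what the I/O paths in
syscall.c consult after a host call succeeds.  A minimal sketch of the
consuming side follows; it is illustrative, and assumes fd, buf and count come
from the surrounding syscall emulation (the lock_user()/unlock_user() handling
is not reproduced):

    /* Illustrative sketch of the consuming side -- not part of the patch. */
    ssize_t nread = read(fd, buf, count);               /* host-side read */
    if (nread > 0) {
        TargetFdDataFunc trans = fd_trans_host_to_target_data(fd);
        if (trans) {
            /* e.g. swap_data_eventfd() or host_to_target_data_signalfd() */
            nread = trans(buf, nread);
        }
    }
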
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 850b72a0c7..ae3c0dfef7 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -102,15 +102,11 @@
 #include <linux/blkpg.h>
 #include <netpacket/packet.h>
 #include <linux/netlink.h>
-#ifdef CONFIG_RTNETLINK
-#include <linux/rtnetlink.h>
-#include <linux/if_bridge.h>
-#endif
-#include <linux/audit.h>
 #include "linux_loop.h"
 #include "uname.h"
 
 #include "qemu.h"
+#include "fd-trans.h"
 
 #ifndef CLONE_IO
 #define CLONE_IO                0x80000000      /* Clone io context */
@@ -360,298 +356,6 @@ static bitmask_transtbl fcntl_flags_tbl[] = {
   { 0, 0, 0, 0 }
 };
 
-enum {
-    QEMU_IFLA_BR_UNSPEC,
-    QEMU_IFLA_BR_FORWARD_DELAY,
-    QEMU_IFLA_BR_HELLO_TIME,
-    QEMU_IFLA_BR_MAX_AGE,
-    QEMU_IFLA_BR_AGEING_TIME,
-    QEMU_IFLA_BR_STP_STATE,
-    QEMU_IFLA_BR_PRIORITY,
-    QEMU_IFLA_BR_VLAN_FILTERING,
-    QEMU_IFLA_BR_VLAN_PROTOCOL,
-    QEMU_IFLA_BR_GROUP_FWD_MASK,
-    QEMU_IFLA_BR_ROOT_ID,
-    QEMU_IFLA_BR_BRIDGE_ID,
-    QEMU_IFLA_BR_ROOT_PORT,
-    QEMU_IFLA_BR_ROOT_PATH_COST,
-    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
-    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
-    QEMU_IFLA_BR_HELLO_TIMER,
-    QEMU_IFLA_BR_TCN_TIMER,
-    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
-    QEMU_IFLA_BR_GC_TIMER,
-    QEMU_IFLA_BR_GROUP_ADDR,
-    QEMU_IFLA_BR_FDB_FLUSH,
-    QEMU_IFLA_BR_MCAST_ROUTER,
-    QEMU_IFLA_BR_MCAST_SNOOPING,
-    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
-    QEMU_IFLA_BR_MCAST_QUERIER,
-    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
-    QEMU_IFLA_BR_MCAST_HASH_MAX,
-    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
-    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
-    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
-    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
-    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
-    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
-    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
-    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
-    QEMU_IFLA_BR_NF_CALL_IPTABLES,
-    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
-    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
-    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
-    QEMU_IFLA_BR_PAD,
-    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
-    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
-    QEMU_IFLA_BR_MCAST_IGMP_VERSION,
-    QEMU_IFLA_BR_MCAST_MLD_VERSION,
-    QEMU___IFLA_BR_MAX,
-};
-
-enum {
-    QEMU_IFLA_UNSPEC,
-    QEMU_IFLA_ADDRESS,
-    QEMU_IFLA_BROADCAST,
-    QEMU_IFLA_IFNAME,
-    QEMU_IFLA_MTU,
-    QEMU_IFLA_LINK,
-    QEMU_IFLA_QDISC,
-    QEMU_IFLA_STATS,
-    QEMU_IFLA_COST,
-    QEMU_IFLA_PRIORITY,
-    QEMU_IFLA_MASTER,
-    QEMU_IFLA_WIRELESS,
-    QEMU_IFLA_PROTINFO,
-    QEMU_IFLA_TXQLEN,
-    QEMU_IFLA_MAP,
-    QEMU_IFLA_WEIGHT,
-    QEMU_IFLA_OPERSTATE,
-    QEMU_IFLA_LINKMODE,
-    QEMU_IFLA_LINKINFO,
-    QEMU_IFLA_NET_NS_PID,
-    QEMU_IFLA_IFALIAS,
-    QEMU_IFLA_NUM_VF,
-    QEMU_IFLA_VFINFO_LIST,
-    QEMU_IFLA_STATS64,
-    QEMU_IFLA_VF_PORTS,
-    QEMU_IFLA_PORT_SELF,
-    QEMU_IFLA_AF_SPEC,
-    QEMU_IFLA_GROUP,
-    QEMU_IFLA_NET_NS_FD,
-    QEMU_IFLA_EXT_MASK,
-    QEMU_IFLA_PROMISCUITY,
-    QEMU_IFLA_NUM_TX_QUEUES,
-    QEMU_IFLA_NUM_RX_QUEUES,
-    QEMU_IFLA_CARRIER,
-    QEMU_IFLA_PHYS_PORT_ID,
-    QEMU_IFLA_CARRIER_CHANGES,
-    QEMU_IFLA_PHYS_SWITCH_ID,
-    QEMU_IFLA_LINK_NETNSID,
-    QEMU_IFLA_PHYS_PORT_NAME,
-    QEMU_IFLA_PROTO_DOWN,
-    QEMU_IFLA_GSO_MAX_SEGS,
-    QEMU_IFLA_GSO_MAX_SIZE,
-    QEMU_IFLA_PAD,
-    QEMU_IFLA_XDP,
-    QEMU_IFLA_EVENT,
-    QEMU_IFLA_NEW_NETNSID,
-    QEMU_IFLA_IF_NETNSID,
-    QEMU_IFLA_CARRIER_UP_COUNT,
-    QEMU_IFLA_CARRIER_DOWN_COUNT,
-    QEMU_IFLA_NEW_IFINDEX,
-    QEMU___IFLA_MAX
-};
-
-enum {
-    QEMU_IFLA_BRPORT_UNSPEC,
-    QEMU_IFLA_BRPORT_STATE,
-    QEMU_IFLA_BRPORT_PRIORITY,
-    QEMU_IFLA_BRPORT_COST,
-    QEMU_IFLA_BRPORT_MODE,
-    QEMU_IFLA_BRPORT_GUARD,
-    QEMU_IFLA_BRPORT_PROTECT,
-    QEMU_IFLA_BRPORT_FAST_LEAVE,
-    QEMU_IFLA_BRPORT_LEARNING,
-    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
-    QEMU_IFLA_BRPORT_PROXYARP,
-    QEMU_IFLA_BRPORT_LEARNING_SYNC,
-    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
-    QEMU_IFLA_BRPORT_ROOT_ID,
-    QEMU_IFLA_BRPORT_BRIDGE_ID,
-    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
-    QEMU_IFLA_BRPORT_DESIGNATED_COST,
-    QEMU_IFLA_BRPORT_ID,
-    QEMU_IFLA_BRPORT_NO,
-    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
-    QEMU_IFLA_BRPORT_CONFIG_PENDING,
-    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
-    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
-    QEMU_IFLA_BRPORT_HOLD_TIMER,
-    QEMU_IFLA_BRPORT_FLUSH,
-    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
-    QEMU_IFLA_BRPORT_PAD,
-    QEMU_IFLA_BRPORT_MCAST_FLOOD,
-    QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
-    QEMU_IFLA_BRPORT_VLAN_TUNNEL,
-    QEMU_IFLA_BRPORT_BCAST_FLOOD,
-    QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
-    QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
-    QEMU___IFLA_BRPORT_MAX
-};
-
-enum {
-    QEMU_IFLA_TUN_UNSPEC,
-    QEMU_IFLA_TUN_OWNER,
-    QEMU_IFLA_TUN_GROUP,
-    QEMU_IFLA_TUN_TYPE,
-    QEMU_IFLA_TUN_PI,
-    QEMU_IFLA_TUN_VNET_HDR,
-    QEMU_IFLA_TUN_PERSIST,
-    QEMU_IFLA_TUN_MULTI_QUEUE,
-    QEMU_IFLA_TUN_NUM_QUEUES,
-    QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
-    QEMU___IFLA_TUN_MAX,
-};
-
-enum {
-    QEMU_IFLA_INFO_UNSPEC,
-    QEMU_IFLA_INFO_KIND,
-    QEMU_IFLA_INFO_DATA,
-    QEMU_IFLA_INFO_XSTATS,
-    QEMU_IFLA_INFO_SLAVE_KIND,
-    QEMU_IFLA_INFO_SLAVE_DATA,
-    QEMU___IFLA_INFO_MAX,
-};
-
-enum {
-    QEMU_IFLA_INET_UNSPEC,
-    QEMU_IFLA_INET_CONF,
-    QEMU___IFLA_INET_MAX,
-};
-
-enum {
-    QEMU_IFLA_INET6_UNSPEC,
-    QEMU_IFLA_INET6_FLAGS,
-    QEMU_IFLA_INET6_CONF,
-    QEMU_IFLA_INET6_STATS,
-    QEMU_IFLA_INET6_MCAST,
-    QEMU_IFLA_INET6_CACHEINFO,
-    QEMU_IFLA_INET6_ICMP6STATS,
-    QEMU_IFLA_INET6_TOKEN,
-    QEMU_IFLA_INET6_ADDR_GEN_MODE,
-    QEMU___IFLA_INET6_MAX
-};
-
-enum {
-    QEMU_IFLA_XDP_UNSPEC,
-    QEMU_IFLA_XDP_FD,
-    QEMU_IFLA_XDP_ATTACHED,
-    QEMU_IFLA_XDP_FLAGS,
-    QEMU_IFLA_XDP_PROG_ID,
-    QEMU___IFLA_XDP_MAX,
-};
-
-enum {
-    QEMU_RTA_UNSPEC,
-    QEMU_RTA_DST,
-    QEMU_RTA_SRC,
-    QEMU_RTA_IIF,
-    QEMU_RTA_OIF,
-    QEMU_RTA_GATEWAY,
-    QEMU_RTA_PRIORITY,
-    QEMU_RTA_PREFSRC,
-    QEMU_RTA_METRICS,
-    QEMU_RTA_MULTIPATH,
-    QEMU_RTA_PROTOINFO, /* no longer used */
-    QEMU_RTA_FLOW,
-    QEMU_RTA_CACHEINFO,
-    QEMU_RTA_SESSION, /* no longer used */
-    QEMU_RTA_MP_ALGO, /* no longer used */
-    QEMU_RTA_TABLE,
-    QEMU_RTA_MARK,
-    QEMU_RTA_MFC_STATS,
-    QEMU_RTA_VIA,
-    QEMU_RTA_NEWDST,
-    QEMU_RTA_PREF,
-    QEMU_RTA_ENCAP_TYPE,
-    QEMU_RTA_ENCAP,
-    QEMU_RTA_EXPIRES,
-    QEMU_RTA_PAD,
-    QEMU_RTA_UID,
-    QEMU_RTA_TTL_PROPAGATE,
-    QEMU_RTA_IP_PROTO,
-    QEMU_RTA_SPORT,
-    QEMU_RTA_DPORT,
-    QEMU___RTA_MAX
-};
-
-typedef abi_long (*TargetFdDataFunc)(void *, size_t);
-typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
-typedef struct TargetFdTrans {
-    TargetFdDataFunc host_to_target_data;
-    TargetFdDataFunc target_to_host_data;
-    TargetFdAddrFunc target_to_host_addr;
-} TargetFdTrans;
-
-static TargetFdTrans **target_fd_trans;
-
-static unsigned int target_fd_max;
-
-static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
-{
-    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
-        return target_fd_trans[fd]->target_to_host_data;
-    }
-    return NULL;
-}
-
-static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
-{
-    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
-        return target_fd_trans[fd]->host_to_target_data;
-    }
-    return NULL;
-}
-
-static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
-{
-    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
-        return target_fd_trans[fd]->target_to_host_addr;
-    }
-    return NULL;
-}
-
-static void fd_trans_register(int fd, TargetFdTrans *trans)
-{
-    unsigned int oldmax;
-
-    if (fd >= target_fd_max) {
-        oldmax = target_fd_max;
-        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
-        target_fd_trans = g_renew(TargetFdTrans *,
-                                  target_fd_trans, target_fd_max);
-        memset((void *)(target_fd_trans + oldmax), 0,
-               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
-    }
-    target_fd_trans[fd] = trans;
-}
-
-static void fd_trans_unregister(int fd)
-{
-    if (fd >= 0 && fd < target_fd_max) {
-        target_fd_trans[fd] = NULL;
-    }
-}
-
-static void fd_trans_dup(int oldfd, int newfd)
-{
-    fd_trans_unregister(newfd);
-    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
-        fd_trans_register(newfd, target_fd_trans[oldfd]);
-    }
-}
-
 static int sys_getcwd1(char *buf, size_t size)
 {
   if (getcwd(buf, size) == NULL) {
@@ -2076,968 +1780,6 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
     return 0;
 }
 
-static void tswap_nlmsghdr(struct nlmsghdr *nlh)
-{
-    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
-    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
-    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
-    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
-    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
-}
-
-static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
-                                              size_t len,
-                                              abi_long (*host_to_target_nlmsg)
-                                                       (struct nlmsghdr *))
-{
-    uint32_t nlmsg_len;
-    abi_long ret;
-
-    while (len > sizeof(struct nlmsghdr)) {
-
-        nlmsg_len = nlh->nlmsg_len;
-        if (nlmsg_len < sizeof(struct nlmsghdr) ||
-            nlmsg_len > len) {
-            break;
-        }
-
-        switch (nlh->nlmsg_type) {
-        case NLMSG_DONE:
-            tswap_nlmsghdr(nlh);
-            return 0;
-        case NLMSG_NOOP:
-            break;
-        case NLMSG_ERROR:
-        {
-            struct nlmsgerr *e = NLMSG_DATA(nlh);
-            e->error = tswap32(e->error);
-            tswap_nlmsghdr(&e->msg);
-            tswap_nlmsghdr(nlh);
-            return 0;
-        }
-        default:
-            ret = host_to_target_nlmsg(nlh);
-            if (ret < 0) {
-                tswap_nlmsghdr(nlh);
-                return ret;
-            }
-            break;
-        }
-        tswap_nlmsghdr(nlh);
-        len -= NLMSG_ALIGN(nlmsg_len);
-        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
-    }
-    return 0;
-}
-
-static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
-                                              size_t len,
-                                              abi_long (*target_to_host_nlmsg)
-                                                       (struct nlmsghdr *))
-{
-    int ret;
-
-    while (len > sizeof(struct nlmsghdr)) {
-        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
-            tswap32(nlh->nlmsg_len) > len) {
-            break;
-        }
-        tswap_nlmsghdr(nlh);
-        switch (nlh->nlmsg_type) {
-        case NLMSG_DONE:
-            return 0;
-        case NLMSG_NOOP:
-            break;
-        case NLMSG_ERROR:
-        {
-            struct nlmsgerr *e = NLMSG_DATA(nlh);
-            e->error = tswap32(e->error);
-            tswap_nlmsghdr(&e->msg);
-            return 0;
-        }
-        default:
-            ret = target_to_host_nlmsg(nlh);
-            if (ret < 0) {
-                return ret;
-            }
-        }
-        len -= NLMSG_ALIGN(nlh->nlmsg_len);
-        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
-    }
-    return 0;
-}
-
-#ifdef CONFIG_RTNETLINK
-static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
-                                               size_t len, void *context,
-                                               abi_long (*host_to_target_nlattr)
-                                                        (struct nlattr *,
-                                                         void *context))
-{
-    unsigned short nla_len;
-    abi_long ret;
-
-    while (len > sizeof(struct nlattr)) {
-        nla_len = nlattr->nla_len;
-        if (nla_len < sizeof(struct nlattr) ||
-            nla_len > len) {
-            break;
-        }
-        ret = host_to_target_nlattr(nlattr, context);
-        nlattr->nla_len = tswap16(nlattr->nla_len);
-        nlattr->nla_type = tswap16(nlattr->nla_type);
-        if (ret < 0) {
-            return ret;
-        }
-        len -= NLA_ALIGN(nla_len);
-        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
-    }
-    return 0;
-}
-
-static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
-                                               size_t len,
-                                               abi_long (*host_to_target_rtattr)
-                                                        (struct rtattr *))
-{
-    unsigned short rta_len;
-    abi_long ret;
-
-    while (len > sizeof(struct rtattr)) {
-        rta_len = rtattr->rta_len;
-        if (rta_len < sizeof(struct rtattr) ||
-            rta_len > len) {
-            break;
-        }
-        ret = host_to_target_rtattr(rtattr);
-        rtattr->rta_len = tswap16(rtattr->rta_len);
-        rtattr->rta_type = tswap16(rtattr->rta_type);
-        if (ret < 0) {
-            return ret;
-        }
-        len -= RTA_ALIGN(rta_len);
-        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
-    }
-    return 0;
-}
-
-#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
-
-static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
-                                                  void *context)
-{
-    uint16_t *u16;
-    uint32_t *u32;
-    uint64_t *u64;
-
-    switch (nlattr->nla_type) {
-    /* no data */
-    case QEMU_IFLA_BR_FDB_FLUSH:
-        break;
-    /* binary */
-    case QEMU_IFLA_BR_GROUP_ADDR:
-        break;
-    /* uint8_t */
-    case QEMU_IFLA_BR_VLAN_FILTERING:
-    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
-    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
-    case QEMU_IFLA_BR_MCAST_ROUTER:
-    case QEMU_IFLA_BR_MCAST_SNOOPING:
-    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
-    case QEMU_IFLA_BR_MCAST_QUERIER:
-    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
-    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
-    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
-    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
-    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
-    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
-    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
-        break;
-    /* uint16_t */
-    case QEMU_IFLA_BR_PRIORITY:
-    case QEMU_IFLA_BR_VLAN_PROTOCOL:
-    case QEMU_IFLA_BR_GROUP_FWD_MASK:
-    case QEMU_IFLA_BR_ROOT_PORT:
-    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
-        u16 = NLA_DATA(nlattr);
-        *u16 = tswap16(*u16);
-        break;
-    /* uint32_t */
-    case QEMU_IFLA_BR_FORWARD_DELAY:
-    case QEMU_IFLA_BR_HELLO_TIME:
-    case QEMU_IFLA_BR_MAX_AGE:
-    case QEMU_IFLA_BR_AGEING_TIME:
-    case QEMU_IFLA_BR_STP_STATE:
-    case QEMU_IFLA_BR_ROOT_PATH_COST:
-    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
-    case QEMU_IFLA_BR_MCAST_HASH_MAX:
-    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
-    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
-        u32 = NLA_DATA(nlattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* uint64_t */
-    case QEMU_IFLA_BR_HELLO_TIMER:
-    case QEMU_IFLA_BR_TCN_TIMER:
-    case QEMU_IFLA_BR_GC_TIMER:
-    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
-    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
-    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
-    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
-    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
-    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
-    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
-        u64 = NLA_DATA(nlattr);
-        *u64 = tswap64(*u64);
-        break;
-    /* ifla_bridge_id: uin8_t[] */
-    case QEMU_IFLA_BR_ROOT_ID:
-    case QEMU_IFLA_BR_BRIDGE_ID:
-        break;
-    default:
-        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
-                                                        void *context)
-{
-    uint16_t *u16;
-    uint32_t *u32;
-    uint64_t *u64;
-
-    switch (nlattr->nla_type) {
-    /* uint8_t */
-    case QEMU_IFLA_BRPORT_STATE:
-    case QEMU_IFLA_BRPORT_MODE:
-    case QEMU_IFLA_BRPORT_GUARD:
-    case QEMU_IFLA_BRPORT_PROTECT:
-    case QEMU_IFLA_BRPORT_FAST_LEAVE:
-    case QEMU_IFLA_BRPORT_LEARNING:
-    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
-    case QEMU_IFLA_BRPORT_PROXYARP:
-    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
-    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
-    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
-    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
-    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
-    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
-    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
-    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
-    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
-    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
-        break;
-    /* uint16_t */
-    case QEMU_IFLA_BRPORT_PRIORITY:
-    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
-    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
-    case QEMU_IFLA_BRPORT_ID:
-    case QEMU_IFLA_BRPORT_NO:
-    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
-        u16 = NLA_DATA(nlattr);
-        *u16 = tswap16(*u16);
-        break;
-    /* uin32_t */
-    case QEMU_IFLA_BRPORT_COST:
-        u32 = NLA_DATA(nlattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* uint64_t */
-    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
-    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
-    case QEMU_IFLA_BRPORT_HOLD_TIMER:
-        u64 = NLA_DATA(nlattr);
-        *u64 = tswap64(*u64);
-        break;
-    /* ifla_bridge_id: uint8_t[] */
-    case QEMU_IFLA_BRPORT_ROOT_ID:
-    case QEMU_IFLA_BRPORT_BRIDGE_ID:
-        break;
-    default:
-        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
-                                                  void *context)
-{
-    uint32_t *u32;
-
-    switch (nlattr->nla_type) {
-    /* uint8_t */
-    case QEMU_IFLA_TUN_TYPE:
-    case QEMU_IFLA_TUN_PI:
-    case QEMU_IFLA_TUN_VNET_HDR:
-    case QEMU_IFLA_TUN_PERSIST:
-    case QEMU_IFLA_TUN_MULTI_QUEUE:
-        break;
-    /* uint32_t */
-    case QEMU_IFLA_TUN_NUM_QUEUES:
-    case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
-    case QEMU_IFLA_TUN_OWNER:
-    case QEMU_IFLA_TUN_GROUP:
-        u32 = NLA_DATA(nlattr);
-        *u32 = tswap32(*u32);
-        break;
-    default:
-        gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
-        break;
-    }
-    return 0;
-}
-
-struct linkinfo_context {
-    int len;
-    char *name;
-    int slave_len;
-    char *slave_name;
-};
-
-static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
-                                                    void *context)
-{
-    struct linkinfo_context *li_context = context;
-
-    switch (nlattr->nla_type) {
-    /* string */
-    case QEMU_IFLA_INFO_KIND:
-        li_context->name = NLA_DATA(nlattr);
-        li_context->len = nlattr->nla_len - NLA_HDRLEN;
-        break;
-    case QEMU_IFLA_INFO_SLAVE_KIND:
-        li_context->slave_name = NLA_DATA(nlattr);
-        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
-        break;
-    /* stats */
-    case QEMU_IFLA_INFO_XSTATS:
-        /* FIXME: only used by CAN */
-        break;
-    /* nested */
-    case QEMU_IFLA_INFO_DATA:
-        if (strncmp(li_context->name, "bridge",
-                    li_context->len) == 0) {
-            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
-                                                  nlattr->nla_len,
-                                                  NULL,
-                                             host_to_target_data_bridge_nlattr);
-        } else if (strncmp(li_context->name, "tun",
-                    li_context->len) == 0) {
-            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
-                                                  nlattr->nla_len,
-                                                  NULL,
-                                                host_to_target_data_tun_nlattr);
-        } else {
-            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
-        }
-        break;
-    case QEMU_IFLA_INFO_SLAVE_DATA:
-        if (strncmp(li_context->slave_name, "bridge",
-                    li_context->slave_len) == 0) {
-            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
-                                                  nlattr->nla_len,
-                                                  NULL,
-                                       host_to_target_slave_data_bridge_nlattr);
-        } else {
-            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
-                     li_context->slave_name);
-        }
-        break;
-    default:
-        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
-        break;
-    }
-
-    return 0;
-}
-
-static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
-                                                void *context)
-{
-    uint32_t *u32;
-    int i;
-
-    switch (nlattr->nla_type) {
-    case QEMU_IFLA_INET_CONF:
-        u32 = NLA_DATA(nlattr);
-        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
-             i++) {
-            u32[i] = tswap32(u32[i]);
-        }
-        break;
-    default:
-        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
-                                                void *context)
-{
-    uint32_t *u32;
-    uint64_t *u64;
-    struct ifla_cacheinfo *ci;
-    int i;
-
-    switch (nlattr->nla_type) {
-    /* binaries */
-    case QEMU_IFLA_INET6_TOKEN:
-        break;
-    /* uint8_t */
-    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
-        break;
-    /* uint32_t */
-    case QEMU_IFLA_INET6_FLAGS:
-        u32 = NLA_DATA(nlattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* uint32_t[] */
-    case QEMU_IFLA_INET6_CONF:
-        u32 = NLA_DATA(nlattr);
-        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
-             i++) {
-            u32[i] = tswap32(u32[i]);
-        }
-        break;
-    /* ifla_cacheinfo */
-    case QEMU_IFLA_INET6_CACHEINFO:
-        ci = NLA_DATA(nlattr);
-        ci->max_reasm_len = tswap32(ci->max_reasm_len);
-        ci->tstamp = tswap32(ci->tstamp);
-        ci->reachable_time = tswap32(ci->reachable_time);
-        ci->retrans_time = tswap32(ci->retrans_time);
-        break;
-    /* uint64_t[] */
-    case QEMU_IFLA_INET6_STATS:
-    case QEMU_IFLA_INET6_ICMP6STATS:
-        u64 = NLA_DATA(nlattr);
-        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
-             i++) {
-            u64[i] = tswap64(u64[i]);
-        }
-        break;
-    default:
-        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
-                                                    void *context)
-{
-    switch (nlattr->nla_type) {
-    case AF_INET:
-        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
-                                              NULL,
-                                             host_to_target_data_inet_nlattr);
-    case AF_INET6:
-        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
-                                              NULL,
-                                             host_to_target_data_inet6_nlattr);
-    default:
-        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
-                                               void *context)
-{
-    uint32_t *u32;
-
-    switch (nlattr->nla_type) {
-    /* uint8_t */
-    case QEMU_IFLA_XDP_ATTACHED:
-        break;
-    /* uint32_t */
-    case QEMU_IFLA_XDP_PROG_ID:
-        u32 = NLA_DATA(nlattr);
-        *u32 = tswap32(*u32);
-        break;
-    default:
-        gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
-{
-    uint32_t *u32;
-    struct rtnl_link_stats *st;
-    struct rtnl_link_stats64 *st64;
-    struct rtnl_link_ifmap *map;
-    struct linkinfo_context li_context;
-
-    switch (rtattr->rta_type) {
-    /* binary stream */
-    case QEMU_IFLA_ADDRESS:
-    case QEMU_IFLA_BROADCAST:
-    /* string */
-    case QEMU_IFLA_IFNAME:
-    case QEMU_IFLA_QDISC:
-        break;
-    /* uin8_t */
-    case QEMU_IFLA_OPERSTATE:
-    case QEMU_IFLA_LINKMODE:
-    case QEMU_IFLA_CARRIER:
-    case QEMU_IFLA_PROTO_DOWN:
-        break;
-    /* uint32_t */
-    case QEMU_IFLA_MTU:
-    case QEMU_IFLA_LINK:
-    case QEMU_IFLA_WEIGHT:
-    case QEMU_IFLA_TXQLEN:
-    case QEMU_IFLA_CARRIER_CHANGES:
-    case QEMU_IFLA_NUM_RX_QUEUES:
-    case QEMU_IFLA_NUM_TX_QUEUES:
-    case QEMU_IFLA_PROMISCUITY:
-    case QEMU_IFLA_EXT_MASK:
-    case QEMU_IFLA_LINK_NETNSID:
-    case QEMU_IFLA_GROUP:
-    case QEMU_IFLA_MASTER:
-    case QEMU_IFLA_NUM_VF:
-    case QEMU_IFLA_GSO_MAX_SEGS:
-    case QEMU_IFLA_GSO_MAX_SIZE:
-    case QEMU_IFLA_CARRIER_UP_COUNT:
-    case QEMU_IFLA_CARRIER_DOWN_COUNT:
-        u32 = RTA_DATA(rtattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* struct rtnl_link_stats */
-    case QEMU_IFLA_STATS:
-        st = RTA_DATA(rtattr);
-        st->rx_packets = tswap32(st->rx_packets);
-        st->tx_packets = tswap32(st->tx_packets);
-        st->rx_bytes = tswap32(st->rx_bytes);
-        st->tx_bytes = tswap32(st->tx_bytes);
-        st->rx_errors = tswap32(st->rx_errors);
-        st->tx_errors = tswap32(st->tx_errors);
-        st->rx_dropped = tswap32(st->rx_dropped);
-        st->tx_dropped = tswap32(st->tx_dropped);
-        st->multicast = tswap32(st->multicast);
-        st->collisions = tswap32(st->collisions);
-
-        /* detailed rx_errors: */
-        st->rx_length_errors = tswap32(st->rx_length_errors);
-        st->rx_over_errors = tswap32(st->rx_over_errors);
-        st->rx_crc_errors = tswap32(st->rx_crc_errors);
-        st->rx_frame_errors = tswap32(st->rx_frame_errors);
-        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
-        st->rx_missed_errors = tswap32(st->rx_missed_errors);
-
-        /* detailed tx_errors */
-        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
-        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
-        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
-        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
-        st->tx_window_errors = tswap32(st->tx_window_errors);
-
-        /* for cslip etc */
-        st->rx_compressed = tswap32(st->rx_compressed);
-        st->tx_compressed = tswap32(st->tx_compressed);
-        break;
-    /* struct rtnl_link_stats64 */
-    case QEMU_IFLA_STATS64:
-        st64 = RTA_DATA(rtattr);
-        st64->rx_packets = tswap64(st64->rx_packets);
-        st64->tx_packets = tswap64(st64->tx_packets);
-        st64->rx_bytes = tswap64(st64->rx_bytes);
-        st64->tx_bytes = tswap64(st64->tx_bytes);
-        st64->rx_errors = tswap64(st64->rx_errors);
-        st64->tx_errors = tswap64(st64->tx_errors);
-        st64->rx_dropped = tswap64(st64->rx_dropped);
-        st64->tx_dropped = tswap64(st64->tx_dropped);
-        st64->multicast = tswap64(st64->multicast);
-        st64->collisions = tswap64(st64->collisions);
-
-        /* detailed rx_errors: */
-        st64->rx_length_errors = tswap64(st64->rx_length_errors);
-        st64->rx_over_errors = tswap64(st64->rx_over_errors);
-        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
-        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
-        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
-        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
-
-        /* detailed tx_errors */
-        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
-        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
-        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
-        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
-        st64->tx_window_errors = tswap64(st64->tx_window_errors);
-
-        /* for cslip etc */
-        st64->rx_compressed = tswap64(st64->rx_compressed);
-        st64->tx_compressed = tswap64(st64->tx_compressed);
-        break;
-    /* struct rtnl_link_ifmap */
-    case QEMU_IFLA_MAP:
-        map = RTA_DATA(rtattr);
-        map->mem_start = tswap64(map->mem_start);
-        map->mem_end = tswap64(map->mem_end);
-        map->base_addr = tswap64(map->base_addr);
-        map->irq = tswap16(map->irq);
-        break;
-    /* nested */
-    case QEMU_IFLA_LINKINFO:
-        memset(&li_context, 0, sizeof(li_context));
-        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
-                                              &li_context,
-                                           host_to_target_data_linkinfo_nlattr);
-    case QEMU_IFLA_AF_SPEC:
-        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
-                                              NULL,
-                                             host_to_target_data_spec_nlattr);
-    case QEMU_IFLA_XDP:
-        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
-                                              NULL,
-                                                host_to_target_data_xdp_nlattr);
-    default:
-        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
-{
-    uint32_t *u32;
-    struct ifa_cacheinfo *ci;
-
-    switch (rtattr->rta_type) {
-    /* binary: depends on family type */
-    case IFA_ADDRESS:
-    case IFA_LOCAL:
-        break;
-    /* string */
-    case IFA_LABEL:
-        break;
-    /* u32 */
-    case IFA_FLAGS:
-    case IFA_BROADCAST:
-        u32 = RTA_DATA(rtattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* struct ifa_cacheinfo */
-    case IFA_CACHEINFO:
-        ci = RTA_DATA(rtattr);
-        ci->ifa_prefered = tswap32(ci->ifa_prefered);
-        ci->ifa_valid = tswap32(ci->ifa_valid);
-        ci->cstamp = tswap32(ci->cstamp);
-        ci->tstamp = tswap32(ci->tstamp);
-        break;
-    default:
-        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
-{
-    uint32_t *u32;
-    struct rta_cacheinfo *ci;
-
-    switch (rtattr->rta_type) {
-    /* binary: depends on family type */
-    case QEMU_RTA_GATEWAY:
-    case QEMU_RTA_DST:
-    case QEMU_RTA_PREFSRC:
-        break;
-    /* u8 */
-    case QEMU_RTA_PREF:
-        break;
-    /* u32 */
-    case QEMU_RTA_PRIORITY:
-    case QEMU_RTA_TABLE:
-    case QEMU_RTA_OIF:
-        u32 = RTA_DATA(rtattr);
-        *u32 = tswap32(*u32);
-        break;
-    /* struct rta_cacheinfo */
-    case QEMU_RTA_CACHEINFO:
-        ci = RTA_DATA(rtattr);
-        ci->rta_clntref = tswap32(ci->rta_clntref);
-        ci->rta_lastuse = tswap32(ci->rta_lastuse);
-        ci->rta_expires = tswap32(ci->rta_expires);
-        ci->rta_error = tswap32(ci->rta_error);
-        ci->rta_used = tswap32(ci->rta_used);
-#if defined(RTNETLINK_HAVE_PEERINFO)
-        ci->rta_id = tswap32(ci->rta_id);
-        ci->rta_ts = tswap32(ci->rta_ts);
-        ci->rta_tsage = tswap32(ci->rta_tsage);
-#endif
-        break;
-    default:
-        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
-                                         uint32_t rtattr_len)
-{
-    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
-                                          host_to_target_data_link_rtattr);
-}
-
-static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
-                                         uint32_t rtattr_len)
-{
-    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
-                                          host_to_target_data_addr_rtattr);
-}
-
-static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
-                                         uint32_t rtattr_len)
-{
-    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
-                                          host_to_target_data_route_rtattr);
-}
-
-static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
-{
-    uint32_t nlmsg_len;
-    struct ifinfomsg *ifi;
-    struct ifaddrmsg *ifa;
-    struct rtmsg *rtm;
-
-    nlmsg_len = nlh->nlmsg_len;
-    switch (nlh->nlmsg_type) {
-    case RTM_NEWLINK:
-    case RTM_DELLINK:
-    case RTM_GETLINK:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
-            ifi = NLMSG_DATA(nlh);
-            ifi->ifi_type = tswap16(ifi->ifi_type);
-            ifi->ifi_index = tswap32(ifi->ifi_index);
-            ifi->ifi_flags = tswap32(ifi->ifi_flags);
-            ifi->ifi_change = tswap32(ifi->ifi_change);
-            host_to_target_link_rtattr(IFLA_RTA(ifi),
-                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
-        }
-        break;
-    case RTM_NEWADDR:
-    case RTM_DELADDR:
-    case RTM_GETADDR:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
-            ifa = NLMSG_DATA(nlh);
-            ifa->ifa_index = tswap32(ifa->ifa_index);
-            host_to_target_addr_rtattr(IFA_RTA(ifa),
-                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
-        }
-        break;
-    case RTM_NEWROUTE:
-    case RTM_DELROUTE:
-    case RTM_GETROUTE:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
-            rtm = NLMSG_DATA(nlh);
-            rtm->rtm_flags = tswap32(rtm->rtm_flags);
-            host_to_target_route_rtattr(RTM_RTA(rtm),
-                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
-        }
-        break;
-    default:
-        return -TARGET_EINVAL;
-    }
-    return 0;
-}
-
-static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
-                                                  size_t len)
-{
-    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
-}
-
-static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
-                                               size_t len,
-                                               abi_long (*target_to_host_rtattr)
-                                                        (struct rtattr *))
-{
-    abi_long ret;
-
-    while (len >= sizeof(struct rtattr)) {
-        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
-            tswap16(rtattr->rta_len) > len) {
-            break;
-        }
-        rtattr->rta_len = tswap16(rtattr->rta_len);
-        rtattr->rta_type = tswap16(rtattr->rta_type);
-        ret = target_to_host_rtattr(rtattr);
-        if (ret < 0) {
-            return ret;
-        }
-        len -= RTA_ALIGN(rtattr->rta_len);
-        rtattr = (struct rtattr *)(((char *)rtattr) +
-                 RTA_ALIGN(rtattr->rta_len));
-    }
-    return 0;
-}
-
-static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
-{
-    switch (rtattr->rta_type) {
-    default:
-        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
-{
-    switch (rtattr->rta_type) {
-    /* binary: depends on family type */
-    case IFA_LOCAL:
-    case IFA_ADDRESS:
-        break;
-    default:
-        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
-{
-    uint32_t *u32;
-    switch (rtattr->rta_type) {
-    /* binary: depends on family type */
-    case QEMU_RTA_DST:
-    case QEMU_RTA_SRC:
-    case QEMU_RTA_GATEWAY:
-        break;
-    /* u32 */
-    case QEMU_RTA_PRIORITY:
-    case QEMU_RTA_OIF:
-        u32 = RTA_DATA(rtattr);
-        *u32 = tswap32(*u32);
-        break;
-    default:
-        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
-        break;
-    }
-    return 0;
-}
-
-static void target_to_host_link_rtattr(struct rtattr *rtattr,
-                                       uint32_t rtattr_len)
-{
-    target_to_host_for_each_rtattr(rtattr, rtattr_len,
-                                   target_to_host_data_link_rtattr);
-}
-
-static void target_to_host_addr_rtattr(struct rtattr *rtattr,
-                                     uint32_t rtattr_len)
-{
-    target_to_host_for_each_rtattr(rtattr, rtattr_len,
-                                   target_to_host_data_addr_rtattr);
-}
-
-static void target_to_host_route_rtattr(struct rtattr *rtattr,
-                                     uint32_t rtattr_len)
-{
-    target_to_host_for_each_rtattr(rtattr, rtattr_len,
-                                   target_to_host_data_route_rtattr);
-}
-
-static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
-{
-    struct ifinfomsg *ifi;
-    struct ifaddrmsg *ifa;
-    struct rtmsg *rtm;
-
-    switch (nlh->nlmsg_type) {
-    case RTM_GETLINK:
-        break;
-    case RTM_NEWLINK:
-    case RTM_DELLINK:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
-            ifi = NLMSG_DATA(nlh);
-            ifi->ifi_type = tswap16(ifi->ifi_type);
-            ifi->ifi_index = tswap32(ifi->ifi_index);
-            ifi->ifi_flags = tswap32(ifi->ifi_flags);
-            ifi->ifi_change = tswap32(ifi->ifi_change);
-            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
-                                       NLMSG_LENGTH(sizeof(*ifi)));
-        }
-        break;
-    case RTM_GETADDR:
-    case RTM_NEWADDR:
-    case RTM_DELADDR:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
-            ifa = NLMSG_DATA(nlh);
-            ifa->ifa_index = tswap32(ifa->ifa_index);
-            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
-                                       NLMSG_LENGTH(sizeof(*ifa)));
-        }
-        break;
-    case RTM_GETROUTE:
-        break;
-    case RTM_NEWROUTE:
-    case RTM_DELROUTE:
-        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
-            rtm = NLMSG_DATA(nlh);
-            rtm->rtm_flags = tswap32(rtm->rtm_flags);
-            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
-                                        NLMSG_LENGTH(sizeof(*rtm)));
-        }
-        break;
-    default:
-        return -TARGET_EOPNOTSUPP;
-    }
-    return 0;
-}
-
-static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
-{
-    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
-}
-#endif /* CONFIG_RTNETLINK */
-
-static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
-{
-    switch (nlh->nlmsg_type) {
-    default:
-        gemu_log("Unknown host audit message type %d\n",
-                 nlh->nlmsg_type);
-        return -TARGET_EINVAL;
-    }
-    return 0;
-}
-
-static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
-                                                  size_t len)
-{
-    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
-}
-
-static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
-{
-    switch (nlh->nlmsg_type) {
-    case AUDIT_USER:
-    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
-    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-        break;
-    default:
-        gemu_log("Unknown target audit message type %d\n",
-                 nlh->nlmsg_type);
-        return -TARGET_EINVAL;
-    }
-
-    return 0;
-}
-
-static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
-{
-    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
-}
-
 /* do_setsockopt() Must return target values and target errnos. */
 static abi_long do_setsockopt(int sockfd, int level, int optname,
                               abi_ulong optval_addr, socklen_t optlen)
@@ -3290,6 +2032,24 @@ set_timeout:
 		unlock_user (dev_ifname, optval_addr, 0);
 		return ret;
 	}
+        case TARGET_SO_LINGER:
+        {
+                struct linger lg;
+                struct target_linger *tlg;
+
+                if (optlen != sizeof(struct target_linger)) {
+                    return -TARGET_EINVAL;
+                }
+                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
+                    return -TARGET_EFAULT;
+                }
+                __get_user(lg.l_onoff, &tlg->l_onoff);
+                __get_user(lg.l_linger, &tlg->l_linger);
+                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
+                                &lg, sizeof(lg)));
+                unlock_user_struct(tlg, optval_addr, 0);
+                return ret;
+        }
             /* Options with 'int' argument.  */
         case TARGET_SO_DEBUG:
 		optname = SO_DEBUG;
@@ -3381,7 +2141,6 @@ static abi_long do_getsockopt(int sockfd, int level, int optname,
         level = SOL_SOCKET;
         switch (optname) {
         /* These don't just return a single integer */
-        case TARGET_SO_LINGER:
         case TARGET_SO_RCVTIMEO:
         case TARGET_SO_SNDTIMEO:
         case TARGET_SO_PEERNAME:
@@ -3419,6 +2178,39 @@ static abi_long do_getsockopt(int sockfd, int level, int optname,
             }
             break;
         }
+        case TARGET_SO_LINGER:
+        {
+            struct linger lg;
+            socklen_t lglen;
+            struct target_linger *tlg;
+
+            if (get_user_u32(len, optlen)) {
+                return -TARGET_EFAULT;
+            }
+            if (len < 0) {
+                return -TARGET_EINVAL;
+            }
+
+            lglen = sizeof(lg);
+            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
+                                       &lg, &lglen));
+            if (ret < 0) {
+                return ret;
+            }
+            if (len > lglen) {
+                len = lglen;
+            }
+            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
+                return -TARGET_EFAULT;
+            }
+            __put_user(lg.l_onoff, &tlg->l_onoff);
+            __put_user(lg.l_linger, &tlg->l_linger);
+            unlock_user_struct(tlg, optval_addr, 1);
+            if (put_user_u32(len, optlen)) {
+                return -TARGET_EFAULT;
+            }
+            break;
+        }
         /* Options with 'int' argument.  */
         case TARGET_SO_DEBUG:
             optname = SO_DEBUG;
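
Together with the do_setsockopt() hunk above, this makes SO_LINGER usable end
to end from the guest.  A small guest-side round trip that exercises both new
cases (plain Linux C, nothing QEMU-specific, shown only as an illustration):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Set SO_LINGER, then read it back; both calls now reach the host
     * socket instead of failing under emulation. */
    static void linger_roundtrip(int sock)
    {
        struct linger lg = { .l_onoff = 1, .l_linger = 5 };
        socklen_t len = sizeof(lg);

        if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) == 0 &&
            getsockopt(sock, SOL_SOCKET, SO_LINGER, &lg, &len) == 0) {
            printf("l_onoff=%d l_linger=%d\n", lg.l_onoff, lg.l_linger);
        }
    }
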
@@ -3733,90 +2525,6 @@ static int sock_flags_fixup(int fd, int target_type)
     return fd;
 }
 
-static abi_long packet_target_to_host_sockaddr(void *host_addr,
-                                               abi_ulong target_addr,
-                                               socklen_t len)
-{
-    struct sockaddr *addr = host_addr;
-    struct target_sockaddr *target_saddr;
-
-    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
-    if (!target_saddr) {
-        return -TARGET_EFAULT;
-    }
-
-    memcpy(addr, target_saddr, len);
-    addr->sa_family = tswap16(target_saddr->sa_family);
-    /* spkt_protocol is big-endian */
-
-    unlock_user(target_saddr, target_addr, 0);
-    return 0;
-}
-
-static TargetFdTrans target_packet_trans = {
-    .target_to_host_addr = packet_target_to_host_sockaddr,
-};
-
-#ifdef CONFIG_RTNETLINK
-static abi_long netlink_route_target_to_host(void *buf, size_t len)
-{
-    abi_long ret;
-
-    ret = target_to_host_nlmsg_route(buf, len);
-    if (ret < 0) {
-        return ret;
-    }
-
-    return len;
-}
-
-static abi_long netlink_route_host_to_target(void *buf, size_t len)
-{
-    abi_long ret;
-
-    ret = host_to_target_nlmsg_route(buf, len);
-    if (ret < 0) {
-        return ret;
-    }
-
-    return len;
-}
-
-static TargetFdTrans target_netlink_route_trans = {
-    .target_to_host_data = netlink_route_target_to_host,
-    .host_to_target_data = netlink_route_host_to_target,
-};
-#endif /* CONFIG_RTNETLINK */
-
-static abi_long netlink_audit_target_to_host(void *buf, size_t len)
-{
-    abi_long ret;
-
-    ret = target_to_host_nlmsg_audit(buf, len);
-    if (ret < 0) {
-        return ret;
-    }
-
-    return len;
-}
-
-static abi_long netlink_audit_host_to_target(void *buf, size_t len)
-{
-    abi_long ret;
-
-    ret = host_to_target_nlmsg_audit(buf, len);
-    if (ret < 0) {
-        return ret;
-    }
-
-    return len;
-}
-
-static TargetFdTrans target_netlink_audit_trans = {
-    .target_to_host_data = netlink_audit_target_to_host,
-    .host_to_target_data = netlink_audit_host_to_target,
-};
-
 /* do_socket() Must return target values and target errnos. */
 static abi_long do_socket(int domain, int type, int protocol)
 {
@@ -7596,61 +6304,6 @@ static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
 
 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
 
-/* signalfd siginfo conversion */
-
-static void
-host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
-                                const struct signalfd_siginfo *info)
-{
-    int sig = host_to_target_signal(info->ssi_signo);
-
-    /* linux/signalfd.h defines a ssi_addr_lsb
-     * not defined in sys/signalfd.h but used by some kernels
-     */
-
-#ifdef BUS_MCEERR_AO
-    if (tinfo->ssi_signo == SIGBUS &&
-        (tinfo->ssi_code == BUS_MCEERR_AR ||
-         tinfo->ssi_code == BUS_MCEERR_AO)) {
-        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
-        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
-        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
-    }
-#endif
-
-    tinfo->ssi_signo = tswap32(sig);
-    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
-    tinfo->ssi_code = tswap32(info->ssi_code);
-    tinfo->ssi_pid = tswap32(info->ssi_pid);
-    tinfo->ssi_uid = tswap32(info->ssi_uid);
-    tinfo->ssi_fd = tswap32(info->ssi_fd);
-    tinfo->ssi_tid = tswap32(info->ssi_tid);
-    tinfo->ssi_band = tswap32(info->ssi_band);
-    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
-    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
-    tinfo->ssi_status = tswap32(info->ssi_status);
-    tinfo->ssi_int = tswap32(info->ssi_int);
-    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
-    tinfo->ssi_utime = tswap64(info->ssi_utime);
-    tinfo->ssi_stime = tswap64(info->ssi_stime);
-    tinfo->ssi_addr = tswap64(info->ssi_addr);
-}
-
-static abi_long host_to_target_data_signalfd(void *buf, size_t len)
-{
-    int i;
-
-    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
-        host_to_target_signalfd_siginfo(buf + i, buf + i);
-    }
-
-    return len;
-}
-
-static TargetFdTrans target_signalfd_trans = {
-    .host_to_target_data = host_to_target_data_signalfd,
-};
-
 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
 {
     int host_flags;
@@ -7976,55 +6629,6 @@ static target_timer_t get_timer_id(abi_long arg)
     return timerid;
 }
 
-static abi_long swap_data_eventfd(void *buf, size_t len)
-{
-    uint64_t *counter = buf;
-    int i;
-
-    if (len < sizeof(uint64_t)) {
-        return -EINVAL;
-    }
-
-    for (i = 0; i < len; i += sizeof(uint64_t)) {
-        *counter = tswap64(*counter);
-        counter++;
-    }
-
-    return len;
-}
-
-static TargetFdTrans target_eventfd_trans = {
-    .host_to_target_data = swap_data_eventfd,
-    .target_to_host_data = swap_data_eventfd,
-};
-
-#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
-    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
-     defined(__NR_inotify_init1))
-static abi_long host_to_target_data_inotify(void *buf, size_t len)
-{
-    struct inotify_event *ev;
-    int i;
-    uint32_t name_len;
-
-    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
-        ev = (struct inotify_event *)((char *)buf + i);
-        name_len = ev->len;
-
-        ev->wd = tswap32(ev->wd);
-        ev->mask = tswap32(ev->mask);
-        ev->cookie = tswap32(ev->cookie);
-        ev->len = tswap32(name_len);
-    }
-
-    return len;
-}
-
-static TargetFdTrans target_inotify_trans = {
-    .host_to_target_data = host_to_target_data_inotify,
-};
-#endif
-
 static int target_to_host_cpu_mask(unsigned long *host_mask,
                                    size_t host_size,
                                    abi_ulong target_addr,
@@ -8168,6 +6772,9 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
         }
         return ret;
     case TARGET_NR_write:
+        if (arg2 == 0 && arg3 == 0) {
+            return get_errno(safe_write(arg1, 0, 0));
+        }
         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
             return -TARGET_EFAULT;
         if (fd_trans_target_to_host_data(arg1)) {
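
The zero-length special case is needed because lock_user() cannot map a NULL
guest buffer, so the old path returned -TARGET_EFAULT even though Linux
accepts a zero-byte write and still validates the descriptor.  An illustrative
guest-side snippet of what now behaves as on bare Linux:

    #include <unistd.h>

    /* A zero-byte write transfers nothing, but the kernel still checks
     * the descriptor, so a bad or read-only fd fails with -1/EBADF. */
    static int zero_length_write(int fd)
    {
        return (int)write(fd, NULL, 0);
    }
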
@@ -9272,7 +7879,21 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
             unlock_user_struct(target_rlim, arg2, 0);
-            return get_errno(setrlimit(resource, &rlim));
+            /*
+             * If we just passed through resource limit settings for memory then
+             * they would also apply to QEMU's own allocations, and QEMU will
+             * crash or hang or die if its allocations fail. Ideally we would
+             * track the guest allocations in QEMU and apply the limits ourselves.
+             * For now, just tell the guest the call succeeded but don't actually
+             * limit anything.
+             */
+            if (resource != RLIMIT_AS &&
+                resource != RLIMIT_DATA &&
+                resource != RLIMIT_STACK) {
+                return get_errno(setrlimit(resource, &rlim));
+            } else {
+                return 0;
+            }
         }
 #endif
 #ifdef TARGET_NR_getrlimit
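
The guest-visible effect is that RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK now
appear to succeed without any host-side limit being installed, while other
resources are still passed through.  Illustrative guest-side consequence
(plain Linux C):

    #include <sys/resource.h>

    /* Reported as success, but QEMU's own allocations stay unconstrained. */
    static int shrink_address_space(void)
    {
        struct rlimit rl = { .rlim_cur = 64u << 20, .rlim_max = 64u << 20 };
        return setrlimit(RLIMIT_AS, &rl);
    }

Since the getrlimit path is untouched by this hunk, a guest that reads the
limit back will still see the host value rather than the one it just set.
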
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 40bb60ef4c..18d434d6dc 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -203,6 +203,11 @@ struct target_ip_mreq_source {
     uint32_t imr_sourceaddr;
 };
 
+struct target_linger {
+    abi_int l_onoff;        /* Linger active                */
+    abi_int l_linger;       /* How long to linger for       */
+};
+
 struct target_timeval {
     abi_long tv_sec;
     abi_long tv_usec;
diff --git a/migration/migration.c b/migration/migration.c
index 05d0a7296a..d6ae879dc8 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -758,6 +758,18 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
         info->xbzrle_cache->overflow = xbzrle_counters.overflow;
     }
 
+    if (migrate_use_compression()) {
+        info->has_compression = true;
+        info->compression = g_malloc0(sizeof(*info->compression));
+        info->compression->pages = compression_counters.pages;
+        info->compression->busy = compression_counters.busy;
+        info->compression->busy_rate = compression_counters.busy_rate;
+        info->compression->compressed_size =
+                                    compression_counters.compressed_size;
+        info->compression->compression_rate =
+                                    compression_counters.compression_rate;
+    }
+
     if (cpu_throttle_active()) {
         info->has_cpu_throttle_percentage = true;
         info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -2268,7 +2280,10 @@ out:
              */
             if (postcopy_pause_return_path_thread(ms)) {
                 /* Reload rp, reset the rest */
-                rp = ms->rp_state.from_dst_file;
+                if (rp != ms->rp_state.from_dst_file) {
+                    qemu_fclose(rp);
+                    rp = ms->rp_state.from_dst_file;
+                }
                 ms->rp_state.error = false;
                 goto retry;
             }
diff --git a/migration/ram.c b/migration/ram.c
index f6fd8e5e09..bc38d98cc3 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -301,10 +301,19 @@ struct RAMState {
     uint64_t num_dirty_pages_period;
     /* xbzrle misses since the beginning of the period */
     uint64_t xbzrle_cache_miss_prev;
-    /* number of iterations at the beginning of period */
-    uint64_t iterations_prev;
-    /* Iterations since start */
-    uint64_t iterations;
+
+    /* compression statistics since the beginning of the period */
+    /* number of times no free thread was available to compress data */
+    uint64_t compress_thread_busy_prev;
+    /* number of bytes produced after compression */
+    uint64_t compressed_size_prev;
+    /* number of compressed pages */
+    uint64_t compress_pages_prev;
+
+    /* total handled target pages at the beginning of the period */
+    uint64_t target_page_count_prev;
+    /* total handled target pages since start */
+    uint64_t target_page_count;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
     /* protects modification of the bitmap */
@@ -338,6 +347,8 @@ struct PageSearchStatus {
 };
 typedef struct PageSearchStatus PageSearchStatus;
 
+CompressionStats compression_counters;
+
 struct CompressParam {
     bool done;
     bool quit;
@@ -420,28 +431,14 @@ static void *do_data_compress(void *opaque)
     return NULL;
 }
 
-static inline void terminate_compression_threads(void)
-{
-    int idx, thread_count;
-
-    thread_count = migrate_compress_threads();
-
-    for (idx = 0; idx < thread_count; idx++) {
-        qemu_mutex_lock(&comp_param[idx].mutex);
-        comp_param[idx].quit = true;
-        qemu_cond_signal(&comp_param[idx].cond);
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
 static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_use_compression() || !comp_param) {
         return;
     }
-    terminate_compression_threads();
+
     thread_count = migrate_compress_threads();
     for (i = 0; i < thread_count; i++) {
         /*
@@ -451,6 +448,12 @@ static void compress_threads_save_cleanup(void)
         if (!comp_param[i].file) {
             break;
         }
+
+        qemu_mutex_lock(&comp_param[i].mutex);
+        comp_param[i].quit = true;
+        qemu_cond_signal(&comp_param[i].cond);
+        qemu_mutex_unlock(&comp_param[i].mutex);
+
         qemu_thread_join(compress_threads + i);
         qemu_mutex_destroy(&comp_param[i].mutex);
         qemu_cond_destroy(&comp_param[i].cond);
@@ -648,8 +651,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
         return -1;
     }
 
-    be32_to_cpus(&msg.magic);
-    be32_to_cpus(&msg.version);
+    msg.magic = be32_to_cpu(msg.magic);
+    msg.version = be32_to_cpu(msg.version);
 
     if (msg.magic != MULTIFD_MAGIC) {
         error_setg(errp, "multifd: received packet magic %x "
@@ -734,7 +737,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     RAMBlock *block;
     int i;
 
-    be32_to_cpus(&packet->magic);
+    packet->magic = be32_to_cpu(packet->magic);
     if (packet->magic != MULTIFD_MAGIC) {
         error_setg(errp, "multifd: received packet "
                    "magic %x and expected magic %x",
@@ -742,7 +745,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
         return -1;
     }
 
-    be32_to_cpus(&packet->version);
+    packet->version = be32_to_cpu(packet->version);
     if (packet->version != MULTIFD_VERSION) {
         error_setg(errp, "multifd: received packet "
                    "version %d and expected version %d",
@@ -752,7 +755,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
 
     p->flags = be32_to_cpu(packet->flags);
 
-    be32_to_cpus(&packet->size);
+    packet->size = be32_to_cpu(packet->size);
     if (packet->size > migrate_multifd_page_count()) {
         error_setg(errp, "multifd: received packet "
                    "with size %d and expected maximum size %d",
@@ -1592,21 +1595,42 @@ uint64_t ram_pagesize_summary(void)
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
 {
-    uint64_t iter_count = rs->iterations - rs->iterations_prev;
+    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
+    double compressed_size;
 
     /* calculate period counters */
     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                 / (end_time - rs->time_last_bitmap_sync);
 
-    if (!iter_count) {
+    if (!page_count) {
         return;
     }
 
     if (migrate_use_xbzrle()) {
         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
-            rs->xbzrle_cache_miss_prev) / iter_count;
+            rs->xbzrle_cache_miss_prev) / page_count;
         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
     }
+
+    if (migrate_use_compression()) {
+        compression_counters.busy_rate = (double)(compression_counters.busy -
+            rs->compress_thread_busy_prev) / page_count;
+        rs->compress_thread_busy_prev = compression_counters.busy;
+
+        compressed_size = compression_counters.compressed_size -
+                          rs->compressed_size_prev;
+        if (compressed_size) {
+            double uncompressed_size = (compression_counters.pages -
+                                    rs->compress_pages_prev) * TARGET_PAGE_SIZE;
+
+            /* Compression-Ratio = Uncompressed-size / Compressed-size */
+            compression_counters.compression_rate =
+                                        uncompressed_size / compressed_size;
+
+            rs->compress_pages_prev = compression_counters.pages;
+            rs->compressed_size_prev = compression_counters.compressed_size;
+        }
+    }
 }
 
 static void migration_bitmap_sync(RAMState *rs)
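To make the per-period accounting concrete (made-up numbers, not from the patch): if 10,000 target pages are handled during a period, 500 of them find every compression thread busy, and 8,192 pages are actually compressed, shrinking from 32 MiB of guest data to 8 MiB on the wire, then busy-rate = 500 / 10000 = 0.05 and compression-rate = 32 MiB / 8 MiB = 4.0 for that period. Note that the xbzrle cache-miss rate is now computed against this same per-period target-page count rather than the old iteration count.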
@@ -1662,7 +1686,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
         migration_update_rates(rs, end_time);
 
-        rs->iterations_prev = rs->iterations;
+        rs->target_page_count_prev = rs->target_page_count;
 
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
@@ -1888,17 +1912,25 @@ exit:
 static void
 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
 {
+    ram_counters.transferred += bytes_xmit;
+
     if (param->zero_page) {
         ram_counters.duplicate++;
+        return;
     }
-    ram_counters.transferred += bytes_xmit;
+
+    /* The 8 bytes are the page header sent with RAM_SAVE_FLAG_CONTINUE. */
+    compression_counters.compressed_size += bytes_xmit - 8;
+    compression_counters.pages++;
 }
 
+static bool save_page_use_compression(RAMState *rs);
+
 static void flush_compressed_data(RAMState *rs)
 {
     int idx, len, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!save_page_use_compression(rs)) {
         return;
     }
     thread_count = migrate_compress_threads();
@@ -1996,17 +2028,22 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
         pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
+            /*
+             * If memory migration starts over, we will meet a dirtied page
+             * which may still exist in the compression threads' ring, so we
+             * should flush the compressed data to make sure the new page
+             * is not overwritten by the old one in the destination.
+             *
+             * Also, if xbzrle is on, stop using the data compression at this
+             * point. In theory, xbzrle can do better than compression.
+             */
+            flush_compressed_data(rs);
+
             /* Hit the end of the list */
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
             /* Flag that we've looped */
             pss->complete_round = true;
             rs->ram_bulk_stage = false;
-            if (migrate_use_xbzrle()) {
-                /* If xbzrle is on, stop using the data compression at this
-                 * point. In theory, xbzrle can do better than compression.
-                 */
-                flush_compressed_data(rs);
-            }
         }
         /* Didn't find anything this time, but try again on the new block */
         *again = true;
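One way to read the scenario the new comment describes (hypothetical ordering, added for illustration): page P is handed to a compression thread near the end of sweep N, the guest dirties P again, and sweep N+1 sends the fresh copy of P before the thread's stale compressed copy has been written out; without the flush at the wrap-around point, the stale copy could arrive at the destination last and overwrite the newer data.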
@@ -2259,6 +2296,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
         return true;
     }
 
+    compression_counters.busy++;
     return false;
 }
 
@@ -2372,7 +2410,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
  *
  * Called within an RCU critical section.
  *
- * Returns the number of pages written where zero means no dirty pages
+ * Returns the number of pages written where zero means no dirty pages,
+ * or negative on error
  *
  * @rs: current RAM state
  * @last_stage: if we are at the completion stage
@@ -3196,7 +3235,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             done = 1;
             break;
         }
-        rs->iterations++;
+
+        if (pages < 0) {
+            qemu_file_set_error(f, pages);
+            break;
+        }
+
+        rs->target_page_count += pages;
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
@@ -3212,7 +3257,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         }
         i++;
     }
-    flush_compressed_data(rs);
     rcu_read_unlock();
 
     /*
@@ -3238,7 +3282,7 @@ out:
 /**
  * ram_save_complete: function called to send the remaining amount of ram
  *
- * Returns zero to indicate success
+ * Returns zero to indicate success or negative on error
  *
  * Called with iothread lock
  *
@@ -3249,6 +3293,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     RAMState **temp = opaque;
     RAMState *rs = *temp;
+    int ret = 0;
 
     rcu_read_lock();
 
@@ -3269,6 +3314,10 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         if (pages == 0) {
             break;
         }
+        if (pages < 0) {
+            ret = pages;
+            break;
+        }
     }
 
     flush_compressed_data(rs);
@@ -3280,7 +3329,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
 
-    return 0;
+    return ret;
 }
 
 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
diff --git a/migration/ram.h b/migration/ram.h
index 457bf54b8c..a139066846 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -36,6 +36,7 @@
 
 extern MigrationStats ram_counters;
 extern XBZRLECacheStats xbzrle_counters;
+extern CompressionStats compression_counters;
 
 int xbzrle_cache_resize(int64_t new_size, Error **errp);
 uint64_t ram_bytes_remaining(void);
diff --git a/migration/rdma.c b/migration/rdma.c
index ae07515e83..9b2e7e10aa 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -4012,7 +4012,7 @@ static void rdma_accept_incoming_migration(void *opaque)
 void rdma_start_incoming_migration(const char *host_port, Error **errp)
 {
     int ret;
-    RDMAContext *rdma, *rdma_return_path;
+    RDMAContext *rdma, *rdma_return_path = NULL;
     Error *local_err = NULL;
 
     trace_rdma_start_incoming_migration();
diff --git a/migration/savevm.c b/migration/savevm.c
index 13e51f0e34..2d10e45582 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1679,6 +1679,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
     qemu_loadvm_state_cleanup();
 
     rcu_unregister_thread();
+    mis->have_listen_thread = false;
     return NULL;
 }
 
@@ -2078,7 +2079,9 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
     /* Find savevm section */
     se = find_se(idstr, instance_id);
     if (se == NULL) {
-        error_report("Unknown savevm section or instance '%s' %d",
+        error_report("Unknown savevm section or instance '%s' %d. "
+                     "Make sure that your current VM setup matches your "
+                     "saved VM setup, including any hotplugged devices",
                      idstr, instance_id);
         return -EINVAL;
     }
@@ -2330,11 +2333,13 @@ int qemu_loadvm_state(QEMUFile *f)
     if (migrate_get_current()->send_configuration) {
         if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
             error_report("Configuration section missing");
+            qemu_loadvm_state_cleanup();
             return -EINVAL;
         }
         ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
 
         if (ret) {
+            qemu_loadvm_state_cleanup();
             return ret;
         }
     }
diff --git a/nbd/server.c b/nbd/server.c
index ea5fe0eb33..c3dd402b45 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1844,37 +1844,68 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
     return ret;
 }
 
-static int blockstatus_to_extent_be(BlockDriverState *bs, uint64_t offset,
-                                    uint64_t bytes, NBDExtent *extent)
+/*
+ * Populate @extents from block status. Update @bytes to be the actual
+ * length encoded (which may be smaller than the original), and update
+ * @nb_extents to the number of extents used.
+ *
+ * Returns zero on success and -errno on bdrv_block_status_above failure.
+ */
+static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset,
+                                  uint64_t *bytes, NBDExtent *extents,
+                                  unsigned int *nb_extents)
 {
-    uint64_t remaining_bytes = bytes;
+    uint64_t remaining_bytes = *bytes;
+    NBDExtent *extent = extents, *extents_end = extents + *nb_extents;
+    bool first_extent = true;
 
+    assert(*nb_extents);
     while (remaining_bytes) {
         uint32_t flags;
         int64_t num;
         int ret = bdrv_block_status_above(bs, NULL, offset, remaining_bytes,
                                           &num, NULL, NULL);
+
         if (ret < 0) {
             return ret;
         }
 
         flags = (ret & BDRV_BLOCK_ALLOCATED ? 0 : NBD_STATE_HOLE) |
                 (ret & BDRV_BLOCK_ZERO      ? NBD_STATE_ZERO : 0);
+        offset += num;
+        remaining_bytes -= num;
 
-        if (remaining_bytes == bytes) {
+        if (first_extent) {
             extent->flags = flags;
+            extent->length = num;
+            first_extent = false;
+            continue;
         }
 
-        if (flags != extent->flags) {
-            break;
+        if (flags == extent->flags) {
+            /* extend current extent */
+            extent->length += num;
+        } else {
+            if (extent + 1 == extents_end) {
+                break;
+            }
+
+            /* start new extent */
+            extent++;
+            extent->flags = flags;
+            extent->length = num;
         }
+    }
 
-        offset += num;
-        remaining_bytes -= num;
+    extents_end = extent + 1;
+
+    for (extent = extents; extent < extents_end; extent++) {
+        cpu_to_be32s(&extent->flags);
+        cpu_to_be32s(&extent->length);
     }
 
-    cpu_to_be32s(&extent->flags);
-    extent->length = cpu_to_be32(bytes - remaining_bytes);
+    *bytes -= remaining_bytes;
+    *nb_extents = extents_end - extents;
 
     return 0;
 }
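As a worked example of the coalescing above (hypothetical disk layout): if bdrv_block_status_above() reports three runs -- 64 KiB allocated, 64 KiB allocated, then 128 KiB of unallocated zeroes -- the first two runs share flags 0 and are merged into a single 128 KiB extent, while the third run starts a second extent with NBD_STATE_HOLE | NBD_STATE_ZERO. Assuming the caller provided room for at least two extents, the function returns with *nb_extents == 2 and *bytes still covering the full 256 KiB.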
@@ -1910,21 +1941,29 @@ static int nbd_co_send_extents(NBDClient *client, uint64_t handle,
 /* Get block status from the exported device and send it to the client */
 static int nbd_co_send_block_status(NBDClient *client, uint64_t handle,
                                     BlockDriverState *bs, uint64_t offset,
-                                    uint32_t length, bool last,
-                                    uint32_t context_id, Error **errp)
+                                    uint32_t length, bool dont_fragment,
+                                    bool last, uint32_t context_id,
+                                    Error **errp)
 {
     int ret;
-    NBDExtent extent;
+    unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BITMAP_EXTENTS;
+    NBDExtent *extents = g_new(NBDExtent, nb_extents);
+    uint64_t final_length = length;
 
-    ret = blockstatus_to_extent_be(bs, offset, length, &extent);
+    ret = blockstatus_to_extents(bs, offset, &final_length, extents,
+                                 &nb_extents);
     if (ret < 0) {
+        g_free(extents);
         return nbd_co_send_structured_error(
                 client, handle, -ret, "can't get block status", errp);
     }
 
-    return nbd_co_send_extents(client, handle, &extent, 1,
-                               be32_to_cpu(extent.length), last,
-                               context_id, errp);
+    ret = nbd_co_send_extents(client, handle, extents, nb_extents,
+                              final_length, last, context_id, errp);
+
+    g_free(extents);
+
+    return ret;
 }
 
 /*
@@ -1951,6 +1990,8 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
 
     assert(begin < overall_end && nb_extents);
     while (begin < overall_end && i < nb_extents) {
+        bool next_dirty = !dirty;
+
         if (dirty) {
             end = bdrv_dirty_bitmap_next_zero(bitmap, begin);
         } else {
@@ -1962,6 +2003,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
             end = MIN(bdrv_dirty_bitmap_size(bitmap),
                       begin + UINT32_MAX + 1 -
                       bdrv_dirty_bitmap_granularity(bitmap));
+            next_dirty = dirty;
         }
         if (dont_fragment && end > overall_end) {
             end = overall_end;
@@ -1971,7 +2013,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
         extents[i].flags = cpu_to_be32(dirty ? NBD_STATE_DIRTY : 0);
         i++;
         begin = end;
-        dirty = !dirty;
+        dirty = next_dirty;
     }
 
     bdrv_dirty_iter_free(it);
@@ -2228,10 +2270,12 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
             (client->export_meta.base_allocation ||
              client->export_meta.bitmap))
         {
+            bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
+
             if (client->export_meta.base_allocation) {
                 ret = nbd_co_send_block_status(client, request->handle,
                                                blk_bs(exp->blk), request->from,
-                                               request->len,
+                                               request->len, dont_fragment,
                                                !client->export_meta.bitmap,
                                                NBD_META_ID_BASE_ALLOCATION,
                                                errp);
@@ -2244,7 +2288,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
                 ret = nbd_co_send_bitmap(client, request->handle,
                                          client->exp->export_bitmap,
                                          request->from, request->len,
-                                         request->flags & NBD_CMD_FLAG_REQ_ONE,
+                                         dont_fragment,
                                          true, NBD_META_ID_DIRTY_BITMAP, errp);
                 if (ret < 0) {
                     return ret;
diff --git a/net/net.c b/net/net.c
index 2a3133990c..cdcd5cf634 100644
--- a/net/net.c
+++ b/net/net.c
@@ -984,6 +984,10 @@ static int net_client_init1(const void *object, bool is_netdev, Error **errp)
         /* missing optional values have been initialized to "all bits zero" */
         name = net->has_id ? net->id : net->name;
 
+        if (net->has_name) {
+            warn_report("The 'name' parameter is deprecated, use 'id' instead");
+        }
+
         /* Map the old options to the new flat type */
         switch (opts->type) {
         case NET_LEGACY_OPTIONS_TYPE_NONE:
diff --git a/net/slirp.c b/net/slirp.c
index c18060f778..c93b64dd91 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -404,6 +404,8 @@ static SlirpState *slirp_lookup(Monitor *mon, const char *hub_id,
                 monitor_printf(mon, "unrecognized (hub-id, stackname) pair\n");
                 return NULL;
             }
+            warn_report("Using 'hub-id' is deprecated, specify the netdev id "
+                        "directly instead");
         } else {
             nc = qemu_find_netdev(name);
             if (!nc) {
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
index 4ec0dbfc4a..4ec0dbfc4a 100755..100644
--- a/pc-bios/hppa-firmware.img
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/pc-bios/palcode-clipper b/pc-bios/palcode-clipper
index 1df377a0fd..1df377a0fd 100755..100644
--- a/pc-bios/palcode-clipper
+++ b/pc-bios/palcode-clipper
Binary files differ
diff --git a/pc-bios/u-boot-sam460-20100605.bin b/pc-bios/u-boot-sam460-20100605.bin
index e17de77c19..e17de77c19 100755..100644
--- a/pc-bios/u-boot-sam460-20100605.bin
+++ b/pc-bios/u-boot-sam460-20100605.bin
Binary files differ
diff --git a/pc-bios/u-boot.e500 b/pc-bios/u-boot.e500
index 25537f8fe3..25537f8fe3 100755..100644
--- a/pc-bios/u-boot.e500
+++ b/pc-bios/u-boot.e500
Binary files differ
diff --git a/qapi/block-core.json b/qapi/block-core.json
index ac3b48ee54..58ec9931c7 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1935,6 +1935,8 @@
 ##
 # @x-block-dirty-bitmap-merge:
 #
+# FIXME: Rename @src_name and @dst_name to src-name and dst-name.
+#
 # Merge @src_name dirty bitmap to @dst_name dirty bitmap. @src_name dirty
 # bitmap is unchanged. On error, @dst_name is unchanged.
 #
diff --git a/qapi/migration.json b/qapi/migration.json
index f62d3f9a4b..6e8c21258a 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -76,6 +76,27 @@
            'overflow': 'int' } }
 
 ##
+# @CompressionStats:
+#
+# Detailed migration compression statistics
+#
+# @pages: number of pages compressed and transferred to the target VM
+#
+# @busy: count of times that no free thread was available to compress data
+#
+# @busy-rate: how often no free thread was available, relative to the pages handled in the period
+#
+# @compressed-size: total number of bytes produced by compression
+#
+# @compression-rate: compression ratio (uncompressed size / compressed size)
+#
+# Since: 3.1
+##
+{ 'struct': 'CompressionStats',
+  'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
+	   'compressed-size': 'int', 'compression-rate': 'number' } }
+
+##
 # @MigrationStatus:
 #
 # An enumeration of migration status.
@@ -172,6 +193,8 @@
 #           only present when the postcopy-blocktime migration capability
 #           is enabled. (Since 3.0)
 #
+# @compression: migration compression statistics, only returned if the
+#           compression feature is on and status is 'active' or 'completed' (Since 3.1)
 #
 # Since: 0.14.0
 ##
@@ -186,7 +209,8 @@
            '*cpu-throttle-percentage': 'int',
            '*error-desc': 'str',
            '*postcopy-blocktime' : 'uint32',
-           '*postcopy-vcpu-blocktime': ['uint32']} }
+           '*postcopy-vcpu-blocktime': ['uint32'],
+           '*compression': 'CompressionStats'} }
 
 ##
 # @query-migrate:
diff --git a/qemu-deprecated.texi b/qemu-deprecated.texi
index 2283fc52c3..16ff946b55 100644
--- a/qemu-deprecated.texi
+++ b/qemu-deprecated.texi
@@ -35,6 +35,11 @@ which is the default.
 
 @section System emulator command line arguments
 
+@subsection -machine enforce-config-section=on|off (since 3.1)
+
+The @option{enforce-config-section} parameter is replaced by the
+@option{-global migration.send-configuration=@var{on|off}} option.
+
 @subsection -no-kvm (since 1.3.0)
 
 The ``-no-kvm'' argument is now a synonym for setting
@@ -83,6 +88,11 @@ The 'file' driver for drives is no longer appropriate for character or host
 devices and will only accept regular files (S_IFREG). The correct driver
 for these file types is 'host_cdrom' or 'host_device' as appropriate.
 
+@subsection -net ...,name=@var{name} (since 3.1)
+
+The @option{name} parameter of the @option{-net} option is a synonym
+for the @option{id} parameter, which should now be used instead.
+
 @section QEMU Machine Protocol (QMP) commands
 
 @subsection block-dirty-bitmap-add "autoload" parameter (since 2.12.0)
@@ -99,6 +109,13 @@ The ``query-cpus'' command is replaced by the ``query-cpus-fast'' command.
 The ``arch'' output member of the ``query-cpus-fast'' command is
 replaced by the ``target'' output member.
 
+@section System emulator human monitor commands
+
+@subsection The hub_id parameter of 'hostfwd_add' / 'hostfwd_remove' (since 3.1)
+
+The @option{[hub_id name]} parameter tuple of the 'hostfwd_add' and
+'hostfwd_remove' HMP commands has been replaced by @option{netdev_id}.
+
 @section System emulator devices
 
 @subsection ivshmem (since 2.6.0)
diff --git a/qemu-seccomp.c b/qemu-seccomp.c
index 4729eb107f..1baa5c69ed 100644
--- a/qemu-seccomp.c
+++ b/qemu-seccomp.c
@@ -282,7 +282,24 @@ static QemuOptsList qemu_sandbox_opts = {
 
 static void seccomp_register(void)
 {
-    qemu_add_opts(&qemu_sandbox_opts);
+    bool add = false;
+
+    /* FIXME: use seccomp_api_get() >= 2 check when released */
+
+#if defined(SECCOMP_FILTER_FLAG_TSYNC)
+    int check;
+
+    /* check host TSYNC capability, it returns errno == ENOSYS if unavailable */
+    check = qemu_seccomp(SECCOMP_SET_MODE_FILTER,
+                         SECCOMP_FILTER_FLAG_TSYNC, NULL);
+    if (check < 0 && errno == EFAULT) {
+        add = true;
+    }
+#endif
+
+    if (add) {
+        qemu_add_opts(&qemu_sandbox_opts);
+    }
 }
 opts_init(seccomp_register);
 #endif
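For reference, the probe relies on the usual capability-detection idiom: a kernel that understands SECCOMP_FILTER_FLAG_TSYNC gets as far as dereferencing the NULL filter argument and fails with EFAULT, whereas a kernel without TSYNC support rejects the unknown flag with EINVAL (and one without seccomp filtering at all reports ENOSYS), so only the EFAULT outcome registers the sandbox option group.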
diff --git a/replay/replay-char.c b/replay/replay-char.c
index 736cc8c2e6..736cc8c2e6 100755..100644
--- a/replay/replay-char.c
+++ b/replay/replay-char.c
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index a91e4f1313..436195894b 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -302,11 +302,7 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
     return 0;
 }
 
-#if TCG_TARGET_REG_BITS == 64
 # define LOWREGMASK(x)	((x) & 7)
-#else
-# define LOWREGMASK(x)	(x)
-#endif
 
 #define P_EXT		0x100		/* 0x0f opcode prefix */
 #define P_EXT38         0x200           /* 0x0f 0x38 opcode prefix */
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 87c81d1dcc..d0c0a92e67 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -255,12 +255,8 @@ check-qtest-pci-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF)
 gcov-files-pci-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
 check-qtest-pci-y += tests/megasas-test$(EXESUF)
 gcov-files-pci-y += hw/scsi/megasas.c
-check-qtest-$(CONFIG_VMXNET3_PCI) += tests/vmxnet3-test$(EXESUF)
-gcov-files-$(CONFIG_VMXNET3_PCI) += hw/net/vmxnet3.c
-check-qtest-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
-check-qtest-$(CONFIG_WDT_IB700) += tests/wdt_ib700-test$(EXESUF)
-gcov-files-$(CONFIG_WDT_IB700) += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c
 
+check-qtest-i386-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
 check-qtest-i386-y += tests/fdc-test$(EXESUF)
 gcov-files-i386-y = hw/block/fdc.c
 check-qtest-i386-y += tests/ide-test$(EXESUF)
@@ -277,9 +273,13 @@ check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
 check-qtest-i386-y += tests/i440fx-test$(EXESUF)
 check-qtest-i386-y += tests/fw_cfg-test$(EXESUF)
 check-qtest-i386-y += tests/drive_del-test$(EXESUF)
+check-qtest-i386-$(CONFIG_WDT_IB700) += tests/wdt_ib700-test$(EXESUF)
+gcov-files-i386-$(CONFIG_WDT_IB700) += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c
 check-qtest-i386-y += tests/tco-test$(EXESUF)
 check-qtest-i386-y += $(check-qtest-pci-y)
 gcov-files-i386-y += $(gcov-files-pci-y)
+check-qtest-i386-$(CONFIG_VMXNET3_PCI) += tests/vmxnet3-test$(EXESUF)
+gcov-files-i386-$(CONFIG_VMXNET3_PCI) += hw/net/vmxnet3.c
 gcov-files-i386-y += hw/net/net_rx_pkt.c
 gcov-files-i386-y += hw/net/net_tx_pkt.c
 check-qtest-i386-$(CONFIG_PVPANIC) += tests/pvpanic-test$(EXESUF)
@@ -332,8 +332,15 @@ check-qtest-m68k-y = tests/boot-serial-test$(EXESUF)
 
 check-qtest-microblaze-y = tests/boot-serial-test$(EXESUF)
 
+check-qtest-mips-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-mips64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-mips64el-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
 check-qtest-moxie-y = tests/boot-serial-test$(EXESUF)
 
+check-qtest-ppc-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
 check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
 check-qtest-ppc-y += tests/prom-env-test$(EXESUF)
 check-qtest-ppc-y += tests/drive_del-test$(EXESUF)
@@ -366,11 +373,16 @@ check-qtest-ppc64-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF)
 gcov-files-ppc64-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
 check-qtest-ppc64-y += tests/cpu-plug-test$(EXESUF)
 
+check-qtest-sh4-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-sh4eb-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
 check-qtest-sparc-y = tests/prom-env-test$(EXESUF)
 check-qtest-sparc-y += tests/m48t59-test$(EXESUF)
 gcov-files-sparc-y = hw/timer/m48t59.c
 check-qtest-sparc-y += tests/boot-serial-test$(EXESUF)
 
+check-qtest-sparc64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
 check-qtest-sparc64-y += tests/prom-env-test$(EXESUF)
 check-qtest-sparc64-y += tests/boot-serial-test$(EXESUF)
 
diff --git a/tests/migration-test.c b/tests/migration-test.c
index 0e687b7512..20f38f1930 100644
--- a/tests/migration-test.c
+++ b/tests/migration-test.c
@@ -21,11 +21,13 @@
 #include "chardev/char.h"
 #include "sysemu/sysemu.h"
 
+#include "migration/migration-test.h"
+
 /* TODO actually test the results and get rid of this */
 #define qtest_qmp_discard_response(...) qobject_unref(qtest_qmp(__VA_ARGS__))
 
-const unsigned start_address = 1024 * 1024;
-const unsigned end_address = 100 * 1024 * 1024;
+unsigned start_address;
+unsigned end_address;
 bool got_stop;
 static bool uffd_feature_thread_id;
 
@@ -80,10 +82,10 @@ static bool ufd_version_check(void)
 
 static const char *tmpfs;
 
-/* A simple PC boot sector that modifies memory (1-100MB) quickly
- * outputting a 'B' every so often if it's still running.
+/* The boot file modifies the memory area in [start_address, end_address)
+ * repeatedly. It outputs a 'B' at a fixed rate while it's still running.
  */
-#include "tests/migration/x86-a-b-bootblock.h"
+#include "tests/migration/i386/a-b-bootblock.h"
 
 static void init_bootfile_x86(const char *bootpath)
 {
@@ -270,11 +272,11 @@ static void wait_for_migration_pass(QTestState *who)
 static void check_guests_ram(QTestState *who)
 {
     /* Our ASM test will have been incrementing one byte from each page from
-     * 1MB to <100MB in order.
-     * This gives us a constraint that any page's byte should be equal or less
-     * than the previous pages byte (mod 256); and they should all be equal
-     * except for one transition at the point where we meet the incrementer.
-     * (We're running this with the guest stopped).
+     * start_address to < end_address in order. This gives us a constraint
+     * that any page's byte should be equal to or less than the previous page's
+     * byte (mod 256); and they should all be equal except for one transition
+     * at the point where we meet the incrementer. (We're running this with
+     * the guest stopped).
      */
     unsigned address;
     uint8_t first_byte;
@@ -285,7 +287,8 @@ static void check_guests_ram(QTestState *who)
     qtest_memread(who, start_address, &first_byte, 1);
     last_byte = first_byte;
 
-    for (address = start_address + 4096; address < end_address; address += 4096)
+    for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address;
+         address += TEST_MEM_PAGE_SIZE)
     {
         uint8_t b;
         qtest_memread(who, address, &b, 1);
@@ -437,12 +440,14 @@ static int test_migrate_start(QTestState **from, QTestState **to,
                                   " -drive file=%s,format=raw"
                                   " -incoming %s",
                                   accel, tmpfs, bootpath, uri);
+        start_address = X86_TEST_MEM_START;
+        end_address = X86_TEST_MEM_END;
     } else if (strcmp(arch, "ppc64") == 0) {
-        cmd_src = g_strdup_printf("-machine accel=%s -m 256M"
+        cmd_src = g_strdup_printf("-machine accel=%s -m 256M -nodefaults"
                                   " -name source,debug-threads=on"
                                   " -serial file:%s/src_serial"
-                                  " -prom-env '"
-                                  "boot-command=hex .\" _\" begin %x %x "
+                                  " -prom-env 'use-nvramrc?=true' -prom-env "
+                                  "'nvramrc=hex .\" _\" begin %x %x "
                                   "do i c@ 1 + i c! 1000 +loop .\" B\" 0 "
                                   "until'",  accel, tmpfs, end_address,
                                   start_address);
@@ -451,6 +456,9 @@ static int test_migrate_start(QTestState **from, QTestState **to,
                                   " -serial file:%s/dest_serial"
                                   " -incoming %s",
                                   accel, tmpfs, uri);
+
+        start_address = PPC_TEST_MEM_START;
+        end_address = PPC_TEST_MEM_END;
     } else {
         g_assert_not_reached();
     }
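As a concrete illustration of that RAM-check constraint (made-up values): sampling one byte per page might yield 42 42 42 41 41 ... 41, because the pages the incrementer has already visited in its current sweep are exactly one higher (mod 256) than the rest, so at most one downward step can appear across the whole [start_address, end_address) range.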
diff --git a/tests/migration/Makefile b/tests/migration/Makefile
new file mode 100644
index 0000000000..dc3b551976
--- /dev/null
+++ b/tests/migration/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+
+TARGET_LIST = i386
+
+SRC_PATH = ../..
+
+override define __note
+/* This file is automatically generated from the assembly file in
+ * tests/migration/$@. Edit that file and then run "make all"
+ * inside tests/migration to update, and then remember to send both
+ * the header and the assembler differences in your patch submission.
+ */
+endef
+export __note
+
+find-arch-cross-cc = $(lastword $(shell grep -h "CROSS_CC_GUEST=" $(wildcard $(SRC_PATH)/$(patsubst i386,*86*,$(1))-softmmu/config-target.mak) /dev/null))
+parse-cross-prefix = $(subst gcc,,$(patsubst cc,gcc,$(patsubst CROSS_CC_GUEST="%",%,$(call find-arch-cross-cc,$(1)))))
+gen-cross-prefix = $(patsubst %-,CROSS_PREFIX=%-,$(call parse-cross-prefix,$(1)))
+
+.PHONY: all $(TARGET_LIST)
+
+all: $(TARGET_LIST)
+
+$(TARGET_LIST):
+	$(MAKE) -C $@ $(call gen-cross-prefix,$@)
+
+clean:
+	for target in $(TARGET_LIST); do \
+		$(MAKE) -C $$target clean; \
+	done
diff --git a/tests/migration/i386/Makefile b/tests/migration/i386/Makefile
new file mode 100644
index 0000000000..5c0324134a
--- /dev/null
+++ b/tests/migration/i386/Makefile
@@ -0,0 +1,22 @@
+# To specify cross compiler prefix, use CROSS_PREFIX=
+#   $ make CROSS_PREFIX=x86_64-linux-gnu-
+
+.PHONY: all clean
+all: a-b-bootblock.h
+
+a-b-bootblock.h: x86.bootsect
+	echo "$$__note" > header.tmp
+	xxd -i $< | sed -e 's/.*int.*//' >> header.tmp
+	mv header.tmp $@
+
+x86.bootsect: x86.boot
+	dd if=$< of=$@ bs=256 count=2 skip=124
+
+x86.boot: x86.o
+	$(CROSS_PREFIX)objcopy -O binary $< $@
+
+x86.o: a-b-bootblock.S
+	$(CROSS_PREFIX)gcc -m32 -march=i486 -c $< -o $@
+
+clean:
+	@rm -rf *.boot *.o *.bootsect
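The dd invocation is where the 512-byte boot sector is carved out of the raw image: with bs=256, skip=124 starts reading at 124 * 256 = 31744 = 0x7c00 and count=2 copies 2 * 256 = 512 bytes, presumably because the generated binary lays the boot sector out at its BIOS load address of 0x7c00.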
diff --git a/tests/migration/x86-a-b-bootblock.s b/tests/migration/i386/a-b-bootblock.S
index b1642641a7..3f97f28023 100644
--- a/tests/migration/x86-a-b-bootblock.s
+++ b/tests/migration/i386/a-b-bootblock.S
@@ -3,10 +3,6 @@
 #  range.
 #  Outputs an initial 'A' on serial followed by repeated 'B's
 #
-# run   tests/migration/rebuild-x86-bootblock.sh
-#   to regenerate the hex, and remember to include both the .h and .s
-#   in any patches.
-#
 # Copyright (c) 2016 Red Hat, Inc. and/or its affiliates
 # This work is licensed under the terms of the GNU GPL, version 2 or later.
 # See the COPYING file in the top-level directory.
diff --git a/tests/migration/x86-a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h
index 78a151fe2a..7d459d4fde 100644
--- a/tests/migration/x86-a-b-bootblock.h
+++ b/tests/migration/i386/a-b-bootblock.h
@@ -1,7 +1,7 @@
-/* This file is automatically generated from
- * tests/migration/x86-a-b-bootblock.s, edit that and then run
- * tests/migration/rebuild-x86-bootblock.sh to update,
- * and then remember to send both in your patch submission.
+/* This file is automatically generated from the assembly file in
+ * tests/migration/i386. Edit that file and then run "make all"
+ * inside tests/migration to update, and then remember to send both
+ * the header and the assembler differences in your patch submission.
  */
 unsigned char x86_bootsect[] = {
   0xfa, 0x0f, 0x01, 0x16, 0x74, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00,
diff --git a/tests/migration/migration-test.h b/tests/migration/migration-test.h
new file mode 100644
index 0000000000..c4c0c526b6
--- /dev/null
+++ b/tests/migration/migration-test.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef _TEST_MIGRATION_H_
+#define _TEST_MIGRATION_H_
+
+/* Common */
+#define TEST_MEM_PAGE_SIZE 4096
+
+/* x86 */
+#define X86_TEST_MEM_START (1 * 1024 * 1024)
+#define X86_TEST_MEM_END   (100 * 1024 * 1024)
+
+/* PPC */
+#define PPC_TEST_MEM_START (1 * 1024 * 1024)
+#define PPC_TEST_MEM_END   (100 * 1024 * 1024)
+
+#endif /* _TEST_MIGRATION_H_ */
diff --git a/tests/migration/rebuild-x86-bootblock.sh b/tests/migration/rebuild-x86-bootblock.sh
deleted file mode 100755
index 86cec5d284..0000000000
--- a/tests/migration/rebuild-x86-bootblock.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-#
-# Author: dgilbert@redhat.com
-
-ASMFILE=$PWD/tests/migration/x86-a-b-bootblock.s
-HEADER=$PWD/tests/migration/x86-a-b-bootblock.h
-
-if [ ! -e "$ASMFILE" ]
-then
-  echo "Couldn't find $ASMFILE" >&2
-  exit 1
-fi
-
-ASM_WORK_DIR=$(mktemp -d --tmpdir X86BB.XXXXXX)
-cd "$ASM_WORK_DIR" &&
-as --32 -march=i486 "$ASMFILE" -o x86.o &&
-objcopy -O binary x86.o x86.boot &&
-dd if=x86.boot of=x86.bootsect bs=256 count=2 skip=124 &&
-xxd -i x86.bootsect |
-sed -e 's/.*int.*//' > x86.hex &&
-cat - x86.hex <<HERE > "$HEADER"
-/* This file is automatically generated from
- * tests/migration/x86-a-b-bootblock.s, edit that and then run
- * tests/migration/rebuild-x86-bootblock.sh to update,
- * and then remember to send both in your patch submission.
- */
-HERE
-
-rm x86.hex x86.bootsect x86.boot x86.o
-cd .. && rmdir "$ASM_WORK_DIR"
diff --git a/tests/qht-bench.c b/tests/qht-bench.c
index f492b3a20a..2089e2bed1 100644
--- a/tests/qht-bench.c
+++ b/tests/qht-bench.c
@@ -53,6 +53,7 @@ static unsigned long resize_delay = 1000;
 static double resize_rate; /* 0.0 to 1.0 */
 static unsigned int n_rz_threads = 1;
 static QemuThread *rz_threads;
+static bool precompute_hash;
 
 static double update_rate; /* 0.0 to 1.0 */
 static uint64_t update_threshold;
@@ -101,11 +102,18 @@ static bool is_equal(const void *ap, const void *bp)
     return *a == *b;
 }
 
-static inline uint32_t h(unsigned long v)
+static uint32_t h(unsigned long v)
 {
     return tb_hash_func7(v, 0, 0, 0, 0);
 }
 
+static uint32_t hval(unsigned long v)
+{
+    return v;
+}
+
+static uint32_t (*hfunc)(unsigned long v) = h;
+
 /*
  * From: https://en.wikipedia.org/wiki/Xorshift
  * This is faster than rand_r(), and gives us a wider range (RAND_MAX is only
@@ -149,7 +157,7 @@ static void do_rw(struct thread_info *info)
         bool read;
 
         p = &keys[info->r & (lookup_range - 1)];
-        hash = h(*p);
+        hash = hfunc(*p);
         read = qht_lookup(&ht, p, hash);
         if (read) {
             stats->rd++;
@@ -158,7 +166,7 @@ static void do_rw(struct thread_info *info)
         }
     } else {
         p = &keys[info->r & (update_range - 1)];
-        hash = h(*p);
+        hash = hfunc(*p);
         if (info->write_op) {
             bool written = false;
 
@@ -289,7 +297,9 @@ static void htable_init(void)
     /* avoid allocating memory later by allocating all the keys now */
     keys = g_malloc(sizeof(*keys) * n);
     for (i = 0; i < n; i++) {
-        keys[i] = populate_offset + i;
+        long val = populate_offset + i;
+
+        keys[i] = precompute_hash ? h(val) : hval(val);
     }
 
     /* some sanity checks */
@@ -321,7 +331,7 @@ static void htable_init(void)
 
             r = xorshift64star(r);
             p = &keys[r & (init_range - 1)];
-            hash = h(*p);
+            hash = hfunc(*p);
             if (qht_insert(&ht, p, hash, NULL)) {
                 break;
             }
@@ -412,7 +422,7 @@ static void parse_args(int argc, char *argv[])
     int c;
 
     for (;;) {
-        c = getopt(argc, argv, "d:D:g:k:K:l:hn:N:o:r:Rs:S:u:");
+        c = getopt(argc, argv, "d:D:g:k:K:l:hn:N:o:pr:Rs:S:u:");
         if (c < 0) {
             break;
         }
@@ -451,6 +461,10 @@ static void parse_args(int argc, char *argv[])
         case 'o':
             populate_offset = atol(optarg);
             break;
+        case 'p':
+            precompute_hash = true;
+            hfunc = hval;
+            break;
         case 'r':
             update_range = pow2ceil(atol(optarg));
             break;
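With the new -p switch, the benchmark stores precomputed hashes as the keys themselves (keys[i] = h(val)) and switches the per-operation hash function to the identity hval(), so lookups, inserts and removals are measured without per-access hashing cost; without -p the behaviour is unchanged, with keys hashed through h() on every access. A typical invocation would simply add the flag, e.g. tests/qht-bench -p (other options as before).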
diff --git a/tests/test-qht.c b/tests/test-qht.c
index dda6a067be..4d23cefab6 100644
--- a/tests/test-qht.c
+++ b/tests/test-qht.c
@@ -41,7 +41,7 @@ static void insert(int a, int b)
     }
 }
 
-static void rm(int init, int end)
+static void do_rm(int init, int end, bool exist)
 {
     int i;
 
@@ -49,10 +49,24 @@ static void rm(int init, int end)
         uint32_t hash;
 
         hash = arr[i];
-        g_assert_true(qht_remove(&ht, &arr[i], hash));
+        if (exist) {
+            g_assert_true(qht_remove(&ht, &arr[i], hash));
+        } else {
+            g_assert_false(qht_remove(&ht, &arr[i], hash));
+        }
     }
 }
 
+static void rm(int init, int end)
+{
+    do_rm(init, end, true);
+}
+
+static void rm_nonexist(int init, int end)
+{
+    do_rm(init, end, false);
+}
+
 static void check(int a, int b, bool expected)
 {
     struct qht_stats stats;
@@ -84,7 +98,7 @@ static void check(int a, int b, bool expected)
     qht_statistics_destroy(&stats);
 }
 
-static void count_func(struct qht *ht, void *p, uint32_t hash, void *userp)
+static void count_func(void *p, uint32_t hash, void *userp)
 {
     unsigned int *curr = userp;
 
@@ -108,14 +122,79 @@ static void iter_check(unsigned int count)
     g_assert_cmpuint(curr, ==, count);
 }
 
+static void sum_func(void *p, uint32_t hash, void *userp)
+{
+    uint32_t *sum = userp;
+    uint32_t a = *(uint32_t *)p;
+
+    *sum += a;
+}
+
+static void iter_sum_check(unsigned int expected)
+{
+    unsigned int sum = 0;
+
+    qht_iter(&ht, sum_func, &sum);
+    g_assert_cmpuint(sum, ==, expected);
+}
+
+static bool rm_mod_func(void *p, uint32_t hash, void *userp)
+{
+    uint32_t a = *(uint32_t *)p;
+    unsigned int mod = *(unsigned int *)userp;
+
+    return a % mod == 0;
+}
+
+static void iter_rm_mod(unsigned int mod)
+{
+    qht_iter_remove(&ht, rm_mod_func, &mod);
+}
+
+static void iter_rm_mod_check(unsigned int mod)
+{
+    unsigned int expected = 0;
+    unsigned int i;
+
+    for (i = 0; i < N; i++) {
+        if (i % mod == 0) {
+            continue;
+        }
+        expected += i;
+    }
+    iter_sum_check(expected);
+}
+
 static void qht_do_test(unsigned int mode, size_t init_entries)
 {
     /* under KVM we might fetch stats from an uninitialized qht */
     check_n(0);
 
     qht_init(&ht, is_equal, 0, mode);
+    rm_nonexist(0, 4);
+    /*
+     * Test that we successfully delete the last element in a bucket.
+     * This is a hard-to-reach code path when resizing is on, but without
+     * resizing we can easily hit it if init_entries <= 1.
+     * Given that the number of elements per bucket can be 4 or 6 depending on
+     * the host's pointer size, test the removal of the 4th and 6th elements.
+     */
+    insert(0, 4);
+    rm_nonexist(5, 6);
+    rm(3, 4);
+    check_n(3);
+    insert(3, 6);
+    rm(5, 6);
+    check_n(5);
+    rm_nonexist(7, 8);
+    iter_rm_mod(1);
+
+    if (!(mode & QHT_MODE_AUTO_RESIZE)) {
+        qht_resize(&ht, init_entries * 4 + 4);
+    }
 
     check_n(0);
+    rm_nonexist(0, 10);
     insert(0, N);
     check(0, N, true);
     check_n(N);
@@ -138,8 +217,12 @@ static void qht_do_test(unsigned int mode, size_t init_entries)
     insert(10, 150);
     check_n(N);
 
-    rm(1, 2);
-    check_n(N - 1);
+    qht_reset(&ht);
+    insert(0, N);
+    rm_nonexist(N, N + 32);
+    iter_rm_mod(10);
+    iter_rm_mod_check(10);
+    check_n(N * 9 / 10);
     qht_reset_size(&ht, 0);
     check_n(0);
     check(0, N, false);
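The iter_rm_mod(10) sequence is easy to verify by hand (assuming N is a multiple of 10): starting from the keys 0..N-1, qht_iter_remove() drops every value divisible by 10 (0, 10, 20, ...), so N/10 entries disappear and check_n(N * 9 / 10) counts the survivors, while iter_rm_mod_check(10) recomputes the expected sum of the remaining keys and compares it against a sum_func() pass over the table.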
diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py
index 7e58d9e0ca..cafbc6b3a5 100755
--- a/tests/vm/basevm.py
+++ b/tests/vm/basevm.py
@@ -65,6 +65,7 @@ class BaseVM(object):
             self._stdout = self._devnull
         self._args = [ \
             "-nodefaults", "-m", "4G",
+            "-cpu", "max",
             "-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22",
             "-device", "virtio-net-pci,netdev=vnet",
             "-vnc", "127.0.0.1:0,to=20",
@@ -72,11 +73,9 @@ class BaseVM(object):
         if vcpus:
             self._args += ["-smp", str(vcpus)]
         if os.access("/dev/kvm", os.R_OK | os.W_OK):
-            self._args += ["-cpu", "host"]
             self._args += ["-enable-kvm"]
         else:
             logging.info("KVM not available, not using -enable-kvm")
-            self._args += ["-cpu", "max"]
         self._data_args = []
 
     def _download_with_cache(self, url, sha256sum=None):
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 131ba6b4a8..621b3025d8 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -211,6 +211,7 @@ void aio_set_fd_handler(AioContext *ctx,
     AioHandler *node;
     bool is_new = false;
     bool deleted = false;
+    int poll_disable_change;
 
     qemu_lockcnt_lock(&ctx->list_lock);
 
@@ -244,11 +245,9 @@ void aio_set_fd_handler(AioContext *ctx,
             QLIST_REMOVE(node, node);
             deleted = true;
         }
-
-        if (!node->io_poll) {
-            ctx->poll_disable_cnt--;
-        }
+        poll_disable_change = -!node->io_poll;
     } else {
+        poll_disable_change = !io_poll - (node && !node->io_poll);
         if (node == NULL) {
             /* Alloc and insert if it's not already there */
             node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@ void aio_set_fd_handler(AioContext *ctx,
 
             g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
-
-            ctx->poll_disable_cnt += !io_poll;
-        } else {
-            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
         }
 
         /* Update handler with latest information */
@@ -274,6 +269,15 @@ void aio_set_fd_handler(AioContext *ctx,
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
     }
 
+    /* No need to order poll_disable_cnt writes against other updates;
+     * the counter is only used to avoid wasting time and latency on
+     * iterated polling when the system call will be ultimately necessary.
+     * Changing handlers is a rare event, and a little wasted polling until
+     * the aio_notify below is not an issue.
+     */
+    atomic_set(&ctx->poll_disable_cnt,
+               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+
     aio_epoll_update(ctx, node, is_new);
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
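Spelling out the poll_disable_change arithmetic (the same cases as before, just computed up front): removing a handler gives -!node->io_poll, i.e. -1 if the removed handler had no io_poll callback and 0 otherwise; adding or updating gives !io_poll - (node && !node->io_poll), i.e. +1 when the new handler lacks io_poll but the old one had it (or there was no old one), -1 when the new handler gains io_poll that the old one lacked, and 0 otherwise. The delta is then folded into ctx->poll_disable_cnt with the single atomic_set() shown above.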
@@ -486,7 +490,7 @@ static void add_pollfd(AioHandler *node)
     npfd++;
 }
 
-static bool run_poll_handlers_once(AioContext *ctx)
+static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
 {
     bool progress = false;
     AioHandler *node;
@@ -494,9 +498,11 @@ static bool run_poll_handlers_once(AioContext *ctx)
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         if (!node->deleted && node->io_poll &&
             aio_node_check(ctx, node->is_external) &&
-            node->io_poll(node->opaque) &&
-            node->opaque != &ctx->notifier) {
-            progress = true;
+            node->io_poll(node->opaque)) {
+            *timeout = 0;
+            if (node->opaque != &ctx->notifier) {
+                progress = true;
+            }
         }
 
         /* Caller handles freeing deleted nodes.  Don't do it here. */
@@ -518,31 +524,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
 {
     bool progress;
-    int64_t end_time;
+    int64_t start_time, elapsed_time;
 
     assert(ctx->notify_me);
     assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
-    assert(ctx->poll_disable_cnt == 0);
-
-    trace_run_poll_handlers_begin(ctx, max_ns);
 
-    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
+    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
 
+    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     do {
-        progress = run_poll_handlers_once(ctx);
-    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+        progress = run_poll_handlers_once(ctx, timeout);
+        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
+    } while (!progress && elapsed_time < max_ns
+             && !atomic_read(&ctx->poll_disable_cnt));
 
-    trace_run_poll_handlers_end(ctx, progress);
+    /* If time has passed with no successful polling, adjust *timeout to
+     * keep the same ending time.
+     */
+    if (*timeout != -1) {
+        *timeout -= MIN(*timeout, elapsed_time);
+    }
 
+    trace_run_poll_handlers_end(ctx, progress, *timeout);
     return progress;
 }
 
 /* try_poll_mode:
  * @ctx: the AioContext
- * @blocking: busy polling is only attempted when blocking is true
+ * @timeout: timeout for blocking wait, computed by the caller and updated if
+ *    polling succeeds.
  *
  * ctx->notify_me must be non-zero so this function can detect aio_notify().
  *
@@ -550,19 +563,16 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool try_poll_mode(AioContext *ctx, bool blocking)
+static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
 {
-    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
-        /* See qemu_soonest_timeout() uint64_t hack */
-        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
-                             (uint64_t)ctx->poll_ns);
+    /* See qemu_soonest_timeout() uint64_t hack */
+    int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
 
-        if (max_ns) {
-            poll_set_started(ctx, true);
+    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+        poll_set_started(ctx, true);
 
-            if (run_poll_handlers(ctx, max_ns)) {
-                return true;
-            }
+        if (run_poll_handlers(ctx, max_ns, timeout)) {
+            return true;
         }
     }
 
@@ -571,7 +581,7 @@ static bool try_poll_mode(AioContext *ctx, bool blocking)
     /* Even if we don't run busy polling, try polling once in case it can make
      * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
      */
-    return run_poll_handlers_once(ctx);
+    return run_poll_handlers_once(ctx, timeout);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)
@@ -601,8 +611,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
         start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     }
 
-    progress = try_poll_mode(ctx, blocking);
-    if (!progress) {
+    timeout = blocking ? aio_compute_timeout(ctx) : 0;
+    progress = try_poll_mode(ctx, &timeout);
+    assert(!(timeout && progress));
+
+    /* If polling is allowed, non-blocking aio_poll does not need the
+     * system call---a single round of run_poll_handlers_once suffices.
+     */
+    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
         assert(npfd == 0);
 
         /* fill pollfds */
@@ -616,8 +632,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
             }
         }
 
-        timeout = blocking ? aio_compute_timeout(ctx) : 0;
-
         /* wait until next event */
         if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
             AioHandler epoll_handler;
diff --git a/util/memfd.c b/util/memfd.c
index d248a53c3c..6287946b61 100644
--- a/util/memfd.c
+++ b/util/memfd.c
@@ -187,6 +187,7 @@ bool qemu_memfd_alloc_check(void)
         int fd;
         void *ptr;
 
+        fd = -1;
         ptr = qemu_memfd_alloc("test", 4096, 0, &fd, NULL);
         memfd_check = ptr ? MEMFD_OK : MEMFD_KO;
         qemu_memfd_free(ptr, 4096, fd);
diff --git a/util/qht.c b/util/qht.c
index 1e3a072e25..aa51be3c52 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -89,6 +89,19 @@
 #define QHT_BUCKET_ENTRIES 4
 #endif
 
+enum qht_iter_type {
+    QHT_ITER_VOID,    /* do nothing; use retvoid */
+    QHT_ITER_RM,      /* remove element if retbool returns true */
+};
+
+struct qht_iter {
+    union {
+        qht_iter_func_t retvoid;
+        qht_iter_bool_func_t retbool;
+    } f;
+    enum qht_iter_type type;
+};
+
 /*
  * Do _not_ use qemu_mutex_[try]lock directly! Use these macros, otherwise
  * the profiler (QSP) will deadlock.
@@ -223,7 +236,7 @@ static inline void qht_head_init(struct qht_bucket *b)
 }
 
 static inline
-struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
+struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
 {
     return &map->buckets[hash & (map->n_buckets - 1)];
 }
@@ -255,7 +268,8 @@ static void qht_map_unlock_buckets(struct qht_map *map)
  * Call with at least a bucket lock held.
  * @map should be the value read before acquiring the lock (or locks).
  */
-static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
+static inline bool qht_map_is_stale__locked(const struct qht *ht,
+                                            const struct qht_map *map)
 {
     return map != ht->map;
 }
@@ -324,12 +338,12 @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
     return b;
 }
 
-static inline bool qht_map_needs_resize(struct qht_map *map)
+static inline bool qht_map_needs_resize(const struct qht_map *map)
 {
     return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
 }
 
-static inline void qht_chain_destroy(struct qht_bucket *head)
+static inline void qht_chain_destroy(const struct qht_bucket *head)
 {
     struct qht_bucket *curr = head->next;
     struct qht_bucket *prev;
@@ -469,10 +483,10 @@ bool qht_reset_size(struct qht *ht, size_t n_elems)
 }
 
 static inline
-void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
+void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                     const void *userp, uint32_t hash)
 {
-    struct qht_bucket *b = head;
+    const struct qht_bucket *b = head;
     int i;
 
     do {
@@ -496,7 +510,7 @@ void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
 }
 
 static __attribute__((noinline))
-void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
+void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                            const void *userp, uint32_t hash)
 {
     unsigned int version;
@@ -509,11 +523,11 @@ void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
     return ret;
 }
 
-void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                         qht_lookup_func_t func)
 {
-    struct qht_bucket *b;
-    struct qht_map *map;
+    const struct qht_bucket *b;
+    const struct qht_map *map;
     unsigned int version;
     void *ret;
 
@@ -532,13 +546,16 @@ void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
     return qht_lookup__slowpath(b, func, userp, hash);
 }
 
-void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
+void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
 {
     return qht_lookup_custom(ht, userp, hash, ht->cmp);
 }
 
-/* call with head->lock held */
-static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
+/*
+ * call with head->lock held
+ * @ht is const since it is only used for ht->cmp()
+ */
+static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                 struct qht_bucket *head, void *p, uint32_t hash,
                                 bool *needs_resize)
 {
@@ -632,7 +649,7 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
     return false;
 }
 
-static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
+static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
 {
     if (pos == QHT_BUCKET_ENTRIES - 1) {
         if (b->next == NULL) {
@@ -658,7 +675,7 @@ qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
 }
 
 /*
- * Find the last valid entry in @head, and swap it with @orig[pos], which has
+ * Find the last valid entry in @orig, and swap it with @orig[pos], which has
  * just been invalidated.
  */
 static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
@@ -692,8 +709,7 @@ static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
 
 /* call with b->lock held */
 static inline
-bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
-                        const void *p, uint32_t hash)
+bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
 {
     struct qht_bucket *b = head;
     int i;
@@ -728,15 +744,16 @@ bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
     qht_debug_assert(p);
 
     b = qht_bucket_lock__no_stale(ht, hash, &map);
-    ret = qht_remove__locked(map, b, p, hash);
+    ret = qht_remove__locked(b, p, hash);
     qht_bucket_debug__locked(b);
     qemu_spin_unlock(&b->lock);
     return ret;
 }
 
-static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
-                                   qht_iter_func_t func, void *userp)
+static inline void qht_bucket_iter(struct qht_bucket *head,
+                                   const struct qht_iter *iter, void *userp)
 {
+    struct qht_bucket *b = head;
     int i;
 
     do {
@@ -744,37 +761,83 @@ static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
             if (b->pointers[i] == NULL) {
                 return;
             }
-            func(ht, b->pointers[i], b->hashes[i], userp);
+            switch (iter->type) {
+            case QHT_ITER_VOID:
+                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
+                break;
+            case QHT_ITER_RM:
+                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
+                    /* replace i with the last valid element in the bucket */
+                    seqlock_write_begin(&head->sequence);
+                    qht_bucket_remove_entry(b, i);
+                    seqlock_write_end(&head->sequence);
+                    qht_bucket_debug__locked(b);
+                    /* reevaluate i, since it just got replaced */
+                    i--;
+                    continue;
+                }
+                break;
+            default:
+                g_assert_not_reached();
+            }
         }
         b = b->next;
     } while (b);
 }
 
 /* call with all of the map's locks held */
-static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
-                                            qht_iter_func_t func, void *userp)
+static inline void qht_map_iter__all_locked(struct qht_map *map,
+                                            const struct qht_iter *iter,
+                                            void *userp)
 {
     size_t i;
 
     for (i = 0; i < map->n_buckets; i++) {
-        qht_bucket_iter(ht, &map->buckets[i], func, userp);
+        qht_bucket_iter(&map->buckets[i], iter, userp);
     }
 }
 
-void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
+static inline void
+do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
 {
     struct qht_map *map;
 
     map = atomic_rcu_read(&ht->map);
     qht_map_lock_buckets(map);
-    /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
-    qht_map_iter__all_locked(ht, map, func, userp);
+    qht_map_iter__all_locked(map, iter, userp);
     qht_map_unlock_buckets(map);
 }
 
-static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
+void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
+{
+    const struct qht_iter iter = {
+        .f.retvoid = func,
+        .type = QHT_ITER_VOID,
+    };
+
+    do_qht_iter(ht, &iter, userp);
+}
+
+void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
+{
+    const struct qht_iter iter = {
+        .f.retbool = func,
+        .type = QHT_ITER_RM,
+    };
+
+    do_qht_iter(ht, &iter, userp);
+}
+
+struct qht_map_copy_data {
+    struct qht *ht;
+    struct qht_map *new;
+};
+
+static void qht_map_copy(void *p, uint32_t hash, void *userp)
 {
-    struct qht_map *new = userp;
+    struct qht_map_copy_data *data = userp;
+    struct qht *ht = data->ht;
+    struct qht_map *new = data->new;
     struct qht_bucket *b = qht_map_to_bucket(new, hash);
 
     /* no need to acquire b->lock because no thread has seen this map yet */
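Editor's note on the QHT_ITER_RM path above: it removes an entry in place when the boolean callback returns true, bracketing the swap with the bucket's seqlock so lockless readers retry. A hedged usage sketch of the qht_iter_remove() entry point added here; the table, element semantics, and predicate are invented for the example, and within the QEMU tree it would build against the qht header.

    #include "qemu/osdep.h"
    #include "qemu/qht.h"

    /* Invented predicate: ask the iterator to drop entries whose hash falls
     * below a caller-supplied threshold.  Returning true requests removal. */
    static bool prune_below(void *p, uint32_t hash, void *userp)
    {
        uint32_t threshold = *(uint32_t *)userp;

        (void)p;                      /* a real predicate would inspect the element */
        return hash < threshold;
    }

    static void prune_table(struct qht *ht)
    {
        uint32_t threshold = 0x1000;

        /* One pass: visit every entry, removing the ones the callback flags. */
        qht_iter_remove(ht, prune_below, &threshold);
    }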
@@ -788,6 +851,11 @@ static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
 static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
 {
     struct qht_map *old;
+    const struct qht_iter iter = {
+        .f.retvoid = qht_map_copy,
+        .type = QHT_ITER_VOID,
+    };
+    struct qht_map_copy_data data;
 
     old = ht->map;
     qht_map_lock_buckets(old);
@@ -802,7 +870,9 @@ static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
     }
 
     g_assert(new->n_buckets != old->n_buckets);
-    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
+    data.ht = ht;
+    data.new = new;
+    qht_map_iter__all_locked(old, &iter, &data);
     qht_map_debug__all_locked(new);
 
     atomic_rcu_set(&ht->map, new);
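Editor's note: since the callbacks no longer receive the struct qht pointer, the resize path now bundles everything qht_map_copy() needs (the table, for its comparison function, plus the destination map) into qht_map_copy_data and threads it through the opaque userp argument. The pattern itself is generic; a small standalone illustration with made-up names:

    #include <stdint.h>
    #include <stdio.h>

    /* Generic version of the qht_map_copy_data idea: when a callback only
     * gets one opaque pointer, bundle the extra context in a struct. */
    struct copy_ctx {
        const char *label;   /* stands in for data->ht */
        int *dst;            /* stands in for data->new */
    };

    static void visit(void *p, uint32_t hash, void *userp)
    {
        struct copy_ctx *ctx = userp;

        (void)hash;
        *ctx->dst += *(int *)p;
        printf("%s: accumulated %d\n", ctx->label, *ctx->dst);
    }

    int main(void)
    {
        int total = 0;
        int elem = 42;
        struct copy_ctx ctx = { .label = "copy", .dst = &total };

        visit(&elem, 0, &ctx);   /* in qht this call is made by the iterator */
        return 0;
    }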
@@ -829,9 +899,9 @@ bool qht_resize(struct qht *ht, size_t n_elems)
 }
 
 /* pass @stats to qht_statistics_destroy() when done */
-void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
+void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
 {
-    struct qht_map *map;
+    const struct qht_map *map;
     int i;
 
     map = atomic_rcu_read(&ht->map);
@@ -848,8 +918,8 @@ void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
     stats->head_buckets = map->n_buckets;
 
     for (i = 0; i < map->n_buckets; i++) {
-        struct qht_bucket *head = &map->buckets[i];
-        struct qht_bucket *b;
+        const struct qht_bucket *head = &map->buckets[i];
+        const struct qht_bucket *b;
         unsigned int version;
         size_t buckets;
         size_t entries;
diff --git a/util/qsp.c b/util/qsp.c
index b0c2575d10..2de3a97594 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -533,7 +533,7 @@ static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up)
     }
 }
 
-static void qsp_sort(struct qht *ht, void *p, uint32_t h, void *userp)
+static void qsp_sort(void *p, uint32_t h, void *userp)
 {
     QSPEntry *e = p;
     GTree *tree = userp;
@@ -541,7 +541,7 @@ static void qsp_sort(struct qht *ht, void *p, uint32_t h, void *userp)
     g_tree_insert(tree, e, NULL);
 }
 
-static void qsp_aggregate(struct qht *global_ht, void *p, uint32_t h, void *up)
+static void qsp_aggregate(void *p, uint32_t h, void *up)
 {
     struct qht *ht = up;
     const QSPEntry *e = p;
@@ -553,7 +553,7 @@ static void qsp_aggregate(struct qht *global_ht, void *p, uint32_t h, void *up)
     qsp_entry_aggregate(agg, e);
 }
 
-static void qsp_iter_diff(struct qht *orig, void *p, uint32_t hash, void *htp)
+static void qsp_iter_diff(void *p, uint32_t hash, void *htp)
 {
     struct qht *ht = htp;
     QSPEntry *old = p;
@@ -583,8 +583,7 @@ static void qsp_diff(struct qht *orig, struct qht *new)
     qht_iter(orig, qsp_iter_diff, new);
 }
 
-static void
-qsp_iter_callsite_coalesce(struct qht *orig, void *p, uint32_t h, void *htp)
+static void qsp_iter_callsite_coalesce(void *p, uint32_t h, void *htp)
 {
     struct qht *ht = htp;
     QSPEntry *old = p;
@@ -603,7 +602,7 @@ qsp_iter_callsite_coalesce(struct qht *orig, void *p, uint32_t h, void *htp)
     e->n_acqs += old->n_acqs;
 }
 
-static void qsp_ht_delete(struct qht *ht, void *p, uint32_t h, void *htp)
+static void qsp_ht_delete(void *p, uint32_t h, void *htp)
 {
     g_free(p);
 }
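Editor's note on the qsp.c changes: the callbacks only lose their unused first parameter; qsp_ht_delete(), for example, is just an adapter from the iterator signature to g_free(). A hedged sketch of how such an adapter might be paired with the existing QHT API to empty a table; the drop_all_entries() wrapper is invented for the example.

    #include "qemu/osdep.h"
    #include "qemu/qht.h"

    static void ht_delete(void *p, uint32_t h, void *userp)
    {
        (void)h;
        (void)userp;     /* required by the signature, unused here */
        g_free(p);
    }

    static void drop_all_entries(struct qht *ht)
    {
        qht_iter(ht, ht_delete, NULL);   /* free every element... */
        qht_reset(ht);                   /* ...then clear the buckets */
    }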
diff --git a/util/trace-events b/util/trace-events
index 4822434c89..79569b7fdf 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -1,8 +1,8 @@
 # See docs/devel/tracing.txt for syntax documentation.
 
 # util/aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
+run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
 poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
 poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
 
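Editor's note: the two aio-posix trace points gain the timeout so traces show how much of the polling budget was consumed; the split string literals concatenate into one format per event. A quick standalone check of roughly what the lines format to (the event-name prefix and exact layout of real trace output depend on the trace backend, so this is only an approximation):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        void *ctx = (void *)0x1234;
        int64_t max_ns = 32768, timeout = 0;
        bool progress = true;

        /* Same format strings as the updated trace-events entries above. */
        printf("run_poll_handlers_begin " "ctx %p max_ns %" PRId64 " timeout %" PRId64 "\n",
               ctx, max_ns, timeout);
        printf("run_poll_handlers_end " "ctx %p progress %d new timeout %" PRId64 "\n",
               ctx, progress, timeout);
        return 0;
    }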
diff --git a/vl.c b/vl.c
index 694bb67890..cc55fe04a2 100644
--- a/vl.c
+++ b/vl.c
@@ -3917,8 +3917,8 @@ int main(int argc, char **argv, char **envp)
     }
 
 #ifdef CONFIG_SECCOMP
-    if (qemu_opts_foreach(qemu_find_opts("sandbox"),
-                          parse_sandbox, NULL, NULL)) {
+    olist = qemu_find_opts_err("sandbox", NULL);
+    if (olist && qemu_opts_foreach(olist, parse_sandbox, NULL, NULL)) {
         exit(1);
     }
 #endif
@@ -4530,6 +4530,7 @@ int main(int argc, char **argv, char **envp)
         if (load_snapshot(loadvm, &local_err) < 0) {
             error_report_err(local_err);
             autostart = 0;
+            exit(1);
         }
     }