Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/ast2400.c                 |   21
-rw-r--r--  hw/arm/palmetto-bmc.c            |    2
-rw-r--r--  hw/block/dataplane/virtio-blk.c  |   83
-rw-r--r--  hw/block/dataplane/virtio-blk.h  |    2
-rw-r--r--  hw/block/m25p80.c                |  398
-rw-r--r--  hw/block/virtio-blk.c            |   57
-rw-r--r--  hw/char/cadence_uart.c           |    7
-rw-r--r--  hw/ide/macio.c                   |    2
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c        |    2
-rw-r--r--  hw/intc/xics.c                   |   11
-rw-r--r--  hw/misc/Makefile.objs            |    1
-rw-r--r--  hw/misc/aspeed_scu.c             |  284
-rw-r--r--  hw/misc/trace-events             |    3
-rw-r--r--  hw/net/cadence_gem.c             |   13
-rw-r--r--  hw/net/e1000.c                   |   18
-rw-r--r--  hw/net/e1000e_core.c             |    6
-rw-r--r--  hw/net/e1000x_common.c           |    2
-rw-r--r--  hw/net/eepro100.c                |    8
-rw-r--r--  hw/net/mipsnet.c                 |    8
-rw-r--r--  hw/net/rocker/rocker_tlv.h       |    6
-rw-r--r--  hw/net/rtl8139.c                 |   49
-rw-r--r--  hw/net/virtio-net.c              |    2
-rw-r--r--  hw/net/vmware_utils.h            |   55
-rw-r--r--  hw/net/vmxnet3.c                 |  197
-rw-r--r--  hw/ppc/ppce500_spin.c            |    2
-rw-r--r--  hw/ppc/spapr.c                   |    4
-rw-r--r--  hw/s390x/virtio-ccw.c            |  133
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c  |    9
-rw-r--r--  hw/scsi/virtio-scsi.c            |    5
-rw-r--r--  hw/sh4/sh_pci.c                  |    4
-rw-r--r--  hw/virtio/vhost.c                |   13
-rw-r--r--  hw/virtio/virtio-bus.c           |  132
-rw-r--r--  hw/virtio/virtio-mmio.c          |  128
-rw-r--r--  hw/virtio/virtio-pci.c           |  124
34 files changed, 1223 insertions, 568 deletions
diff --git a/hw/arm/ast2400.c b/hw/arm/ast2400.c
index 4a9de0e10c..b14a82fcde 100644
--- a/hw/arm/ast2400.c
+++ b/hw/arm/ast2400.c
@@ -24,9 +24,12 @@
 #define AST2400_IOMEM_SIZE       0x00200000
 #define AST2400_IOMEM_BASE       0x1E600000
 #define AST2400_VIC_BASE         0x1E6C0000
+#define AST2400_SCU_BASE         0x1E6E2000
 #define AST2400_TIMER_BASE       0x1E782000
 #define AST2400_I2C_BASE         0x1E78A000
 
+#define AST2400_A0_SILICON_REV   0x02000303
+
 static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
 static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, };
 
@@ -72,6 +75,16 @@ static void ast2400_init(Object *obj)
     object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C);
     object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL);
     qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default());
+
+    object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU);
+    object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL);
+    qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default());
+    qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev",
+                         AST2400_A0_SILICON_REV);
+    object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu),
+                              "hw-strap1", &error_abort);
+    object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
+                              "hw-strap2", &error_abort);
 }
 
 static void ast2400_realize(DeviceState *dev, Error **errp)
@@ -110,6 +123,14 @@ static void ast2400_realize(DeviceState *dev, Error **errp)
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
     }
 
+    /* SCU */
+    object_property_set_bool(OBJECT(&s->scu), true, "realized", &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, AST2400_SCU_BASE);
+
     /* UART - attach an 8250 to the IO space as our UART5 */
     if (serial_hds[0]) {
         qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]);
diff --git a/hw/arm/palmetto-bmc.c b/hw/arm/palmetto-bmc.c
index a51d960510..b8eed21348 100644
--- a/hw/arm/palmetto-bmc.c
+++ b/hw/arm/palmetto-bmc.c
@@ -44,6 +44,8 @@ static void palmetto_bmc_init(MachineState *machine)
                                 &bmc->ram);
     object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
                                    &error_abort);
+    object_property_set_int(OBJECT(&bmc->soc), 0x120CE416, "hw-strap1",
+                            &error_abort);
     object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
                              &error_abort);
 
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 2073f9a270..54b9ac1da6 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -31,11 +31,9 @@ struct VirtIOBlockDataPlane {
     bool stopping;
 
     VirtIOBlkConf *conf;
-
     VirtIODevice *vdev;
-    VirtQueue *vq;                  /* virtqueue vring */
-    EventNotifier *guest_notifier;  /* irq */
     QEMUBH *bh;                     /* bh for guest notification */
+    unsigned long *batch_notify_vqs;
 
     /* Note that these EventNotifiers are assigned by value.  This is
      * fine as long as you do not call event_notifier_cleanup on them
@@ -47,20 +45,36 @@ struct VirtIOBlockDataPlane {
 };
 
 /* Raise an interrupt to signal guest, if necessary */
-void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
 {
+    set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
     qemu_bh_schedule(s->bh);
 }
 
 static void notify_guest_bh(void *opaque)
 {
     VirtIOBlockDataPlane *s = opaque;
+    unsigned nvqs = s->conf->num_queues;
+    unsigned long bitmap[BITS_TO_LONGS(nvqs)];
+    unsigned j;
 
-    if (!virtio_should_notify(s->vdev, s->vq)) {
-        return;
-    }
+    memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
+    memset(s->batch_notify_vqs, 0, sizeof(bitmap));
+
+    for (j = 0; j < nvqs; j += BITS_PER_LONG) {
+        unsigned long bits = bitmap[j / BITS_PER_LONG];
 
-    event_notifier_set(s->guest_notifier);
+        while (bits != 0) {
+            unsigned i = j + ctzl(bits);
+            VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+            if (virtio_should_notify(s->vdev, vq)) {
+                event_notifier_set(virtio_queue_get_guest_notifier(vq));
+            }
+
+            bits &= bits - 1; /* clear right-most bit */
+        }
+    }
 }
 
 /* Context: QEMU global mutex held */
@@ -79,7 +93,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
     }
 
     /* Don't try if transport does not support notifiers. */
-    if (!k->set_guest_notifiers || !k->set_host_notifier) {
+    if (!k->set_guest_notifiers || !k->ioeventfd_started) {
         error_setg(errp,
                    "device is incompatible with dataplane "
                    "(transport does not support notifiers)");
@@ -104,6 +118,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
     }
     s->ctx = iothread_get_aio_context(s->iothread);
     s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
+    s->batch_notify_vqs = bitmap_new(conf->num_queues);
 
     *dataplane = s;
 }
@@ -116,6 +131,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     }
 
     virtio_blk_data_plane_stop(s);
+    g_free(s->batch_notify_vqs);
     qemu_bh_delete(s->bh);
     object_unref(OBJECT(s->iothread));
     g_free(s);
@@ -138,6 +154,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+    unsigned i;
+    unsigned nvqs = s->conf->num_queues;
     int r;
 
     if (vblk->dataplane_started || s->starting) {
@@ -145,22 +163,25 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     }
 
     s->starting = true;
-    s->vq = virtio_get_queue(s->vdev, 0);
 
     /* Set up guest notifier (irq) */
-    r = k->set_guest_notifiers(qbus->parent, 1, true);
+    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
     if (r != 0) {
         fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
                 "ensure -enable-kvm is set\n", r);
         goto fail_guest_notifiers;
     }
-    s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);
 
     /* Set up virtqueue notify */
-    r = k->set_host_notifier(qbus->parent, 0, true);
-    if (r != 0) {
-        fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
-        goto fail_host_notifier;
+    for (i = 0; i < nvqs; i++) {
+        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
+        if (r != 0) {
+            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
+            while (i--) {
+                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+            }
+            goto fail_guest_notifiers;
+        }
     }
 
     s->starting = false;
@@ -170,17 +191,23 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     blk_set_aio_context(s->conf->conf.blk, s->ctx);
 
     /* Kick right away to begin processing requests already in vring */
-    event_notifier_set(virtio_queue_get_host_notifier(s->vq));
+    for (i = 0; i < nvqs; i++) {
+        VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+        event_notifier_set(virtio_queue_get_host_notifier(vq));
+    }
 
     /* Get this show started by hooking up our callbacks */
     aio_context_acquire(s->ctx);
-    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
-                                               virtio_blk_data_plane_handle_output);
+    for (i = 0; i < nvqs; i++) {
+        VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
+                virtio_blk_data_plane_handle_output);
+    }
     aio_context_release(s->ctx);
     return;
 
-  fail_host_notifier:
-    k->set_guest_notifiers(qbus->parent, 1, false);
   fail_guest_notifiers:
     vblk->dataplane_disabled = true;
     s->starting = false;
@@ -193,6 +220,8 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+    unsigned i;
+    unsigned nvqs = s->conf->num_queues;
 
     if (!vblk->dataplane_started || s->stopping) {
         return;
@@ -210,17 +239,23 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     aio_context_acquire(s->ctx);
 
     /* Stop notifications for new requests from guest */
-    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL);
+    for (i = 0; i < nvqs; i++) {
+        VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
+    }
 
     /* Drain and switch bs back to the QEMU main loop */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
 
     aio_context_release(s->ctx);
 
-    k->set_host_notifier(qbus->parent, 0, false);
+    for (i = 0; i < nvqs; i++) {
+        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+    }
 
     /* Clean up guest notifier (irq) */
-    k->set_guest_notifiers(qbus->parent, 1, false);
+    k->set_guest_notifiers(qbus->parent, nvqs, false);
 
     vblk->dataplane_started = false;
     s->stopping = false;
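
The notify_guest_bh() hunk above batches guest notifications: each completed virtqueue sets one bit in batch_notify_vqs, and the bottom half later walks that bitmap with ctzl() (count trailing zeros) while "bits &= bits - 1" clears the bit just handled. A minimal standalone sketch of the same set-bit walk, using the compiler builtin __builtin_ctzl in place of QEMU's ctzl() helper; the printf stands in for the real event_notifier_set() call:

#include <stdio.h>

/* Visit every set bit in 'bits', lowest first, and report its index. */
static void for_each_set_bit_demo(unsigned long bits)
{
    while (bits != 0) {
        unsigned i = __builtin_ctzl(bits);   /* index of lowest set bit */
        printf("virtqueue %u needs a guest notification\n", i);
        bits &= bits - 1;                    /* clear that bit and continue */
    }
}

int main(void)
{
    for_each_set_bit_demo(0x29UL); /* bits 0, 3 and 5 set -> queues 0, 3, 5 */
    return 0;
}

Clearing the lowest set bit each iteration keeps the loop proportional to the number of pending queues rather than to the total queue count.
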
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index 0714c11a2b..b1f0b95b32 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -26,6 +26,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
 
 #endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 51d8596056..326b688e83 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -53,12 +53,17 @@
 /* 16 MiB max in 3 byte address mode */
 #define MAX_3BYTES_SIZE 0x1000000
 
+#define SPI_NOR_MAX_ID_LEN 6
+
 typedef struct FlashPartInfo {
     const char *part_name;
-    /* jedec code. (jedec >> 16) & 0xff is the 1st byte, >> 8 the 2nd etc */
-    uint32_t jedec;
-    /* extended jedec code */
-    uint16_t ext_jedec;
+    /*
+     * This array stores the ID bytes.
+     * The first three bytes are the JEDIC ID.
+     * JEDEC ID zero means "no ID" (mostly older chips).
+     */
+    uint8_t id[SPI_NOR_MAX_ID_LEN];
+    uint8_t id_len;
     /* there is confusion between manufacturers as to what a sector is. In this
      * device model, a "sector" is the size that is erased by the ERASE_SECTOR
      * command (opcode 0xd8).
@@ -70,11 +75,33 @@ typedef struct FlashPartInfo {
 } FlashPartInfo;
 
 /* adapted from linux */
-
-#define INFO(_part_name, _jedec, _ext_jedec, _sector_size, _n_sectors, _flags)\
-    .part_name = (_part_name),\
-    .jedec = (_jedec),\
-    .ext_jedec = (_ext_jedec),\
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
+    .part_name = _part_name,\
+    .id = {\
+        ((_jedec_id) >> 16) & 0xff,\
+        ((_jedec_id) >> 8) & 0xff,\
+        (_jedec_id) & 0xff,\
+        ((_ext_id) >> 8) & 0xff,\
+        (_ext_id) & 0xff,\
+          },\
+    .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\
+    .sector_size = (_sector_size),\
+    .n_sectors = (_n_sectors),\
+    .page_size = 256,\
+    .flags = (_flags),
+
+#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
+    .part_name = _part_name,\
+    .id = {\
+        ((_jedec_id) >> 16) & 0xff,\
+        ((_jedec_id) >> 8) & 0xff,\
+        (_jedec_id) & 0xff,\
+        ((_ext_id) >> 16) & 0xff,\
+        ((_ext_id) >> 8) & 0xff,\
+        (_ext_id) & 0xff,\
+          },\
+    .id_len = 6,\
     .sector_size = (_sector_size),\
     .n_sectors = (_n_sectors),\
     .page_size = 256,\
@@ -102,12 +129,26 @@ typedef struct FlashPartInfo {
 #define EVCFG_QUAD_IO_ENABLED (1 << 7)
 #define NVCFG_4BYTE_ADDR_MASK (1 << 0)
 #define NVCFG_LOWER_SEGMENT_MASK (1 << 1)
-#define CFG_UPPER_128MB_SEG_ENABLED 0x3
 
 /* Numonyx (Micron) Flag Status Register macros */
 #define FSR_4BYTE_ADDR_MODE_ENABLED 0x1
 #define FSR_FLASH_READY (1 << 7)
 
+/* Spansion configuration registers macros. */
+#define SPANSION_QUAD_CFG_POS 0
+#define SPANSION_QUAD_CFG_LEN 1
+#define SPANSION_DUMMY_CLK_POS 0
+#define SPANSION_DUMMY_CLK_LEN 4
+#define SPANSION_ADDR_LEN_POS 7
+#define SPANSION_ADDR_LEN_LEN 1
+
+/*
+ * Spansion read mode command length in bytes,
+ * the mode is currently not supported.
+*/
+
+#define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1
+
 static const FlashPartInfo known_devices[] = {
     /* Atmel -- some are (confusingly) marketed as "DataFlash" */
     { INFO("at25fs010",   0x1f6601,      0,  32 << 10,   4, ER_4K) },
@@ -158,6 +199,8 @@ static const FlashPartInfo known_devices[] = {
     { INFO("mx25l12855e", 0xc22618,      0,  64 << 10, 256, 0) },
     { INFO("mx25l25635e", 0xc22019,      0,  64 << 10, 512, 0) },
     { INFO("mx25l25655e", 0xc22619,      0,  64 << 10, 512, 0) },
+    { INFO("mx66u51235f", 0xc2253a,      0,  64 << 10, 1024, ER_4K | ER_32K) },
+    { INFO("mx66u1g45g",  0xc2253b,      0,  64 << 10, 2048, ER_4K | ER_32K) },
 
     /* Micron */
     { INFO("n25q032a11",  0x20bb16,      0,  64 << 10,  64, ER_4K) },
@@ -168,6 +211,11 @@ static const FlashPartInfo known_devices[] = {
     { INFO("n25q128a13",  0x20ba18,      0,  64 << 10, 256, ER_4K) },
     { INFO("n25q256a11",  0x20bb19,      0,  64 << 10, 512, ER_4K) },
     { INFO("n25q256a13",  0x20ba19,      0,  64 << 10, 512, ER_4K) },
+    { INFO("n25q128",     0x20ba18,      0,  64 << 10, 256, 0) },
+    { INFO("n25q256a",    0x20ba19,      0,  64 << 10, 512, ER_4K) },
+    { INFO("n25q512a",    0x20ba20,      0,  64 << 10, 1024, ER_4K) },
+    { INFO("mt25ql01g",   0x20ba21,      0,  64 << 10, 2048, ER_4K) },
+    { INFO("mt25qu01g",   0x20bb21,      0,  64 << 10, 2048, ER_4K) },
 
     /* Spansion -- single (large) sector size only, at least
      * for the chips listed here (without boot sectors).
@@ -176,8 +224,8 @@ static const FlashPartInfo known_devices[] = {
     { INFO("s25sl064p",   0x010216, 0x4d00,  64 << 10, 128, ER_4K) },
     { INFO("s25fl256s0",  0x010219, 0x4d00, 256 << 10, 128, 0) },
     { INFO("s25fl256s1",  0x010219, 0x4d01,  64 << 10, 512, 0) },
-    { INFO("s25fl512s",   0x010220, 0x4d00, 256 << 10, 256, 0) },
-    { INFO("s70fl01gs",   0x010221, 0x4d00, 256 << 10, 256, 0) },
+    { INFO6("s25fl512s",  0x010220, 0x4d0080, 256 << 10, 256, 0) },
+    { INFO6("s70fl01gs",  0x010221, 0x4d0080, 256 << 10, 512, 0) },
     { INFO("s25sl12800",  0x012018, 0x0300, 256 << 10,  64, 0) },
     { INFO("s25sl12801",  0x012018, 0x0301,  64 << 10, 256, 0) },
     { INFO("s25fl129p0",  0x012018, 0x4d00, 256 << 10,  64, 0) },
@@ -190,6 +238,10 @@ static const FlashPartInfo known_devices[] = {
     { INFO("s25fl016k",   0xef4015,      0,  64 << 10,  32, ER_4K | ER_32K) },
     { INFO("s25fl064k",   0xef4017,      0,  64 << 10, 128, ER_4K | ER_32K) },
 
+    /* Spansion --  boot sectors support  */
+    { INFO6("s25fs512s",    0x010220, 0x4d0081, 256 << 10, 256, 0) },
+    { INFO6("s70fs01gs",    0x010221, 0x4d0081, 256 << 10, 512, 0) },
+
     /* SST -- large erase sizes are "overlays", "sectors" are 4<< 10 */
     { INFO("sst25vf040b", 0xbf258d,      0,  64 << 10,   8, ER_4K) },
     { INFO("sst25vf080b", 0xbf258e,      0,  64 << 10,  16, ER_4K) },
@@ -240,10 +292,6 @@ static const FlashPartInfo known_devices[] = {
     { INFO("w25q80",      0xef5014,      0,  64 << 10,  16, ER_4K) },
     { INFO("w25q80bl",    0xef4014,      0,  64 << 10,  16, ER_4K) },
     { INFO("w25q256",     0xef4019,      0,  64 << 10, 512, ER_4K) },
-
-    { INFO("n25q128",      0x20ba18,      0,  64 << 10, 256, 0) },
-    { INFO("n25q256a",     0x20ba19,      0,  64 << 10, 512, ER_4K) },
-    { INFO("n25q512a",     0x20ba20,      0,  64 << 10, 1024, ER_4K) },
 };
 
 typedef enum {
@@ -255,6 +303,7 @@ typedef enum {
     JEDEC_READ = 0x9f,
     BULK_ERASE = 0xc7,
     READ_FSR = 0x70,
+    RDCR = 0x15,
 
     READ = 0x03,
     READ4 = 0x13,
@@ -271,12 +320,14 @@ typedef enum {
 
     PP = 0x02,
     PP4 = 0x12,
+    PP4_4 = 0x3e,
     DPP = 0xa2,
     QPP = 0x32,
 
     ERASE_4K = 0x20,
     ERASE4_4K = 0x21,
     ERASE_32K = 0x52,
+    ERASE4_32K = 0x5c,
     ERASE_SECTOR = 0xd8,
     ERASE4_SECTOR = 0xdc,
 
@@ -289,6 +340,13 @@ typedef enum {
     RESET_ENABLE = 0x66,
     RESET_MEMORY = 0x99,
 
+    /*
+     * Micron: 0x35 - enable QPI
+     * Spansion: 0x35 - read control register
+     */
+    RDCR_EQIO = 0x35,
+    RSTQIO = 0xf5,
+
     RNVCR = 0xB5,
     WNVCR = 0xB1,
 
@@ -304,9 +362,18 @@ typedef enum {
     STATE_PAGE_PROGRAM,
     STATE_READ,
     STATE_COLLECTING_DATA,
+    STATE_COLLECTING_VAR_LEN_DATA,
     STATE_READING_DATA,
 } CMDState;
 
+typedef enum {
+    MAN_SPANSION,
+    MAN_MACRONIX,
+    MAN_NUMONYX,
+    MAN_WINBOND,
+    MAN_GENERIC,
+} Manufacturer;
+
 typedef struct Flash {
     SSISlave parent_obj;
 
@@ -324,11 +391,22 @@ typedef struct Flash {
     uint8_t cmd_in_progress;
     uint64_t cur_addr;
     uint32_t nonvolatile_cfg;
+    /* Configuration register for Macronix */
     uint32_t volatile_cfg;
     uint32_t enh_volatile_cfg;
+    /* Spansion cfg registers. */
+    uint8_t spansion_cr1nv;
+    uint8_t spansion_cr2nv;
+    uint8_t spansion_cr3nv;
+    uint8_t spansion_cr4nv;
+    uint8_t spansion_cr1v;
+    uint8_t spansion_cr2v;
+    uint8_t spansion_cr3v;
+    uint8_t spansion_cr4v;
     bool write_enable;
     bool four_bytes_address_mode;
     bool reset_enable;
+    bool quad_enable;
     uint8_t ear;
 
     int64_t dirty_page;
@@ -350,6 +428,22 @@ typedef struct M25P80Class {
 #define M25P80_GET_CLASS(obj) \
      OBJECT_GET_CLASS(M25P80Class, (obj), TYPE_M25P80)
 
+static inline Manufacturer get_man(Flash *s)
+{
+    switch (s->pi->id[0]) {
+    case 0x20:
+        return MAN_NUMONYX;
+    case 0xEF:
+        return MAN_WINBOND;
+    case 0x01:
+        return MAN_SPANSION;
+    case 0xC2:
+        return MAN_MACRONIX;
+    default:
+        return MAN_GENERIC;
+    }
+}
+
 static void blk_sync_complete(void *opaque, int ret)
 {
     /* do nothing. Masters do not directly interact with the backing store,
@@ -398,6 +492,7 @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd)
         capa_to_assert = ER_4K;
         break;
     case ERASE_32K:
+    case ERASE4_32K:
         len = 32 << 10;
         capa_to_assert = ER_32K;
         break;
@@ -468,9 +563,11 @@ static inline int get_addr_length(Flash *s)
 
    switch (s->cmd_in_progress) {
    case PP4:
+   case PP4_4:
    case READ4:
    case QIOR4:
    case ERASE4_4K:
+   case ERASE4_32K:
    case ERASE4_SECTOR:
    case FAST_READ4:
    case DOR4:
@@ -494,7 +591,7 @@ static void complete_collecting_data(Flash *s)
     }
 
     if (get_addr_length(s) == 3) {
-        s->cur_addr += (s->ear & 0x3) * MAX_3BYTES_SIZE;
+        s->cur_addr += s->ear * MAX_3BYTES_SIZE;
     }
 
     s->state = STATE_IDLE;
@@ -504,6 +601,7 @@ static void complete_collecting_data(Flash *s)
     case QPP:
     case PP:
     case PP4:
+    case PP4_4:
         s->state = STATE_PAGE_PROGRAM;
         break;
     case READ:
@@ -523,11 +621,25 @@ static void complete_collecting_data(Flash *s)
     case ERASE_4K:
     case ERASE4_4K:
     case ERASE_32K:
+    case ERASE4_32K:
     case ERASE_SECTOR:
     case ERASE4_SECTOR:
         flash_erase(s, s->cur_addr, s->cmd_in_progress);
         break;
     case WRSR:
+        switch (get_man(s)) {
+        case MAN_SPANSION:
+            s->quad_enable = !!(s->data[1] & 0x02);
+            break;
+        case MAN_MACRONIX:
+            s->quad_enable = extract32(s->data[0], 6, 1);
+            if (s->len > 1) {
+                s->four_bytes_address_mode = extract32(s->data[1], 5, 1);
+            }
+            break;
+        default:
+            break;
+        }
         if (s->write_enable) {
             s->write_enable = false;
         }
@@ -561,8 +673,10 @@ static void reset_memory(Flash *s)
     s->state = STATE_IDLE;
     s->write_enable = false;
     s->reset_enable = false;
+    s->quad_enable = false;
 
-    if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) {
+    switch (get_man(s)) {
+    case MAN_NUMONYX:
         s->volatile_cfg = 0;
         s->volatile_cfg |= VCFG_DUMMY;
         s->volatile_cfg |= VCFG_WRAP_SEQUENTIAL;
@@ -592,16 +706,147 @@ static void reset_memory(Flash *s)
             s->four_bytes_address_mode = true;
         }
         if (!(s->nonvolatile_cfg & NVCFG_LOWER_SEGMENT_MASK)) {
-            s->ear = CFG_UPPER_128MB_SEG_ENABLED;
+            s->ear = s->size / MAX_3BYTES_SIZE - 1;
         }
+        break;
+    case MAN_MACRONIX:
+        s->volatile_cfg = 0x7;
+        break;
+    case MAN_SPANSION:
+        s->spansion_cr1v = s->spansion_cr1nv;
+        s->spansion_cr2v = s->spansion_cr2nv;
+        s->spansion_cr3v = s->spansion_cr3nv;
+        s->spansion_cr4v = s->spansion_cr4nv;
+        s->quad_enable = extract32(s->spansion_cr1v,
+                                   SPANSION_QUAD_CFG_POS,
+                                   SPANSION_QUAD_CFG_LEN
+                                   );
+        s->four_bytes_address_mode = extract32(s->spansion_cr2v,
+                SPANSION_ADDR_LEN_POS,
+                SPANSION_ADDR_LEN_LEN
+                );
+        break;
+    default:
+        break;
     }
 
     DB_PRINT_L(0, "Reset done.\n");
 }
 
+static void decode_fast_read_cmd(Flash *s)
+{
+    s->needed_bytes = get_addr_length(s);
+    switch (get_man(s)) {
+    /* Dummy cycles - modeled with bytes writes instead of bits */
+    case MAN_WINBOND:
+        s->needed_bytes += 8;
+        break;
+    case MAN_NUMONYX:
+        s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+        break;
+    case MAN_MACRONIX:
+        if (extract32(s->volatile_cfg, 6, 2) == 1) {
+            s->needed_bytes += 6;
+        } else {
+            s->needed_bytes += 8;
+        }
+        break;
+    case MAN_SPANSION:
+        s->needed_bytes += extract32(s->spansion_cr2v,
+                                    SPANSION_DUMMY_CLK_POS,
+                                    SPANSION_DUMMY_CLK_LEN
+                                    );
+        break;
+    default:
+        break;
+    }
+    s->pos = 0;
+    s->len = 0;
+    s->state = STATE_COLLECTING_DATA;
+}
+
+static void decode_dio_read_cmd(Flash *s)
+{
+    s->needed_bytes = get_addr_length(s);
+    /* Dummy cycles modeled with bytes writes instead of bits */
+    switch (get_man(s)) {
+    case MAN_WINBOND:
+        s->needed_bytes += 8;
+        break;
+    case MAN_SPANSION:
+        s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN;
+        s->needed_bytes += extract32(s->spansion_cr2v,
+                                    SPANSION_DUMMY_CLK_POS,
+                                    SPANSION_DUMMY_CLK_LEN
+                                    );
+        break;
+    case MAN_NUMONYX:
+        s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+        break;
+    case MAN_MACRONIX:
+        switch (extract32(s->volatile_cfg, 6, 2)) {
+        case 1:
+            s->needed_bytes += 6;
+            break;
+        case 2:
+            s->needed_bytes += 8;
+            break;
+        default:
+            s->needed_bytes += 4;
+            break;
+        }
+        break;
+    default:
+        break;
+    }
+    s->pos = 0;
+    s->len = 0;
+    s->state = STATE_COLLECTING_DATA;
+}
+
+static void decode_qio_read_cmd(Flash *s)
+{
+    s->needed_bytes = get_addr_length(s);
+    /* Dummy cycles modeled with bytes writes instead of bits */
+    switch (get_man(s)) {
+    case MAN_WINBOND:
+        s->needed_bytes += 8;
+        break;
+    case MAN_SPANSION:
+        s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN;
+        s->needed_bytes += extract32(s->spansion_cr2v,
+                                    SPANSION_DUMMY_CLK_POS,
+                                    SPANSION_DUMMY_CLK_LEN
+                                    );
+        break;
+    case MAN_NUMONYX:
+        s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+        break;
+    case MAN_MACRONIX:
+        switch (extract32(s->volatile_cfg, 6, 2)) {
+        case 1:
+            s->needed_bytes += 4;
+            break;
+        case 2:
+            s->needed_bytes += 8;
+            break;
+        default:
+            s->needed_bytes += 6;
+            break;
+        }
+        break;
+    default:
+        break;
+    }
+    s->pos = 0;
+    s->len = 0;
+    s->state = STATE_COLLECTING_DATA;
+}
+
 static void decode_new_cmd(Flash *s, uint32_t value)
 {
     s->cmd_in_progress = value;
+    int i;
     DB_PRINT_L(0, "decoded new command:%x\n", value);
 
     if (value != RESET_MEMORY) {
@@ -613,6 +858,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
     case ERASE_4K:
     case ERASE4_4K:
     case ERASE_32K:
+    case ERASE4_32K:
     case ERASE_SECTOR:
     case ERASE4_SECTOR:
     case READ:
@@ -621,6 +867,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
     case QPP:
     case PP:
     case PP4:
+    case PP4_4:
         s->needed_bytes = get_addr_length(s);
         s->pos = 0;
         s->len = 0;
@@ -633,56 +880,35 @@ static void decode_new_cmd(Flash *s, uint32_t value)
     case DOR4:
     case QOR:
     case QOR4:
-        s->needed_bytes = get_addr_length(s);
-        if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) {
-            /* Dummy cycles modeled with bytes writes instead of bits */
-            s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
-        }
-        s->pos = 0;
-        s->len = 0;
-        s->state = STATE_COLLECTING_DATA;
+        decode_fast_read_cmd(s);
         break;
 
     case DIOR:
     case DIOR4:
-        switch ((s->pi->jedec >> 16) & 0xFF) {
-        case JEDEC_WINBOND:
-        case JEDEC_SPANSION:
-            s->needed_bytes = 4;
-            break;
-        default:
-            s->needed_bytes = get_addr_length(s);
-            /* Dummy cycles modeled with bytes writes instead of bits */
-            s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
-        }
-        s->pos = 0;
-        s->len = 0;
-        s->state = STATE_COLLECTING_DATA;
+        decode_dio_read_cmd(s);
         break;
 
     case QIOR:
     case QIOR4:
-        switch ((s->pi->jedec >> 16) & 0xFF) {
-        case JEDEC_WINBOND:
-        case JEDEC_SPANSION:
-            s->needed_bytes = 6;
-            break;
-        default:
-            s->needed_bytes = get_addr_length(s);
-            /* Dummy cycles modeled with bytes writes instead of bits */
-            s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
-        }
-        s->pos = 0;
-        s->len = 0;
-        s->state = STATE_COLLECTING_DATA;
+        decode_qio_read_cmd(s);
         break;
 
     case WRSR:
         if (s->write_enable) {
-            s->needed_bytes = 1;
+            switch (get_man(s)) {
+            case MAN_SPANSION:
+                s->needed_bytes = 2;
+                s->state = STATE_COLLECTING_DATA;
+                break;
+            case MAN_MACRONIX:
+                s->needed_bytes = 2;
+                s->state = STATE_COLLECTING_VAR_LEN_DATA;
+                break;
+            default:
+                s->needed_bytes = 1;
+                s->state = STATE_COLLECTING_DATA;
+            }
             s->pos = 0;
-            s->len = 0;
-            s->state = STATE_COLLECTING_DATA;
         }
         break;
 
@@ -695,6 +921,9 @@ static void decode_new_cmd(Flash *s, uint32_t value)
 
     case RDSR:
         s->data[0] = (!!s->write_enable) << 1;
+        if (get_man(s) == MAN_MACRONIX) {
+            s->data[0] |= (!!s->quad_enable) << 6;
+        }
         s->pos = 0;
         s->len = 1;
         s->state = STATE_READING_DATA;
@@ -712,17 +941,20 @@ static void decode_new_cmd(Flash *s, uint32_t value)
 
     case JEDEC_READ:
         DB_PRINT_L(0, "populated jedec code\n");
-        s->data[0] = (s->pi->jedec >> 16) & 0xff;
-        s->data[1] = (s->pi->jedec >> 8) & 0xff;
-        s->data[2] = s->pi->jedec & 0xff;
-        if (s->pi->ext_jedec) {
-            s->data[3] = (s->pi->ext_jedec >> 8) & 0xff;
-            s->data[4] = s->pi->ext_jedec & 0xff;
-            s->len = 5;
-        } else {
-            s->len = 3;
+        for (i = 0; i < s->pi->id_len; i++) {
+            s->data[i] = s->pi->id[i];
         }
+
+        s->len = s->pi->id_len;
+        s->pos = 0;
+        s->state = STATE_READING_DATA;
+        break;
+
+    case RDCR:
+        s->data[0] = s->volatile_cfg & 0xFF;
+        s->data[0] |= (!!s->four_bytes_address_mode) << 5;
         s->pos = 0;
+        s->len = 1;
         s->state = STATE_READING_DATA;
         break;
 
@@ -765,7 +997,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
         s->state = STATE_READING_DATA;
         break;
     case WNVCR:
-        if (s->write_enable) {
+        if (s->write_enable && get_man(s) == MAN_NUMONYX) {
             s->needed_bytes = 2;
             s->pos = 0;
             s->len = 0;
@@ -808,6 +1040,24 @@ static void decode_new_cmd(Flash *s, uint32_t value)
             reset_memory(s);
         }
         break;
+    case RDCR_EQIO:
+        switch (get_man(s)) {
+        case MAN_SPANSION:
+            s->data[0] = (!!s->quad_enable) << 1;
+            s->pos = 0;
+            s->len = 1;
+            s->state = STATE_READING_DATA;
+            break;
+        case MAN_MACRONIX:
+            s->quad_enable = true;
+            break;
+        default:
+            break;
+        }
+        break;
+    case RSTQIO:
+        s->quad_enable = false;
+        break;
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value);
         break;
@@ -819,6 +1069,9 @@ static int m25p80_cs(SSISlave *ss, bool select)
     Flash *s = M25P80(ss);
 
     if (select) {
+        if (s->state == STATE_COLLECTING_VAR_LEN_DATA) {
+            complete_collecting_data(s);
+        }
         s->len = 0;
         s->pos = 0;
         s->state = STATE_IDLE;
@@ -852,6 +1105,7 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
         break;
 
     case STATE_COLLECTING_DATA:
+    case STATE_COLLECTING_VAR_LEN_DATA:
         s->data[s->len] = (uint8_t)tx;
         s->len++;
 
@@ -926,13 +1180,18 @@ static void m25p80_pre_save(void *opaque)
 }
 
 static Property m25p80_properties[] = {
+    /* This is the default value for Micron flash */
     DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF),
+    DEFINE_PROP_UINT8("spansion-cr1nv", Flash, spansion_cr1nv, 0x0),
+    DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8),
+    DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2),
+    DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10),
     DEFINE_PROP_END_OF_LIST(),
 };
 
 static const VMStateDescription vmstate_m25p80 = {
     .name = "xilinx_spi",
-    .version_id = 2,
+    .version_id = 3,
     .minimum_version_id = 1,
     .pre_save = m25p80_pre_save,
     .fields = (VMStateField[]) {
@@ -950,6 +1209,11 @@ static const VMStateDescription vmstate_m25p80 = {
         VMSTATE_UINT32_V(nonvolatile_cfg, Flash, 2),
         VMSTATE_UINT32_V(volatile_cfg, Flash, 2),
         VMSTATE_UINT32_V(enh_volatile_cfg, Flash, 2),
+        VMSTATE_BOOL_V(quad_enable, Flash, 3),
+        VMSTATE_UINT8_V(spansion_cr1nv, Flash, 3),
+        VMSTATE_UINT8_V(spansion_cr2nv, Flash, 3),
+        VMSTATE_UINT8_V(spansion_cr3nv, Flash, 3),
+        VMSTATE_UINT8_V(spansion_cr4nv, Flash, 3),
         VMSTATE_END_OF_LIST()
     }
 };
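
The INFO()/INFO6() macros above replace the old jedec/ext_jedec pair with a flat id[] byte array so that JEDEC_READ can simply copy id_len bytes. A sketch of the same byte-splitting arithmetic as a plain function; the helper name and main() are illustrative only, and the part data is taken from the known_devices table above:

#include <stdint.h>
#include <stdio.h>

/* Split a 24-bit JEDEC ID and an optional 16-bit extended ID into the
 * byte layout used by the INFO() macro above. */
static void fill_id(uint8_t id[5], uint32_t jedec_id, uint16_t ext_id,
                    uint8_t *id_len)
{
    id[0] = (jedec_id >> 16) & 0xff;
    id[1] = (jedec_id >> 8) & 0xff;
    id[2] = jedec_id & 0xff;
    id[3] = (ext_id >> 8) & 0xff;
    id[4] = ext_id & 0xff;
    *id_len = !jedec_id ? 0 : (3 + (ext_id ? 2 : 0));
}

int main(void)
{
    uint8_t id[5], len;
    unsigned i;

    fill_id(id, 0x010219, 0x4d01, &len);        /* s25fl256s1 from the table */
    printf("id_len=%u bytes:", (unsigned)len);
    for (i = 0; i < len; i++) {
        printf(" %02x", id[i]);
    }
    printf("\n");                               /* id_len=5 bytes: 01 02 19 4d 01 */
    return 0;
}
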
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 284e64667c..fb43bbaa46 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -29,9 +29,11 @@
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
 
-void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req)
+void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
+                             VirtIOBlockReq *req)
 {
     req->dev = s;
+    req->vq = vq;
     req->qiov.size = 0;
     req->in_len = 0;
     req->next = NULL;
@@ -53,11 +55,11 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
     trace_virtio_blk_req_complete(req, status);
 
     stb_p(&req->in->status, status);
-    virtqueue_push(s->vq, &req->elem, req->in_len);
+    virtqueue_push(req->vq, &req->elem, req->in_len);
     if (s->dataplane_started && !s->dataplane_disabled) {
-        virtio_blk_data_plane_notify(s->dataplane);
+        virtio_blk_data_plane_notify(s->dataplane, req->vq);
     } else {
-        virtio_notify(vdev, s->vq);
+        virtio_notify(vdev, req->vq);
     }
 }
 
@@ -187,12 +189,12 @@ out:
 
 #endif
 
-static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
+static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
 {
-    VirtIOBlockReq *req = virtqueue_pop(s->vq, sizeof(VirtIOBlockReq));
+    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
 
     if (req) {
-        virtio_blk_init_request(s, req);
+        virtio_blk_init_request(s, vq, req);
     }
     return req;
 }
@@ -583,7 +585,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
 
     blk_io_plug(s->blk);
 
-    while ((req = virtio_blk_get_request(s))) {
+    while ((req = virtio_blk_get_request(s, vq))) {
         virtio_blk_handle_request(req, &mrb);
     }
 
@@ -708,6 +710,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
     blkcfg.physical_block_exp = get_physical_block_exp(conf);
     blkcfg.alignment_offset = 0;
     blkcfg.wce = blk_enable_write_cache(s->blk);
+    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
     memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
 }
 
@@ -751,6 +754,9 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
     if (blk_is_read_only(s->blk)) {
         virtio_add_feature(&features, VIRTIO_BLK_F_RO);
     }
+    if (s->conf.num_queues > 1) {
+        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
+    }
 
     return features;
 }
@@ -795,11 +801,6 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
 static void virtio_blk_save(QEMUFile *f, void *opaque)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-    VirtIOBlock *s = VIRTIO_BLK(vdev);
-
-    if (s->dataplane) {
-        virtio_blk_data_plane_stop(s->dataplane);
-    }
 
     virtio_save(vdev, f);
 }
@@ -811,6 +812,11 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
 
     while (req) {
         qemu_put_sbyte(f, 1);
+
+        if (s->conf.num_queues > 1) {
+            qemu_put_be32(f, virtio_get_queue_index(req->vq));
+        }
+
         qemu_put_virtqueue_element(f, &req->elem);
         req = req->next;
     }
@@ -834,9 +840,22 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
     VirtIOBlock *s = VIRTIO_BLK(vdev);
 
     while (qemu_get_sbyte(f)) {
+        unsigned nvqs = s->conf.num_queues;
+        unsigned vq_idx = 0;
         VirtIOBlockReq *req;
+
+        if (nvqs > 1) {
+            vq_idx = qemu_get_be32(f);
+
+            if (vq_idx >= nvqs) {
+                error_report("Invalid virtqueue index in request list: %#x",
+                             vq_idx);
+                return -EINVAL;
+            }
+        }
+
         req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
-        virtio_blk_init_request(s, req);
+        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
         req->next = s->rq;
         s->rq = req;
     }
@@ -862,6 +881,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     VirtIOBlkConf *conf = &s->conf;
     Error *err = NULL;
     static int virtio_blk_id;
+    unsigned i;
 
     if (!conf->conf.blk) {
         error_setg(errp, "drive property not set");
@@ -871,6 +891,10 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         error_setg(errp, "Device needs media, but drive is empty");
         return;
     }
+    if (!conf->num_queues) {
+        error_setg(errp, "num-queues property must be larger than 0");
+        return;
+    }
 
     blkconf_serial(&conf->conf, &conf->serial);
     s->original_wce = blk_enable_write_cache(conf->conf.blk);
@@ -888,7 +912,9 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     s->rq = NULL;
     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
 
-    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+    for (i = 0; i < conf->num_queues; i++) {
+        virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+    }
     virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
     if (err != NULL) {
         error_propagate(errp, err);
@@ -941,6 +967,7 @@ static Property virtio_blk_properties[] = {
 #endif
     DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                     true),
+    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index 65179fa500..e3bc52f7df 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -288,8 +288,11 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
     }
 
     ret = qemu_chr_fe_write(s->chr, s->tx_fifo, s->tx_count);
-    s->tx_count -= ret;
-    memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count);
+
+    if (ret >= 0) {
+        s->tx_count -= ret;
+        memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count);
+    }
 
     if (s->tx_count) {
         guint r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index fa57352fc8..56cc50661f 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -406,7 +406,7 @@ static void pmac_ide_flush(DBDMA_io *io)
     IDEState *s = idebus_active_if(&m->bus);
 
     if (s->bus->dma->aiocb) {
-        blk_drain_all();
+        blk_drain(s->blk);
     }
 }
 
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 5b2972ea9c..4633172bec 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -975,6 +975,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                 r = CP_ACCESS_TRAP_EL3;
             }
+            break;
         default:
             g_assert_not_reached();
         }
@@ -1006,6 +1007,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                 r = CP_ACCESS_TRAP_EL3;
             }
+            break;
         default:
             g_assert_not_reached();
         }
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index cce7f3d112..2e83d41b14 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -694,17 +694,6 @@ static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
         lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
 }
 
-void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
-{
-    int src = xics_find_source(icp, irq);
-    ICSState *ics;
-
-    assert(src >= 0);
-
-    ics = &icp->ics[src];
-    ics_set_irq_type(ics, irq - ics->offset, lsi);
-}
-
 #define ICS_IRQ_FREE(ics, srcno)   \
     (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
 
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index ffb49c11ac..54020aa06c 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -52,3 +52,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
 obj-$(CONFIG_EDU) += edu.o
 obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
 obj-$(CONFIG_AUX) += aux.o
+obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o
diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
new file mode 100644
index 0000000000..23f51752b0
--- /dev/null
+++ b/hw/misc/aspeed_scu.c
@@ -0,0 +1,284 @@
+/*
+ * ASPEED System Control Unit
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/misc/aspeed_scu.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "trace.h"
+
+#define TO_REG(offset) ((offset) >> 2)
+
+#define PROT_KEY             TO_REG(0x00)
+#define SYS_RST_CTRL         TO_REG(0x04)
+#define CLK_SEL              TO_REG(0x08)
+#define CLK_STOP_CTRL        TO_REG(0x0C)
+#define FREQ_CNTR_CTRL       TO_REG(0x10)
+#define FREQ_CNTR_EVAL       TO_REG(0x14)
+#define IRQ_CTRL             TO_REG(0x18)
+#define D2PLL_PARAM          TO_REG(0x1C)
+#define MPLL_PARAM           TO_REG(0x20)
+#define HPLL_PARAM           TO_REG(0x24)
+#define FREQ_CNTR_RANGE      TO_REG(0x28)
+#define MISC_CTRL1           TO_REG(0x2C)
+#define PCI_CTRL1            TO_REG(0x30)
+#define PCI_CTRL2            TO_REG(0x34)
+#define PCI_CTRL3            TO_REG(0x38)
+#define SYS_RST_STATUS       TO_REG(0x3C)
+#define SOC_SCRATCH1         TO_REG(0x40)
+#define SOC_SCRATCH2         TO_REG(0x44)
+#define MAC_CLK_DELAY        TO_REG(0x48)
+#define MISC_CTRL2           TO_REG(0x4C)
+#define VGA_SCRATCH1         TO_REG(0x50)
+#define VGA_SCRATCH2         TO_REG(0x54)
+#define VGA_SCRATCH3         TO_REG(0x58)
+#define VGA_SCRATCH4         TO_REG(0x5C)
+#define VGA_SCRATCH5         TO_REG(0x60)
+#define VGA_SCRATCH6         TO_REG(0x64)
+#define VGA_SCRATCH7         TO_REG(0x68)
+#define VGA_SCRATCH8         TO_REG(0x6C)
+#define HW_STRAP1            TO_REG(0x70)
+#define RNG_CTRL             TO_REG(0x74)
+#define RNG_DATA             TO_REG(0x78)
+#define SILICON_REV          TO_REG(0x7C)
+#define PINMUX_CTRL1         TO_REG(0x80)
+#define PINMUX_CTRL2         TO_REG(0x84)
+#define PINMUX_CTRL3         TO_REG(0x88)
+#define PINMUX_CTRL4         TO_REG(0x8C)
+#define PINMUX_CTRL5         TO_REG(0x90)
+#define PINMUX_CTRL6         TO_REG(0x94)
+#define WDT_RST_CTRL         TO_REG(0x9C)
+#define PINMUX_CTRL7         TO_REG(0xA0)
+#define PINMUX_CTRL8         TO_REG(0xA4)
+#define PINMUX_CTRL9         TO_REG(0xA8)
+#define WAKEUP_EN            TO_REG(0xC0)
+#define WAKEUP_CTRL          TO_REG(0xC4)
+#define HW_STRAP2            TO_REG(0xD0)
+#define FREE_CNTR4           TO_REG(0xE0)
+#define FREE_CNTR4_EXT       TO_REG(0xE4)
+#define CPU2_CTRL            TO_REG(0x100)
+#define CPU2_BASE_SEG1       TO_REG(0x104)
+#define CPU2_BASE_SEG2       TO_REG(0x108)
+#define CPU2_BASE_SEG3       TO_REG(0x10C)
+#define CPU2_BASE_SEG4       TO_REG(0x110)
+#define CPU2_BASE_SEG5       TO_REG(0x114)
+#define CPU2_CACHE_CTRL      TO_REG(0x118)
+#define UART_HPLL_CLK        TO_REG(0x160)
+#define PCIE_CTRL            TO_REG(0x180)
+#define BMC_MMIO_CTRL        TO_REG(0x184)
+#define RELOC_DECODE_BASE1   TO_REG(0x188)
+#define RELOC_DECODE_BASE2   TO_REG(0x18C)
+#define MAILBOX_DECODE_BASE  TO_REG(0x190)
+#define SRAM_DECODE_BASE1    TO_REG(0x194)
+#define SRAM_DECODE_BASE2    TO_REG(0x198)
+#define BMC_REV              TO_REG(0x19C)
+#define BMC_DEV_ID           TO_REG(0x1A4)
+
+#define PROT_KEY_UNLOCK 0x1688A8A8
+#define SCU_IO_REGION_SIZE 0x20000
+
+#define AST2400_A0_SILICON_REV     0x02000303U
+
+static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = {
+     [SYS_RST_CTRL]    = 0xFFCFFEDCU,
+     [CLK_SEL]         = 0xF3F40000U,
+     [CLK_STOP_CTRL]   = 0x19FC3E8BU,
+     [D2PLL_PARAM]     = 0x00026108U,
+     [MPLL_PARAM]      = 0x00030291U,
+     [HPLL_PARAM]      = 0x00000291U,
+     [MISC_CTRL1]      = 0x00000010U,
+     [PCI_CTRL1]       = 0x20001A03U,
+     [PCI_CTRL2]       = 0x20001A03U,
+     [PCI_CTRL3]       = 0x04000030U,
+     [SYS_RST_STATUS]  = 0x00000001U,
+     [SOC_SCRATCH1]    = 0x000000C0U, /* SoC completed DRAM init */
+     [MISC_CTRL2]      = 0x00000023U,
+     [RNG_CTRL]        = 0x0000000EU,
+     [PINMUX_CTRL2]    = 0x0000F000U,
+     [PINMUX_CTRL3]    = 0x01000000U,
+     [PINMUX_CTRL4]    = 0x000000FFU,
+     [PINMUX_CTRL5]    = 0x0000A000U,
+     [WDT_RST_CTRL]    = 0x003FFFF3U,
+     [PINMUX_CTRL8]    = 0xFFFF0000U,
+     [PINMUX_CTRL9]    = 0x000FFFFFU,
+     [FREE_CNTR4]      = 0x000000FFU,
+     [FREE_CNTR4_EXT]  = 0x000000FFU,
+     [CPU2_BASE_SEG1]  = 0x80000000U,
+     [CPU2_BASE_SEG4]  = 0x1E600000U,
+     [CPU2_BASE_SEG5]  = 0xC0000000U,
+     [UART_HPLL_CLK]   = 0x00001903U,
+     [PCIE_CTRL]       = 0x0000007BU,
+     [BMC_DEV_ID]      = 0x00002402U
+};
+
+static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
+{
+    AspeedSCUState *s = ASPEED_SCU(opaque);
+    int reg = TO_REG(offset);
+
+    if (reg >= ARRAY_SIZE(s->regs)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        return 0;
+    }
+
+    switch (reg) {
+    case WAKEUP_EN:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        break;
+    }
+
+    return s->regs[reg];
+}
+
+static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data,
+                             unsigned size)
+{
+    AspeedSCUState *s = ASPEED_SCU(opaque);
+    int reg = TO_REG(offset);
+
+    if (reg >= ARRAY_SIZE(s->regs)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        return;
+    }
+
+    if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 &&
+            s->regs[PROT_KEY] != PROT_KEY_UNLOCK) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__);
+        return;
+    }
+
+    trace_aspeed_scu_write(offset, size, data);
+
+    switch (reg) {
+    case FREQ_CNTR_EVAL:
+    case VGA_SCRATCH1 ... VGA_SCRATCH8:
+    case RNG_DATA:
+    case SILICON_REV:
+    case FREE_CNTR4:
+    case FREE_CNTR4_EXT:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        return;
+    }
+
+    s->regs[reg] = data;
+}
+
+static const MemoryRegionOps aspeed_scu_ops = {
+    .read = aspeed_scu_read,
+    .write = aspeed_scu_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+    .valid.unaligned = false,
+};
+
+static void aspeed_scu_reset(DeviceState *dev)
+{
+    AspeedSCUState *s = ASPEED_SCU(dev);
+    const uint32_t *reset;
+
+    switch (s->silicon_rev) {
+    case AST2400_A0_SILICON_REV:
+        reset = ast2400_a0_resets;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    memcpy(s->regs, reset, sizeof(s->regs));
+    s->regs[SILICON_REV] = s->silicon_rev;
+    s->regs[HW_STRAP1] = s->hw_strap1;
+    s->regs[HW_STRAP2] = s->hw_strap2;
+}
+
+static uint32_t aspeed_silicon_revs[] = { AST2400_A0_SILICON_REV, };
+
+static bool is_supported_silicon_rev(uint32_t silicon_rev)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(aspeed_silicon_revs); i++) {
+        if (silicon_rev == aspeed_silicon_revs[i]) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static void aspeed_scu_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    AspeedSCUState *s = ASPEED_SCU(dev);
+
+    if (!is_supported_silicon_rev(s->silicon_rev)) {
+        error_setg(errp, "Unknown silicon revision: 0x%" PRIx32,
+                s->silicon_rev);
+        return;
+    }
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_scu_ops, s,
+                          TYPE_ASPEED_SCU, SCU_IO_REGION_SIZE);
+
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_aspeed_scu = {
+    .name = "aspeed.scu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_SCU_NR_REGS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static Property aspeed_scu_properties[] = {
+    DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0),
+    DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0),
+    DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap1, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_scu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    dc->realize = aspeed_scu_realize;
+    dc->reset = aspeed_scu_reset;
+    dc->desc = "ASPEED System Control Unit";
+    dc->vmsd = &vmstate_aspeed_scu;
+    dc->props = aspeed_scu_properties;
+}
+
+static const TypeInfo aspeed_scu_info = {
+    .name = TYPE_ASPEED_SCU,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AspeedSCUState),
+    .class_init = aspeed_scu_class_init,
+};
+
+static void aspeed_scu_register_types(void)
+{
+    type_register_static(&aspeed_scu_info);
+}
+
+type_init(aspeed_scu_register_types);
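
aspeed_scu.c maps a register offset to an array index with TO_REG(offset), i.e. offset >> 2, and drops most writes until the guest stores PROT_KEY_UNLOCK into the protection key register. A simplified sketch of that lock outside QEMU; the real model only protects the range between PROT_KEY and CPU2_BASE_SEG1, while this version gates everything except the key register itself:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define TO_REG(offset)   ((offset) >> 2)   /* 32-bit registers, 4 bytes apart */
#define PROT_KEY         TO_REG(0x00)
#define HW_STRAP1        TO_REG(0x70)
#define PROT_KEY_UNLOCK  0x1688A8A8
#define NR_REGS          TO_REG(0x1A8)

static uint32_t regs[NR_REGS];

/* Refuse writes to protected registers while the SCU is locked. */
static void scu_write(uint32_t offset, uint32_t data)
{
    int reg = TO_REG(offset);

    if (reg != PROT_KEY && regs[PROT_KEY] != PROT_KEY_UNLOCK) {
        fprintf(stderr, "SCU is locked, dropping write to 0x%" PRIx32 "\n",
                offset);
        return;
    }
    regs[reg] = data;
}

int main(void)
{
    scu_write(0x70, 0x120CE416);            /* dropped: SCU still locked */
    scu_write(0x00, PROT_KEY_UNLOCK);       /* unlock */
    scu_write(0x70, 0x120CE416);            /* now accepted */
    printf("HW_STRAP1 = 0x%08" PRIx32 "\n", regs[HW_STRAP1]);
    return 0;
}
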
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index 16b6701cbf..ea52a14d78 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -50,3 +50,6 @@ milkymist_pfpu_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x"
 milkymist_pfpu_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x"
 milkymist_pfpu_vectout(uint32_t a, uint32_t b, uint32_t dma_ptr) "a %08x b %08x dma_ptr %08x"
 milkymist_pfpu_pulse_irq(void) "Pulse IRQ"
+
+# hw/misc/aspeed_scu.c
+aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 0346f3e335..8a4be1e667 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -274,6 +274,11 @@ static inline unsigned tx_desc_get_last(unsigned *desc)
     return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
 }
 
+static inline void tx_desc_set_last(unsigned *desc)
+{
+    desc[1] |= DESC_1_TX_LAST;
+}
+
 static inline unsigned tx_desc_get_length(unsigned *desc)
 {
     return desc[1] & DESC_1_LENGTH;
@@ -664,6 +669,13 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
                  GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL;
     bytes_to_copy = size;
 
+    /* Hardware allows a zero value here but warns against it. To avoid QEMU
+     * indefinite loops we enforce a minimum value here
+     */
+    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
+        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
+    }
+
     /* Pad to minimum length. Assume FCS field is stripped, logic
      * below will increment it to the real minimum of 64 when
      * not FCS stripping
@@ -932,6 +944,7 @@ static void gem_transmit(CadenceGEMState *s)
 
         /* read next descriptor */
         if (tx_desc_get_wrap(desc)) {
+            tx_desc_set_last(desc);
             packet_desc_addr = s->regs[GEM_TXQBASE];
         } else {
             packet_desc_addr += 8;
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 1202371271..06ca7b2638 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -536,7 +536,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
 static void
 xmit_seg(E1000State *s)
 {
-    uint16_t len, *sp;
+    uint16_t len;
     unsigned int frames = s->tx.tso_frames, css, sofar;
     struct e1000_tx *tp = &s->tx;
 
@@ -547,7 +547,7 @@ xmit_seg(E1000State *s)
         if (tp->props.ip) {    /* IPv4 */
             stw_be_p(tp->data+css+2, tp->size - css);
             stw_be_p(tp->data+css+4,
-                     be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
+                     lduw_be_p(tp->data + css + 4) + frames);
         } else {         /* IPv6 */
             stw_be_p(tp->data+css+4, tp->size - css);
         }
@@ -567,8 +567,9 @@ xmit_seg(E1000State *s)
         if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
             unsigned int phsum;
             // add pseudo-header length before checksum calculation
-            sp = (uint16_t *)(tp->data + tp->props.tucso);
-            phsum = be16_to_cpup(sp) + len;
+            void *sp = tp->data + tp->props.tucso;
+
+            phsum = lduw_be_p(sp) + len;
             phsum = (phsum >> 16) + (phsum & 0xffff);
             stw_be_p(sp, phsum);
         }
@@ -759,9 +760,9 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
 
     if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
         e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
-        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
-        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
-                                     ((vid >> 5) & 0x7f));
+        uint16_t vid = lduw_be_p(buf + 14);
+        uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) +
+                                 ((vid >> 5) & 0x7f));
         if ((vfta & (1 << (vid & 0x1f))) == 0)
             return 0;
     }
@@ -889,8 +890,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
 
     if (e1000x_vlan_enabled(s->mac_reg) &&
         e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
-        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
-                                                                + 14)));
+        vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
         iov_ofs = 4;
         if (filter_buf == iov->iov_base) {
             memmove(filter_buf + 4, filter_buf, 12);
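
The e1000.c hunks above, and the e1000e/e1000x_common/eepro100/rocker/rtl8139 hunks that follow, all replace casts such as be16_to_cpup((uint16_t *)(buf + 14)) with the lduw_be_p()/ldl_le_p() byte-access helpers, which avoid dereferencing a potentially misaligned uint16_t * or uint32_t * inside a packet buffer. A sketch of such a big-endian 16-bit loader, written independently of QEMU's actual helpers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Load a big-endian 16-bit value from a possibly unaligned buffer.
 * A plain *(uint16_t *)p would be undefined behaviour on strict-alignment
 * targets and would also need a byte swap on little-endian hosts. */
static uint16_t load_be16(const void *p)
{
    uint8_t b[2];

    memcpy(b, p, sizeof(b));
    return (uint16_t)((b[0] << 8) | b[1]);
}

int main(void)
{
    /* Ethernet header: dst (6) + src (6) + ethertype at offset 12. */
    const uint8_t frame[14] = { [12] = 0x81, [13] = 0x00 }; /* 802.1Q tag */

    printf("ethertype = 0x%04x\n", (unsigned)load_be16(frame + 12)); /* 0x8100 */
    return 0;
}
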
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index 4549acb120..6050d8b7f8 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -1019,9 +1019,9 @@ e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
 
     if (e1000x_is_vlan_packet(buf, core->vet) &&
         e1000x_vlan_rx_filter_enabled(core->mac)) {
-        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
-        uint32_t vfta = le32_to_cpup((uint32_t *)(core->mac + VFTA) +
-                                     ((vid >> 5) & 0x7f));
+        uint16_t vid = lduw_be_p(buf + 14);
+        uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
+                                 ((vid >> 5) & 0x7f));
         if ((vfta & (1 << (vid & 0x1f))) == 0) {
             trace_e1000e_rx_flt_vlan_mismatch(vid);
             return false;
diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c
index 94f85c98c8..eb0e097137 100644
--- a/hw/net/e1000x_common.c
+++ b/hw/net/e1000x_common.c
@@ -47,7 +47,7 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac)
 
 bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet)
 {
-    uint16_t eth_proto = be16_to_cpup((uint16_t *)(buf + 12));
+    uint16_t eth_proto = lduw_be_p(buf + 12);
     bool res = (eth_proto == vet);
 
     trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet);
diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
index 9b4b9b59d2..b10c419838 100644
--- a/hw/net/eepro100.c
+++ b/hw/net/eepro100.c
@@ -352,14 +352,14 @@ static unsigned e100_compute_mcast_idx(const uint8_t *ep)
 static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr)
 {
     assert(!((uintptr_t)&s->mem[addr] & 1));
-    return le16_to_cpup((uint16_t *)&s->mem[addr]);
+    return lduw_le_p(&s->mem[addr]);
 }
 
 /* Read a 32 bit control/status (CSR) register. */
 static uint32_t e100_read_reg4(EEPRO100State *s, E100RegisterOffset addr)
 {
     assert(!((uintptr_t)&s->mem[addr] & 3));
-    return le32_to_cpup((uint32_t *)&s->mem[addr]);
+    return ldl_le_p(&s->mem[addr]);
 }
 
 /* Write a 16 bit control/status (CSR) register. */
@@ -367,7 +367,7 @@ static void e100_write_reg2(EEPRO100State *s, E100RegisterOffset addr,
                             uint16_t val)
 {
     assert(!((uintptr_t)&s->mem[addr] & 1));
-    cpu_to_le16w((uint16_t *)&s->mem[addr], val);
+    stw_le_p(&s->mem[addr], val);
 }
 
 /* Read a 32 bit control/status (CSR) register. */
@@ -375,7 +375,7 @@ static void e100_write_reg4(EEPRO100State *s, E100RegisterOffset addr,
                             uint32_t val)
 {
     assert(!((uintptr_t)&s->mem[addr] & 3));
-    cpu_to_le32w((uint32_t *)&s->mem[addr], val);
+    stl_le_p(&s->mem[addr], val);
 }
 
 #if defined(DEBUG_EEPRO100)
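
The stores go the same way: stw_le_p()/stl_le_p() write the value byte by byte in little-endian order, replacing cpu_to_le16w()/cpu_to_le32w(), which required a correctly typed, aligned pointer. A matching sketch of the 16-bit store, again only an approximation of the bswap.h helpers:

    #include <stdint.h>

    /* Sketch only: byte-wise little-endian 16-bit store. */
    static inline void sketch_stw_le_p(void *ptr, uint16_t v)
    {
        uint8_t *p = ptr;

        p[0] = v & 0xff;        /* least significant byte first */
        p[1] = v >> 8;
    }
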
diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c
index cf8b8236df..5115adcaea 100644
--- a/hw/net/mipsnet.c
+++ b/hw/net/mipsnet.c
@@ -183,10 +183,12 @@ static void mipsnet_ioport_write(void *opaque, hwaddr addr,
         break;
     case MIPSNET_TX_DATA_BUFFER:
         s->tx_buffer[s->tx_written++] = val;
-        if (s->tx_written == s->tx_count) {
+        if ((s->tx_written >= MAX_ETH_FRAME_SIZE)
+            || (s->tx_written == s->tx_count)) {
             /* Send buffer. */
-            trace_mipsnet_send(s->tx_count);
-            qemu_send_packet(qemu_get_queue(s->nic), s->tx_buffer, s->tx_count);
+            trace_mipsnet_send(s->tx_written);
+            qemu_send_packet(qemu_get_queue(s->nic),
+                                s->tx_buffer, s->tx_written);
             s->tx_count = s->tx_written = 0;
             s->intctl |= MIPSNET_INTCTL_TXDONE;
             s->busy = 1;
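
The mipsnet change caps the transmit index so a guest that keeps writing past the announced length can no longer advance tx_written beyond tx_buffer; the frame is simply sent once the buffer is full. The same accumulate-and-flush guard in isolation, with hypothetical names standing in for the device state:

    #include <stdint.h>

    /* Hypothetical buffer used only for this sketch. */
    typedef struct {
        uint8_t data[1514];     /* illustrative maximum frame size */
        unsigned written;       /* bytes accumulated so far */
        unsigned expected;      /* length announced by the guest */
    } SketchTxBuf;

    static void sketch_tx_put(SketchTxBuf *b, uint8_t val)
    {
        b->data[b->written++] = val;
        if (b->written >= sizeof(b->data) || b->written == b->expected) {
            /* flush b->data[0..b->written) here, then reset */
            b->written = b->expected = 0;
        }
    }
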
diff --git a/hw/net/rocker/rocker_tlv.h b/hw/net/rocker/rocker_tlv.h
index e3c4ab6793..88561648f0 100644
--- a/hw/net/rocker/rocker_tlv.h
+++ b/hw/net/rocker/rocker_tlv.h
@@ -106,17 +106,17 @@ static inline uint64_t rocker_tlv_get_u64(const RockerTlv *tlv)
 
 static inline uint16_t rocker_tlv_get_le16(const RockerTlv *tlv)
 {
-    return le16_to_cpup((uint16_t *) rocker_tlv_data(tlv));
+    return lduw_le_p(rocker_tlv_data(tlv));
 }
 
 static inline uint32_t rocker_tlv_get_le32(const RockerTlv *tlv)
 {
-    return le32_to_cpup((uint32_t *) rocker_tlv_data(tlv));
+    return ldl_le_p(rocker_tlv_data(tlv));
 }
 
 static inline uint64_t rocker_tlv_get_le64(const RockerTlv *tlv)
 {
-    return le64_to_cpup((uint64_t *) rocker_tlv_data(tlv));
+    return ldq_le_p(rocker_tlv_data(tlv));
 }
 
 static inline void rocker_tlv_parse(RockerTlv **tb, int maxtype,
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index 562c1fded2..07297cb78f 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -1013,8 +1013,8 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t
         uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK;
 
         /* write VLAN info to descriptor variables. */
-        if (s->CpCmd & CPlusRxVLAN && be16_to_cpup((uint16_t *)
-                &buf[ETH_ALEN * 2]) == ETH_P_VLAN) {
+        if (s->CpCmd & CPlusRxVLAN &&
+            lduw_be_p(&buf[ETH_ALEN * 2]) == ETH_P_VLAN) {
             dot1q_buf = &buf[ETH_ALEN * 2];
             size -= VLAN_HLEN;
             /* if too small buffer, use the tailroom added during expansion */
@@ -1024,11 +1024,10 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t
 
             rxdw1 &= ~CP_RX_VLAN_TAG_MASK;
             /* BE + ~le_to_cpu()~ + cpu_to_le() = BE */
-            rxdw1 |= CP_RX_TAVA | le16_to_cpup((uint16_t *)
-                &dot1q_buf[ETHER_TYPE_LEN]);
+            rxdw1 |= CP_RX_TAVA | lduw_le_p(&dot1q_buf[ETHER_TYPE_LEN]);
 
             DPRINTF("C+ Rx mode : extracted vlan tag with tci: ""%u\n",
-                be16_to_cpup((uint16_t *)&dot1q_buf[ETHER_TYPE_LEN]));
+                lduw_be_p(&dot1q_buf[ETHER_TYPE_LEN]));
         } else {
             /* reset VLAN tag flag */
             rxdw1 &= ~CP_RX_TAVA;
@@ -1352,29 +1351,6 @@ static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr)
     pci_dma_write(d, tc_addr + 62,    (uint8_t *)&val16, 2);
 }
 
-/* Loads values of tally counters from VM state file */
-
-static const VMStateDescription vmstate_tally_counters = {
-    .name = "tally_counters",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .fields = (VMStateField[]) {
-        VMSTATE_UINT64(TxOk, RTL8139TallyCounters),
-        VMSTATE_UINT64(RxOk, RTL8139TallyCounters),
-        VMSTATE_UINT64(TxERR, RTL8139TallyCounters),
-        VMSTATE_UINT32(RxERR, RTL8139TallyCounters),
-        VMSTATE_UINT16(MissPkt, RTL8139TallyCounters),
-        VMSTATE_UINT16(FAE, RTL8139TallyCounters),
-        VMSTATE_UINT32(Tx1Col, RTL8139TallyCounters),
-        VMSTATE_UINT32(TxMCol, RTL8139TallyCounters),
-        VMSTATE_UINT64(RxOkPhy, RTL8139TallyCounters),
-        VMSTATE_UINT64(RxOkBrd, RTL8139TallyCounters),
-        VMSTATE_UINT16(TxAbt, RTL8139TallyCounters),
-        VMSTATE_UINT16(TxUndrn, RTL8139TallyCounters),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val)
 {
     DeviceState *d = DEVICE(s);
@@ -3222,7 +3198,7 @@ static void rtl8139_pre_save(void *opaque)
 
 static const VMStateDescription vmstate_rtl8139 = {
     .name = "rtl8139",
-    .version_id = 4,
+    .version_id = 5,
     .minimum_version_id = 3,
     .post_load = rtl8139_post_load,
     .pre_save  = rtl8139_pre_save,
@@ -3293,8 +3269,19 @@ static const VMStateDescription vmstate_rtl8139 = {
         VMSTATE_UINT32(TimerInt, RTL8139State),
         VMSTATE_INT64(TCTR_base, RTL8139State),
 
-        VMSTATE_STRUCT(tally_counters, RTL8139State, 0,
-                       vmstate_tally_counters, RTL8139TallyCounters),
+        VMSTATE_UINT64(tally_counters.TxOk, RTL8139State),
+        VMSTATE_UINT64(tally_counters.RxOk, RTL8139State),
+        VMSTATE_UINT64(tally_counters.TxERR, RTL8139State),
+        VMSTATE_UINT32(tally_counters.RxERR, RTL8139State),
+        VMSTATE_UINT16(tally_counters.MissPkt, RTL8139State),
+        VMSTATE_UINT16(tally_counters.FAE, RTL8139State),
+        VMSTATE_UINT32(tally_counters.Tx1Col, RTL8139State),
+        VMSTATE_UINT32(tally_counters.TxMCol, RTL8139State),
+        VMSTATE_UINT64(tally_counters.RxOkPhy, RTL8139State),
+        VMSTATE_UINT64(tally_counters.RxOkBrd, RTL8139State),
+        VMSTATE_UINT32_V(tally_counters.RxOkMul, RTL8139State, 5),
+        VMSTATE_UINT16(tally_counters.TxAbt, RTL8139State),
+        VMSTATE_UINT16(tally_counters.TxUndrn, RTL8139State),
 
         VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4),
         VMSTATE_END_OF_LIST()
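
With the tally counters folded into the top-level description, the new RxOkMul counter can be gated on the stream version: VMSTATE_UINT32_V(..., 5) together with the bump of version_id to 5 means the field is only expected when loading a version 5 stream (older streams simply omit it), while minimum_version_id staying at 3 keeps migration from older QEMUs working. A minimal sketch of that versioned-field pattern, with hypothetical device and field names:

    typedef struct {
        uint32_t always_there;
        uint32_t added_in_v5;
    } SketchState;

    static const VMStateDescription vmstate_sketch = {
        .name = "sketch-device",
        .version_id = 5,
        .minimum_version_id = 3,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(always_there, SketchState),
            /* only present in the stream when the section version is >= 5 */
            VMSTATE_UINT32_V(added_in_v5, SketchState, 5),
            VMSTATE_END_OF_LIST()
        }
    };
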
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5798f87d8e..7e6a60aa12 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1051,7 +1051,7 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
     ptr += n->host_hdr_len;
 
     if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
-        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
+        int vid = lduw_be_p(ptr + 14) & 0xfff;
         if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
             return 0;
     }
diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h
index c0dbb2ff41..550060170e 100644
--- a/hw/net/vmware_utils.h
+++ b/hw/net/vmware_utils.h
@@ -26,97 +26,104 @@
  *
  */
 static inline void
-vmw_shmem_read(hwaddr addr, void *buf, int len)
+vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len)
 {
     VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
-    cpu_physical_memory_read(addr, buf, len);
+    pci_dma_read(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_write(hwaddr addr, void *buf, int len)
+vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len)
 {
     VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
-    cpu_physical_memory_write(addr, buf, len);
+    pci_dma_write(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
+vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write)
 {
     VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
               addr, len, buf, is_write);
 
-    cpu_physical_memory_rw(addr, buf, len, is_write);
+    if (is_write)
+        pci_dma_write(d, addr, buf, len);
+    else
+        pci_dma_read(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_set(hwaddr addr, uint8_t val, int len)
+vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len)
 {
     int i;
     VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
 
     for (i = 0; i < len; i++) {
-        cpu_physical_memory_write(addr + i, &val, 1);
+        pci_dma_write(d, addr + i, &val, 1);
     }
 }
 
 static inline uint32_t
-vmw_shmem_ld8(hwaddr addr)
+vmw_shmem_ld8(PCIDevice *d, hwaddr addr)
 {
-    uint8_t res = ldub_phys(&address_space_memory, addr);
+    uint8_t res;
+    pci_dma_read(d, addr, &res, 1);
     VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st8(hwaddr addr, uint8_t value)
+vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value)
 {
     VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
-    stb_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 1);
 }
 
 static inline uint32_t
-vmw_shmem_ld16(hwaddr addr)
+vmw_shmem_ld16(PCIDevice *d, hwaddr addr)
 {
-    uint16_t res = lduw_le_phys(&address_space_memory, addr);
+    uint16_t res;
+    pci_dma_read(d, addr, &res, 2);
     VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st16(hwaddr addr, uint16_t value)
+vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value)
 {
     VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
-    stw_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 2);
 }
 
 static inline uint32_t
-vmw_shmem_ld32(hwaddr addr)
+vmw_shmem_ld32(PCIDevice *d, hwaddr addr)
 {
-    uint32_t res = ldl_le_phys(&address_space_memory, addr);
+    uint32_t res;
+    pci_dma_read(d, addr, &res, 4);
     VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st32(hwaddr addr, uint32_t value)
+vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value)
 {
     VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
-    stl_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 4);
 }
 
 static inline uint64_t
-vmw_shmem_ld64(hwaddr addr)
+vmw_shmem_ld64(PCIDevice *d, hwaddr addr)
 {
-    uint64_t res = ldq_le_phys(&address_space_memory, addr);
+    uint64_t res;
+    pci_dma_read(d, addr, &res, 8);
     VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st64(hwaddr addr, uint64_t value)
+vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value)
 {
     VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
-    stq_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 8);
 }
 
 /* Macros for simplification of operations on array-style registers */
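
Routing vmware_utils.h through pci_dma_read()/pci_dma_write() instead of cpu_physical_memory_* and the ld*_phys/st*_phys helpers makes every access go through the device's own DMA address space rather than raw system memory, which is what an IOMMU in front of the device needs. Roughly, the pci_dma_* helpers reduce to something like the sketch below; the real definitions in include/hw/pci/pci.h go through pci_dma_rw() and carry more detail:

    /* Sketch only: issue a DMA read on behalf of a PCI device. */
    static inline int sketch_pci_dma_read(PCIDevice *dev, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
    {
        /* pci_get_address_space() yields the AddressSpace the device's
         * bus-master accesses use, possibly behind an IOMMU. */
        return dma_memory_read(pci_get_address_space(dev), addr, buf, len);
    }
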
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index d97897670d..92236d3919 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -74,54 +74,54 @@
 #define VMXNET3_MAX_NMSIX_INTRS   (1)
 
 /* Macros for rings descriptors access */
-#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
-    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \
+    (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
-    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \
+    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
 
-#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
-    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \
+    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
-    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \
+    (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
 
-#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
-    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \
+    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
-    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \
+    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
 
-#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
-    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \
+    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
 
-#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
-    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \
+    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
 
-#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
-    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \
+    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
 
-#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
-    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \
+    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
 
 /* Macros for guest driver shared area access */
-#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
-    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \
+    (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
-    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \
+    (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
-    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
+#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \
+    (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
 
-#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
-    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \
+    (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
-    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \
+    (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
-    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
+#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \
+    (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
 
 #define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
 
@@ -147,7 +147,8 @@ typedef struct {
     uint8_t gen;
 } Vmxnet3Ring;
 
-static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
+static inline void vmxnet3_ring_init(PCIDevice *d,
+                                     Vmxnet3Ring *ring,
                                      hwaddr pa,
                                      size_t size,
                                      size_t cell_size,
@@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
     ring->next = 0;
 
     if (zero_region) {
-        vmw_shmem_set(pa, 0, size * cell_size);
+        vmw_shmem_set(d, pa, 0, size * cell_size);
     }
 }
 
@@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
     return ring->pa + ring->next * ring->cell_size;
 }
 
-static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+                                               void *buff)
 {
-    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+    vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
 }
 
-static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+                                                void *buff)
 {
-    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+    vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
 }
 
 static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
@@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
     vmxnet3_update_interrupt_line_state(s, lidx);
 }
 
-static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
+static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem)
 {
-    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
+    return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC);
 }
 
 #define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
@@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
 static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
 {
     struct Vmxnet3_TxCompDesc txcq_descr;
+    PCIDevice *d = PCI_DEVICE(s);
 
     VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
 
     txcq_descr.txdIdx = tx_ridx;
     txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
 
-    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);
+    vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
 
     /* Flush changes in TX descriptor before changing the counter value */
     smp_wmb();
@@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s,
                           uint32_t *descr_idx)
 {
     Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    vmxnet3_ring_read_curr_cell(ring, txd);
+    vmxnet3_ring_read_curr_cell(d, ring, txd);
     if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
         /* Only read after generation field verification */
         smp_rmb();
         /* Re-read to be sure we got the latest version */
-        vmxnet3_ring_read_curr_cell(ring, txd);
+        vmxnet3_ring_read_curr_cell(d, ring, txd);
         VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
         *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
         vmxnet3_inc_tx_consumption_counter(s, qidx);
@@ -782,9 +787,11 @@ static inline void
 vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                            struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
 {
+    PCIDevice *d = PCI_DEVICE(s);
+
     Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
     *didx = vmxnet3_ring_curr_cell_idx(ring);
-    vmxnet3_ring_read_curr_cell(ring, dbuf);
+    vmxnet3_ring_read_curr_cell(d, ring, dbuf);
 }
 
 static inline uint8_t
@@ -802,9 +809,8 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
     hwaddr daddr =
         vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
 
-    pci_dma_read(PCI_DEVICE(s), daddr,
-                 &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
-
+    pci_dma_read(PCI_DEVICE(s),
+                 daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
     ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
 
     if (rxcd.gen != ring_gen) {
@@ -1058,6 +1064,7 @@ static bool
 vmxnet3_indicate_packet(VMXNET3State *s)
 {
     struct Vmxnet3_RxDesc rxd;
+    PCIDevice *d = PCI_DEVICE(s);
     bool is_head = true;
     uint32_t rxd_idx;
     uint32_t rx_ridx = 0;
@@ -1091,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         }
 
         chunk_size = MIN(bytes_left, rxd.len);
-        vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied,
+        vmxnet3_pci_dma_writev(d, data, bytes_copied,
                                le64_to_cpu(rxd.addr), chunk_size);
         bytes_copied += chunk_size;
         bytes_left -= chunk_size;
@@ -1099,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         vmxnet3_dump_rx_descr(&rxd);
 
         if (ready_rxcd_pa != 0) {
-            pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+            pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
         }
 
         memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
@@ -1131,7 +1138,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         rxcd.eop = 1;
         rxcd.err = (bytes_left != 0);
 
-        pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+        pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
 
         /* Flush RX descriptor changes */
         smp_wmb();
@@ -1250,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s)
 
 static void vmxnet3_update_rx_mode(VMXNET3State *s)
 {
-    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+    PCIDevice *d = PCI_DEVICE(s);
+
+    s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
                                            devRead.rxFilterConf.rxMode);
     VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
 }
@@ -1258,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s)
 static void vmxnet3_update_vlan_filters(VMXNET3State *s)
 {
     int i;
+    PCIDevice *d = PCI_DEVICE(s);
 
     /* Copy configuration from shared memory */
-    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
+    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem,
                             devRead.rxFilterConf.vfTable,
                             s->vlan_table,
                             sizeof(s->vlan_table));
@@ -1281,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s)
 
 static void vmxnet3_update_mcast_filters(VMXNET3State *s)
 {
+    PCIDevice *d = PCI_DEVICE(s);
+
     uint16_t list_bytes =
-        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
+        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem,
                                   devRead.rxFilterConf.mfTableLen);
 
     s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
@@ -1299,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
     } else {
         int i;
         hwaddr mcast_list_pa =
-            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
+            VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem,
                                       devRead.rxFilterConf.mfTablePA);
 
-        pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes);
+        pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes);
 
         VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
         for (i = 0; i < s->mcast_list_len; i++) {
@@ -1328,19 +1340,20 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
 static void vmxnet3_fill_stats(VMXNET3State *s)
 {
     int i;
+    PCIDevice *d = PCI_DEVICE(s);
 
     if (!s->device_active)
         return;
 
     for (i = 0; i < s->txq_num; i++) {
-        pci_dma_write(PCI_DEVICE(s),
+        pci_dma_write(d,
                       s->txq_descr[i].tx_stats_pa,
                       &s->txq_descr[i].txq_stats,
                       sizeof(s->txq_descr[i].txq_stats));
     }
 
     for (i = 0; i < s->rxq_num; i++) {
-        pci_dma_write(PCI_DEVICE(s),
+        pci_dma_write(d,
                       s->rxq_descr[i].rx_stats_pa,
                       &s->rxq_descr[i].rxq_stats,
                       sizeof(s->rxq_descr[i].rxq_stats));
@@ -1350,8 +1363,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
 static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
 {
     struct Vmxnet3_GOSInfo gos;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
+    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos,
                             &gos, sizeof(gos));
     s->rx_packets_compound =
         (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
@@ -1371,13 +1385,14 @@ vmxnet3_dump_conf_descr(const char *name,
 static void vmxnet3_update_pm_state(VMXNET3State *s)
 {
     struct Vmxnet3_VariableLenConfDesc pm_descr;
+    PCIDevice *d = PCI_DEVICE(s);
 
     pm_descr.confLen =
-        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
+        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen);
     pm_descr.confVer =
-        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
+        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer);
     pm_descr.confPA =
-        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);
+        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA);
 
     vmxnet3_dump_conf_descr("PM State", &pm_descr);
 }
@@ -1386,8 +1401,9 @@ static void vmxnet3_update_features(VMXNET3State *s)
 {
     uint32_t guest_features;
     int rxcso_supported;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+    guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
                                                devRead.misc.uptFeatures);
 
     rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
@@ -1462,12 +1478,13 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 {
     int i;
     static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
+    PCIDevice *d = PCI_DEVICE(s);
     hwaddr qdescr_table_pa;
     uint64_t pa;
     uint32_t size;
 
     /* Verify configuration consistency */
-    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
+    if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) {
         VMW_ERPRN("Device configuration received from driver is invalid");
         return;
     }
@@ -1483,11 +1500,11 @@ static void vmxnet3_activate_device(VMXNET3State *s)
     vmxnet3_update_pm_state(s);
     vmxnet3_setup_rx_filtering(s);
     /* Cache fields from shared memory */
-    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
+    s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
     VMW_CFPRN("MTU is %u", s->mtu);
 
     s->max_rx_frags =
-        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);
+        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG);
 
     if (s->max_rx_frags == 0) {
         s->max_rx_frags = 1;
@@ -1496,24 +1513,24 @@ static void vmxnet3_activate_device(VMXNET3State *s)
     VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
 
     s->event_int_idx =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx);
     assert(vmxnet3_verify_intx(s, s->event_int_idx));
     VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
 
     s->auto_int_masking =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask);
     VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
 
     s->txq_num =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues);
     s->rxq_num =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues);
 
     VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
     vmxnet3_validate_queues(s);
 
     qdescr_table_pa =
-        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
+        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA);
     VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);
 
     /*
@@ -1529,25 +1546,25 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
         /* Read interrupt number for this TX queue */
         s->txq_descr[i].intr_idx =
-            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
+            VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx);
         assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
 
         VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
 
         /* Read rings memory locations for TX queues */
-        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
-        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);
+        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
+        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
 
-        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
+        vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
                           sizeof(struct Vmxnet3_TxDesc), false);
         VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
 
         s->max_tx_frags += size;
 
         /* TXC ring */
-        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
-        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
-        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
+        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
+        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
+        vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
                           sizeof(struct Vmxnet3_TxCompDesc), true);
         VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
 
@@ -1558,7 +1575,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
                sizeof(s->txq_descr[i].txq_stats));
 
         /* Fill device-managed parameters for queues */
-        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
+        VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa,
                                        ctrl.txThreshold,
                                        VMXNET3_DEF_TX_THRESHOLD);
     }
@@ -1578,7 +1595,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
         /* Read interrupt number for this RX queue */
         s->rxq_descr[i].intr_idx =
-            VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
+            VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx);
         assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
 
         VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
@@ -1586,18 +1603,18 @@ static void vmxnet3_activate_device(VMXNET3State *s)
         /* Read rings memory locations */
         for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
             /* RX rings */
-            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
-            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
-            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
+            pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
+            size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
+            vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
                               sizeof(struct Vmxnet3_RxDesc), false);
             VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
                       i, j, pa, size);
         }
 
         /* RXC ring */
-        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
-        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
-        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
+        pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
+        size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
+        vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
                           sizeof(struct Vmxnet3_RxCompDesc), true);
         VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
 
@@ -1764,19 +1781,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
 static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
 {
     uint32_t events;
+    PCIDevice *d = PCI_DEVICE(s);
 
     VMW_CBPRN("Setting events: 0x%x", val);
-    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
-    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val;
+    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
 }
 
 static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
 {
+    PCIDevice *d = PCI_DEVICE(s);
     uint32_t events;
 
     VMW_CBPRN("Clearing events: 0x%x", val);
-    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
-    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
+    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
 }
 
 static void
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 76bd78bfd7..225177b5af 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -104,7 +104,7 @@ static void spin_kick(void *data)
     hwaddr map_start;
 
     cpu_synchronize_state(cpu);
-    stl_p(&curspin->pir, env->spr[SPR_PIR]);
+    stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
     env->nip = ldq_p(&curspin->addr) & (map_size - 1);
     env->gpr[3] = ldq_p(&curspin->r3);
     env->gpr[4] = 0;
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 778fa255a9..0b6bb9ce1a 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2367,8 +2367,8 @@ static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
 
         cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model);
         cpu_item->vcpus_count = smp_threads;
-        cpu_props->has_core = true;
-        cpu_props->core = i * smt;
+        cpu_props->has_core_id = true;
+        cpu_props->core_id = i * smt;
         /* TODO: add 'has_node/node' here to describe
            to which node core belongs */
 
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 1625e6b38b..8b709e362e 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -69,92 +69,58 @@ VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
     return vdev;
 }
 
-static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
-                                              bool assign, bool set_handler)
+static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, n);
-    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
-    int r = 0;
-    SubchDev *sch = dev->sch;
-    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+    virtio_bus_start_ioeventfd(&dev->bus);
+}
 
-    if (assign) {
-        r = event_notifier_init(notifier, 1);
-        if (r < 0) {
-            error_report("%s: unable to init event notifier: %d", __func__, r);
-            return r;
-        }
-        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
-        r = s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
-        if (r < 0) {
-            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
-            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
-            event_notifier_cleanup(notifier);
-            return r;
-        }
-    } else {
-        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
-        s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
-        event_notifier_cleanup(notifier);
-    }
-    return r;
+static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+{
+    virtio_bus_stop_ioeventfd(&dev->bus);
 }
 
-static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_started(DeviceState *d)
 {
-    VirtIODevice *vdev;
-    int n, r;
+    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
 
-    if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) ||
-        dev->ioeventfd_disabled ||
-        dev->ioeventfd_started) {
-        return;
-    }
-    vdev = virtio_bus_get_device(&dev->bus);
-    for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-        r = virtio_ccw_set_guest2host_notifier(dev, n, true, true);
-        if (r < 0) {
-            goto assign_error;
-        }
-    }
-    dev->ioeventfd_started = true;
-    return;
+    return dev->ioeventfd_started;
+}
 
-  assign_error:
-    while (--n >= 0) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-        r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
-        assert(r >= 0);
+static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
+                                             bool err)
+{
+    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+    dev->ioeventfd_started = started;
+    if (err) {
+        /* Disable ioeventfd for this device. */
+        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
     }
-    dev->ioeventfd_started = false;
-    /* Disable ioeventfd for this device. */
-    dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
-    error_report("%s: failed. Fallback to userspace (slower).", __func__);
 }
 
-static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
 {
-    VirtIODevice *vdev;
-    int n, r;
+    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
 
-    if (!dev->ioeventfd_started) {
-        return;
-    }
-    vdev = virtio_bus_get_device(&dev->bus);
-    for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-        r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
-        assert(r >= 0);
-    }
-    dev->ioeventfd_started = false;
+    return dev->ioeventfd_disabled ||
+        !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+    dev->ioeventfd_disabled = disabled;
+}
+
+static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+                                       int n, bool assign)
+{
+    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+    SubchDev *sch = dev->sch;
+    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+
+    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
 }
 
 VirtualCssBus *virtual_css_bus_init(void)
@@ -1157,19 +1123,6 @@ static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
     return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
 }
 
-static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign)
-{
-    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
-    /* Stop using the generic ioeventfd, we are doing eventfd handling
-     * ourselves below */
-    dev->ioeventfd_disabled = assign;
-    if (assign) {
-        virtio_ccw_stop_ioeventfd(dev);
-    }
-    return virtio_ccw_set_guest2host_notifier(dev, n, assign, false);
-}
-
 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
 {
     int r;
@@ -1798,7 +1751,6 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
     k->notify = virtio_ccw_notify;
     k->vmstate_change = virtio_ccw_vmstate_change;
     k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
-    k->set_host_notifier = virtio_ccw_set_host_notifier;
     k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
     k->save_queue = virtio_ccw_save_queue;
     k->load_queue = virtio_ccw_load_queue;
@@ -1807,6 +1759,11 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
     k->device_plugged = virtio_ccw_device_plugged;
     k->post_plugged = virtio_ccw_post_plugged;
     k->device_unplugged = virtio_ccw_device_unplugged;
+    k->ioeventfd_started = virtio_ccw_ioeventfd_started;
+    k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
+    k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
+    k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
 }
 
 static const TypeInfo virtio_ccw_bus_info = {
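
After this conversion the ccw transport no longer open-codes the start/stop loops: it only reports its ioeventfd state and performs the subchannel-specific assignment, while the lifecycle moves into the generic virtio-bus code further down in this series. The new VirtioBusClass hooks, with signatures as they can be read off the implementations above (the authoritative declarations live in include/hw/virtio/virtio-bus.h):

    /* Sketch of the hooks the transports now implement. */
    bool (*ioeventfd_started)(DeviceState *d);
    void (*ioeventfd_set_started)(DeviceState *d, bool started, bool err);
    bool (*ioeventfd_disabled)(DeviceState *d);
    void (*ioeventfd_set_disabled)(DeviceState *d, bool disabled);
    int (*ioeventfd_assign)(DeviceState *d, EventNotifier *notifier,
                            int n, bool assign);
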
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 1a49f1e4b7..18ced31493 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -31,7 +31,7 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
     s->ctx = iothread_get_aio_context(vs->conf.iothread);
 
     /* Don't try if transport does not support notifiers. */
-    if (!k->set_guest_notifiers || !k->set_host_notifier) {
+    if (!k->set_guest_notifiers || !k->ioeventfd_started) {
         fprintf(stderr, "virtio-scsi: Failed to set iothread "
                    "(transport does not support notifiers)");
         exit(1);
@@ -69,11 +69,10 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
                                   void (*fn)(VirtIODevice *vdev, VirtQueue *vq))
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
-    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     int rc;
 
     /* Set up virtqueue notify */
-    rc = k->set_host_notifier(qbus->parent, n, true);
+    rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
     if (rc != 0) {
         fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
                 rc);
@@ -159,7 +158,7 @@ fail_vrings:
     virtio_scsi_clear_aio(s);
     aio_context_release(s->ctx);
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
-        k->set_host_notifier(qbus->parent, i, false);
+        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
     }
     k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
 fail_guest_notifiers:
@@ -198,7 +197,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
     aio_context_release(s->ctx);
 
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
-        k->set_host_notifier(qbus->parent, i, false);
+        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
     }
 
     /* Clean up guest notifier (irq) */
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 71d09d3ef3..e8179d6616 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -666,11 +666,6 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
 static void virtio_scsi_save(QEMUFile *f, void *opaque)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    if (s->dataplane_started) {
-        virtio_scsi_dataplane_stop(s);
-    }
     virtio_save(vdev, f);
 }
 
diff --git a/hw/sh4/sh_pci.c b/hw/sh4/sh_pci.c
index e820a32307..1747628f3d 100644
--- a/hw/sh4/sh_pci.c
+++ b/hw/sh4/sh_pci.c
@@ -55,7 +55,7 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val,
 
     switch(addr) {
     case 0 ... 0xfc:
-        cpu_to_le32w((uint32_t*)(pcic->dev->config + addr), val);
+        stl_le_p(pcic->dev->config + addr, val);
         break;
     case 0x1c0:
         pcic->par = val;
@@ -85,7 +85,7 @@ static uint64_t sh_pci_reg_read (void *p, hwaddr addr,
 
     switch(addr) {
     case 0 ... 0xfc:
-        return le32_to_cpup((uint32_t*)(pcic->dev->config + addr));
+        return ldl_le_p(pcic->dev->config + addr);
     case 0x1c0:
         return pcic->par;
     case 0x1c4:
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 81cc5b0ae3..a01394d5ac 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1110,14 +1110,15 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
     int i, r, e;
-    if (!k->set_host_notifier) {
+    if (!k->ioeventfd_started) {
         fprintf(stderr, "binding does not support host notifiers\n");
         r = -ENOSYS;
         goto fail;
     }
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
+        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+                                         true);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
             goto fail_vq;
@@ -1127,7 +1128,8 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     return 0;
 fail_vq:
     while (--i >= 0) {
-        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+                                         false);
         if (e < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
             fflush(stderr);
@@ -1146,12 +1148,11 @@ fail:
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
-    VirtioBusState *vbus = VIRTIO_BUS(qbus);
-    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+                                         false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
             fflush(stderr);
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 574f0e23f8..131376027b 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -146,6 +146,138 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
     }
 }
 
+/*
+ * This function handles both assigning the ioeventfd handler and
+ * registering it with the kernel.
+ * assign: register/deregister ioeventfd with the kernel
+ * set_handler: use the generic ioeventfd handler
+ */
+static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
+                                      int n, bool assign, bool set_handler)
+{
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
+    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+    int r = 0;
+
+    if (assign) {
+        r = event_notifier_init(notifier, 1);
+        if (r < 0) {
+            error_report("%s: unable to init event notifier: %d", __func__, r);
+            return r;
+        }
+        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+        r = k->ioeventfd_assign(proxy, notifier, n, assign);
+        if (r < 0) {
+            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+            event_notifier_cleanup(notifier);
+            return r;
+        }
+    } else {
+        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+        k->ioeventfd_assign(proxy, notifier, n, assign);
+        event_notifier_cleanup(notifier);
+    }
+    return r;
+}
+
+void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+{
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+    DeviceState *proxy = DEVICE(BUS(bus)->parent);
+    VirtIODevice *vdev;
+    int n, r;
+
+    if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
+        return;
+    }
+    if (k->ioeventfd_disabled(proxy)) {
+        return;
+    }
+    vdev = virtio_bus_get_device(bus);
+    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+        if (!virtio_queue_get_num(vdev, n)) {
+            continue;
+        }
+        r = set_host_notifier_internal(proxy, bus, n, true, true);
+        if (r < 0) {
+            goto assign_error;
+        }
+    }
+    k->ioeventfd_set_started(proxy, true, false);
+    return;
+
+assign_error:
+    while (--n >= 0) {
+        if (!virtio_queue_get_num(vdev, n)) {
+            continue;
+        }
+
+        r = set_host_notifier_internal(proxy, bus, n, false, false);
+        assert(r >= 0);
+    }
+    k->ioeventfd_set_started(proxy, false, true);
+    error_report("%s: failed. Fallback to userspace (slower).", __func__);
+}
+
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
+{
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+    DeviceState *proxy = DEVICE(BUS(bus)->parent);
+    VirtIODevice *vdev;
+    int n, r;
+
+    if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
+        return;
+    }
+    vdev = virtio_bus_get_device(bus);
+    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+        if (!virtio_queue_get_num(vdev, n)) {
+            continue;
+        }
+        r = set_host_notifier_internal(proxy, bus, n, false, false);
+        assert(r >= 0);
+    }
+    k->ioeventfd_set_started(proxy, false, false);
+}
+
+/*
+ * This function switches from/to the generic ioeventfd handler.
+ * assign==false means 'use generic ioeventfd handler'.
+ */
+int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
+{
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+    DeviceState *proxy = DEVICE(BUS(bus)->parent);
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
+
+    if (!k->ioeventfd_started) {
+        return -ENOSYS;
+    }
+    if (assign) {
+        /*
+         * Stop using the generic ioeventfd, we are doing eventfd handling
+         * ourselves below
+         */
+        k->ioeventfd_set_disabled(proxy, true);
+    }
+    /*
+     * Just switch the handler, don't deassign the ioeventfd.
+     * Otherwise, there's a window where we don't have an
+     * ioeventfd and we may end up with a notification where
+     * we don't expect one.
+     */
+    virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign);
+    if (!assign) {
+        /* Use generic ioeventfd handler again. */
+        k->ioeventfd_set_disabled(proxy, false);
+    }
+    return 0;
+}
+
 static char *virtio_bus_get_dev_path(DeviceState *dev)
 {
     BusState *bus = qdev_get_parent_bus(dev);
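
Backends that take over a queue's host notifier (vhost, the scsi dataplane) now call virtio_bus_set_host_notifier() instead of a per-transport set_host_notifier callback, as the hw/virtio/vhost.c and hw/scsi hunks above show. A hypothetical backend would use it roughly like this (sketch only, error handling trimmed):

    /* Take over queue n's ioeventfd, later hand it back to the generic
     * handler. */
    static int sketch_backend_start_queue(VirtioBusState *bus, int n)
    {
        int r = virtio_bus_set_host_notifier(bus, n, true);
        if (r < 0) {
            return r;   /* transport has no ioeventfd support (-ENOSYS) */
        }
        /* ...attach the backend's own handler to the queue's notifier... */
        return 0;
    }

    static void sketch_backend_stop_queue(VirtioBusState *bus, int n)
    {
        /* ...detach the backend handler first... */
        virtio_bus_set_host_notifier(bus, n, false);
    }
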
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index d4cd91f8c4..eb84b74532 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -93,90 +93,59 @@ typedef struct {
     bool ioeventfd_started;
 } VirtIOMMIOProxy;
 
-static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
-                                                  int n, bool assign,
-                                                  bool set_handler)
+static bool virtio_mmio_ioeventfd_started(DeviceState *d)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    VirtQueue *vq = virtio_get_queue(vdev, n);
-    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
-    int r = 0;
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 
-    if (assign) {
-        r = event_notifier_init(notifier, 1);
-        if (r < 0) {
-            error_report("%s: unable to init event notifier: %d",
-                         __func__, r);
-            return r;
-        }
-        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
-        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
-                                  true, n, notifier);
-    } else {
-        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
-                                  true, n, notifier);
-        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
-        event_notifier_cleanup(notifier);
-    }
-    return r;
+    return proxy->ioeventfd_started;
 }
 
-static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
+                                              bool err)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    int n, r;
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 
-    if (!kvm_eventfds_enabled() ||
-        proxy->ioeventfd_disabled ||
-        proxy->ioeventfd_started) {
-        return;
-    }
+    proxy->ioeventfd_started = started;
+}
 
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
+static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
+{
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 
-        r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
-        if (r < 0) {
-            goto assign_error;
-        }
-    }
-    proxy->ioeventfd_started = true;
-    return;
+    return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
+}
 
-assign_error:
-    while (--n >= 0) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
+static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 
-        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
-        assert(r >= 0);
-    }
-    proxy->ioeventfd_started = false;
-    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+    proxy->ioeventfd_disabled = disabled;
 }
 
-static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+static int virtio_mmio_ioeventfd_assign(DeviceState *d,
+                                        EventNotifier *notifier,
+                                        int n, bool assign)
 {
-    int r;
-    int n;
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
 
-    if (!proxy->ioeventfd_started) {
-        return;
+    if (assign) {
+        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+                                  true, n, notifier);
+    } else {
+        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+                                  true, n, notifier);
     }
+    return 0;
+}
 
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
+static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+    virtio_bus_start_ioeventfd(&proxy->bus);
+}
 
-        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
-        assert(r >= 0);
-    }
-    proxy->ioeventfd_started = false;
+static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+    virtio_bus_stop_ioeventfd(&proxy->bus);
 }
 
 static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
@@ -498,25 +467,6 @@ assign_error:
     return r;
 }
 
-static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
-                                         bool assign)
-{
-    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
-
-    /* Stop using ioeventfd for virtqueue kick if the device starts using host
-     * notifiers.  This makes it easy to avoid stepping on each others' toes.
-     */
-    proxy->ioeventfd_disabled = assign;
-    if (assign) {
-        virtio_mmio_stop_ioeventfd(proxy);
-    }
-    /* We don't need to start here: it's not needed because backend
-     * currently only stops on status change away from ok,
-     * reset, vmstop and such. If we do add code to start here,
-     * need to check vmstate, device state etc. */
-    return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
-}
-
 /* virtio-mmio device */
 
 static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
@@ -558,8 +508,12 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
     k->notify = virtio_mmio_update_irq;
     k->save_config = virtio_mmio_save_config;
     k->load_config = virtio_mmio_load_config;
-    k->set_host_notifier = virtio_mmio_set_host_notifier;
     k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+    k->ioeventfd_started = virtio_mmio_ioeventfd_started;
+    k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
+    k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
+    k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
+    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
     k->has_variable_vring_alignment = true;
     bus_class->max_dev = 1;
 }
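
Both transports in this patch stop walking the virtqueues themselves: virtio-mmio (above) and virtio-pci (below) now only report and record their ioeventfd state and wire an eventfd into their notify region via the new VirtioBusClass hooks, while the start/stop loops move into shared virtio-bus code (virtio_bus_start_ioeventfd()/virtio_bus_stop_ioeventfd(), called from the thin wrappers in the hunks above). The following is a minimal sketch of what that shared start path presumably looks like, built only from the hook signatures visible in this diff; the actual body in hw/virtio/virtio-bus.c may differ in details such as return type and error handling.

/* Sketch only: assumed shape of the generic start helper that replaces the
 * per-transport loops deleted in this patch.  It drives the VirtioBusClass
 * hooks each transport registers in its bus class_init (ioeventfd_started,
 * ioeventfd_set_started, ioeventfd_disabled, ioeventfd_assign); the real
 * hw/virtio/virtio-bus.c implementation may differ in detail. */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"

static void virtio_bus_teardown_queue_sketch(VirtioBusClass *k,
                                             DeviceState *proxy,
                                             VirtIODevice *vdev, int n)
{
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);

    k->ioeventfd_assign(proxy, notifier, n, false);
    virtio_queue_set_host_notifier_fd_handler(vq, false, false);
    event_notifier_cleanup(notifier);
}

void virtio_bus_start_ioeventfd_sketch(VirtioBusState *bus)
{
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    int n, r;

    if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
        return;             /* transport lacks the hooks, or already running */
    }
    if (k->ioeventfd_disabled(proxy)) {
        return;             /* e.g. no KVM eventfds, or disabled by property */
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq;
        EventNotifier *notifier;

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        vq = virtio_get_queue(vdev, n);
        notifier = virtio_queue_get_host_notifier(vq);

        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            goto assign_error;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, true);
        r = k->ioeventfd_assign(proxy, notifier, n, true);
        if (r < 0) {
            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
            event_notifier_cleanup(notifier);
            goto assign_error;
        }
    }
    k->ioeventfd_set_started(proxy, true, false);
    return;

assign_error:
    /* Undo the queues that were already wired up, then fall back to the
     * slower userspace notification path. */
    error_report("%s: failed, falling back to userspace (slower)", __func__);
    while (--n >= 0) {
        if (virtio_queue_get_num(vdev, n)) {
            virtio_bus_teardown_queue_sketch(k, proxy, vdev, n);
        }
    }
    k->ioeventfd_set_started(proxy, false, true);
}
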
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 1a0278304b..2b34b43060 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -262,14 +262,44 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
     return 0;
 }
 
+static bool virtio_pci_ioeventfd_started(DeviceState *d)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    return proxy->ioeventfd_started;
+}
+
+static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started,
+                                             bool err)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    proxy->ioeventfd_started = started;
+}
+
+static bool virtio_pci_ioeventfd_disabled(DeviceState *d)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    return proxy->ioeventfd_disabled ||
+        !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    proxy->ioeventfd_disabled = disabled;
+}
+
 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
 
-static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
-                                                 int n, bool assign, bool set_handler)
+static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+                                       int n, bool assign)
 {
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtQueue *vq = virtio_get_queue(vdev, n);
-    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
     bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
     bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
     bool fast_mmio = kvm_ioeventfd_any_length_enabled();
@@ -280,16 +310,8 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
     hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                          virtio_get_queue_index(vq);
     hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
-    int r = 0;
 
     if (assign) {
-        r = event_notifier_init(notifier, 1);
-        if (r < 0) {
-            error_report("%s: unable to init event notifier: %d",
-                         __func__, r);
-            return r;
-        }
-        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
         if (modern) {
             if (fast_mmio) {
                 memory_region_add_eventfd(modern_mr, modern_addr, 0,
@@ -325,68 +347,18 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
             memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                       true, n, notifier);
         }
-        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
-        event_notifier_cleanup(notifier);
     }
-    return r;
+    return 0;
 }
 
 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    int n, r;
-
-    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
-        proxy->ioeventfd_disabled ||
-        proxy->ioeventfd_started) {
-        return;
-    }
-
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-
-        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
-        if (r < 0) {
-            goto assign_error;
-        }
-    }
-    proxy->ioeventfd_started = true;
-    return;
-
-assign_error:
-    while (--n >= 0) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-
-        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
-        assert(r >= 0);
-    }
-    proxy->ioeventfd_started = false;
-    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+    virtio_bus_start_ioeventfd(&proxy->bus);
 }
 
 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
 {
-    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-    int r;
-    int n;
-
-    if (!proxy->ioeventfd_started) {
-        return;
-    }
-
-    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(vdev, n)) {
-            continue;
-        }
-
-        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
-        assert(r >= 0);
-    }
-    proxy->ioeventfd_started = false;
+    virtio_bus_stop_ioeventfd(&proxy->bus);
 }
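
For symmetry, here is a companion sketch of the shared stop path that virtio_pci_stop_ioeventfd() and virtio_mmio_stop_ioeventfd() now delegate to; it mirrors the per-queue teardown the transports used to perform in the loops deleted above. Again, this is an assumption about the shape of the helper in hw/virtio/virtio-bus.c, not its actual body.

/* Companion sketch to the start path above: assumed shape of the shared
 * stop helper.  It unwinds each queue's eventfd via ioeventfd_assign(...,
 * false) and clears the transport's started flag. */
void virtio_bus_stop_ioeventfd_sketch(VirtioBusState *bus)
{
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    int n;

    if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
        return;                             /* nothing to undo */
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq;
        EventNotifier *notifier;

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        vq = virtio_get_queue(vdev, n);
        notifier = virtio_queue_get_host_notifier(vq);

        k->ioeventfd_assign(proxy, notifier, n, false);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    k->ioeventfd_set_started(proxy, false, false);
}
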
 
 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
@@ -1110,24 +1082,6 @@ assign_error:
     return r;
 }
 
-static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
-{
-    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
-    /* Stop using ioeventfd for virtqueue kick if the device starts using host
-     * notifiers.  This makes it easy to avoid stepping on each others' toes.
-     */
-    proxy->ioeventfd_disabled = assign;
-    if (assign) {
-        virtio_pci_stop_ioeventfd(proxy);
-    }
-    /* We don't need to start here: it's not needed because backend
-     * currently only stops on status change away from ok,
-     * reset, vmstop and such. If we do add code to start here,
-     * need to check vmstate, device state etc. */
-    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
-}
-
 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
@@ -2488,12 +2442,16 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
     k->load_extra_state = virtio_pci_load_extra_state;
     k->has_extra_state = virtio_pci_has_extra_state;
     k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
-    k->set_host_notifier = virtio_pci_set_host_notifier;
     k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
     k->vmstate_change = virtio_pci_vmstate_change;
     k->device_plugged = virtio_pci_device_plugged;
     k->device_unplugged = virtio_pci_device_unplugged;
     k->query_nvectors = virtio_pci_query_nvectors;
+    k->ioeventfd_started = virtio_pci_ioeventfd_started;
+    k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started;
+    k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled;
+    k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled;
+    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
 }
 
 static const TypeInfo virtio_pci_bus_info = {