Diffstat (limited to 'hw/dataplane')
-rw-r--r--  hw/dataplane/Makefile.objs |   3
-rw-r--r--  hw/dataplane/event-poll.c  | 100
-rw-r--r--  hw/dataplane/event-poll.h  |  40
-rw-r--r--  hw/dataplane/hostmem.c     | 176
-rw-r--r--  hw/dataplane/hostmem.h     |  57
-rw-r--r--  hw/dataplane/ioq.c         | 117
-rw-r--r--  hw/dataplane/ioq.h         |  57
-rw-r--r--  hw/dataplane/virtio-blk.c  | 465
-rw-r--r--  hw/dataplane/virtio-blk.h  |  29
-rw-r--r--  hw/dataplane/vring.c       | 362
-rw-r--r--  hw/dataplane/vring.h       |  62
11 files changed, 1468 insertions, 0 deletions
diff --git a/hw/dataplane/Makefile.objs b/hw/dataplane/Makefile.objs
new file mode 100644
index 0000000000..682aa9e7ee
--- /dev/null
+++ b/hw/dataplane/Makefile.objs
@@ -0,0 +1,3 @@
+ifeq ($(CONFIG_VIRTIO), y)
+common-obj-$(CONFIG_VIRTIO_BLK_DATA_PLANE) += hostmem.o vring.o event-poll.o ioq.o virtio-blk.o
+endif
diff --git a/hw/dataplane/event-poll.c b/hw/dataplane/event-poll.c
new file mode 100644
index 0000000000..2b55c6e255
--- /dev/null
+++ b/hw/dataplane/event-poll.c
@@ -0,0 +1,100 @@
+/*
+ * Event loop with file descriptor polling
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <sys/epoll.h>
+#include "hw/dataplane/event-poll.h"
+
+/* Add an event notifier and its callback for polling */
+void event_poll_add(EventPoll *poll, EventHandler *handler,
+                    EventNotifier *notifier, EventCallback *callback)
+{
+    struct epoll_event event = {
+        .events = EPOLLIN,
+        .data.ptr = handler,
+    };
+    handler->notifier = notifier;
+    handler->callback = callback;
+    if (epoll_ctl(poll->epoll_fd, EPOLL_CTL_ADD,
+                  event_notifier_get_fd(notifier), &event) != 0) {
+        fprintf(stderr, "failed to add event handler to epoll: %m\n");
+        exit(1);
+    }
+}
+
+/* Event callback for stopping event_poll() */
+static void handle_stop(EventHandler *handler)
+{
+    /* Do nothing */
+}
+
+void event_poll_init(EventPoll *poll)
+{
+    /* Create epoll file descriptor */
+    poll->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+    if (poll->epoll_fd < 0) {
+        fprintf(stderr, "epoll_create1 failed: %m\n");
+        exit(1);
+    }
+
+    /* Set up stop notifier */
+    if (event_notifier_init(&poll->stop_notifier, 0) < 0) {
+        fprintf(stderr, "failed to init stop notifier\n");
+        exit(1);
+    }
+    event_poll_add(poll, &poll->stop_handler,
+                   &poll->stop_notifier, handle_stop);
+}
+
+void event_poll_cleanup(EventPoll *poll)
+{
+    event_notifier_cleanup(&poll->stop_notifier);
+    close(poll->epoll_fd);
+    poll->epoll_fd = -1;
+}
+
+/* Block until the next event and invoke its callback */
+void event_poll(EventPoll *poll)
+{
+    EventHandler *handler;
+    struct epoll_event event;
+    int nevents;
+
+    /* Wait for the next event.  Only do one event per call to keep the
+     * function simple; this could be changed later. */
+    do {
+        nevents = epoll_wait(poll->epoll_fd, &event, 1, -1);
+    } while (nevents < 0 && errno == EINTR);
+    if (unlikely(nevents != 1)) {
+        fprintf(stderr, "epoll_wait failed: %m\n");
+        exit(1); /* should never happen */
+    }
+
+    /* Find out which event handler has become active */
+    handler = event.data.ptr;
+
+    /* Clear the eventfd */
+    event_notifier_test_and_clear(handler->notifier);
+
+    /* Handle the event */
+    handler->callback(handler);
+}
+
+/* Stop event_poll()
+ *
+ * This function can be used from another thread.
+ */
+void event_poll_notify(EventPoll *poll)
+{
+    event_notifier_set(&poll->stop_notifier);
+}
diff --git a/hw/dataplane/event-poll.h b/hw/dataplane/event-poll.h
new file mode 100644
index 0000000000..3e8d3ec7d5
--- /dev/null
+++ b/hw/dataplane/event-poll.h
@@ -0,0 +1,40 @@
+/*
+ * Event loop with file descriptor polling
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EVENT_POLL_H
+#define EVENT_POLL_H
+
+#include "qemu/event_notifier.h"
+
+typedef struct EventHandler EventHandler;
+typedef void EventCallback(EventHandler *handler);
+struct EventHandler {
+    EventNotifier *notifier;        /* eventfd */
+    EventCallback *callback;        /* callback function */
+};
+
+typedef struct {
+    int epoll_fd;                   /* epoll(2) file descriptor */
+    EventNotifier stop_notifier;    /* stop poll notifier */
+    EventHandler stop_handler;      /* stop poll handler */
+} EventPoll;
+
+void event_poll_add(EventPoll *poll, EventHandler *handler,
+                    EventNotifier *notifier, EventCallback *callback);
+void event_poll_init(EventPoll *poll);
+void event_poll_cleanup(EventPoll *poll);
+void event_poll(EventPoll *poll);
+void event_poll_notify(EventPoll *poll);
+
+#endif /* EVENT_POLL_H */
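
A minimal usage sketch of the EventPoll API above (not part of the patch;
error handling is omitted and the notifier wiring is hypothetical).  Each
event_poll() call blocks, clears the eventfd, and dispatches exactly one
callback:

#include "qemu/event_notifier.h"
#include "hw/dataplane/event-poll.h"

static bool done;

/* Invoked by event_poll() when my_notifier is signalled */
static void my_callback(EventHandler *handler)
{
    done = true;
}

static void example(void)
{
    EventPoll poll;
    EventNotifier my_notifier;
    EventHandler my_handler;

    event_poll_init(&poll);
    event_notifier_init(&my_notifier, 0);
    event_poll_add(&poll, &my_handler, &my_notifier, my_callback);

    event_notifier_set(&my_notifier);  /* simulate an event source */
    while (!done) {
        event_poll(&poll);             /* one callback per call */
    }

    event_notifier_cleanup(&my_notifier);
    event_poll_cleanup(&poll);
}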
diff --git a/hw/dataplane/hostmem.c b/hw/dataplane/hostmem.c
new file mode 100644
index 0000000000..380537e06d
--- /dev/null
+++ b/hw/dataplane/hostmem.c
@@ -0,0 +1,176 @@
+/*
+ * Thread-safe guest to host memory mapping
+ *
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "exec/address-spaces.h"
+#include "hostmem.h"
+
+static int hostmem_lookup_cmp(const void *phys_, const void *region_)
+{
+    hwaddr phys = *(const hwaddr *)phys_;
+    const HostMemRegion *region = region_;
+
+    if (phys < region->guest_addr) {
+        return -1;
+    } else if (phys >= region->guest_addr + region->size) {
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+/**
+ * Map guest physical address to host pointer
+ */
+void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write)
+{
+    HostMemRegion *region;
+    void *host_addr = NULL;
+    hwaddr offset_within_region;
+
+    qemu_mutex_lock(&hostmem->current_regions_lock);
+    region = bsearch(&phys, hostmem->current_regions,
+                     hostmem->num_current_regions,
+                     sizeof(hostmem->current_regions[0]),
+                     hostmem_lookup_cmp);
+    if (!region) {
+        goto out;
+    }
+    if (is_write && region->readonly) {
+        goto out;
+    }
+    offset_within_region = phys - region->guest_addr;
+    if (len <= region->size - offset_within_region) {
+        host_addr = region->host_addr + offset_within_region;
+    }
+out:
+    qemu_mutex_unlock(&hostmem->current_regions_lock);
+
+    return host_addr;
+}
+
+/**
+ * Install new regions list
+ */
+static void hostmem_listener_commit(MemoryListener *listener)
+{
+    HostMem *hostmem = container_of(listener, HostMem, listener);
+
+    qemu_mutex_lock(&hostmem->current_regions_lock);
+    g_free(hostmem->current_regions);
+    hostmem->current_regions = hostmem->new_regions;
+    hostmem->num_current_regions = hostmem->num_new_regions;
+    qemu_mutex_unlock(&hostmem->current_regions_lock);
+
+    /* Reset new regions list */
+    hostmem->new_regions = NULL;
+    hostmem->num_new_regions = 0;
+}
+
+/**
+ * Add a MemoryRegionSection to the new regions list
+ */
+static void hostmem_append_new_region(HostMem *hostmem,
+                                      MemoryRegionSection *section)
+{
+    void *ram_ptr = memory_region_get_ram_ptr(section->mr);
+    size_t num = hostmem->num_new_regions;
+    size_t new_size = (num + 1) * sizeof(hostmem->new_regions[0]);
+
+    hostmem->new_regions = g_realloc(hostmem->new_regions, new_size);
+    hostmem->new_regions[num] = (HostMemRegion){
+        .host_addr = ram_ptr + section->offset_within_region,
+        .guest_addr = section->offset_within_address_space,
+        .size = section->size,
+        .readonly = section->readonly,
+    };
+    hostmem->num_new_regions++;
+}
+
+static void hostmem_listener_append_region(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+    HostMem *hostmem = container_of(listener, HostMem, listener);
+
+    /* Ignore non-RAM regions, we may not be able to map them */
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
+
+    /* Ignore regions with dirty logging, we cannot mark them dirty */
+    if (memory_region_is_logging(section->mr)) {
+        return;
+    }
+
+    hostmem_append_new_region(hostmem, section);
+}
+
+/* We don't implement most MemoryListener callbacks, use these nop stubs */
+static void hostmem_listener_dummy(MemoryListener *listener)
+{
+}
+
+static void hostmem_listener_section_dummy(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+}
+
+static void hostmem_listener_eventfd_dummy(MemoryListener *listener,
+                                           MemoryRegionSection *section,
+                                           bool match_data, uint64_t data,
+                                           EventNotifier *e)
+{
+}
+
+static void hostmem_listener_coalesced_mmio_dummy(MemoryListener *listener,
+                                                  MemoryRegionSection *section,
+                                                  hwaddr addr, hwaddr len)
+{
+}
+
+void hostmem_init(HostMem *hostmem)
+{
+    memset(hostmem, 0, sizeof(*hostmem));
+
+    qemu_mutex_init(&hostmem->current_regions_lock);
+
+    hostmem->listener = (MemoryListener){
+        .begin = hostmem_listener_dummy,
+        .commit = hostmem_listener_commit,
+        .region_add = hostmem_listener_append_region,
+        .region_del = hostmem_listener_section_dummy,
+        .region_nop = hostmem_listener_append_region,
+        .log_start = hostmem_listener_section_dummy,
+        .log_stop = hostmem_listener_section_dummy,
+        .log_sync = hostmem_listener_section_dummy,
+        .log_global_start = hostmem_listener_dummy,
+        .log_global_stop = hostmem_listener_dummy,
+        .eventfd_add = hostmem_listener_eventfd_dummy,
+        .eventfd_del = hostmem_listener_eventfd_dummy,
+        .coalesced_mmio_add = hostmem_listener_coalesced_mmio_dummy,
+        .coalesced_mmio_del = hostmem_listener_coalesced_mmio_dummy,
+        .priority = 10,
+    };
+
+    memory_listener_register(&hostmem->listener, &address_space_memory);
+    if (hostmem->num_new_regions > 0) {
+        hostmem_listener_commit(&hostmem->listener);
+    }
+}
+
+void hostmem_finalize(HostMem *hostmem)
+{
+    memory_listener_unregister(&hostmem->listener);
+    g_free(hostmem->new_regions);
+    g_free(hostmem->current_regions);
+    qemu_mutex_destroy(&hostmem->current_regions_lock);
+}
diff --git a/hw/dataplane/hostmem.h b/hw/dataplane/hostmem.h
new file mode 100644
index 0000000000..b2cf09333f
--- /dev/null
+++ b/hw/dataplane/hostmem.h
@@ -0,0 +1,57 @@
+/*
+ * Thread-safe guest to host memory mapping
+ *
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HOSTMEM_H
+#define HOSTMEM_H
+
+#include "exec/memory.h"
+#include "qemu/thread.h"
+
+typedef struct {
+    void *host_addr;
+    hwaddr guest_addr;
+    uint64_t size;
+    bool readonly;
+} HostMemRegion;
+
+typedef struct {
+    /* The listener is invoked when regions change, and a new list of regions
+     * is built up completely before it is installed.
+     */
+    MemoryListener listener;
+    HostMemRegion *new_regions;
+    size_t num_new_regions;
+
+    /* Current regions are accessed from multiple threads either to lookup
+     * addresses or to install a new list of regions.  The lock protects the
+     * pointer and the regions.
+     */
+    QemuMutex current_regions_lock;
+    HostMemRegion *current_regions;
+    size_t num_current_regions;
+} HostMem;
+
+void hostmem_init(HostMem *hostmem);
+void hostmem_finalize(HostMem *hostmem);
+
+/**
+ * Map a guest physical address to a pointer
+ *
+ * Note that there is no map/unmap mechanism here.  The caller must ensure that
+ * mapped memory is no longer used across events like hot memory unplug.  This
+ * can be done with other mechanisms like bdrv_drain_all() that quiesce
+ * in-flight I/O.
+ */
+void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write);
+
+#endif /* HOSTMEM_H */
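
A sketch of how a caller consumes hostmem_lookup() (a hypothetical helper,
not part of the patch).  The lookup either returns a host pointer valid for
the whole [phys, phys + len) range or NULL, so failure must be handled rather
than assumed away; the function takes the internal lock, making it safe to
call from the dataplane thread:

#include <string.h>
#include "hw/dataplane/hostmem.h"

/* Read a 4-byte value from guest RAM; hostmem must already be initialized */
static bool read_guest_u32(HostMem *hostmem, hwaddr phys, uint32_t *out)
{
    void *p = hostmem_lookup(hostmem, phys, sizeof(*out), false);
    if (!p) {
        return false; /* not RAM, readonly mismatch, or crosses a region */
    }
    memcpy(out, p, sizeof(*out));
    return true;
}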
diff --git a/hw/dataplane/ioq.c b/hw/dataplane/ioq.c
new file mode 100644
index 0000000000..0c9f5c4d60
--- /dev/null
+++ b/hw/dataplane/ioq.c
@@ -0,0 +1,117 @@
+/*
+ * Linux AIO request queue
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw/dataplane/ioq.h"
+
+void ioq_init(IOQueue *ioq, int fd, unsigned int max_reqs)
+{
+    int rc;
+
+    ioq->fd = fd;
+    ioq->max_reqs = max_reqs;
+
+    memset(&ioq->io_ctx, 0, sizeof ioq->io_ctx);
+    rc = io_setup(max_reqs, &ioq->io_ctx);
+    if (rc != 0) {
+        fprintf(stderr, "ioq io_setup failed %d\n", rc);
+        exit(1);
+    }
+
+    rc = event_notifier_init(&ioq->io_notifier, 0);
+    if (rc != 0) {
+        fprintf(stderr, "ioq io event notifier creation failed %d\n", rc);
+        exit(1);
+    }
+
+    ioq->freelist = g_malloc0(sizeof ioq->freelist[0] * max_reqs);
+    ioq->freelist_idx = 0;
+
+    ioq->queue = g_malloc0(sizeof ioq->queue[0] * max_reqs);
+    ioq->queue_idx = 0;
+}
+
+void ioq_cleanup(IOQueue *ioq)
+{
+    g_free(ioq->freelist);
+    g_free(ioq->queue);
+
+    event_notifier_cleanup(&ioq->io_notifier);
+    io_destroy(ioq->io_ctx);
+}
+
+EventNotifier *ioq_get_notifier(IOQueue *ioq)
+{
+    return &ioq->io_notifier;
+}
+
+struct iocb *ioq_get_iocb(IOQueue *ioq)
+{
+    /* Underflow cannot happen since ioq is sized for max_reqs */
+    assert(ioq->freelist_idx != 0);
+
+    struct iocb *iocb = ioq->freelist[--ioq->freelist_idx];
+    ioq->queue[ioq->queue_idx++] = iocb;
+    return iocb;
+}
+
+void ioq_put_iocb(IOQueue *ioq, struct iocb *iocb)
+{
+    /* Overflow cannot happen since ioq is sized for max_reqs */
+    assert(ioq->freelist_idx != ioq->max_reqs);
+
+    ioq->freelist[ioq->freelist_idx++] = iocb;
+}
+
+struct iocb *ioq_rdwr(IOQueue *ioq, bool read, struct iovec *iov,
+                      unsigned int count, long long offset)
+{
+    struct iocb *iocb = ioq_get_iocb(ioq);
+
+    if (read) {
+        io_prep_preadv(iocb, ioq->fd, iov, count, offset);
+    } else {
+        io_prep_pwritev(iocb, ioq->fd, iov, count, offset);
+    }
+    io_set_eventfd(iocb, event_notifier_get_fd(&ioq->io_notifier));
+    return iocb;
+}
+
+int ioq_submit(IOQueue *ioq)
+{
+    int rc = io_submit(ioq->io_ctx, ioq->queue_idx, ioq->queue);
+    ioq->queue_idx = 0; /* reset */
+    return rc;
+}
+
+int ioq_run_completion(IOQueue *ioq, IOQueueCompletion *completion,
+                       void *opaque)
+{
+    struct io_event events[ioq->max_reqs];
+    int nevents, i;
+
+    do {
+        nevents = io_getevents(ioq->io_ctx, 0, ioq->max_reqs, events, NULL);
+    } while (nevents == -EINTR); /* io_getevents() returns -errno directly */
+    if (nevents < 0) {
+        return nevents;
+    }
+
+    for (i = 0; i < nevents; i++) {
+        ssize_t ret = ((uint64_t)events[i].res2 << 32) | events[i].res;
+
+        completion(events[i].obj, ret, opaque);
+        ioq_put_iocb(ioq, events[i].obj);
+    }
+    return nevents;
+}
diff --git a/hw/dataplane/ioq.h b/hw/dataplane/ioq.h
new file mode 100644
index 0000000000..b49b5de7f4
--- /dev/null
+++ b/hw/dataplane/ioq.h
@@ -0,0 +1,57 @@
+/*
+ * Linux AIO request queue
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef IOQ_H
+#define IOQ_H
+
+#include <libaio.h>
+#include "qemu/event_notifier.h"
+
+typedef struct {
+    int fd;                         /* file descriptor */
+    unsigned int max_reqs;          /* max length of freelist and queue */
+
+    io_context_t io_ctx;            /* Linux AIO context */
+    EventNotifier io_notifier;      /* Linux AIO eventfd */
+
+    /* Requests can complete in any order so a free list is necessary to manage
+     * available iocbs.
+     */
+    struct iocb **freelist;         /* free iocbs */
+    unsigned int freelist_idx;
+
+    /* Multiple requests are queued up before submitting them all in one go */
+    struct iocb **queue;            /* queued iocbs */
+    unsigned int queue_idx;
+} IOQueue;
+
+void ioq_init(IOQueue *ioq, int fd, unsigned int max_reqs);
+void ioq_cleanup(IOQueue *ioq);
+EventNotifier *ioq_get_notifier(IOQueue *ioq);
+struct iocb *ioq_get_iocb(IOQueue *ioq);
+void ioq_put_iocb(IOQueue *ioq, struct iocb *iocb);
+struct iocb *ioq_rdwr(IOQueue *ioq, bool read, struct iovec *iov,
+                      unsigned int count, long long offset);
+int ioq_submit(IOQueue *ioq);
+
+static inline unsigned int ioq_num_queued(IOQueue *ioq)
+{
+    return ioq->queue_idx;
+}
+
+typedef void IOQueueCompletion(struct iocb *iocb, ssize_t ret, void *opaque);
+int ioq_run_completion(IOQueue *ioq, IOQueueCompletion *completion,
+                       void *opaque);
+
+#endif /* IOQ_H */
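
The intended call sequence, sketched (hypothetical fd and sizes; real users
wait on ioq_get_notifier() from an event loop, as virtio-blk.c does below).
Note that the freelist starts empty: the caller owns the iocb pool and must
seed it with ioq_put_iocb() before issuing the first request:

#include "hw/dataplane/ioq.h"

static void done_cb(struct iocb *iocb, ssize_t ret, void *opaque)
{
    /* ret is the byte count on success or a negative error code */
}

static struct iocb iocbs[16];      /* request pool owned by the caller */

static void example(int fd)        /* fd should be opened with O_DIRECT */
{
    IOQueue ioq;
    static char buf[4096] __attribute__((aligned(4096)));
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    int i;

    ioq_init(&ioq, fd, 16);
    for (i = 0; i < 16; i++) {
        ioq_put_iocb(&ioq, &iocbs[i]);  /* seed the freelist */
    }

    ioq_rdwr(&ioq, true, &iov, 1, 0);   /* queue a read at offset 0 */
    if (ioq_submit(&ioq) < 0) {         /* submit everything queued */
        /* handle submission failure */
    }

    /* ... block until ioq_get_notifier(&ioq) becomes readable ... */
    ioq_run_completion(&ioq, done_cb, NULL);
    ioq_cleanup(&ioq);
}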
diff --git a/hw/dataplane/virtio-blk.c b/hw/dataplane/virtio-blk.c
new file mode 100644
index 0000000000..4c4ad8422a
--- /dev/null
+++ b/hw/dataplane/virtio-blk.c
@@ -0,0 +1,465 @@
+/*
+ * Dedicated thread for virtio-blk I/O processing
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "trace.h"
+#include "qemu/iov.h"
+#include "event-poll.h"
+#include "qemu/thread.h"
+#include "vring.h"
+#include "ioq.h"
+#include "migration/migration.h"
+#include "hw/virtio-blk.h"
+#include "hw/dataplane/virtio-blk.h"
+
+enum {
+    SEG_MAX = 126,                  /* maximum number of I/O segments */
+    VRING_MAX = SEG_MAX + 2,        /* maximum number of vring descriptors */
+    REQ_MAX = VRING_MAX,            /* maximum number of requests in the vring,
+                                     * is VRING_MAX / 2 with traditional and
+                                     * VRING_MAX with indirect descriptors */
+};
+
+typedef struct {
+    struct iocb iocb;               /* Linux AIO control block */
+    QEMUIOVector *inhdr;            /* iovecs for virtio_blk_inhdr */
+    unsigned int head;              /* vring descriptor index */
+} VirtIOBlockRequest;
+
+struct VirtIOBlockDataPlane {
+    bool started;
+    QEMUBH *start_bh;
+    QemuThread thread;
+
+    VirtIOBlkConf *blk;
+    int fd;                         /* image file descriptor */
+
+    VirtIODevice *vdev;
+    Vring vring;                    /* virtqueue vring */
+    EventNotifier *guest_notifier;  /* irq */
+
+    EventPoll event_poll;           /* event poller */
+    EventHandler io_handler;        /* Linux AIO completion handler */
+    EventHandler notify_handler;    /* virtqueue notify handler */
+
+    IOQueue ioqueue;                /* Linux AIO queue (should really be per
+                                       dataplane thread) */
+    VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
+                                             queue */
+
+    unsigned int num_reqs;
+
+    Error *migration_blocker;
+};
+
+/* Raise an interrupt to signal guest, if necessary */
+static void notify_guest(VirtIOBlockDataPlane *s)
+{
+    if (!vring_should_notify(s->vdev, &s->vring)) {
+        return;
+    }
+
+    event_notifier_set(s->guest_notifier);
+}
+
+static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
+{
+    VirtIOBlockDataPlane *s = opaque;
+    VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
+    struct virtio_blk_inhdr hdr;
+    int len;
+
+    if (likely(ret >= 0)) {
+        hdr.status = VIRTIO_BLK_S_OK;
+        len = ret;
+    } else {
+        hdr.status = VIRTIO_BLK_S_IOERR;
+        len = 0;
+    }
+
+    trace_virtio_blk_data_plane_complete_request(s, req->head, ret);
+
+    qemu_iovec_from_buf(req->inhdr, 0, &hdr, sizeof(hdr));
+    qemu_iovec_destroy(req->inhdr);
+    g_slice_free(QEMUIOVector, req->inhdr);
+
+    /* According to the virtio specification len should be the number of bytes
+     * written to, but for virtio-blk it seems to be the number of bytes
+     * transferred plus the status bytes.
+     */
+    vring_push(&s->vring, req->head, len + sizeof(hdr));
+
+    s->num_reqs--;
+}
+
+static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
+                                   QEMUIOVector *inhdr, unsigned char status)
+{
+    struct virtio_blk_inhdr hdr = {
+        .status = status,
+    };
+
+    qemu_iovec_from_buf(inhdr, 0, &hdr, sizeof(hdr));
+    qemu_iovec_destroy(inhdr);
+    g_slice_free(QEMUIOVector, inhdr);
+
+    vring_push(&s->vring, head, sizeof(hdr));
+    notify_guest(s);
+}
+
+/* Get disk serial number */
+static void do_get_id_cmd(VirtIOBlockDataPlane *s,
+                          struct iovec *iov, unsigned int iov_cnt,
+                          unsigned int head, QEMUIOVector *inhdr)
+{
+    char id[VIRTIO_BLK_ID_BYTES];
+
+    /* Serial number not NUL-terminated when longer than or equal to buffer */
+    strncpy(id, s->blk->serial ? s->blk->serial : "", sizeof(id));
+    iov_from_buf(iov, iov_cnt, 0, id, sizeof(id));
+    complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
+}
+
+static int process_request(IOQueue *ioq, struct iovec iov[],
+                           unsigned int out_num, unsigned int in_num,
+                           unsigned int head)
+{
+    VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
+    struct iovec *in_iov = &iov[out_num];
+    struct virtio_blk_outhdr outhdr;
+    QEMUIOVector *inhdr;
+    size_t in_size;
+    struct iocb *iocb;
+
+    /* Copy in outhdr */
+    if (unlikely(iov_to_buf(iov, out_num, 0, &outhdr,
+                            sizeof(outhdr)) != sizeof(outhdr))) {
+        error_report("virtio-blk request outhdr too short");
+        return -EFAULT;
+    }
+    iov_discard_front(&iov, &out_num, sizeof(outhdr));
+
+    /* Grab inhdr for later */
+    in_size = iov_size(in_iov, in_num);
+    if (in_size < sizeof(struct virtio_blk_inhdr)) {
+        error_report("virtio_blk request inhdr too short");
+        return -EFAULT;
+    }
+    inhdr = g_slice_new(QEMUIOVector);
+    qemu_iovec_init(inhdr, 1);
+    qemu_iovec_concat_iov(inhdr, in_iov, in_num,
+            in_size - sizeof(struct virtio_blk_inhdr),
+            sizeof(struct virtio_blk_inhdr));
+    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));
+
+    /* TODO Linux sets the barrier bit even when not advertised! */
+    outhdr.type &= ~VIRTIO_BLK_T_BARRIER;
+
+    switch (outhdr.type) {
+    case VIRTIO_BLK_T_IN:
+        iocb = ioq_rdwr(ioq, true, in_iov, in_num, outhdr.sector * 512);
+        break;
+
+    case VIRTIO_BLK_T_OUT:
+        iocb = ioq_rdwr(ioq, false, iov, out_num, outhdr.sector * 512);
+        break;
+
+    case VIRTIO_BLK_T_SCSI_CMD:
+        /* TODO support SCSI commands */
+        complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
+        return 0;
+
+    case VIRTIO_BLK_T_FLUSH:
+        /* TODO fdsync not supported by Linux AIO, do it synchronously here! */
+        if (qemu_fdatasync(s->fd) < 0) {
+            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_IOERR);
+        } else {
+            complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
+        }
+        return 0;
+
+    case VIRTIO_BLK_T_GET_ID:
+        do_get_id_cmd(s, in_iov, in_num, head, inhdr);
+        return 0;
+
+    default:
+        error_report("virtio-blk unsupported request type %#x", outhdr.type);
+        qemu_iovec_destroy(inhdr);
+        g_slice_free(QEMUIOVector, inhdr);
+        return -EFAULT;
+    }
+
+    /* Fill in virtio block metadata needed for completion */
+    VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
+    req->head = head;
+    req->inhdr = inhdr;
+    return 0;
+}
+
+static void handle_notify(EventHandler *handler)
+{
+    VirtIOBlockDataPlane *s = container_of(handler, VirtIOBlockDataPlane,
+                                           notify_handler);
+
+    /* There is one array of iovecs into which all new requests are extracted
+     * from the vring.  Requests are read from the vring and the translated
+     * descriptors are written to the iovecs array.  The iovecs do not have to
+     * persist across handle_notify() calls because the kernel copies the
+     * iovecs on io_submit().
+     *
+     * Handling io_submit() EAGAIN may require storing the requests across
+     * handle_notify() calls until the kernel has sufficient resources to
+     * accept more I/O.  This is not implemented yet.
+     */
+    struct iovec iovec[VRING_MAX];
+    struct iovec *end = &iovec[VRING_MAX];
+    struct iovec *iov = iovec;
+
+    /* When a request is read from the vring, the index of the first descriptor
+     * (aka head) is returned so that the completed request can be pushed onto
+     * the vring later.
+     *
+     * The number of hypervisor read-only iovecs is out_num.  The number of
+     * hypervisor write-only iovecs is in_num.
+     */
+    int head;
+    unsigned int out_num = 0, in_num = 0;
+    unsigned int num_queued;
+
+    for (;;) {
+        /* Disable guest->host notifies to avoid unnecessary vmexits */
+        vring_disable_notification(s->vdev, &s->vring);
+
+        for (;;) {
+            head = vring_pop(s->vdev, &s->vring, iov, end, &out_num, &in_num);
+            if (head < 0) {
+                break; /* no more requests */
+            }
+
+            trace_virtio_blk_data_plane_process_request(s, out_num, in_num,
+                                                        head);
+
+            if (process_request(&s->ioqueue, iov, out_num, in_num, head) < 0) {
+                vring_set_broken(&s->vring);
+                break;
+            }
+            iov += out_num + in_num;
+        }
+
+        if (likely(head == -EAGAIN)) { /* vring emptied */
+            /* Re-enable guest->host notifies and stop processing the vring.
+             * But if the guest has snuck in more descriptors, keep processing.
+             */
+            if (vring_enable_notification(s->vdev, &s->vring)) {
+                break;
+            }
+        } else { /* head == -ENOBUFS or fatal error, iovecs[] is depleted */
+            /* Since there are no iovecs[] left, stop processing for now.  Do
+             * not re-enable guest->host notifies since the I/O completion
+             * handler knows to check for more vring descriptors anyway.
+             */
+            break;
+        }
+    }
+
+    num_queued = ioq_num_queued(&s->ioqueue);
+    if (num_queued > 0) {
+        s->num_reqs += num_queued;
+
+        int rc = ioq_submit(&s->ioqueue);
+        if (unlikely(rc < 0)) {
+            fprintf(stderr, "ioq_submit failed %d\n", rc);
+            exit(1);
+        }
+    }
+}
+
+static void handle_io(EventHandler *handler)
+{
+    VirtIOBlockDataPlane *s = container_of(handler, VirtIOBlockDataPlane,
+                                           io_handler);
+
+    if (ioq_run_completion(&s->ioqueue, complete_request, s) > 0) {
+        notify_guest(s);
+    }
+
+    /* If there were more requests than iovecs, the vring will not be empty yet
+     * so check again.  There should now be enough resources to process more
+     * requests.
+     */
+    if (unlikely(vring_more_avail(&s->vring))) {
+        handle_notify(&s->notify_handler);
+    }
+}
+
+static void *data_plane_thread(void *opaque)
+{
+    VirtIOBlockDataPlane *s = opaque;
+
+    do {
+        event_poll(&s->event_poll);
+    } while (s->started || s->num_reqs > 0);
+    return NULL;
+}
+
+static void start_data_plane_bh(void *opaque)
+{
+    VirtIOBlockDataPlane *s = opaque;
+
+    qemu_bh_delete(s->start_bh);
+    s->start_bh = NULL;
+    qemu_thread_create(&s->thread, data_plane_thread,
+                       s, QEMU_THREAD_JOINABLE);
+}
+
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
+                                  VirtIOBlockDataPlane **dataplane)
+{
+    VirtIOBlockDataPlane *s;
+    int fd;
+
+    *dataplane = NULL;
+
+    if (!blk->data_plane) {
+        return true;
+    }
+
+    if (blk->scsi) {
+        error_report("device is incompatible with x-data-plane, use scsi=off");
+        return false;
+    }
+
+    if (blk->config_wce) {
+        error_report("device is incompatible with x-data-plane, "
+                     "use config-wce=off");
+        return false;
+    }
+
+    fd = raw_get_aio_fd(blk->conf.bs);
+    if (fd < 0) {
+        error_report("drive is incompatible with x-data-plane, "
+                     "use format=raw,cache=none,aio=native");
+        return false;
+    }
+
+    s = g_new0(VirtIOBlockDataPlane, 1);
+    s->vdev = vdev;
+    s->fd = fd;
+    s->blk = blk;
+
+    /* Prevent block operations that conflict with data plane thread */
+    bdrv_set_in_use(blk->conf.bs, 1);
+
+    error_setg(&s->migration_blocker,
+            "x-data-plane does not support migration");
+    migrate_add_blocker(s->migration_blocker);
+
+    *dataplane = s;
+    return true;
+}
+
+void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
+{
+    if (!s) {
+        return;
+    }
+
+    virtio_blk_data_plane_stop(s);
+    migrate_del_blocker(s->migration_blocker);
+    error_free(s->migration_blocker);
+    bdrv_set_in_use(s->blk->conf.bs, 0);
+    g_free(s);
+}
+
+void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
+{
+    VirtQueue *vq;
+    int i;
+
+    if (s->started) {
+        return;
+    }
+
+    vq = virtio_get_queue(s->vdev, 0);
+    if (!vring_setup(&s->vring, s->vdev, 0)) {
+        return;
+    }
+
+    event_poll_init(&s->event_poll);
+
+    /* Set up guest notifier (irq) */
+    if (s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque,
+                                              true) != 0) {
+        fprintf(stderr, "virtio-blk failed to set guest notifier, "
+                "ensure -enable-kvm is set\n");
+        exit(1);
+    }
+    s->guest_notifier = virtio_queue_get_guest_notifier(vq);
+
+    /* Set up virtqueue notify */
+    if (s->vdev->binding->set_host_notifier(s->vdev->binding_opaque,
+                                            0, true) != 0) {
+        fprintf(stderr, "virtio-blk failed to set host notifier\n");
+        exit(1);
+    }
+    event_poll_add(&s->event_poll, &s->notify_handler,
+                   virtio_queue_get_host_notifier(vq),
+                   handle_notify);
+
+    /* Set up ioqueue */
+    ioq_init(&s->ioqueue, s->fd, REQ_MAX);
+    for (i = 0; i < ARRAY_SIZE(s->requests); i++) {
+        ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
+    }
+    event_poll_add(&s->event_poll, &s->io_handler,
+                   ioq_get_notifier(&s->ioqueue), handle_io);
+
+    s->started = true;
+    trace_virtio_blk_data_plane_start(s);
+
+    /* Kick right away to begin processing requests already in vring */
+    event_notifier_set(virtio_queue_get_host_notifier(vq));
+
+    /* Spawn thread in BH so it inherits iothread cpusets */
+    s->start_bh = qemu_bh_new(start_data_plane_bh, s);
+    qemu_bh_schedule(s->start_bh);
+}
+
+void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
+{
+    if (!s->started) {
+        return;
+    }
+    s->started = false;
+    trace_virtio_blk_data_plane_stop(s);
+
+    /* Stop thread or cancel pending thread creation BH */
+    if (s->start_bh) {
+        qemu_bh_delete(s->start_bh);
+        s->start_bh = NULL;
+    } else {
+        event_poll_notify(&s->event_poll);
+        qemu_thread_join(&s->thread);
+    }
+
+    ioq_cleanup(&s->ioqueue);
+
+    s->vdev->binding->set_host_notifier(s->vdev->binding_opaque, 0, false);
+
+    event_poll_cleanup(&s->event_poll);
+
+    /* Clean up guest notifier (irq) */
+    s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, false);
+
+    vring_teardown(&s->vring);
+}
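
For reference, the request layout that process_request() above parses.  A
virtio-blk request is a descriptor chain with a device-read-only header in
front, data buffers in the middle, and a one-byte device-write-only status
footer at the end (these definitions mirror the virtio-blk headers included
by this file):

/* Leading descriptor(s), device-read-only -- counted in out_num */
struct virtio_blk_outhdr {
    uint32_t type;      /* VIRTIO_BLK_T_IN/OUT/FLUSH/GET_ID/SCSI_CMD */
    uint32_t ioprio;    /* request priority, unused here */
    uint64_t sector;    /* offset in 512-byte sectors */
};

/* Middle descriptors: data buffers; device-write-only for reads (in_num),
 * device-read-only for writes (out_num) */

/* Final descriptor, device-write-only -- the inhdr grabbed above */
struct virtio_blk_inhdr {
    uint8_t status;     /* VIRTIO_BLK_S_OK, _IOERR, or _UNSUPP */
};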
diff --git a/hw/dataplane/virtio-blk.h b/hw/dataplane/virtio-blk.h
new file mode 100644
index 0000000000..1e8fdfe418
--- /dev/null
+++ b/hw/dataplane/virtio-blk.h
@@ -0,0 +1,29 @@
+/*
+ * Dedicated thread for virtio-blk I/O processing
+ *
+ * Copyright 2012 IBM, Corp.
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *   Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HW_DATAPLANE_VIRTIO_BLK_H
+#define HW_DATAPLANE_VIRTIO_BLK_H
+
+#include "hw/virtio.h"
+
+typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane;
+
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
+                                  VirtIOBlockDataPlane **dataplane);
+void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
+
+#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
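
The expected lifecycle from the device model's point of view, distilled into
a sketch (the real call sites live in hw/virtio-blk.c, which is outside this
diff, so the surrounding logic here is assumed):

#include "hw/virtio-blk.h"
#include "hw/dataplane/virtio-blk.h"

static void example_lifecycle(VirtIODevice *vdev, VirtIOBlkConf *blk)
{
    VirtIOBlockDataPlane *dataplane;

    /* At device init: returns false if x-data-plane was requested but the
     * configuration is incompatible; returns true with dataplane == NULL
     * when x-data-plane is simply disabled. */
    if (!virtio_blk_data_plane_create(vdev, blk, &dataplane)) {
        return; /* abort device creation */
    }

    /* On the first virtqueue kick: map the vring, spawn the I/O thread */
    virtio_blk_data_plane_start(dataplane);

    /* On reset/vmstop: join the thread, hand the virtqueue back to QEMU */
    virtio_blk_data_plane_stop(dataplane);

    /* At device teardown: stops if needed, drops the migration blocker */
    virtio_blk_data_plane_destroy(dataplane);
}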
diff --git a/hw/dataplane/vring.c b/hw/dataplane/vring.c
new file mode 100644
index 0000000000..d5d4ef45d1
--- /dev/null
+++ b/hw/dataplane/vring.c
@@ -0,0 +1,362 @@
+/* Copyright 2012 Red Hat, Inc.
+ * Copyright IBM, Corp. 2012
+ *
+ * Based on Linux 2.6.39 vhost code:
+ * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *         Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include "trace.h"
+#include "hw/dataplane/vring.h"
+
+/* Map the guest's vring to host memory */
+bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
+{
+    hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n);
+    hwaddr vring_size = virtio_queue_get_ring_size(vdev, n);
+    void *vring_ptr;
+
+    vring->broken = false;
+
+    hostmem_init(&vring->hostmem);
+    vring_ptr = hostmem_lookup(&vring->hostmem, vring_addr, vring_size, true);
+    if (!vring_ptr) {
+        error_report("Failed to map vring "
+                     "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
+                     vring_addr, vring_size);
+        vring->broken = true;
+        return false;
+    }
+
+    vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);
+
+    vring->last_avail_idx = 0;
+    vring->last_used_idx = 0;
+    vring->signalled_used = 0;
+    vring->signalled_used_valid = false;
+
+    trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
+                      vring->vr.desc, vring->vr.avail, vring->vr.used);
+    return true;
+}
+
+void vring_teardown(Vring *vring)
+{
+    hostmem_finalize(&vring->hostmem);
+}
+
+/* Disable guest->host notifies */
+void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
+{
+    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
+        vring->vr.used->flags |= VRING_USED_F_NO_NOTIFY;
+    }
+}
+
+/* Enable guest->host notifies
+ *
+ * Return true if the vring is empty, false if there are more requests.
+ */
+bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
+{
+    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+        vring_avail_event(&vring->vr) = vring->vr.avail->idx;
+    } else {
+        vring->vr.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+    }
+    smp_mb(); /* ensure update is seen before reading avail_idx */
+    return !vring_more_avail(vring);
+}
+
+/* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
+bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
+{
+    uint16_t old, new;
+    bool v;
+    /* Flush out used index updates. This is paired
+     * with the barrier that the Guest executes when enabling
+     * interrupts. */
+    smp_mb();
+
+    if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
+        unlikely(vring->vr.avail->idx == vring->last_avail_idx)) {
+        return true;
+    }
+
+    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
+        return !(vring->vr.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
+    }
+    old = vring->signalled_used;
+    v = vring->signalled_used_valid;
+    new = vring->signalled_used = vring->last_used_idx;
+    vring->signalled_used_valid = true;
+
+    if (unlikely(!v)) {
+        return true;
+    }
+
+    return vring_need_event(vring_used_event(&vring->vr), new, old);
+}
+
+/* This is stolen from linux/drivers/vhost/vhost.c. */
+static int get_indirect(Vring *vring,
+                        struct iovec iov[], struct iovec *iov_end,
+                        unsigned int *out_num, unsigned int *in_num,
+                        struct vring_desc *indirect)
+{
+    struct vring_desc desc;
+    unsigned int i = 0, count, found = 0;
+
+    /* Sanity check */
+    if (unlikely(indirect->len % sizeof(desc))) {
+        error_report("Invalid length in indirect descriptor: "
+                     "len %#x not multiple of %#zx",
+                     indirect->len, sizeof(desc));
+        vring->broken = true;
+        return -EFAULT;
+    }
+
+    count = indirect->len / sizeof(desc);
+    /* Buffers are chained via a 16 bit next field, so
+     * we can have at most 2^16 of these. */
+    if (unlikely(count > USHRT_MAX + 1)) {
+        error_report("Indirect buffer length too big: %d", indirect->len);
+        vring->broken = true;
+        return -EFAULT;
+    }
+
+    do {
+        struct vring_desc *desc_ptr;
+
+        /* Translate indirect descriptor */
+        desc_ptr = hostmem_lookup(&vring->hostmem,
+                                  indirect->addr + found * sizeof(desc),
+                                  sizeof(desc), false);
+        if (!desc_ptr) {
+            error_report("Failed to map indirect descriptor "
+                         "addr %#" PRIx64 " len %zu",
+                         (uint64_t)indirect->addr + found * sizeof(desc),
+                         sizeof(desc));
+            vring->broken = true;
+            return -EFAULT;
+        }
+        desc = *desc_ptr;
+
+        /* Ensure descriptor has been loaded before accessing fields */
+        barrier(); /* read_barrier_depends(); */
+
+        if (unlikely(++found > count)) {
+            error_report("Loop detected: last one at %u "
+                         "indirect size %u", i, count);
+            vring->broken = true;
+            return -EFAULT;
+        }
+
+        if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+            error_report("Nested indirect descriptor");
+            vring->broken = true;
+            return -EFAULT;
+        }
+
+        /* Stop for now if there are not enough iovecs available. */
+        if (iov >= iov_end) {
+            return -ENOBUFS;
+        }
+
+        iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
+                                       desc.flags & VRING_DESC_F_WRITE);
+        if (!iov->iov_base) {
+            error_report("Failed to map indirect descriptor"
+                         "addr %#" PRIx64 " len %u",
+                         (uint64_t)desc.addr, desc.len);
+            vring->broken = true;
+            return -EFAULT;
+        }
+        iov->iov_len = desc.len;
+        iov++;
+
+        /* If this is an input descriptor, increment that count. */
+        if (desc.flags & VRING_DESC_F_WRITE) {
+            *in_num += 1;
+        } else {
+            /* If it's an output descriptor, they're all supposed
+             * to come before any input descriptors. */
+            if (unlikely(*in_num)) {
+                error_report("Indirect descriptor "
+                             "has out after in: idx %u", i);
+                vring->broken = true;
+                return -EFAULT;
+            }
+            *out_num += 1;
+        }
+        i = desc.next;
+    } while (desc.flags & VRING_DESC_F_NEXT);
+    return 0;
+}
+
+/* This looks in the virtqueue for the first available buffer and converts it
+ * to an iovec for convenient access.  Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number of the head on success.  A
+ * negative code is returned on error: -EAGAIN if the vring is empty, -ENOBUFS
+ * if the iovecs are exhausted, -EFAULT on a fatal error (which also marks the
+ * vring broken).
+ *
+ * Stolen from linux/drivers/vhost/vhost.c.
+ */
+int vring_pop(VirtIODevice *vdev, Vring *vring,
+              struct iovec iov[], struct iovec *iov_end,
+              unsigned int *out_num, unsigned int *in_num)
+{
+    struct vring_desc desc;
+    unsigned int i, head, found = 0, num = vring->vr.num;
+    uint16_t avail_idx, last_avail_idx;
+
+    /* If there was a fatal error then refuse operation */
+    if (vring->broken) {
+        return -EFAULT;
+    }
+
+    /* Check it isn't doing very strange things with descriptor numbers. */
+    last_avail_idx = vring->last_avail_idx;
+    avail_idx = vring->vr.avail->idx;
+    barrier(); /* load indices now and not again later */
+
+    if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
+        error_report("Guest moved used index from %u to %u",
+                     last_avail_idx, avail_idx);
+        vring->broken = true;
+        return -EFAULT;
+    }
+
+    /* If there's nothing new since last we looked. */
+    if (avail_idx == last_avail_idx) {
+        return -EAGAIN;
+    }
+
+    /* Only get avail ring entries after they have been exposed by guest. */
+    smp_rmb();
+
+    /* Grab the next descriptor number they're advertising, and increment
+     * the index we've seen. */
+    head = vring->vr.avail->ring[last_avail_idx % num];
+
+    /* If their number is silly, that's an error. */
+    if (unlikely(head >= num)) {
+        error_report("Guest says index %u > %u is available", head, num);
+        vring->broken = true;
+        return -EFAULT;
+    }
+
+    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+        vring_avail_event(&vring->vr) = vring->vr.avail->idx;
+    }
+
+    /* When we start there are none of either input nor output. */
+    *out_num = *in_num = 0;
+
+    i = head;
+    do {
+        if (unlikely(i >= num)) {
+            error_report("Desc index is %u > %u, head = %u", i, num, head);
+            vring->broken = true;
+            return -EFAULT;
+        }
+        if (unlikely(++found > num)) {
+            error_report("Loop detected: last one at %u vq size %u head %u",
+                         i, num, head);
+            vring->broken = true;
+            return -EFAULT;
+        }
+        desc = vring->vr.desc[i];
+
+        /* Ensure descriptor is loaded before accessing fields */
+        barrier();
+
+        if (desc.flags & VRING_DESC_F_INDIRECT) {
+            int ret = get_indirect(vring, iov, iov_end, out_num, in_num, &desc);
+            if (ret < 0) {
+                return ret;
+            }
+            continue;
+        }
+
+        /* If there are not enough iovecs left, stop for now.  The caller
+         * should check if there are more descs available once they have dealt
+         * with the current set.
+         */
+        if (iov >= iov_end) {
+            return -ENOBUFS;
+        }
+
+        /* TODO handle non-contiguous memory across region boundaries */
+        iov->iov_base = hostmem_lookup(&vring->hostmem, desc.addr, desc.len,
+                                       desc.flags & VRING_DESC_F_WRITE);
+        if (!iov->iov_base) {
+            error_report("Failed to map vring desc addr %#" PRIx64 " len %u",
+                         (uint64_t)desc.addr, desc.len);
+            vring->broken = true;
+            return -EFAULT;
+        }
+        iov->iov_len  = desc.len;
+        iov++;
+
+        if (desc.flags & VRING_DESC_F_WRITE) {
+            /* If this is an input descriptor,
+             * increment that count. */
+            *in_num += 1;
+        } else {
+            /* If it's an output descriptor, they're all supposed
+             * to come before any input descriptors. */
+            if (unlikely(*in_num)) {
+                error_report("Descriptor has out after in: idx %d", i);
+                vring->broken = true;
+                return -EFAULT;
+            }
+            *out_num += 1;
+        }
+        i = desc.next;
+    } while (desc.flags & VRING_DESC_F_NEXT);
+
+    /* On success, increment avail index. */
+    vring->last_avail_idx++;
+    return head;
+}
+
+/* After we've used one of their buffers, we tell them about it.
+ *
+ * Stolen from linux/drivers/vhost/vhost.c.
+ */
+void vring_push(Vring *vring, unsigned int head, int len)
+{
+    struct vring_used_elem *used;
+    uint16_t new;
+
+    /* Don't touch vring if a fatal error occurred */
+    if (vring->broken) {
+        return;
+    }
+
+    /* The virtqueue contains a ring of used buffers.  Get a pointer to the
+     * next entry in that used ring. */
+    used = &vring->vr.used->ring[vring->last_used_idx % vring->vr.num];
+    used->id = head;
+    used->len = len;
+
+    /* Make sure buffer is written before we update index. */
+    smp_wmb();
+
+    new = vring->vr.used->idx = ++vring->last_used_idx;
+    if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
+        vring->signalled_used_valid = false;
+    }
+}
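
vring_should_notify() above ultimately defers to vring_need_event() from
<linux/virtio_ring.h>, whose unsigned wraparound test is easy to misread.  A
standalone demonstration with hypothetical indices:

#include <stdint.h>
#include <stdio.h>

/* Same definition as vring_need_event() in <linux/virtio_ring.h>: did the
 * used index cross event_idx on its way from old to new_idx? */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
    /* Guest asked to be notified at used index 5; we advanced 4 -> 6 and
     * crossed it, so an interrupt is needed. */
    printf("%d\n", need_event(5, 6, 4)); /* prints 1 */

    /* Guest asked for index 9; 4 -> 6 has not reached it yet, so the
     * interrupt is suppressed. */
    printf("%d\n", need_event(9, 6, 4)); /* prints 0 */
    return 0;
}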
diff --git a/hw/dataplane/vring.h b/hw/dataplane/vring.h
new file mode 100644
index 0000000000..3274f623f5
--- /dev/null
+++ b/hw/dataplane/vring.h
@@ -0,0 +1,62 @@
+/* Copyright 2012 Red Hat, Inc. and/or its affiliates
+ * Copyright IBM, Corp. 2012
+ *
+ * Based on Linux 2.6.39 vhost code:
+ * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *         Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#ifndef VRING_H
+#define VRING_H
+
+#include <linux/virtio_ring.h>
+#include "qemu-common.h"
+#include "hw/dataplane/hostmem.h"
+#include "hw/virtio.h"
+
+typedef struct {
+    HostMem hostmem;                /* guest memory mapper */
+    struct vring vr;                /* virtqueue vring mapped to host memory */
+    uint16_t last_avail_idx;        /* last processed avail ring index */
+    uint16_t last_used_idx;         /* last processed used ring index */
+    uint16_t signalled_used;        /* EVENT_IDX state */
+    bool signalled_used_valid;
+    bool broken;                    /* was there a fatal error? */
+} Vring;
+
+static inline unsigned int vring_get_num(Vring *vring)
+{
+    return vring->vr.num;
+}
+
+/* Are there more descriptors available? */
+static inline bool vring_more_avail(Vring *vring)
+{
+    return vring->vr.avail->idx != vring->last_avail_idx;
+}
+
+/* Fail future vring_pop() and vring_push() calls until reset */
+static inline void vring_set_broken(Vring *vring)
+{
+    vring->broken = true;
+}
+
+bool vring_setup(Vring *vring, VirtIODevice *vdev, int n);
+void vring_teardown(Vring *vring);
+void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
+bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
+bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
+int vring_pop(VirtIODevice *vdev, Vring *vring,
+              struct iovec iov[], struct iovec *iov_end,
+              unsigned int *out_num, unsigned int *in_num);
+void vring_push(Vring *vring, unsigned int head, int len);
+
+#endif /* VRING_H */
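
A distilled version of the consumer loop in hw/dataplane/virtio-blk.c above,
showing how the pieces fit together (notification suppression and request
batching are trimmed; process() and the interrupt delivery are placeholders):

#include "hw/dataplane/vring.h"

static void drain_vring(VirtIODevice *vdev, Vring *vring)
{
    struct iovec iov[128], *end = &iov[128];
    unsigned int out_num, in_num;
    int head;

    for (;;) {
        head = vring_pop(vdev, vring, iov, end, &out_num, &in_num);
        if (head < 0) {
            break; /* -EAGAIN: empty, -ENOBUFS: retry later, else broken */
        }
        /* process(iov, out_num, in_num); then complete the request: */
        vring_push(vring, head, 0 /* bytes written into the in iovecs */);
    }

    if (vring_should_notify(vdev, vring)) {
        /* raise the guest interrupt, e.g. via the irq EventNotifier */
    }
}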