author     Paolo Bonzini <pbonzini@redhat.com>    2013-03-01 13:59:19 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2013-04-08 18:13:12 +0200
commit     49ab747f668f421138d5b40d83fa279c4c5e278d
tree       943225a04eac885aed038731adf058f2250a2f40 /hw/dma
parent     ce3b494cb504f96992f2d37ebc8f56deed202b06
hw: move target-independent files to subdirectories
This patch tackles all files that are compiled once, moving
them to subdirectories of hw/.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'hw/dma')
-rw-r--r--  hw/dma/Makefile.objs       7
-rw-r--r--  hw/dma/i82374.c          168
-rw-r--r--  hw/dma/i8257.c           600
-rw-r--r--  hw/dma/pl080.c           421
-rw-r--r--  hw/dma/pl330.c          1653
-rw-r--r--  hw/dma/puv3_dma.c        109
-rw-r--r--  hw/dma/rc4030.c          825
-rw-r--r--  hw/dma/xilinx_axidma.c   523
8 files changed, 4306 insertions, 0 deletions
diff --git a/hw/dma/Makefile.objs b/hw/dma/Makefile.objs
index e69de29bb2..bce31cdf87 100644
--- a/hw/dma/Makefile.objs
+++ b/hw/dma/Makefile.objs
@@ -0,0 +1,7 @@
+common-obj-$(CONFIG_PUV3) += puv3_dma.o
+common-obj-$(CONFIG_RC4030) += rc4030.o
+common-obj-$(CONFIG_PL080) += pl080.o
+common-obj-$(CONFIG_PL330) += pl330.o
+common-obj-$(CONFIG_I82374) += i82374.o
+common-obj-$(CONFIG_I8257) += i8257.o
+common-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o
diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c
new file mode 100644
index 0000000000..835639d43c
--- /dev/null
+++ b/hw/dma/i82374.c
@@ -0,0 +1,168 @@
+/*
+ * QEMU Intel 82374 emulation (Enhanced DMA controller)
+ *
+ * Copyright (c) 2010 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/isa/isa.h"
+
+//#define DEBUG_I82374
+
+#ifdef DEBUG_I82374
+#define DPRINTF(fmt, ...) \
+do { fprintf(stderr, "i82374: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+do {} while (0)
+#endif
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "i82374 ERROR: " fmt , ## __VA_ARGS__); } while (0)
+
+typedef struct I82374State {
+    uint8_t commands[8];
+    qemu_irq out;
+} I82374State;
+
+static const VMStateDescription vmstate_i82374 = {
+    .name = "i82374",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8_ARRAY(commands, I82374State, 8),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static uint32_t i82374_read_isr(void *opaque, uint32_t nport)
+{
+    uint32_t val = 0;
+
+    BADF("%s: %08x\n", __func__, nport);
+
+    DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+    return val;
+}
+
+static void i82374_write_command(void *opaque, uint32_t nport, uint32_t data)
+{
+    DPRINTF("%s: %08x=%08x\n", __func__, nport, data);
+
+    if (data != 0x42) {
+        /* Not Stop S/G command */
+        BADF("%s: %08x=%08x\n", __func__, nport, data);
+    }
+}
+
+static uint32_t i82374_read_status(void *opaque, uint32_t nport)
+{
+    uint32_t val = 0;
+
+    BADF("%s: %08x\n", __func__, nport);
+
+    DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+    return val;
+}
+
+static void i82374_write_descriptor(void *opaque, uint32_t nport, uint32_t data)
+{
+    DPRINTF("%s: %08x=%08x\n", __func__, nport, data);
+
+    BADF("%s: %08x=%08x\n", __func__, nport, data);
+}
+
+static uint32_t i82374_read_descriptor(void *opaque, uint32_t nport)
+{
+    uint32_t val = 0;
+
+    BADF("%s: %08x\n", __func__, nport);
+
+    DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+    return val;
+}
+
+static void i82374_init(I82374State *s)
+{
+    DMA_init(1, &s->out);
+    memset(s->commands, 0, sizeof(s->commands));
+}
+
+typedef struct ISAi82374State {
+    ISADevice dev;
+    uint32_t iobase;
+    I82374State state;
+} ISAi82374State;
+
+static const VMStateDescription vmstate_isa_i82374 = {
+    .name = "isa-i82374",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT(state, ISAi82374State, 0, vmstate_i82374, I82374State),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static int i82374_isa_init(ISADevice *dev)
+{
+    ISAi82374State *isa = DO_UPCAST(ISAi82374State, dev, dev);
+    I82374State *s = &isa->state;
+
+    register_ioport_read(isa->iobase + 0x0A, 1, 1, i82374_read_isr, s);
+    register_ioport_write(isa->iobase + 0x10, 8, 1, i82374_write_command, s);
+    register_ioport_read(isa->iobase + 0x18, 8, 1, i82374_read_status, s);
+    register_ioport_write(isa->iobase + 0x20, 0x20, 1, i82374_write_descriptor, s);
+    register_ioport_read(isa->iobase + 0x20, 0x20, 1, i82374_read_descriptor, s);
+
+    i82374_init(s);
+
+    qdev_init_gpio_out(&dev->qdev, &s->out, 1);
+
+    return 0;
+}
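+
+/*
+ * Illustrative summary, not part of the original patch: the registrations
+ * above give the 82374 the following I/O map relative to "iobase"
+ * (default 0x400, see i82374_properties below):
+ *
+ *   iobase + 0x0a          ISR                  (i82374_read_isr)
+ *   iobase + 0x10 .. 0x17  command registers    (i82374_write_command)
+ *   iobase + 0x18 .. 0x1f  status registers     (i82374_read_status)
+ *   iobase + 0x20 .. 0x3f  descriptor registers (i82374_read/write_descriptor)
+ */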
+
+static Property i82374_properties[] = {
+    DEFINE_PROP_HEX32("iobase", ISAi82374State, iobase, 0x400),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void i82374_class_init(ObjectClass *klass, void *data)
+{
+    ISADeviceClass *k = ISA_DEVICE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    
+    k->init  = i82374_isa_init;
+    dc->vmsd = &vmstate_isa_i82374;
+    dc->props = i82374_properties;
+}
+
+static const TypeInfo i82374_isa_info = {
+    .name  = "i82374",
+    .parent = TYPE_ISA_DEVICE,
+    .instance_size  = sizeof(ISAi82374State),
+    .class_init = i82374_class_init,
+};
+
+static void i82374_register_types(void)
+{
+    type_register_static(&i82374_isa_info);
+}
+
+type_init(i82374_register_types)
diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c
new file mode 100644
index 0000000000..eb60d45178
--- /dev/null
+++ b/hw/dma/i8257.c
@@ -0,0 +1,600 @@
+/*
+ * QEMU DMA emulation
+ *
+ * Copyright (c) 2003-2004 Vassili Karpov (malc)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "qemu/main-loop.h"
+
+/* #define DEBUG_DMA */
+
+#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#ifdef DEBUG_DMA
+#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#else
+#define linfo(...)
+#define ldebug(...)
+#endif
+
+struct dma_regs {
+    int now[2];
+    uint16_t base[2];
+    uint8_t mode;
+    uint8_t page;
+    uint8_t pageh;
+    uint8_t dack;
+    uint8_t eop;
+    DMA_transfer_handler transfer_handler;
+    void *opaque;
+};
+
+#define ADDR 0
+#define COUNT 1
+
+static struct dma_cont {
+    uint8_t status;
+    uint8_t command;
+    uint8_t mask;
+    uint8_t flip_flop;
+    int dshift;
+    struct dma_regs regs[4];
+    qemu_irq *cpu_request_exit;
+    MemoryRegion channel_io;
+    MemoryRegion cont_io;
+} dma_controllers[2];
+
+enum {
+    CMD_MEMORY_TO_MEMORY = 0x01,
+    CMD_FIXED_ADDRESS    = 0x02,
+    CMD_BLOCK_CONTROLLER = 0x04,
+    CMD_COMPRESSED_TIME  = 0x08,
+    CMD_CYCLIC_PRIORITY  = 0x10,
+    CMD_EXTENDED_WRITE   = 0x20,
+    CMD_LOW_DREQ         = 0x40,
+    CMD_LOW_DACK         = 0x80,
+    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
+    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
+    | CMD_LOW_DREQ | CMD_LOW_DACK
+
+};
+
+static void DMA_run (void);
+
+static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
+
+static void write_page (void *opaque, uint32_t nport, uint32_t data)
+{
+    struct dma_cont *d = opaque;
+    int ichan;
+
+    ichan = channels[nport & 7];
+    if (-1 == ichan) {
+        dolog ("invalid channel %#x %#x\n", nport, data);
+        return;
+    }
+    d->regs[ichan].page = data;
+}
+
+static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
+{
+    struct dma_cont *d = opaque;
+    int ichan;
+
+    ichan = channels[nport & 7];
+    if (-1 == ichan) {
+        dolog ("invalid channel %#x %#x\n", nport, data);
+        return;
+    }
+    d->regs[ichan].pageh = data;
+}
+
+static uint32_t read_page (void *opaque, uint32_t nport)
+{
+    struct dma_cont *d = opaque;
+    int ichan;
+
+    ichan = channels[nport & 7];
+    if (-1 == ichan) {
+        dolog ("invalid channel read %#x\n", nport);
+        return 0;
+    }
+    return d->regs[ichan].page;
+}
+
+static uint32_t read_pageh (void *opaque, uint32_t nport)
+{
+    struct dma_cont *d = opaque;
+    int ichan;
+
+    ichan = channels[nport & 7];
+    if (-1 == ichan) {
+        dolog ("invalid channel read %#x\n", nport);
+        return 0;
+    }
+    return d->regs[ichan].pageh;
+}
+
+static inline void init_chan (struct dma_cont *d, int ichan)
+{
+    struct dma_regs *r;
+
+    r = d->regs + ichan;
+    r->now[ADDR] = r->base[ADDR] << d->dshift;
+    r->now[COUNT] = 0;
+}
+
+static inline int getff (struct dma_cont *d)
+{
+    int ff;
+
+    ff = d->flip_flop;
+    d->flip_flop = !ff;
+    return ff;
+}
+
+static uint64_t read_chan(void *opaque, hwaddr nport, unsigned size)
+{
+    struct dma_cont *d = opaque;
+    int ichan, nreg, iport, ff, val, dir;
+    struct dma_regs *r;
+
+    iport = (nport >> d->dshift) & 0x0f;
+    ichan = iport >> 1;
+    nreg = iport & 1;
+    r = d->regs + ichan;
+
+    dir = ((r->mode >> 5) & 1) ? -1 : 1;
+    ff = getff (d);
+    if (nreg)
+        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
+    else
+        val = r->now[ADDR] + r->now[COUNT] * dir;
+
+    ldebug ("read_chan %#x -> %d\n", iport, val);
+    return (val >> (d->dshift + (ff << 3))) & 0xff;
+}
+
+static void write_chan(void *opaque, hwaddr nport, uint64_t data,
+                       unsigned size)
+{
+    struct dma_cont *d = opaque;
+    int iport, ichan, nreg;
+    struct dma_regs *r;
+
+    iport = (nport >> d->dshift) & 0x0f;
+    ichan = iport >> 1;
+    nreg = iport & 1;
+    r = d->regs + ichan;
+    if (getff (d)) {
+        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
+        init_chan (d, ichan);
+    } else {
+        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
+    }
+}
+
+static void write_cont(void *opaque, hwaddr nport, uint64_t data,
+                       unsigned size)
+{
+    struct dma_cont *d = opaque;
+    int iport, ichan = 0;
+
+    iport = (nport >> d->dshift) & 0x0f;
+    switch (iport) {
+    case 0x00:                  /* command */
+        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
+            dolog("command %"PRIx64" not supported\n", data);
+            return;
+        }
+        d->command = data;
+        break;
+
+    case 0x01:
+        ichan = data & 3;
+        if (data & 4) {
+            d->status |= 1 << (ichan + 4);
+        }
+        else {
+            d->status &= ~(1 << (ichan + 4));
+        }
+        d->status &= ~(1 << ichan);
+        DMA_run();
+        break;
+
+    case 0x02:                  /* single mask */
+        if (data & 4)
+            d->mask |= 1 << (data & 3);
+        else
+            d->mask &= ~(1 << (data & 3));
+        DMA_run();
+        break;
+
+    case 0x03:                  /* mode */
+        {
+            ichan = data & 3;
+#ifdef DEBUG_DMA
+            {
+                int op, ai, dir, opmode;
+                op = (data >> 2) & 3;
+                ai = (data >> 4) & 1;
+                dir = (data >> 5) & 1;
+                opmode = (data >> 6) & 3;
+
+                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
+                       ichan, op, ai, dir, opmode);
+            }
+#endif
+            d->regs[ichan].mode = data;
+            break;
+        }
+
+    case 0x04:                  /* clear flip flop */
+        d->flip_flop = 0;
+        break;
+
+    case 0x05:                  /* reset */
+        d->flip_flop = 0;
+        d->mask = ~0;
+        d->status = 0;
+        d->command = 0;
+        break;
+
+    case 0x06:                  /* clear mask for all channels */
+        d->mask = 0;
+        DMA_run();
+        break;
+
+    case 0x07:                  /* write mask for all channels */
+        d->mask = data;
+        DMA_run();
+        break;
+
+    default:
+        dolog ("unknown iport %#x\n", iport);
+        break;
+    }
+
+#ifdef DEBUG_DMA
+    if (0xc != iport) {
+        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
+               nport, ichan, data);
+    }
+#endif
+}
+
+static uint64_t read_cont(void *opaque, hwaddr nport, unsigned size)
+{
+    struct dma_cont *d = opaque;
+    int iport, val;
+
+    iport = (nport >> d->dshift) & 0x0f;
+    switch (iport) {
+    case 0x00:                  /* status */
+        val = d->status;
+        d->status &= 0xf0;
+        break;
+    case 0x01:                  /* mask */
+        val = d->mask;
+        break;
+    default:
+        val = 0;
+        break;
+    }
+
+    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
+    return val;
+}
+
+int DMA_get_channel_mode (int nchan)
+{
+    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
+}
+
+void DMA_hold_DREQ (int nchan)
+{
+    int ncont, ichan;
+
+    ncont = nchan > 3;
+    ichan = nchan & 3;
+    linfo ("held cont=%d chan=%d\n", ncont, ichan);
+    dma_controllers[ncont].status |= 1 << (ichan + 4);
+    DMA_run();
+}
+
+void DMA_release_DREQ (int nchan)
+{
+    int ncont, ichan;
+
+    ncont = nchan > 3;
+    ichan = nchan & 3;
+    linfo ("released cont=%d chan=%d\n", ncont, ichan);
+    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
+    DMA_run();
+}
+
+static void channel_run (int ncont, int ichan)
+{
+    int n;
+    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
+#ifdef DEBUG_DMA
+    int dir, opmode;
+
+    dir = (r->mode >> 5) & 1;
+    opmode = (r->mode >> 6) & 3;
+
+    if (dir) {
+        dolog ("DMA in address decrement mode\n");
+    }
+    if (opmode != 1) {
+        dolog ("DMA not in single mode select %#x\n", opmode);
+    }
+#endif
+
+    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
+                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
+    r->now[COUNT] = n;
+    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
+}
+
+static QEMUBH *dma_bh;
+
+static void DMA_run (void)
+{
+    struct dma_cont *d;
+    int icont, ichan;
+    int rearm = 0;
+    static int running = 0;
+
+    if (running) {
+        rearm = 1;
+        goto out;
+    } else {
+        running = 1;
+    }
+
+    d = dma_controllers;
+
+    for (icont = 0; icont < 2; icont++, d++) {
+        for (ichan = 0; ichan < 4; ichan++) {
+            int mask;
+
+            mask = 1 << ichan;
+
+            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
+                channel_run (icont, ichan);
+                rearm = 1;
+            }
+        }
+    }
+
+    running = 0;
+out:
+    if (rearm)
+        qemu_bh_schedule_idle(dma_bh);
+}
+
+static void DMA_run_bh(void *unused)
+{
+    DMA_run();
+}
+
+void DMA_register_channel (int nchan,
+                           DMA_transfer_handler transfer_handler,
+                           void *opaque)
+{
+    struct dma_regs *r;
+    int ichan, ncont;
+
+    ncont = nchan > 3;
+    ichan = nchan & 3;
+
+    r = dma_controllers[ncont].regs + ichan;
+    r->transfer_handler = transfer_handler;
+    r->opaque = opaque;
+}
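+
+/*
+ * Illustrative usage sketch, not part of the original patch: an ISA device
+ * model that owns DMA channel 2 (the channel number is only an example)
+ * would typically do
+ *
+ *     DMA_register_channel(2, my_transfer_handler, my_state);
+ *     ...
+ *     DMA_hold_DREQ(2);       // triggers DMA_run(), which calls back
+ *     ...                     // my_transfer_handler(opaque, nchan,
+ *     DMA_release_DREQ(2);    //                     dma_pos, dma_len)
+ *
+ * my_transfer_handler and my_state are hypothetical names; any function
+ * matching the DMA_transfer_handler signature works.
+ */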
+
+int DMA_read_memory (int nchan, void *buf, int pos, int len)
+{
+    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
+    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
+
+    if (r->mode & 0x20) {
+        int i;
+        uint8_t *p = buf;
+
+        cpu_physical_memory_read (addr - pos - len, buf, len);
+        /* What about 16bit transfers? */
+        for (i = 0; i < len >> 1; i++) {
+            uint8_t b = p[len - i - 1];
+            p[i] = b;
+        }
+    }
+    else
+        cpu_physical_memory_read (addr + pos, buf, len);
+
+    return len;
+}
+
+int DMA_write_memory (int nchan, void *buf, int pos, int len)
+{
+    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
+    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
+
+    if (r->mode & 0x20) {
+        int i;
+        uint8_t *p = buf;
+
+        cpu_physical_memory_write (addr - pos - len, buf, len);
+        /* What about 16bit transfers? */
+        for (i = 0; i < len; i++) {
+            uint8_t b = p[len - i - 1];
+            p[i] = b;
+        }
+    }
+    else
+        cpu_physical_memory_write (addr + pos, buf, len);
+
+    return len;
+}
+
+/* request the emulator to transfer a new DMA memory block ASAP */
+void DMA_schedule(int nchan)
+{
+    struct dma_cont *d = &dma_controllers[nchan > 3];
+
+    qemu_irq_pulse(*d->cpu_request_exit);
+}
+
+static void dma_reset(void *opaque)
+{
+    struct dma_cont *d = opaque;
+    write_cont(d, (0x05 << d->dshift), 0, 1);
+}
+
+static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
+{
+    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
+           nchan, dma_pos, dma_len);
+    return dma_pos;
+}
+
+
+static const MemoryRegionOps channel_io_ops = {
+    .read = read_chan,
+    .write = write_chan,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+};
+
+/* IOport from page_base */
+static const MemoryRegionPortio page_portio_list[] = {
+    { 0x01, 3, 1, .write = write_page, .read = read_page, },
+    { 0x07, 1, 1, .write = write_page, .read = read_page, },
+    PORTIO_END_OF_LIST(),
+};
+
+/* IOport from pageh_base */
+static const MemoryRegionPortio pageh_portio_list[] = {
+    { 0x01, 3, 1, .write = write_pageh, .read = read_pageh, },
+    { 0x07, 3, 1, .write = write_pageh, .read = read_pageh, },
+    PORTIO_END_OF_LIST(),
+};
+
+static const MemoryRegionOps cont_io_ops = {
+    .read = read_cont,
+    .write = write_cont,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+};
+
+/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
+static void dma_init2(struct dma_cont *d, int base, int dshift,
+                      int page_base, int pageh_base,
+                      qemu_irq *cpu_request_exit)
+{
+    int i;
+
+    d->dshift = dshift;
+    d->cpu_request_exit = cpu_request_exit;
+
+    memory_region_init_io(&d->channel_io, &channel_io_ops, d,
+                          "dma-chan", 8 << d->dshift);
+    memory_region_add_subregion(isa_address_space_io(NULL),
+                                base, &d->channel_io);
+
+    isa_register_portio_list(NULL, page_base, page_portio_list, d,
+                             "dma-page");
+    if (pageh_base >= 0) {
+        isa_register_portio_list(NULL, pageh_base, pageh_portio_list, d,
+                                 "dma-pageh");
+    }
+
+    memory_region_init_io(&d->cont_io, &cont_io_ops, d, "dma-cont",
+                          8 << d->dshift);
+    memory_region_add_subregion(isa_address_space_io(NULL),
+                                base + (8 << d->dshift), &d->cont_io);
+
+    qemu_register_reset(dma_reset, d);
+    dma_reset(d);
+    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
+        d->regs[i].transfer_handler = dma_phony_handler;
+    }
+}
+
+static const VMStateDescription vmstate_dma_regs = {
+    .name = "dma_regs",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
+        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
+        VMSTATE_UINT8(mode, struct dma_regs),
+        VMSTATE_UINT8(page, struct dma_regs),
+        VMSTATE_UINT8(pageh, struct dma_regs),
+        VMSTATE_UINT8(dack, struct dma_regs),
+        VMSTATE_UINT8(eop, struct dma_regs),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static int dma_post_load(void *opaque, int version_id)
+{
+    DMA_run();
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_dma = {
+    .name = "dma",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .post_load = dma_post_load,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT8(command, struct dma_cont),
+        VMSTATE_UINT8(mask, struct dma_cont),
+        VMSTATE_UINT8(flip_flop, struct dma_cont),
+        VMSTATE_INT32(dshift, struct dma_cont),
+        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs, struct dma_regs),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
+{
+    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
+              high_page_enable ? 0x480 : -1, cpu_request_exit);
+    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
+              high_page_enable ? 0x488 : -1, cpu_request_exit);
+    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
+    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);
+
+    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
+}
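+
+/*
+ * Illustrative summary, not part of the original patch: with the values used
+ * above, the resulting PC-style port layout is
+ *
+ *   controller 0 (8-bit,  dshift 0): channel registers at 0x00-0x07,
+ *       control registers at 0x08-0x0f, page registers based at 0x80 and
+ *       (when high_page_enable is set) high page registers based at 0x480
+ *   controller 1 (16-bit, dshift 1): channel registers at 0xc0-0xcf,
+ *       control registers at 0xd0-0xdf, page registers based at 0x88 and
+ *       high page registers based at 0x488
+ */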
diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c
new file mode 100644
index 0000000000..00b66b45b0
--- /dev/null
+++ b/hw/dma/pl080.c
@@ -0,0 +1,421 @@
+/*
+ * Arm PrimeCell PL080/PL081 DMA controller
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "hw/sysbus.h"
+
+#define PL080_MAX_CHANNELS 8
+#define PL080_CONF_E    0x1
+#define PL080_CONF_M1   0x2
+#define PL080_CONF_M2   0x4
+
+#define PL080_CCONF_H   0x40000
+#define PL080_CCONF_A   0x20000
+#define PL080_CCONF_L   0x10000
+#define PL080_CCONF_ITC 0x08000
+#define PL080_CCONF_IE  0x04000
+#define PL080_CCONF_E   0x00001
+
+#define PL080_CCTRL_I   0x80000000
+#define PL080_CCTRL_DI  0x08000000
+#define PL080_CCTRL_SI  0x04000000
+#define PL080_CCTRL_D   0x02000000
+#define PL080_CCTRL_S   0x01000000
+
+typedef struct {
+    uint32_t src;
+    uint32_t dest;
+    uint32_t lli;
+    uint32_t ctrl;
+    uint32_t conf;
+} pl080_channel;
+
+typedef struct {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    uint8_t tc_int;
+    uint8_t tc_mask;
+    uint8_t err_int;
+    uint8_t err_mask;
+    uint32_t conf;
+    uint32_t sync;
+    uint32_t req_single;
+    uint32_t req_burst;
+    pl080_channel chan[PL080_MAX_CHANNELS];
+    int nchannels;
+    /* Flag to avoid recursive DMA invocations.  */
+    int running;
+    qemu_irq irq;
+} pl080_state;
+
+static const VMStateDescription vmstate_pl080_channel = {
+    .name = "pl080_channel",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(src, pl080_channel),
+        VMSTATE_UINT32(dest, pl080_channel),
+        VMSTATE_UINT32(lli, pl080_channel),
+        VMSTATE_UINT32(ctrl, pl080_channel),
+        VMSTATE_UINT32(conf, pl080_channel),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_pl080 = {
+    .name = "pl080",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(tc_int, pl080_state),
+        VMSTATE_UINT8(tc_mask, pl080_state),
+        VMSTATE_UINT8(err_int, pl080_state),
+        VMSTATE_UINT8(err_mask, pl080_state),
+        VMSTATE_UINT32(conf, pl080_state),
+        VMSTATE_UINT32(sync, pl080_state),
+        VMSTATE_UINT32(req_single, pl080_state),
+        VMSTATE_UINT32(req_burst, pl080_state),
+        VMSTATE_UINT8(tc_int, pl080_state),
+        VMSTATE_UINT8(tc_int, pl080_state),
+        VMSTATE_UINT8(tc_int, pl080_state),
+        VMSTATE_STRUCT_ARRAY(chan, pl080_state, PL080_MAX_CHANNELS,
+                             1, vmstate_pl080_channel, pl080_channel),
+        VMSTATE_INT32(running, pl080_state),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const unsigned char pl080_id[] =
+{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
+
+static const unsigned char pl081_id[] =
+{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
+
+static void pl080_update(pl080_state *s)
+{
+    if ((s->tc_int & s->tc_mask)
+            || (s->err_int & s->err_mask))
+        qemu_irq_raise(s->irq);
+    else
+        qemu_irq_lower(s->irq);
+}
+
+static void pl080_run(pl080_state *s)
+{
+    int c;
+    int flow;
+    pl080_channel *ch;
+    int swidth;
+    int dwidth;
+    int xsize;
+    int n;
+    int src_id;
+    int dest_id;
+    int size;
+    uint8_t buff[4];
+    uint32_t req;
+
+    s->tc_mask = 0;
+    for (c = 0; c < s->nchannels; c++) {
+        if (s->chan[c].conf & PL080_CCONF_ITC)
+            s->tc_mask |= 1 << c;
+        if (s->chan[c].conf & PL080_CCONF_IE)
+            s->err_mask |= 1 << c;
+    }
+
+    if ((s->conf & PL080_CONF_E) == 0)
+        return;
+
+hw_error("DMA active\n");
+    /* If we are already in the middle of a DMA operation then indicate that
+       there may be new DMA requests and return immediately.  */
+    if (s->running) {
+        s->running++;
+        return;
+    }
+    s->running = 1;
+    while (s->running) {
+        for (c = 0; c < s->nchannels; c++) {
+            ch = &s->chan[c];
+again:
+            /* Test if this channel has any pending DMA requests.  */
+            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
+                    != PL080_CCONF_E)
+                continue;
+            flow = (ch->conf >> 11) & 7;
+            if (flow >= 4) {
+                hw_error(
+                    "pl080_run: Peripheral flow control not implemented\n");
+            }
+            src_id = (ch->conf >> 1) & 0x1f;
+            dest_id = (ch->conf >> 6) & 0x1f;
+            size = ch->ctrl & 0xfff;
+            req = s->req_single | s->req_burst;
+            switch (flow) {
+            case 0:
+                break;
+            case 1:
+                if ((req & (1u << dest_id)) == 0)
+                    size = 0;
+                break;
+            case 2:
+                if ((req & (1u << src_id)) == 0)
+                    size = 0;
+                break;
+            case 3:
+                if ((req & (1u << src_id)) == 0
+                        || (req & (1u << dest_id)) == 0)
+                    size = 0;
+                break;
+            }
+            if (!size)
+                continue;
+
+            /* Transfer one element.  */
+            /* ??? Should transfer multiple elements for a burst request.  */
+            /* ??? Unclear what the proper behavior is when source and
+               destination widths are different.  */
+            swidth = 1 << ((ch->ctrl >> 18) & 7);
+            dwidth = 1 << ((ch->ctrl >> 21) & 7);
+            for (n = 0; n < dwidth; n+= swidth) {
+                cpu_physical_memory_read(ch->src, buff + n, swidth);
+                if (ch->ctrl & PL080_CCTRL_SI)
+                    ch->src += swidth;
+            }
+            xsize = (dwidth < swidth) ? swidth : dwidth;
+            /* ??? This may pad the value incorrectly for dwidth < 32.  */
+            for (n = 0; n < xsize; n += dwidth) {
+                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
+                if (ch->ctrl & PL080_CCTRL_DI)
+                    ch->dest += swidth;
+            }
+
+            size--;
+            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
+            if (size == 0) {
+                /* Transfer complete.  */
+                if (ch->lli) {
+                    ch->src = ldl_le_phys(ch->lli);
+                    ch->dest = ldl_le_phys(ch->lli + 4);
+                    ch->ctrl = ldl_le_phys(ch->lli + 12);
+                    ch->lli = ldl_le_phys(ch->lli + 8);
+                } else {
+                    ch->conf &= ~PL080_CCONF_E;
+                }
+                if (ch->ctrl & PL080_CCTRL_I) {
+                    s->tc_int |= 1 << c;
+                }
+            }
+            goto again;
+        }
+        if (--s->running)
+            s->running = 1;
+    }
+}
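+
+/*
+ * Illustrative note, not part of the original patch: the ldl_le_phys() calls
+ * above read the PL08x linked-list item (LLI) from guest memory with this
+ * little-endian layout:
+ *
+ *     offset 0x0  source address      -> ch->src
+ *     offset 0x4  destination address -> ch->dest
+ *     offset 0x8  next LLI pointer    -> ch->lli
+ *     offset 0xc  channel control     -> ch->ctrl
+ */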
+
+static uint64_t pl080_read(void *opaque, hwaddr offset,
+                           unsigned size)
+{
+    pl080_state *s = (pl080_state *)opaque;
+    uint32_t i;
+    uint32_t mask;
+
+    if (offset >= 0xfe0 && offset < 0x1000) {
+        if (s->nchannels == 8) {
+            return pl080_id[(offset - 0xfe0) >> 2];
+        } else {
+            return pl081_id[(offset - 0xfe0) >> 2];
+        }
+    }
+    if (offset >= 0x100 && offset < 0x200) {
+        i = (offset & 0xe0) >> 5;
+        if (i >= s->nchannels)
+            goto bad_offset;
+        switch (offset >> 2) {
+        case 0: /* SrcAddr */
+            return s->chan[i].src;
+        case 1: /* DestAddr */
+            return s->chan[i].dest;
+        case 2: /* LLI */
+            return s->chan[i].lli;
+        case 3: /* Control */
+            return s->chan[i].ctrl;
+        case 4: /* Configuration */
+            return s->chan[i].conf;
+        default:
+            goto bad_offset;
+        }
+    }
+    switch (offset >> 2) {
+    case 0: /* IntStatus */
+        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
+    case 1: /* IntTCStatus */
+        return (s->tc_int & s->tc_mask);
+    case 3: /* IntErrorStatus */
+        return (s->err_int & s->err_mask);
+    case 5: /* RawIntTCStatus */
+        return s->tc_int;
+    case 6: /* RawIntErrorStatus */
+        return s->err_int;
+    case 7: /* EnbldChns */
+        mask = 0;
+        for (i = 0; i < s->nchannels; i++) {
+            if (s->chan[i].conf & PL080_CCONF_E)
+                mask |= 1 << i;
+        }
+        return mask;
+    case 8: /* SoftBReq */
+    case 9: /* SoftSReq */
+    case 10: /* SoftLBReq */
+    case 11: /* SoftLSReq */
+        /* ??? Implement these. */
+        return 0;
+    case 12: /* Configuration */
+        return s->conf;
+    case 13: /* Sync */
+        return s->sync;
+    default:
+    bad_offset:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "pl080_read: Bad offset %x\n", (int)offset);
+        return 0;
+    }
+}
+
+static void pl080_write(void *opaque, hwaddr offset,
+                        uint64_t value, unsigned size)
+{
+    pl080_state *s = (pl080_state *)opaque;
+    int i;
+
+    if (offset >= 0x100 && offset < 0x200) {
+        i = (offset & 0xe0) >> 5;
+        if (i >= s->nchannels)
+            goto bad_offset;
+        switch (offset >> 2) {
+        case 0: /* SrcAddr */
+            s->chan[i].src = value;
+            break;
+        case 1: /* DestAddr */
+            s->chan[i].dest = value;
+            break;
+        case 2: /* LLI */
+            s->chan[i].lli = value;
+            break;
+        case 3: /* Control */
+            s->chan[i].ctrl = value;
+            break;
+        case 4: /* Configuration */
+            s->chan[i].conf = value;
+            pl080_run(s);
+            break;
+        }
+    }
+    switch (offset >> 2) {
+    case 2: /* IntTCClear */
+        s->tc_int &= ~value;
+        break;
+    case 4: /* IntErrorClear */
+        s->err_int &= ~value;
+        break;
+    case 8: /* SoftBReq */
+    case 9: /* SoftSReq */
+    case 10: /* SoftLBReq */
+    case 11: /* SoftLSReq */
+        /* ??? Implement these.  */
+        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
+        break;
+    case 12: /* Configuration */
+        s->conf = value;
+        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
+            qemu_log_mask(LOG_UNIMP,
+                          "pl080_write: Big-endian DMA not implemented\n");
+        }
+        pl080_run(s);
+        break;
+    case 13: /* Sync */
+        s->sync = value;
+        break;
+    default:
+    bad_offset:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "pl080_write: Bad offset %x\n", (int)offset);
+    }
+    pl080_update(s);
+}
+
+static const MemoryRegionOps pl080_ops = {
+    .read = pl080_read,
+    .write = pl080_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int pl08x_init(SysBusDevice *dev, int nchannels)
+{
+    pl080_state *s = FROM_SYSBUS(pl080_state, dev);
+
+    memory_region_init_io(&s->iomem, &pl080_ops, s, "pl080", 0x1000);
+    sysbus_init_mmio(dev, &s->iomem);
+    sysbus_init_irq(dev, &s->irq);
+    s->nchannels = nchannels;
+    return 0;
+}
+
+static int pl080_init(SysBusDevice *dev)
+{
+    return pl08x_init(dev, 8);
+}
+
+static int pl081_init(SysBusDevice *dev)
+{
+    return pl08x_init(dev, 2);
+}
+
+static void pl080_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+    k->init = pl080_init;
+    dc->no_user = 1;
+    dc->vmsd = &vmstate_pl080;
+}
+
+static const TypeInfo pl080_info = {
+    .name          = "pl080",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(pl080_state),
+    .class_init    = pl080_class_init,
+};
+
+static void pl081_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+    k->init = pl081_init;
+    dc->no_user = 1;
+    dc->vmsd = &vmstate_pl080;
+}
+
+static const TypeInfo pl081_info = {
+    .name          = "pl081",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(pl080_state),
+    .class_init    = pl081_class_init,
+};
+
+/* The PL080 and PL081 are the same except for the number of channels
+   they implement (8 and 2 respectively).  */
+static void pl080_register_types(void)
+{
+    type_register_static(&pl080_info);
+    type_register_static(&pl081_info);
+}
+
+type_init(pl080_register_types)
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
new file mode 100644
index 0000000000..8b33138f30
--- /dev/null
+++ b/hw/dma/pl330.c
@@ -0,0 +1,1653 @@
+/*
+ * ARM PrimeCell PL330 DMA Controller
+ *
+ * Copyright (c) 2009 Samsung Electronics.
+ * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
+ * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com)
+ * Copyright (c) 2012 PetaLogix Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 or later.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "sysemu/dma.h"
+
+#ifndef PL330_ERR_DEBUG
+#define PL330_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do {\
+    if (PL330_ERR_DEBUG >= lvl) {\
+        fprintf(stderr, "PL330: %s:" fmt, __func__, ## args);\
+    } \
+} while (0);
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+#define PL330_PERIPH_NUM            32
+#define PL330_MAX_BURST_LEN         128
+#define PL330_INSN_MAXSIZE          6
+
+#define PL330_FIFO_OK               0
+#define PL330_FIFO_STALL            1
+#define PL330_FIFO_ERR              (-1)
+
+#define PL330_FAULT_UNDEF_INSTR             (1 <<  0)
+#define PL330_FAULT_OPERAND_INVALID         (1 <<  1)
+#define PL330_FAULT_DMAGO_ERR               (1 <<  4)
+#define PL330_FAULT_EVENT_ERR               (1 <<  5)
+#define PL330_FAULT_CH_PERIPH_ERR           (1 <<  6)
+#define PL330_FAULT_CH_RDWR_ERR             (1 <<  7)
+#define PL330_FAULT_ST_DATA_UNAVAILABLE     (1 << 12)
+#define PL330_FAULT_FIFOEMPTY_ERR           (1 << 13)
+#define PL330_FAULT_INSTR_FETCH_ERR         (1 << 16)
+#define PL330_FAULT_DATA_WRITE_ERR          (1 << 17)
+#define PL330_FAULT_DATA_READ_ERR           (1 << 18)
+#define PL330_FAULT_DBG_INSTR               (1 << 30)
+#define PL330_FAULT_LOCKUP_ERR              (1 << 31)
+
+#define PL330_UNTAGGED              0xff
+
+#define PL330_SINGLE                0x0
+#define PL330_BURST                 0x1
+
+#define PL330_WATCHDOG_LIMIT        1024
+
+/* IOMEM mapped registers */
+#define PL330_REG_DSR               0x000
+#define PL330_REG_DPC               0x004
+#define PL330_REG_INTEN             0x020
+#define PL330_REG_INT_EVENT_RIS     0x024
+#define PL330_REG_INTMIS            0x028
+#define PL330_REG_INTCLR            0x02C
+#define PL330_REG_FSRD              0x030
+#define PL330_REG_FSRC              0x034
+#define PL330_REG_FTRD              0x038
+#define PL330_REG_FTR_BASE          0x040
+#define PL330_REG_CSR_BASE          0x100
+#define PL330_REG_CPC_BASE          0x104
+#define PL330_REG_CHANCTRL          0x400
+#define PL330_REG_DBGSTATUS         0xD00
+#define PL330_REG_DBGCMD            0xD04
+#define PL330_REG_DBGINST0          0xD08
+#define PL330_REG_DBGINST1          0xD0C
+#define PL330_REG_CR0_BASE          0xE00
+#define PL330_REG_PERIPH_ID         0xFE0
+
+#define PL330_IOMEM_SIZE    0x1000
+
+#define CFG_BOOT_ADDR 2
+#define CFG_INS 3
+#define CFG_PNS 4
+#define CFG_CRD 5
+
+static const uint32_t pl330_id[] = {
+    0x30, 0x13, 0x24, 0x00, 0x0D, 0xF0, 0x05, 0xB1
+};
+
+/* DMA channel states as they are described in PL330 Technical Reference Manual
+ * Most of them will not be used in emulation.
+ */
+typedef enum  {
+    pl330_chan_stopped = 0,
+    pl330_chan_executing = 1,
+    pl330_chan_cache_miss = 2,
+    pl330_chan_updating_pc = 3,
+    pl330_chan_waiting_event = 4,
+    pl330_chan_at_barrier = 5,
+    pl330_chan_queue_busy = 6,
+    pl330_chan_waiting_periph = 7,
+    pl330_chan_killing = 8,
+    pl330_chan_completing = 9,
+    pl330_chan_fault_completing = 14,
+    pl330_chan_fault = 15,
+} PL330ChanState;
+
+typedef struct PL330State PL330State;
+
+typedef struct PL330Chan {
+    uint32_t src;
+    uint32_t dst;
+    uint32_t pc;
+    uint32_t control;
+    uint32_t status;
+    uint32_t lc[2];
+    uint32_t fault_type;
+    uint32_t watchdog_timer;
+
+    bool ns;
+    uint8_t request_flag;
+    uint8_t wakeup;
+    uint8_t wfp_sbp;
+
+    uint8_t state;
+    uint8_t stall;
+
+    bool is_manager;
+    PL330State *parent;
+    uint8_t tag;
+} PL330Chan;
+
+static const VMStateDescription vmstate_pl330_chan = {
+    .name = "pl330_chan",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(src, PL330Chan),
+        VMSTATE_UINT32(dst, PL330Chan),
+        VMSTATE_UINT32(pc, PL330Chan),
+        VMSTATE_UINT32(control, PL330Chan),
+        VMSTATE_UINT32(status, PL330Chan),
+        VMSTATE_UINT32_ARRAY(lc, PL330Chan, 2),
+        VMSTATE_UINT32(fault_type, PL330Chan),
+        VMSTATE_UINT32(watchdog_timer, PL330Chan),
+        VMSTATE_BOOL(ns, PL330Chan),
+        VMSTATE_UINT8(request_flag, PL330Chan),
+        VMSTATE_UINT8(wakeup, PL330Chan),
+        VMSTATE_UINT8(wfp_sbp, PL330Chan),
+        VMSTATE_UINT8(state, PL330Chan),
+        VMSTATE_UINT8(stall, PL330Chan),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+typedef struct PL330Fifo {
+    uint8_t *buf;
+    uint8_t *tag;
+    uint32_t head;
+    uint32_t num;
+    uint32_t buf_size;
+} PL330Fifo;
+
+static const VMStateDescription vmstate_pl330_fifo = {
+    .name = "pl330_chan",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, 0, buf_size),
+        VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, 0, buf_size),
+        VMSTATE_UINT32(head, PL330Fifo),
+        VMSTATE_UINT32(num, PL330Fifo),
+        VMSTATE_UINT32(buf_size, PL330Fifo),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+typedef struct PL330QueueEntry {
+    uint32_t addr;
+    uint32_t len;
+    uint8_t n;
+    bool inc;
+    bool z;
+    uint8_t tag;
+    uint8_t seqn;
+} PL330QueueEntry;
+
+static const VMStateDescription vmstate_pl330_queue_entry = {
+    .name = "pl330_queue_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(addr, PL330QueueEntry),
+        VMSTATE_UINT32(len, PL330QueueEntry),
+        VMSTATE_UINT8(n, PL330QueueEntry),
+        VMSTATE_BOOL(inc, PL330QueueEntry),
+        VMSTATE_BOOL(z, PL330QueueEntry),
+        VMSTATE_UINT8(tag, PL330QueueEntry),
+        VMSTATE_UINT8(seqn, PL330QueueEntry),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+typedef struct PL330Queue {
+    PL330State *parent;
+    PL330QueueEntry *queue;
+    uint32_t queue_size;
+} PL330Queue;
+
+static const VMStateDescription vmstate_pl330_queue = {
+    .name = "pl330_queue",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT_VARRAY_UINT32(queue, PL330Queue, queue_size, 1,
+                                 vmstate_pl330_queue_entry, PL330QueueEntry),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+struct PL330State {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    qemu_irq irq_abort;
+    qemu_irq *irq;
+
+    /* Config registers. cfg[5] = CfgDn. */
+    uint32_t cfg[6];
+#define EVENT_SEC_STATE 3
+#define PERIPH_SEC_STATE 4
+    /* cfg 0 bits and pieces */
+    uint32_t num_chnls;
+    uint8_t num_periph_req;
+    uint8_t num_events;
+    uint8_t mgr_ns_at_rst;
+    /* cfg 1 bits and pieces */
+    uint8_t i_cache_len;
+    uint8_t num_i_cache_lines;
+    /* CRD bits and pieces */
+    uint8_t data_width;
+    uint8_t wr_cap;
+    uint8_t wr_q_dep;
+    uint8_t rd_cap;
+    uint8_t rd_q_dep;
+    uint16_t data_buffer_dep;
+
+    PL330Chan manager;
+    PL330Chan *chan;
+    PL330Fifo fifo;
+    PL330Queue read_queue;
+    PL330Queue write_queue;
+    uint8_t *lo_seqn;
+    uint8_t *hi_seqn;
+    QEMUTimer *timer; /* used to restore DMA. */
+
+    uint32_t inten;
+    uint32_t int_status;
+    uint32_t ev_status;
+    uint32_t dbg[2];
+    uint8_t debug_status;
+    uint8_t num_faulting;
+    uint8_t periph_busy[PL330_PERIPH_NUM];
+
+};
+
+#define TYPE_PL330 "pl330"
+#define PL330(obj) OBJECT_CHECK(PL330State, (obj), TYPE_PL330)
+
+static const VMStateDescription vmstate_pl330 = {
+    .name = "pl330",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan),
+        VMSTATE_STRUCT_VARRAY_UINT32(chan, PL330State, num_chnls, 0,
+                                     vmstate_pl330_chan, PL330Chan),
+        VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, 0, num_chnls),
+        VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, 0, num_chnls),
+        VMSTATE_STRUCT(fifo, PL330State, 0, vmstate_pl330_fifo, PL330Fifo),
+        VMSTATE_STRUCT(read_queue, PL330State, 0, vmstate_pl330_queue,
+                       PL330Queue),
+        VMSTATE_STRUCT(write_queue, PL330State, 0, vmstate_pl330_queue,
+                       PL330Queue),
+        VMSTATE_TIMER(timer, PL330State),
+        VMSTATE_UINT32(inten, PL330State),
+        VMSTATE_UINT32(int_status, PL330State),
+        VMSTATE_UINT32(ev_status, PL330State),
+        VMSTATE_UINT32_ARRAY(dbg, PL330State, 2),
+        VMSTATE_UINT8(debug_status, PL330State),
+        VMSTATE_UINT8(num_faulting, PL330State),
+        VMSTATE_UINT8_ARRAY(periph_busy, PL330State, PL330_PERIPH_NUM),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+typedef struct PL330InsnDesc {
+    /* OPCODE of the instruction */
+    uint8_t opcode;
+    /* Mask so we can select several sibling instructions, such as
+       DMALD, DMALDS and DMALDB */
+    uint8_t opmask;
+    /* Size of instruction in bytes */
+    uint8_t size;
+    /* Interpreter */
+    void (*exec)(PL330Chan *, uint8_t opcode, uint8_t *args, int len);
+} PL330InsnDesc;
+
+
+/* MFIFO Implementation
+ *
+ * The MFIFO is implemented as a cyclic buffer of size BUF_SIZE. Tagged bytes
+ * are stored in this buffer: the data goes in the BUF field and the tags in
+ * the corresponding elements of the TAG field.
+ */
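+
+/* Illustrative sketch, not part of the original patch: a channel tagged 3
+ * that loads four bytes pushes them with pl330_fifo_push(&s->fifo, buf, 4, 3);
+ * the matching store later drains them with pl330_fifo_get(&s->fifo, buf, 4, 3),
+ * which returns PL330_FIFO_ERR if data belonging to a different tag sits at
+ * the head of the buffer.
+ */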
+
+/* Initialize queue. */
+
+static void pl330_fifo_init(PL330Fifo *s, uint32_t size)
+{
+    s->buf = g_malloc0(size);
+    s->tag = g_malloc0(size);
+    s->buf_size = size;
+}
+
+/* Cyclic increment */
+
+static inline int pl330_fifo_inc(PL330Fifo *s, int x)
+{
+    return (x + 1) % s->buf_size;
+}
+
+/* Number of empty bytes in MFIFO */
+
+static inline int pl330_fifo_num_free(PL330Fifo *s)
+{
+    return s->buf_size - s->num;
+}
+
+/* Push LEN bytes of data stored in BUF to the MFIFO and tag them with TAG.
+ * Returns zero on success, or PL330_FIFO_STALL if there is not enough free
+ * space in the MFIFO to store the requested amount of data. If the push is
+ * unsuccessful, no data is stored to the MFIFO.
+ */
+
+static int pl330_fifo_push(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+    int i;
+
+    if (s->buf_size - s->num < len) {
+        return PL330_FIFO_STALL;
+    }
+    for (i = 0; i < len; i++) {
+        int push_idx = (s->head + s->num + i) % s->buf_size;
+        s->buf[push_idx] = buf[i];
+        s->tag[push_idx] = tag;
+    }
+    s->num += len;
+    return PL330_FIFO_OK;
+}
+
+/* Get LEN bytes of data from the MFIFO and store them to BUF. The tag value
+ * of each byte is verified. Returns zero on success, PL330_FIFO_ERR on a tag
+ * mismatch, and PL330_FIFO_STALL if there is not enough data in the MFIFO.
+ * If the get is unsuccessful, no data is removed from the MFIFO.
+ */
+
+static int pl330_fifo_get(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+    int i;
+
+    if (s->num < len) {
+        return PL330_FIFO_STALL;
+    }
+    for (i = 0; i < len; i++) {
+        if (s->tag[s->head] == tag) {
+            int get_idx = (s->head + i) % s->buf_size;
+            buf[i] = s->buf[get_idx];
+        } else { /* Tag mismatch - Rollback transaction */
+            return PL330_FIFO_ERR;
+        }
+    }
+    s->head = (s->head + len) % s->buf_size;
+    s->num -= len;
+    return PL330_FIFO_OK;
+}
+
+/* Reset MFIFO. This completely erases all data in it. */
+
+static inline void pl330_fifo_reset(PL330Fifo *s)
+{
+    s->head = 0;
+    s->num = 0;
+}
+
+/* Return tag of the first byte stored in MFIFO. If MFIFO is empty
+ * PL330_UNTAGGED is returned.
+ */
+
+static inline uint8_t pl330_fifo_tag(PL330Fifo *s)
+{
+    return (!s->num) ? PL330_UNTAGGED : s->tag[s->head];
+}
+
+/* Returns non-zero if tag TAG is present in fifo or zero otherwise */
+
+static int pl330_fifo_has_tag(PL330Fifo *s, uint8_t tag)
+{
+    int i, n;
+
+    i = s->head;
+    for (n = 0; n < s->num; n++) {
+        if (s->tag[i] == tag) {
+            return 1;
+        }
+        i = pl330_fifo_inc(s, i);
+    }
+    return 0;
+}
+
+/* Remove all entries tagged with TAG from the MFIFO */
+
+static void pl330_fifo_tagged_remove(PL330Fifo *s, uint8_t tag)
+{
+    int i, t, n;
+
+    t = i = s->head;
+    for (n = 0; n < s->num; n++) {
+        if (s->tag[i] != tag) {
+            s->buf[t] = s->buf[i];
+            s->tag[t] = s->tag[i];
+            t = pl330_fifo_inc(s, t);
+        } else {
+            s->num = s->num - 1;
+        }
+        i = pl330_fifo_inc(s, i);
+    }
+}
+
+/* Read-Write Queue implementation
+ *
+ * A Read-Write Queue stores up to QUEUE_SIZE instructions (loads or stores).
+ * Each instruction is described by source (for loads) or destination (for
+ * stores) address ADDR, width of data to be loaded/stored LEN, number of
+ * stores/loads to be performed N, INC bit, Z bit and TAG to identify channel
+ * this instruction belongs to. The queue does not record whether an
+ * instruction is a load or a store; the PL330 has separate queues for loads
+ * and stores, so this is already known at the top level where it matters.
+ *
+ * Queue works as FIFO for instructions with equivalent tags, but can issue
+ * instructions with different tags in arbitrary order. SEQN field attached to
+ * each instruction helps to achieve this. For each TAG queue contains
+ * instructions with consecutive SEQN values ranging from LO_SEQN[TAG] to
+ * HI_SEQN[TAG]-1 inclusive. SEQN is 8-bit unsigned integer, so SEQN=255 is
+ * followed by SEQN=0.
+ *
+ * Z bit indicates that zeroes should be stored. No MFIFO fetches are performed
+ * in this case.
+ */
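+
+/* Illustrative sketch, not part of the original patch: if channel (tag) 0
+ * queues three loads, they receive SEQN 0, 1, 2 via hi_seqn[0]; with
+ * enforce_seq set, pl330_queue_find_insn() only returns the entry whose SEQN
+ * equals lo_seqn[0], so those loads are issued in order, while entries with
+ * other tags may still be picked in between.
+ */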
+
+static void pl330_queue_reset(PL330Queue *s)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        s->queue[i].tag = PL330_UNTAGGED;
+    }
+}
+
+/* Initialize queue */
+static void pl330_queue_init(PL330Queue *s, int size, PL330State *parent)
+{
+    s->parent = parent;
+    s->queue = g_new0(PL330QueueEntry, size);
+    s->queue_size = size;
+}
+
+/* Returns pointer to an empty slot or NULL if queue is full */
+static PL330QueueEntry *pl330_queue_find_empty(PL330Queue *s)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag == PL330_UNTAGGED) {
+            return &s->queue[i];
+        }
+    }
+    return NULL;
+}
+
+/* Put instruction in queue.
+ * Return value:
+ * - zero - OK
+ * - non-zero - queue is full
+ */
+
+static int pl330_queue_put_insn(PL330Queue *s, uint32_t addr,
+                                int len, int n, bool inc, bool z, uint8_t tag)
+{
+    PL330QueueEntry *entry = pl330_queue_find_empty(s);
+
+    if (!entry) {
+        return 1;
+    }
+    entry->tag = tag;
+    entry->addr = addr;
+    entry->len = len;
+    entry->n = n;
+    entry->z = z;
+    entry->inc = inc;
+    entry->seqn = s->parent->hi_seqn[tag];
+    s->parent->hi_seqn[tag]++;
+    return 0;
+}
+
+/* Returns a pointer to the queue slot containing an instruction which
+ * satisfies the following conditions:
+ *   - it has a valid tag value (not PL330_UNTAGGED)
+ *   - if enforce_seq is set, it can be issued without violating the queue
+ *     logic (see above)
+ *   - if the TAG argument is not PL330_UNTAGGED, the instruction's tag is
+ *     equal to the argument TAG value.
+ *  If no such instruction can be found, NULL is returned.
+ */
+
+static PL330QueueEntry *pl330_queue_find_insn(PL330Queue *s, uint8_t tag,
+                                              bool enforce_seq)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag != PL330_UNTAGGED) {
+            if ((!enforce_seq ||
+                    s->queue[i].seqn == s->parent->lo_seqn[s->queue[i].tag]) &&
+                    (s->queue[i].tag == tag || tag == PL330_UNTAGGED ||
+                    s->queue[i].z)) {
+                return &s->queue[i];
+            }
+        }
+    }
+    return NULL;
+}
+
+/* Removes instruction from queue. */
+
+static inline void pl330_queue_remove_insn(PL330Queue *s, PL330QueueEntry *e)
+{
+    s->parent->lo_seqn[e->tag]++;
+    e->tag = PL330_UNTAGGED;
+}
+
+/* Removes all instructions tagged with TAG from queue. */
+
+static inline void pl330_queue_remove_tagged(PL330Queue *s, uint8_t tag)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag == tag) {
+            s->queue[i].tag = PL330_UNTAGGED;
+        }
+    }
+}
+
+/* DMA instruction execution engine */
+
+/* Moves the DMA channel to the FAULT state and updates its status. */
+
+static inline void pl330_fault(PL330Chan *ch, uint32_t flags)
+{
+    DB_PRINT("ch: %p, flags: %x\n", ch, flags);
+    ch->fault_type |= flags;
+    if (ch->state == pl330_chan_fault) {
+        return;
+    }
+    ch->state = pl330_chan_fault;
+    ch->parent->num_faulting++;
+    if (ch->parent->num_faulting == 1) {
+        DB_PRINT("abort interrupt raised\n");
+        qemu_irq_raise(ch->parent->irq_abort);
+    }
+}
+
+/*
+ * For information about instructions see PL330 Technical Reference Manual.
+ *
+ * Arguments:
+ *   CH - channel executing the instruction
+ *   OPCODE - opcode
+ *   ARGS - array of 8-bit arguments
+ *   LEN - number of elements in ARGS array
+ */
+
+static void pl330_dmaaddh(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint16_t im = (((uint16_t)args[1]) << 8) | ((uint16_t)args[0]);
+    uint8_t ra = (opcode >> 1) & 1;
+
+    if (ch->is_manager) {
+        pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+        return;
+    }
+    if (ra) {
+        ch->dst += im;
+    } else {
+        ch->src += im;
+    }
+}
+
+static void pl330_dmaend(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    PL330State *s = ch->parent;
+
+    if (ch->state == pl330_chan_executing && !ch->is_manager) {
+        /* Wait for all transfers to complete */
+        if (pl330_fifo_has_tag(&s->fifo, ch->tag) ||
+            pl330_queue_find_insn(&s->read_queue, ch->tag, false) != NULL ||
+            pl330_queue_find_insn(&s->write_queue, ch->tag, false) != NULL) {
+
+            ch->stall = 1;
+            return;
+        }
+    }
+    DB_PRINT("DMA ending!\n");
+    pl330_fifo_tagged_remove(&s->fifo, ch->tag);
+    pl330_queue_remove_tagged(&s->read_queue, ch->tag);
+    pl330_queue_remove_tagged(&s->write_queue, ch->tag);
+    ch->state = pl330_chan_stopped;
+}
+
+static void pl330_dmaflushp(PL330Chan *ch, uint8_t opcode,
+                                            uint8_t *args, int len)
+{
+    uint8_t periph_id;
+
+    if (args[0] & 7) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    periph_id = (args[0] >> 3) & 0x1f;
+    if (periph_id >= ch->parent->num_periph_req) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+        pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+        return;
+    }
+    /* Do nothing */
+}
+
+static void pl330_dmago(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t chan_id;
+    uint8_t ns;
+    uint32_t pc;
+    PL330Chan *s;
+
+    DB_PRINT("\n");
+
+    if (!ch->is_manager) {
+        pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+        return;
+    }
+    ns = !!(opcode & 2);
+    chan_id = args[0] & 7;
+    if ((args[0] >> 3)) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (chan_id >= ch->parent->num_chnls) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    pc = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) |
+         (((uint32_t)args[2]) << 8)  | (((uint32_t)args[1]));
+    if (ch->parent->chan[chan_id].state != pl330_chan_stopped) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !ns) {
+        pl330_fault(ch, PL330_FAULT_DMAGO_ERR);
+        return;
+    }
+    s = &ch->parent->chan[chan_id];
+    s->ns = ns;
+    s->pc = pc;
+    s->state = pl330_chan_executing;
+}
+
+static void pl330_dmald(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t bs = opcode & 3;
+    uint32_t size, num;
+    bool inc;
+
+    if (bs == 2) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+        (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+        /* Perform NOP */
+        return;
+    }
+    if (bs == 1 && ch->request_flag == PL330_SINGLE) {
+        num = 1;
+    } else {
+        num = ((ch->control >> 4) & 0xf) + 1;
+    }
+    size = (uint32_t)1 << ((ch->control >> 1) & 0x7);
+    inc = !!(ch->control & 1);
+    ch->stall = pl330_queue_put_insn(&ch->parent->read_queue, ch->src,
+                                    size, num, inc, 0, ch->tag);
+    if (!ch->stall) {
+        DB_PRINT("channel:%d address:%08x size:%d num:%d %c\n",
+                 ch->tag, ch->src, size, num, inc ? 'Y' : 'N');
+        ch->src += inc ? size * num - (ch->src & (size - 1)) : 0;
+    }
+}
+
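+/* DMALDP: peripheral variant of DMALD; checks the peripheral number and
+ * its security permission before performing the load.
+ */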
+static void pl330_dmaldp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t periph_id;
+
+    if (args[0] & 7) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    periph_id = (args[0] >> 3) & 0x1f;
+    if (periph_id >= ch->parent->num_periph_req) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+        pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+        return;
+    }
+    pl330_dmald(ch, opcode, args, len);
+}
+
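+/* DMALP: load loop counter 0 or 1 (selected by the lc bit) with an 8-bit
+ * iteration count.
+ */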
+static void pl330_dmalp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t lc = (opcode & 2) >> 1;
+
+    ch->lc[lc] = args[0];
+}
+
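+/* DMAKILL: clear any fault state, drop all queued transfers tagged for
+ * this thread and stop it.
+ */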
+static void pl330_dmakill(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    if (ch->state == pl330_chan_fault ||
+        ch->state == pl330_chan_fault_completing) {
+        /* This is the only way for a channel to leave the faulting state */
+        ch->fault_type = 0;
+        ch->parent->num_faulting--;
+        if (ch->parent->num_faulting == 0) {
+            DB_PRINT("abort interrupt lowered\n");
+            qemu_irq_lower(ch->parent->irq_abort);
+        }
+    }
+    ch->state = pl330_chan_killing;
+    pl330_fifo_tagged_remove(&ch->parent->fifo, ch->tag);
+    pl330_queue_remove_tagged(&ch->parent->read_queue, ch->tag);
+    pl330_queue_remove_tagged(&ch->parent->write_queue, ch->tag);
+    ch->state = pl330_chan_stopped;
+}
+
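+/* DMALPEND: end of a loop body; jump back to the start of the loop body
+ * while the selected loop counter is non-zero, or unconditionally when
+ * the nf bit is clear.
+ */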
+static void pl330_dmalpend(PL330Chan *ch, uint8_t opcode,
+                                    uint8_t *args, int len)
+{
+    uint8_t nf = (opcode & 0x10) >> 4;
+    uint8_t bs = opcode & 3;
+    uint8_t lc = (opcode & 4) >> 2;
+
+    if (bs == 2) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+        (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+        /* Perform NOP */
+        return;
+    }
+    if (!nf || ch->lc[lc]) {
+        if (nf) {
+            ch->lc[lc]--;
+        }
+        DB_PRINT("loop reiteration\n");
+        ch->pc -= args[0];
+        ch->pc -= len + 1;
+        /* "ch->pc -= args[0] + len + 1" is incorrect when args[0] == 256 */
+    } else {
+        DB_PRINT("loop fallthrough\n");
+    }
+}
+
+
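+/* DMAMOV: load a 32-bit immediate into the source address, control or
+ * destination address register, selected by the rd field of the first
+ * argument byte.
+ */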
+static void pl330_dmamov(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t rd = args[0] & 7;
+    uint32_t im;
+
+    if ((args[0] >> 3)) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    im = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) |
+         (((uint32_t)args[2]) << 8)  | (((uint32_t)args[1]));
+    switch (rd) {
+    case 0:
+        ch->src = im;
+        break;
+    case 1:
+        ch->control = im;
+        break;
+    case 2:
+        ch->dst = im;
+        break;
+    default:
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+}
+
+static void pl330_dmanop(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    /* NOP is NOP. */
+}
+
+static void pl330_dmarmb(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    if (pl330_queue_find_insn(&ch->parent->read_queue, ch->tag, false)) {
+        ch->state = pl330_chan_at_barrier;
+        ch->stall = 1;
+        return;
+    } else {
+        ch->state = pl330_chan_executing;
+    }
+}
+
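+/* DMASEV: signal event ev_id; if the event is routed to an interrupt via
+ * INTEN, raise the corresponding IRQ line as well.
+ */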
+static void pl330_dmasev(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t ev_id;
+
+    if (args[0] & 7) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    ev_id = (args[0] >> 3) & 0x1f;
+    if (ev_id >= ch->parent->num_events) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) {
+        pl330_fault(ch, PL330_FAULT_EVENT_ERR);
+        return;
+    }
+    if (ch->parent->inten & (1 << ev_id)) {
+        ch->parent->int_status |= (1 << ev_id);
+        DB_PRINT("event interrupt raised %d\n", ev_id);
+        qemu_irq_raise(ch->parent->irq[ev_id]);
+    }
+    ch->parent->ev_status |= (1 << ev_id);
+}
+
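+/* DMAST[S|B]: queue a write burst to the current destination address,
+ * using the destination burst size and length from the control register.
+ */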
+static void pl330_dmast(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+    uint8_t bs = opcode & 3;
+    uint32_t size, num;
+    bool inc;
+
+    if (bs == 2) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+        (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+        /* Perform NOP */
+        return;
+    }
+    num = ((ch->control >> 18) & 0xf) + 1;
+    size = (uint32_t)1 << ((ch->control >> 15) & 0x7);
+    inc = !!((ch->control >> 14) & 1);
+    ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst,
+                                    size, num, inc, 0, ch->tag);
+    if (!ch->stall) {
+        DB_PRINT("channel:%d address:%08x size:%d num:%d %c\n",
+                 ch->tag, ch->dst, size, num, inc ? 'Y' : 'N');
+        ch->dst += inc ? size * num - (ch->dst & (size - 1)) : 0;
+    }
+}
+
+static void pl330_dmastp(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    uint8_t periph_id;
+
+    if (args[0] & 7) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    periph_id = (args[0] >> 3) & 0x1f;
+    if (periph_id >= ch->parent->num_periph_req) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+        pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+        return;
+    }
+    pl330_dmast(ch, opcode, args, len);
+}
+
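+/* DMASTZ: queue a write of zeros to the current destination address,
+ * using the destination parameters from the control register.
+ */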
+static void pl330_dmastz(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    uint32_t size, num;
+    bool inc;
+
+    num = ((ch->control >> 18) & 0xf) + 1;
+    size = (uint32_t)1 << ((ch->control >> 15) & 0x7);
+    inc = !!((ch->control >> 14) & 1);
+    ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst,
+                                    size, num, inc, 1, ch->tag);
+    if (inc) {
+        ch->dst += size * num;
+    }
+}
+
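+/* DMAWFE: wait for event ev_id; if the event is already pending and not
+ * routed to an interrupt, continue immediately, otherwise stall.
+ */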
+static void pl330_dmawfe(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    uint8_t ev_id;
+    int i;
+
+    if (args[0] & 5) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    ev_id = (args[0] >> 3) & 0x1f;
+    if (ev_id >= ch->parent->num_events) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) {
+        pl330_fault(ch, PL330_FAULT_EVENT_ERR);
+        return;
+    }
+    ch->wakeup = ev_id;
+    ch->state = pl330_chan_waiting_event;
+    if (~ch->parent->inten & ch->parent->ev_status & 1 << ev_id) {
+        ch->state = pl330_chan_executing;
+        /* If anyone else is currently waiting on the same event, let them
+         * clear ev_status so they pick up the event as well.
+         */
+        for (i = 0; i < ch->parent->num_chnls; ++i) {
+            PL330Chan *peer = &ch->parent->chan[i];
+            if (peer->state == pl330_chan_waiting_event &&
+                    peer->wakeup == ev_id) {
+                return;
+            }
+        }
+        ch->parent->ev_status &= ~(1 << ev_id);
+    } else {
+        ch->stall = 1;
+    }
+}
+
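+/* DMAWFP: wait for a request from peripheral periph_id; the bs bits select
+ * the single (S), peripheral (P) or burst (B) variant, and the channel
+ * stalls while the peripheral is busy.
+ */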
+static void pl330_dmawfp(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    uint8_t bs = opcode & 3;
+    uint8_t periph_id;
+
+    if (args[0] & 7) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    periph_id = (args[0] >> 3) & 0x1f;
+    if (periph_id >= ch->parent->num_periph_req) {
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+    if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+        pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+        return;
+    }
+    switch (bs) {
+    case 0: /* S */
+        ch->request_flag = PL330_SINGLE;
+        ch->wfp_sbp = 0;
+        break;
+    case 1: /* P */
+        ch->request_flag = PL330_BURST;
+        ch->wfp_sbp = 2;
+        break;
+    case 2: /* B */
+        ch->request_flag = PL330_BURST;
+        ch->wfp_sbp = 1;
+        break;
+    default:
+        pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+        return;
+    }
+
+    if (ch->parent->periph_busy[periph_id]) {
+        ch->state = pl330_chan_waiting_periph;
+        ch->stall = 1;
+    } else if (ch->state == pl330_chan_waiting_periph) {
+        ch->state = pl330_chan_executing;
+    }
+}
+
+static void pl330_dmawmb(PL330Chan *ch, uint8_t opcode,
+                         uint8_t *args, int len)
+{
+    if (pl330_queue_find_insn(&ch->parent->write_queue, ch->tag, false)) {
+        ch->state = pl330_chan_at_barrier;
+        ch->stall = 1;
+        return;
+    } else {
+        ch->state = pl330_chan_executing;
+    }
+}
+
+/* NULL-terminated array of instruction descriptions. */
+static const PL330InsnDesc insn_desc[] = {
+    { .opcode = 0x54, .opmask = 0xFD, .size = 3, .exec = pl330_dmaaddh, },
+    { .opcode = 0x00, .opmask = 0xFF, .size = 1, .exec = pl330_dmaend, },
+    { .opcode = 0x35, .opmask = 0xFF, .size = 2, .exec = pl330_dmaflushp, },
+    { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, },
+    { .opcode = 0x04, .opmask = 0xFC, .size = 1, .exec = pl330_dmald, },
+    { .opcode = 0x25, .opmask = 0xFD, .size = 2, .exec = pl330_dmaldp, },
+    { .opcode = 0x20, .opmask = 0xFD, .size = 2, .exec = pl330_dmalp, },
+    /* dmastp must come before dmalpend in this list, because their opcode
+     * maps overlap.
+     */
+    { .opcode = 0x29, .opmask = 0xFD, .size = 2, .exec = pl330_dmastp, },
+    { .opcode = 0x28, .opmask = 0xE8, .size = 2, .exec = pl330_dmalpend, },
+    { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, },
+    { .opcode = 0xBC, .opmask = 0xFF, .size = 6, .exec = pl330_dmamov, },
+    { .opcode = 0x18, .opmask = 0xFF, .size = 1, .exec = pl330_dmanop, },
+    { .opcode = 0x12, .opmask = 0xFF, .size = 1, .exec = pl330_dmarmb, },
+    { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, },
+    { .opcode = 0x08, .opmask = 0xFC, .size = 1, .exec = pl330_dmast, },
+    { .opcode = 0x0C, .opmask = 0xFF, .size = 1, .exec = pl330_dmastz, },
+    { .opcode = 0x36, .opmask = 0xFF, .size = 2, .exec = pl330_dmawfe, },
+    { .opcode = 0x30, .opmask = 0xFC, .size = 2, .exec = pl330_dmawfp, },
+    { .opcode = 0x13, .opmask = 0xFF, .size = 1, .exec = pl330_dmawmb, },
+    { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, }
+};
+
+/* Instructions which can be issued via debug registers. */
+static const PL330InsnDesc debug_insn_desc[] = {
+    { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, },
+    { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, },
+    { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, },
+    { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, }
+};
+
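+/* Fetch the opcode at the channel PC and look up its descriptor; returns
+ * NULL for an undefined instruction.
+ */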
+static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch)
+{
+    uint8_t opcode;
+    int i;
+
+    dma_memory_read(&dma_context_memory, ch->pc, &opcode, 1);
+    for (i = 0; insn_desc[i].size; i++) {
+        if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) {
+            return &insn_desc[i];
+        }
+    }
+    return NULL;
+}
+
+static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn)
+{
+    uint8_t buf[PL330_INSN_MAXSIZE];
+
+    assert(insn->size <= PL330_INSN_MAXSIZE);
+    dma_memory_read(&dma_context_memory, ch->pc, buf, insn->size);
+    insn->exec(ch, buf[0], &buf[1], insn->size - 1);
+}
+
+static inline void pl330_update_pc(PL330Chan *ch,
+                                   const PL330InsnDesc *insn)
+{
+    ch->pc += insn->size;
+}
+
+/* Try to execute current instruction in channel CH. Number of executed
+   instructions is returned (0 or 1). */
+static int pl330_chan_exec(PL330Chan *ch)
+{
+    const PL330InsnDesc *insn;
+
+    if (ch->state != pl330_chan_executing &&
+            ch->state != pl330_chan_waiting_periph &&
+            ch->state != pl330_chan_at_barrier &&
+            ch->state != pl330_chan_waiting_event) {
+        DB_PRINT("%d\n", ch->state);
+        return 0;
+    }
+    ch->stall = 0;
+    insn = pl330_fetch_insn(ch);
+    if (!insn) {
+        DB_PRINT("pl330 undefined instruction\n");
+        pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+        return 0;
+    }
+    pl330_exec_insn(ch, insn);
+    if (!ch->stall) {
+        pl330_update_pc(ch, insn);
+        ch->watchdog_timer = 0;
+        return 1;
+    /* WDT only active in exec state */
+    } else if (ch->state == pl330_chan_executing) {
+        ch->watchdog_timer++;
+        if (ch->watchdog_timer >= PL330_WATCHDOG_LIMIT) {
+            pl330_fault(ch, PL330_FAULT_LOCKUP_ERR);
+        }
+    }
+    return 0;
+}
+
+/* Try to execute one instruction in the given channel, one instruction from
+   the read queue and one from the write queue. The number of successfully
+   executed instructions is returned. */
+static int pl330_exec_cycle(PL330Chan *channel)
+{
+    PL330State *s = channel->parent;
+    PL330QueueEntry *q;
+    int i;
+    int num_exec = 0;
+    int fifo_res = 0;
+    uint8_t buf[PL330_MAX_BURST_LEN];
+
+    /* Execute one instruction in each channel */
+    num_exec += pl330_chan_exec(channel);
+
+    /* Execute one instruction from read queue */
+    q = pl330_queue_find_insn(&s->read_queue, PL330_UNTAGGED, true);
+    if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) {
+        int len = q->len - (q->addr & (q->len - 1));
+
+        dma_memory_read(&dma_context_memory, q->addr, buf, len);
+        if (PL330_ERR_DEBUG > 1) {
+            DB_PRINT("PL330 read from memory @%08x (size = %08x):\n",
+                      q->addr, len);
+            hexdump((char *)buf, stderr, "", len);
+        }
+        fifo_res = pl330_fifo_push(&s->fifo, buf, len, q->tag);
+        if (fifo_res == PL330_FIFO_OK) {
+            if (q->inc) {
+                q->addr += len;
+            }
+            q->n--;
+            if (!q->n) {
+                pl330_queue_remove_insn(&s->read_queue, q);
+            }
+            num_exec++;
+        }
+    }
+
+    /* Execute one instruction from write queue. */
+    q = pl330_queue_find_insn(&s->write_queue, pl330_fifo_tag(&s->fifo), true);
+    if (q != NULL) {
+        int len = q->len - (q->addr & (q->len - 1));
+
+        if (q->z) {
+            for (i = 0; i < len; i++) {
+                buf[i] = 0;
+            }
+        } else {
+            fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag);
+        }
+        if (fifo_res == PL330_FIFO_OK || q->z) {
+            dma_memory_write(&dma_context_memory, q->addr, buf, len);
+            if (PL330_ERR_DEBUG > 1) {
+                DB_PRINT("PL330 read from memory @%08x (size = %08x):\n",
+                         q->addr, len);
+                hexdump((char *)buf, stderr, "", len);
+            }
+            if (q->inc) {
+                q->addr += len;
+            }
+            num_exec++;
+        } else if (fifo_res == PL330_FIFO_STALL) {
+            pl330_fault(&channel->parent->chan[q->tag],
+                                PL330_FAULT_FIFOEMPTY_ERR);
+        }
+        q->n--;
+        if (!q->n) {
+            pl330_queue_remove_insn(&s->write_queue, q);
+        }
+    }
+
+    return num_exec;
+}
+
+static int pl330_exec_channel(PL330Chan *channel)
+{
+    int insr_exec = 0;
+
+    /* TODO: Is it all right to execute everything or should we do per-cycle
+       simulation? */
+    while (pl330_exec_cycle(channel)) {
+        insr_exec++;
+    }
+
+    /* Detect deadlock */
+    if (channel->state == pl330_chan_executing) {
+        pl330_fault(channel, PL330_FAULT_LOCKUP_ERR);
+    }
+    /* A situation where one of the queues has deadlocked while all channels
+     * have finished their programs should be impossible.
+     */
+
+    return insr_exec;
+}
+
+static inline void pl330_exec(PL330State *s)
+{
+    DB_PRINT("\n");
+    int i, insr_exec;
+    do {
+        insr_exec = pl330_exec_channel(&s->manager);
+
+        for (i = 0; i < s->num_chnls; i++) {
+            insr_exec += pl330_exec_channel(&s->chan[i]);
+        }
+    } while (insr_exec);
+}
+
+static void pl330_exec_cycle_timer(void *opaque)
+{
+    PL330State *s = (PL330State *)opaque;
+    pl330_exec(s);
+}
+
+/* Stop or restore DMA operations */
+
+static void pl330_dma_stop_irq(void *opaque, int irq, int level)
+{
+    PL330State *s = (PL330State *)opaque;
+
+    if (s->periph_busy[irq] != level) {
+        s->periph_busy[irq] = level;
+        qemu_mod_timer(s->timer, qemu_get_clock_ns(vm_clock));
+    }
+}
+
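+/* Execute an instruction written via the DBGINST0/1 registers: decode the
+ * target thread, opcode and up to five argument bytes, then run it against
+ * the debug instruction set.
+ */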
+static void pl330_debug_exec(PL330State *s)
+{
+    uint8_t args[5];
+    uint8_t opcode;
+    uint8_t chan_id;
+    int i;
+    PL330Chan *ch;
+    const PL330InsnDesc *insn;
+
+    s->debug_status = 1;
+    chan_id = (s->dbg[0] >>  8) & 0x07;
+    opcode  = (s->dbg[0] >> 16) & 0xff;
+    args[0] = (s->dbg[0] >> 24) & 0xff;
+    args[1] = (s->dbg[1] >>  0) & 0xff;
+    args[2] = (s->dbg[1] >>  8) & 0xff;
+    args[3] = (s->dbg[1] >> 16) & 0xff;
+    args[4] = (s->dbg[1] >> 24) & 0xff;
+    DB_PRINT("chan id: %d\n", chan_id);
+    if (s->dbg[0] & 1) {
+        ch = &s->chan[chan_id];
+    } else {
+        ch = &s->manager;
+    }
+    insn = NULL;
+    for (i = 0; debug_insn_desc[i].size; i++) {
+        if ((opcode & debug_insn_desc[i].opmask) == debug_insn_desc[i].opcode) {
+            insn = &debug_insn_desc[i];
+        }
+    }
+    if (!insn) {
+        pl330_fault(ch, PL330_FAULT_UNDEF_INSTR | PL330_FAULT_DBG_INSTR);
+        return;
+    }
+    ch->stall = 0;
+    insn->exec(ch, opcode, args, insn->size - 1);
+    if (ch->fault_type) {
+        ch->fault_type |= PL330_FAULT_DBG_INSTR;
+    }
+    if (ch->stall) {
+        qemu_log_mask(LOG_UNIMP, "pl330: stall of debug instruction not "
+                      "implemented\n");
+    }
+    s->debug_status = 0;
+}
+
+/* IOMEM mapped registers */
+
+static void pl330_iomem_write(void *opaque, hwaddr offset,
+                              uint64_t value, unsigned size)
+{
+    PL330State *s = (PL330State *) opaque;
+    uint32_t i;
+
+    DB_PRINT("addr: %08x data: %08x\n", (unsigned)offset, (unsigned)value);
+
+    switch (offset) {
+    case PL330_REG_INTEN:
+        s->inten = value;
+        break;
+    case PL330_REG_INTCLR:
+        for (i = 0; i < s->num_events; i++) {
+            if (s->int_status & s->inten & value & (1 << i)) {
+                DB_PRINT("event interrupt lowered %d\n", i);
+                qemu_irq_lower(s->irq[i]);
+            }
+        }
+        s->ev_status &= ~(value & s->inten);
+        s->int_status &= ~(value & s->inten);
+        break;
+    case PL330_REG_DBGCMD:
+        if ((value & 3) == 0) {
+            pl330_debug_exec(s);
+            pl330_exec(s);
+        } else {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: write of illegal value %u "
+                          "for offset " TARGET_FMT_plx "\n", (unsigned)value,
+                          offset);
+        }
+        break;
+    case PL330_REG_DBGINST0:
+        DB_PRINT("s->dbg[0] = %08x\n", (unsigned)value);
+        s->dbg[0] = value;
+        break;
+    case PL330_REG_DBGINST1:
+        DB_PRINT("s->dbg[1] = %08x\n", (unsigned)value);
+        s->dbg[1] = value;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad write offset " TARGET_FMT_plx
+                      "\n", offset);
+        break;
+    }
+}
+
+static inline uint32_t pl330_iomem_read_imp(void *opaque,
+        hwaddr offset)
+{
+    PL330State *s = (PL330State *)opaque;
+    int chan_id;
+    int i;
+    uint32_t res;
+
+    if (offset >= PL330_REG_PERIPH_ID && offset < PL330_REG_PERIPH_ID + 32) {
+        return pl330_id[(offset - PL330_REG_PERIPH_ID) >> 2];
+    }
+    if (offset >= PL330_REG_CR0_BASE && offset < PL330_REG_CR0_BASE + 24) {
+        return s->cfg[(offset - PL330_REG_CR0_BASE) >> 2];
+    }
+    if (offset >= PL330_REG_CHANCTRL && offset < PL330_REG_DBGSTATUS) {
+        offset -= PL330_REG_CHANCTRL;
+        chan_id = offset >> 5;
+        if (chan_id >= s->num_chnls) {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+                          TARGET_FMT_plx "\n", offset);
+            return 0;
+        }
+        switch (offset & 0x1f) {
+        case 0x00:
+            return s->chan[chan_id].src;
+        case 0x04:
+            return s->chan[chan_id].dst;
+        case 0x08:
+            return s->chan[chan_id].control;
+        case 0x0C:
+            return s->chan[chan_id].lc[0];
+        case 0x10:
+            return s->chan[chan_id].lc[1];
+        default:
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+                          TARGET_FMT_plx "\n", offset);
+            return 0;
+        }
+    }
+    if (offset >= PL330_REG_CSR_BASE && offset < 0x400) {
+        offset -= PL330_REG_CSR_BASE;
+        chan_id = offset >> 3;
+        if (chan_id >= s->num_chnls) {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+                          TARGET_FMT_plx "\n", offset);
+            return 0;
+        }
+        switch ((offset >> 2) & 1) {
+        case 0x0:
+            res = (s->chan[chan_id].ns << 21) |
+                    (s->chan[chan_id].wakeup << 4) |
+                    (s->chan[chan_id].state) |
+                    (s->chan[chan_id].wfp_sbp << 14);
+            return res;
+        case 0x1:
+            return s->chan[chan_id].pc;
+        default:
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: read error\n");
+            return 0;
+        }
+    }
+    if (offset >= PL330_REG_FTR_BASE && offset < 0x100) {
+        offset -= PL330_REG_FTR_BASE;
+        chan_id = offset >> 2;
+        if (chan_id >= s->num_chnls) {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+                          TARGET_FMT_plx "\n", offset);
+            return 0;
+        }
+        return s->chan[chan_id].fault_type;
+    }
+    switch (offset) {
+    case PL330_REG_DSR:
+        return (s->manager.ns << 9) | (s->manager.wakeup << 4) |
+            (s->manager.state & 0xf);
+    case PL330_REG_DPC:
+        return s->manager.pc;
+    case PL330_REG_INTEN:
+        return s->inten;
+    case PL330_REG_INT_EVENT_RIS:
+        return s->ev_status;
+    case PL330_REG_INTMIS:
+        return s->int_status;
+    case PL330_REG_INTCLR:
+        /* The documentation says this register cannot be read,
+         * but the Linux kernel reads it anyway.
+         */
+        return 0;
+    case PL330_REG_FSRD:
+        return s->manager.state ? 1 : 0;
+    case PL330_REG_FSRC:
+        res = 0;
+        for (i = 0; i < s->num_chnls; i++) {
+            if (s->chan[i].state == pl330_chan_fault ||
+                s->chan[i].state == pl330_chan_fault_completing) {
+                res |= 1 << i;
+            }
+        }
+        return res;
+    case PL330_REG_FTRD:
+        return s->manager.fault_type;
+    case PL330_REG_DBGSTATUS:
+        return s->debug_status;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+                      TARGET_FMT_plx "\n", offset);
+    }
+    return 0;
+}
+
+static uint64_t pl330_iomem_read(void *opaque, hwaddr offset,
+        unsigned size)
+{
+    int ret = pl330_iomem_read_imp(opaque, offset);
+    DB_PRINT("addr: %08x data: %08x\n", (unsigned)offset, ret);
+    return ret;
+}
+
+static const MemoryRegionOps pl330_ops = {
+    .read = pl330_iomem_read,
+    .write = pl330_iomem_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    }
+};
+
+/* Controller logic and initialization */
+
+static void pl330_chan_reset(PL330Chan *ch)
+{
+    ch->src = 0;
+    ch->dst = 0;
+    ch->pc = 0;
+    ch->state = pl330_chan_stopped;
+    ch->watchdog_timer = 0;
+    ch->stall = 0;
+    ch->control = 0;
+    ch->status = 0;
+    ch->fault_type = 0;
+}
+
+static void pl330_reset(DeviceState *d)
+{
+    int i;
+    PL330State *s = PL330(d);
+
+    s->inten = 0;
+    s->int_status = 0;
+    s->ev_status = 0;
+    s->debug_status = 0;
+    s->num_faulting = 0;
+    s->manager.ns = s->mgr_ns_at_rst;
+    pl330_fifo_reset(&s->fifo);
+    pl330_queue_reset(&s->read_queue);
+    pl330_queue_reset(&s->write_queue);
+
+    for (i = 0; i < s->num_chnls; i++) {
+        pl330_chan_reset(&s->chan[i]);
+    }
+    for (i = 0; i < s->num_periph_req; i++) {
+        s->periph_busy[i] = 0;
+    }
+
+    qemu_del_timer(s->timer);
+}
+
+static void pl330_realize(DeviceState *dev, Error **errp)
+{
+    int i;
+    PL330State *s = PL330(dev);
+
+    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_abort);
+    memory_region_init_io(&s->iomem, &pl330_ops, s, "dma", PL330_IOMEM_SIZE);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+
+    s->timer = qemu_new_timer_ns(vm_clock, pl330_exec_cycle_timer, s);
+
+    s->cfg[0] = (s->mgr_ns_at_rst ? 0x4 : 0) |
+                (s->num_periph_req > 0 ? 1 : 0) |
+                ((s->num_chnls - 1) & 0x7) << 4 |
+                ((s->num_periph_req - 1) & 0x1f) << 12 |
+                ((s->num_events - 1) & 0x1f) << 17;
+
+    switch (s->i_cache_len) {
+    case (4):
+        s->cfg[1] |= 2;
+        break;
+    case (8):
+        s->cfg[1] |= 3;
+        break;
+    case (16):
+        s->cfg[1] |= 4;
+        break;
+    case (32):
+        s->cfg[1] |= 5;
+        break;
+    default:
+        error_setg(errp, "Bad value for i-cache_len property: %d\n",
+                   s->i_cache_len);
+        return;
+    }
+    s->cfg[1] |= ((s->num_i_cache_lines - 1) & 0xf) << 4;
+
+    s->chan = g_new0(PL330Chan, s->num_chnls);
+    s->hi_seqn = g_new0(uint8_t, s->num_chnls);
+    s->lo_seqn = g_new0(uint8_t, s->num_chnls);
+    for (i = 0; i < s->num_chnls; i++) {
+        s->chan[i].parent = s;
+        s->chan[i].tag = (uint8_t)i;
+    }
+    s->manager.parent = s;
+    s->manager.tag = s->num_chnls;
+    s->manager.is_manager = true;
+
+    s->irq = g_new0(qemu_irq, s->num_events);
+    for (i = 0; i < s->num_events; i++) {
+        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+    }
+
+    qdev_init_gpio_in(dev, pl330_dma_stop_irq, PL330_PERIPH_NUM);
+
+    switch (s->data_width) {
+    case (32):
+        s->cfg[CFG_CRD] |= 0x2;
+        break;
+    case (64):
+        s->cfg[CFG_CRD] |= 0x3;
+        break;
+    case (128):
+        s->cfg[CFG_CRD] |= 0x4;
+        break;
+    default:
+        error_setg(errp, "Bad value for data_width property: %d\n",
+                   s->data_width);
+        return;
+    }
+
+    s->cfg[CFG_CRD] |= ((s->wr_cap - 1) & 0x7) << 4 |
+                    ((s->wr_q_dep - 1) & 0xf) << 8 |
+                    ((s->rd_cap - 1) & 0x7) << 12 |
+                    ((s->rd_q_dep - 1) & 0xf) << 16 |
+                    ((s->data_buffer_dep - 1) & 0x1ff) << 20;
+
+    pl330_queue_init(&s->read_queue, s->rd_q_dep, s);
+    pl330_queue_init(&s->write_queue, s->wr_q_dep, s);
+    pl330_fifo_init(&s->fifo, s->data_buffer_dep);
+}
+
+static Property pl330_properties[] = {
+    /* CR0 */
+    DEFINE_PROP_UINT32("num_chnls", PL330State, num_chnls, 8),
+    DEFINE_PROP_UINT8("num_periph_req", PL330State, num_periph_req, 4),
+    DEFINE_PROP_UINT8("num_events", PL330State, num_events, 16),
+    DEFINE_PROP_UINT8("mgr_ns_at_rst", PL330State, mgr_ns_at_rst, 0),
+    /* CR1 */
+    DEFINE_PROP_UINT8("i-cache_len", PL330State, i_cache_len, 4),
+    DEFINE_PROP_UINT8("num_i-cache_lines", PL330State, num_i_cache_lines, 8),
+    /* CR2-4 */
+    DEFINE_PROP_UINT32("boot_addr", PL330State, cfg[CFG_BOOT_ADDR], 0),
+    DEFINE_PROP_UINT32("INS", PL330State, cfg[CFG_INS], 0),
+    DEFINE_PROP_UINT32("PNS", PL330State, cfg[CFG_PNS], 0),
+    /* CRD */
+    DEFINE_PROP_UINT8("data_width", PL330State, data_width, 64),
+    DEFINE_PROP_UINT8("wr_cap", PL330State, wr_cap, 8),
+    DEFINE_PROP_UINT8("wr_q_dep", PL330State, wr_q_dep, 16),
+    DEFINE_PROP_UINT8("rd_cap", PL330State, rd_cap, 8),
+    DEFINE_PROP_UINT8("rd_q_dep", PL330State, rd_q_dep, 16),
+    DEFINE_PROP_UINT16("data_buffer_dep", PL330State, data_buffer_dep, 256),
+
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pl330_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = pl330_realize;
+    dc->reset = pl330_reset;
+    dc->props = pl330_properties;
+    dc->vmsd = &vmstate_pl330;
+}
+
+static const TypeInfo pl330_type_info = {
+    .name           = TYPE_PL330,
+    .parent         = TYPE_SYS_BUS_DEVICE,
+    .instance_size  = sizeof(PL330State),
+    .class_init     = pl330_class_init,
+};
+
+static void pl330_register_types(void)
+{
+    type_register_static(&pl330_type_info);
+}
+
+type_init(pl330_register_types)
diff --git a/hw/dma/puv3_dma.c b/hw/dma/puv3_dma.c
new file mode 100644
index 0000000000..32844b5f75
--- /dev/null
+++ b/hw/dma/puv3_dma.c
@@ -0,0 +1,109 @@
+/*
+ * DMA device simulation in PKUnity SoC
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or any later version.
+ * See the COPYING file in the top-level directory.
+ */
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+
+#undef DEBUG_PUV3
+#include "hw/unicore32/puv3.h"
+
+#define PUV3_DMA_CH_NR          (6)
+#define PUV3_DMA_CH_MASK        (0xff)
+#define PUV3_DMA_CH(offset)     ((offset) >> 8)
+
+typedef struct {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    uint32_t reg_CFG[PUV3_DMA_CH_NR];
+} PUV3DMAState;
+
+static uint64_t puv3_dma_read(void *opaque, hwaddr offset,
+        unsigned size)
+{
+    PUV3DMAState *s = opaque;
+    uint32_t ret = 0;
+
+    assert(PUV3_DMA_CH(offset) < PUV3_DMA_CH_NR);
+
+    switch (offset & PUV3_DMA_CH_MASK) {
+    case 0x10:
+        ret = s->reg_CFG[PUV3_DMA_CH(offset)];
+        break;
+    default:
+        DPRINTF("Bad offset 0x%x\n", offset);
+    }
+    DPRINTF("offset 0x%x, value 0x%x\n", offset, ret);
+
+    return ret;
+}
+
+static void puv3_dma_write(void *opaque, hwaddr offset,
+        uint64_t value, unsigned size)
+{
+    PUV3DMAState *s = opaque;
+
+    assert(PUV3_DMA_CH(offset) < PUV3_DMA_CH_NR);
+
+    switch (offset & PUV3_DMA_CH_MASK) {
+    case 0x10:
+        s->reg_CFG[PUV3_DMA_CH(offset)] = value;
+        break;
+    default:
+        DPRINTF("Bad offset 0x%x\n", offset);
+    }
+    DPRINTF("offset 0x%x, value 0x%x\n", offset, value);
+}
+
+static const MemoryRegionOps puv3_dma_ops = {
+    .read = puv3_dma_read,
+    .write = puv3_dma_write,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int puv3_dma_init(SysBusDevice *dev)
+{
+    PUV3DMAState *s = FROM_SYSBUS(PUV3DMAState, dev);
+    int i;
+
+    for (i = 0; i < PUV3_DMA_CH_NR; i++) {
+        s->reg_CFG[i] = 0x0;
+    }
+
+    memory_region_init_io(&s->iomem, &puv3_dma_ops, s, "puv3_dma",
+            PUV3_REGS_OFFSET);
+    sysbus_init_mmio(dev, &s->iomem);
+
+    return 0;
+}
+
+static void puv3_dma_class_init(ObjectClass *klass, void *data)
+{
+    SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+
+    sdc->init = puv3_dma_init;
+}
+
+static const TypeInfo puv3_dma_info = {
+    .name = "puv3_dma",
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(PUV3DMAState),
+    .class_init = puv3_dma_class_init,
+};
+
+static void puv3_dma_register_type(void)
+{
+    type_register_static(&puv3_dma_info);
+}
+
+type_init(puv3_dma_register_type)
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
new file mode 100644
index 0000000000..03f92f1ab6
--- /dev/null
+++ b/hw/dma/rc4030.c
@@ -0,0 +1,825 @@
+/*
+ * QEMU JAZZ RC4030 chipset
+ *
+ * Copyright (c) 2007-2009 Herve Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/mips/mips.h"
+#include "qemu/timer.h"
+
+/********************************************************/
+/* debug rc4030 */
+
+//#define DEBUG_RC4030
+//#define DEBUG_RC4030_DMA
+
+#ifdef DEBUG_RC4030
+#define DPRINTF(fmt, ...) \
+do { printf("rc4030: " fmt , ## __VA_ARGS__); } while (0)
+static const char* irq_names[] = { "parallel", "floppy", "sound", "video",
+            "network", "scsi", "keyboard", "mouse", "serial0", "serial1" };
+#else
+#define DPRINTF(fmt, ...)
+#endif
+
+#define RC4030_ERROR(fmt, ...) \
+do { fprintf(stderr, "rc4030 ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
+
+/********************************************************/
+/* rc4030 emulation                                     */
+
+typedef struct dma_pagetable_entry {
+    int32_t frame;
+    int32_t owner;
+} QEMU_PACKED dma_pagetable_entry;
+
+#define DMA_PAGESIZE    4096
+#define DMA_REG_ENABLE  1
+#define DMA_REG_COUNT   2
+#define DMA_REG_ADDRESS 3
+
+#define DMA_FLAG_ENABLE     0x0001
+#define DMA_FLAG_MEM_TO_DEV 0x0002
+#define DMA_FLAG_TC_INTR    0x0100
+#define DMA_FLAG_MEM_INTR   0x0200
+#define DMA_FLAG_ADDR_INTR  0x0400
+
+typedef struct rc4030State
+{
+    uint32_t config; /* 0x0000: RC4030 config register */
+    uint32_t revision; /* 0x0008: RC4030 Revision register */
+    uint32_t invalid_address_register; /* 0x0010: Invalid Address register */
+
+    /* DMA */
+    uint32_t dma_regs[8][4];
+    uint32_t dma_tl_base; /* 0x0018: DMA transl. table base */
+    uint32_t dma_tl_limit; /* 0x0020: DMA transl. table limit */
+
+    /* cache */
+    uint32_t cache_maint; /* 0x0030: Cache Maintenance */
+    uint32_t remote_failed_address; /* 0x0038: Remote Failed Address */
+    uint32_t memory_failed_address; /* 0x0040: Memory Failed Address */
+    uint32_t cache_ptag; /* 0x0048: I/O Cache Physical Tag */
+    uint32_t cache_ltag; /* 0x0050: I/O Cache Logical Tag */
+    uint32_t cache_bmask; /* 0x0058: I/O Cache Byte Mask */
+
+    uint32_t nmi_interrupt; /* 0x0200: interrupt source */
+    uint32_t offset210;
+    uint32_t nvram_protect; /* 0x0220: NV ram protect register */
+    uint32_t rem_speed[16];
+    uint32_t imr_jazz; /* Local bus int enable mask */
+    uint32_t isr_jazz; /* Local bus int source */
+
+    /* timer */
+    QEMUTimer *periodic_timer;
+    uint32_t itr; /* Interval timer reload */
+
+    qemu_irq timer_irq;
+    qemu_irq jazz_bus_irq;
+
+    MemoryRegion iomem_chipset;
+    MemoryRegion iomem_jazzio;
+} rc4030State;
+
+static void set_next_tick(rc4030State *s)
+{
+    uint32_t tm_hz;
+
+    qemu_irq_lower(s->timer_irq);
+
+    tm_hz = 1000 / (s->itr + 1);
+
+    qemu_mod_timer(s->periodic_timer, qemu_get_clock_ns(vm_clock) +
+                   get_ticks_per_sec() / tm_hz);
+}
+
+/* called for accesses to rc4030 */
+static uint32_t rc4030_readl(void *opaque, hwaddr addr)
+{
+    rc4030State *s = opaque;
+    uint32_t val;
+
+    addr &= 0x3fff;
+    switch (addr & ~0x3) {
+    /* Global config register */
+    case 0x0000:
+        val = s->config;
+        break;
+    /* Revision register */
+    case 0x0008:
+        val = s->revision;
+        break;
+    /* Invalid Address register */
+    case 0x0010:
+        val = s->invalid_address_register;
+        break;
+    /* DMA transl. table base */
+    case 0x0018:
+        val = s->dma_tl_base;
+        break;
+    /* DMA transl. table limit */
+    case 0x0020:
+        val = s->dma_tl_limit;
+        break;
+    /* Remote Failed Address */
+    case 0x0038:
+        val = s->remote_failed_address;
+        break;
+    /* Memory Failed Address */
+    case 0x0040:
+        val = s->memory_failed_address;
+        break;
+    /* I/O Cache Byte Mask */
+    case 0x0058:
+        val = s->cache_bmask;
+        /* HACK */
+        if (s->cache_bmask == (uint32_t)-1)
+            s->cache_bmask = 0;
+        break;
+    /* Remote Speed Registers */
+    case 0x0070:
+    case 0x0078:
+    case 0x0080:
+    case 0x0088:
+    case 0x0090:
+    case 0x0098:
+    case 0x00a0:
+    case 0x00a8:
+    case 0x00b0:
+    case 0x00b8:
+    case 0x00c0:
+    case 0x00c8:
+    case 0x00d0:
+    case 0x00d8:
+    case 0x00e0:
+    case 0x00e8:
+        val = s->rem_speed[(addr - 0x0070) >> 3];
+        break;
+    /* DMA channel base address */
+    case 0x0100:
+    case 0x0108:
+    case 0x0110:
+    case 0x0118:
+    case 0x0120:
+    case 0x0128:
+    case 0x0130:
+    case 0x0138:
+    case 0x0140:
+    case 0x0148:
+    case 0x0150:
+    case 0x0158:
+    case 0x0160:
+    case 0x0168:
+    case 0x0170:
+    case 0x0178:
+    case 0x0180:
+    case 0x0188:
+    case 0x0190:
+    case 0x0198:
+    case 0x01a0:
+    case 0x01a8:
+    case 0x01b0:
+    case 0x01b8:
+    case 0x01c0:
+    case 0x01c8:
+    case 0x01d0:
+    case 0x01d8:
+    case 0x01e0:
+    case 0x01e8:
+    case 0x01f0:
+    case 0x01f8:
+        {
+            int entry = (addr - 0x0100) >> 5;
+            int idx = (addr & 0x1f) >> 3;
+            val = s->dma_regs[entry][idx];
+        }
+        break;
+    /* Interrupt source */
+    case 0x0200:
+        val = s->nmi_interrupt;
+        break;
+    /* Error type */
+    case 0x0208:
+        val = 0;
+        break;
+    /* Offset 0x0210 */
+    case 0x0210:
+        val = s->offset210;
+        break;
+    /* NV ram protect register */
+    case 0x0220:
+        val = s->nvram_protect;
+        break;
+    /* Interval timer count */
+    case 0x0230:
+        val = 0;
+        qemu_irq_lower(s->timer_irq);
+        break;
+    /* EISA interrupt */
+    case 0x0238:
+        val = 7; /* FIXME: should be read from EISA controller */
+        break;
+    default:
+        RC4030_ERROR("invalid read [" TARGET_FMT_plx "]\n", addr);
+        val = 0;
+        break;
+    }
+
+    if ((addr & ~3) != 0x230) {
+        DPRINTF("read 0x%02x at " TARGET_FMT_plx "\n", val, addr);
+    }
+
+    return val;
+}
+
+static uint32_t rc4030_readw(void *opaque, hwaddr addr)
+{
+    uint32_t v = rc4030_readl(opaque, addr & ~0x3);
+    if (addr & 0x2)
+        return v >> 16;
+    else
+        return v & 0xffff;
+}
+
+static uint32_t rc4030_readb(void *opaque, hwaddr addr)
+{
+    uint32_t v = rc4030_readl(opaque, addr & ~0x3);
+    return (v >> (8 * (addr & 0x3))) & 0xff;
+}
+
+static void rc4030_writel(void *opaque, hwaddr addr, uint32_t val)
+{
+    rc4030State *s = opaque;
+    addr &= 0x3fff;
+
+    DPRINTF("write 0x%02x at " TARGET_FMT_plx "\n", val, addr);
+
+    switch (addr & ~0x3) {
+    /* Global config register */
+    case 0x0000:
+        s->config = val;
+        break;
+    /* DMA transl. table base */
+    case 0x0018:
+        s->dma_tl_base = val;
+        break;
+    /* DMA transl. table limit */
+    case 0x0020:
+        s->dma_tl_limit = val;
+        break;
+    /* DMA transl. table invalidated */
+    case 0x0028:
+        break;
+    /* Cache Maintenance */
+    case 0x0030:
+        s->cache_maint = val;
+        break;
+    /* I/O Cache Physical Tag */
+    case 0x0048:
+        s->cache_ptag = val;
+        break;
+    /* I/O Cache Logical Tag */
+    case 0x0050:
+        s->cache_ltag = val;
+        break;
+    /* I/O Cache Byte Mask */
+    case 0x0058:
+        s->cache_bmask |= val; /* HACK */
+        break;
+    /* I/O Cache Buffer Window */
+    case 0x0060:
+        /* HACK */
+        if (s->cache_ltag == 0x80000001 && s->cache_bmask == 0xf0f0f0f) {
+            hwaddr dest = s->cache_ptag & ~0x1;
+            dest += (s->cache_maint & 0x3) << 3;
+            cpu_physical_memory_write(dest, &val, 4);
+        }
+        break;
+    /* Remote Speed Registers */
+    case 0x0070:
+    case 0x0078:
+    case 0x0080:
+    case 0x0088:
+    case 0x0090:
+    case 0x0098:
+    case 0x00a0:
+    case 0x00a8:
+    case 0x00b0:
+    case 0x00b8:
+    case 0x00c0:
+    case 0x00c8:
+    case 0x00d0:
+    case 0x00d8:
+    case 0x00e0:
+    case 0x00e8:
+        s->rem_speed[(addr - 0x0070) >> 3] = val;
+        break;
+    /* DMA channel base address */
+    case 0x0100:
+    case 0x0108:
+    case 0x0110:
+    case 0x0118:
+    case 0x0120:
+    case 0x0128:
+    case 0x0130:
+    case 0x0138:
+    case 0x0140:
+    case 0x0148:
+    case 0x0150:
+    case 0x0158:
+    case 0x0160:
+    case 0x0168:
+    case 0x0170:
+    case 0x0178:
+    case 0x0180:
+    case 0x0188:
+    case 0x0190:
+    case 0x0198:
+    case 0x01a0:
+    case 0x01a8:
+    case 0x01b0:
+    case 0x01b8:
+    case 0x01c0:
+    case 0x01c8:
+    case 0x01d0:
+    case 0x01d8:
+    case 0x01e0:
+    case 0x01e8:
+    case 0x01f0:
+    case 0x01f8:
+        {
+            int entry = (addr - 0x0100) >> 5;
+            int idx = (addr & 0x1f) >> 3;
+            s->dma_regs[entry][idx] = val;
+        }
+        break;
+    /* Offset 0x0210 */
+    case 0x0210:
+        s->offset210 = val;
+        break;
+    /* Interval timer reload */
+    case 0x0228:
+        s->itr = val;
+        qemu_irq_lower(s->timer_irq);
+        set_next_tick(s);
+        break;
+    /* EISA interrupt */
+    case 0x0238:
+        break;
+    default:
+        RC4030_ERROR("invalid write of 0x%02x at [" TARGET_FMT_plx "]\n", val, addr);
+        break;
+    }
+}
+
+static void rc4030_writew(void *opaque, hwaddr addr, uint32_t val)
+{
+    uint32_t old_val = rc4030_readl(opaque, addr & ~0x3);
+
+    if (addr & 0x2)
+        val = (val << 16) | (old_val & 0x0000ffff);
+    else
+        val = val | (old_val & 0xffff0000);
+    rc4030_writel(opaque, addr & ~0x3, val);
+}
+
+static void rc4030_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+    uint32_t old_val = rc4030_readl(opaque, addr & ~0x3);
+
+    switch (addr & 3) {
+    case 0:
+        val = val | (old_val & 0xffffff00);
+        break;
+    case 1:
+        val = (val << 8) | (old_val & 0xffff00ff);
+        break;
+    case 2:
+        val = (val << 16) | (old_val & 0xff00ffff);
+        break;
+    case 3:
+        val = (val << 24) | (old_val & 0x00ffffff);
+        break;
+    }
+    rc4030_writel(opaque, addr & ~0x3, val);
+}
+
+static const MemoryRegionOps rc4030_ops = {
+    .old_mmio = {
+        .read = { rc4030_readb, rc4030_readw, rc4030_readl, },
+        .write = { rc4030_writeb, rc4030_writew, rc4030_writel, },
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void update_jazz_irq(rc4030State *s)
+{
+    uint16_t pending;
+
+    pending = s->isr_jazz & s->imr_jazz;
+
+#ifdef DEBUG_RC4030
+    if (s->isr_jazz != 0) {
+        uint32_t irq = 0;
+        DPRINTF("pending irqs:");
+        for (irq = 0; irq < ARRAY_SIZE(irq_names); irq++) {
+            if (s->isr_jazz & (1 << irq)) {
+                printf(" %s", irq_names[irq]);
+                if (!(s->imr_jazz & (1 << irq))) {
+                    printf("(ignored)");
+                }
+            }
+        }
+        printf("\n");
+    }
+#endif
+
+    if (pending != 0)
+        qemu_irq_raise(s->jazz_bus_irq);
+    else
+        qemu_irq_lower(s->jazz_bus_irq);
+}
+
+static void rc4030_irq_jazz_request(void *opaque, int irq, int level)
+{
+    rc4030State *s = opaque;
+
+    if (level) {
+        s->isr_jazz |= 1 << irq;
+    } else {
+        s->isr_jazz &= ~(1 << irq);
+    }
+
+    update_jazz_irq(s);
+}
+
+static void rc4030_periodic_timer(void *opaque)
+{
+    rc4030State *s = opaque;
+
+    set_next_tick(s);
+    qemu_irq_raise(s->timer_irq);
+}
+
+static uint32_t jazzio_readw(void *opaque, hwaddr addr)
+{
+    rc4030State *s = opaque;
+    uint32_t val;
+    uint32_t irq;
+    addr &= 0xfff;
+
+    switch (addr) {
+    /* Local bus int source */
+    case 0x00: {
+        uint32_t pending = s->isr_jazz & s->imr_jazz;
+        val = 0;
+        irq = 0;
+        while (pending) {
+            if (pending & 1) {
+                DPRINTF("returning irq %s\n", irq_names[irq]);
+                val = (irq + 1) << 2;
+                break;
+            }
+            irq++;
+            pending >>= 1;
+        }
+        break;
+    }
+    /* Local bus int enable mask */
+    case 0x02:
+        val = s->imr_jazz;
+        break;
+    default:
+        RC4030_ERROR("(jazz io controller) invalid read [" TARGET_FMT_plx "]\n", addr);
+        val = 0;
+    }
+
+    DPRINTF("(jazz io controller) read 0x%04x at " TARGET_FMT_plx "\n", val, addr);
+
+    return val;
+}
+
+static uint32_t jazzio_readb(void *opaque, hwaddr addr)
+{
+    uint32_t v;
+    v = jazzio_readw(opaque, addr & ~0x1);
+    return (v >> (8 * (addr & 0x1))) & 0xff;
+}
+
+static uint32_t jazzio_readl(void *opaque, hwaddr addr)
+{
+    uint32_t v;
+    v = jazzio_readw(opaque, addr);
+    v |= jazzio_readw(opaque, addr + 2) << 16;
+    return v;
+}
+
+static void jazzio_writew(void *opaque, hwaddr addr, uint32_t val)
+{
+    rc4030State *s = opaque;
+    addr &= 0xfff;
+
+    DPRINTF("(jazz io controller) write 0x%04x at " TARGET_FMT_plx "\n", val, addr);
+
+    switch (addr) {
+    /* Local bus int enable mask */
+    case 0x02:
+        s->imr_jazz = val;
+        update_jazz_irq(s);
+        break;
+    default:
+        RC4030_ERROR("(jazz io controller) invalid write of 0x%04x at [" TARGET_FMT_plx "]\n", val, addr);
+        break;
+    }
+}
+
+static void jazzio_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+    uint32_t old_val = jazzio_readw(opaque, addr & ~0x1);
+
+    switch (addr & 1) {
+    case 0:
+        val = val | (old_val & 0xff00);
+        break;
+    case 1:
+        val = (val << 8) | (old_val & 0x00ff);
+        break;
+    }
+    jazzio_writew(opaque, addr & ~0x1, val);
+}
+
+static void jazzio_writel(void *opaque, hwaddr addr, uint32_t val)
+{
+    jazzio_writew(opaque, addr, val & 0xffff);
+    jazzio_writew(opaque, addr + 2, (val >> 16) & 0xffff);
+}
+
+static const MemoryRegionOps jazzio_ops = {
+    .old_mmio = {
+        .read = { jazzio_readb, jazzio_readw, jazzio_readl, },
+        .write = { jazzio_writeb, jazzio_writew, jazzio_writel, },
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void rc4030_reset(void *opaque)
+{
+    rc4030State *s = opaque;
+    int i;
+
+    s->config = 0x410; /* some boards seem to accept 0x104 too */
+    s->revision = 1;
+    s->invalid_address_register = 0;
+
+    memset(s->dma_regs, 0, sizeof(s->dma_regs));
+    s->dma_tl_base = s->dma_tl_limit = 0;
+
+    s->remote_failed_address = s->memory_failed_address = 0;
+    s->cache_maint = 0;
+    s->cache_ptag = s->cache_ltag = 0;
+    s->cache_bmask = 0;
+
+    s->offset210 = 0x18186;
+    s->nvram_protect = 7;
+    for (i = 0; i < 15; i++)
+        s->rem_speed[i] = 7;
+    s->imr_jazz = 0x10; /* XXX: required by firmware, but why? */
+    s->isr_jazz = 0;
+
+    s->itr = 0;
+
+    qemu_irq_lower(s->timer_irq);
+    qemu_irq_lower(s->jazz_bus_irq);
+}
+
+static int rc4030_load(QEMUFile *f, void *opaque, int version_id)
+{
+    rc4030State* s = opaque;
+    int i, j;
+
+    if (version_id != 2)
+        return -EINVAL;
+
+    s->config = qemu_get_be32(f);
+    s->invalid_address_register = qemu_get_be32(f);
+    for (i = 0; i < 8; i++)
+        for (j = 0; j < 4; j++)
+            s->dma_regs[i][j] = qemu_get_be32(f);
+    s->dma_tl_base = qemu_get_be32(f);
+    s->dma_tl_limit = qemu_get_be32(f);
+    s->cache_maint = qemu_get_be32(f);
+    s->remote_failed_address = qemu_get_be32(f);
+    s->memory_failed_address = qemu_get_be32(f);
+    s->cache_ptag = qemu_get_be32(f);
+    s->cache_ltag = qemu_get_be32(f);
+    s->cache_bmask = qemu_get_be32(f);
+    s->offset210 = qemu_get_be32(f);
+    s->nvram_protect = qemu_get_be32(f);
+    for (i = 0; i < 15; i++)
+        s->rem_speed[i] = qemu_get_be32(f);
+    s->imr_jazz = qemu_get_be32(f);
+    s->isr_jazz = qemu_get_be32(f);
+    s->itr = qemu_get_be32(f);
+
+    set_next_tick(s);
+    update_jazz_irq(s);
+
+    return 0;
+}
+
+static void rc4030_save(QEMUFile *f, void *opaque)
+{
+    rc4030State* s = opaque;
+    int i, j;
+
+    qemu_put_be32(f, s->config);
+    qemu_put_be32(f, s->invalid_address_register);
+    for (i = 0; i < 8; i++)
+        for (j = 0; j < 4; j++)
+            qemu_put_be32(f, s->dma_regs[i][j]);
+    qemu_put_be32(f, s->dma_tl_base);
+    qemu_put_be32(f, s->dma_tl_limit);
+    qemu_put_be32(f, s->cache_maint);
+    qemu_put_be32(f, s->remote_failed_address);
+    qemu_put_be32(f, s->memory_failed_address);
+    qemu_put_be32(f, s->cache_ptag);
+    qemu_put_be32(f, s->cache_ltag);
+    qemu_put_be32(f, s->cache_bmask);
+    qemu_put_be32(f, s->offset210);
+    qemu_put_be32(f, s->nvram_protect);
+    for (i = 0; i < 15; i++)
+        qemu_put_be32(f, s->rem_speed[i]);
+    qemu_put_be32(f, s->imr_jazz);
+    qemu_put_be32(f, s->isr_jazz);
+    qemu_put_be32(f, s->itr);
+}
+
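+/* Copy len bytes between a DMA virtual address and guest memory, walking
+ * the DMA translation table one page at a time to find each physical
+ * frame.
+ */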
+void rc4030_dma_memory_rw(void *opaque, hwaddr addr, uint8_t *buf, int len, int is_write)
+{
+    rc4030State *s = opaque;
+    hwaddr entry_addr;
+    hwaddr phys_addr;
+    dma_pagetable_entry entry;
+    int index;
+    int ncpy, i;
+
+    i = 0;
+    for (;;) {
+        if (i == len) {
+            break;
+        }
+
+        ncpy = DMA_PAGESIZE - (addr & (DMA_PAGESIZE - 1));
+        if (ncpy > len - i)
+            ncpy = len - i;
+
+        /* Get DMA translation table entry */
+        index = addr / DMA_PAGESIZE;
+        if (index >= s->dma_tl_limit / sizeof(dma_pagetable_entry)) {
+            break;
+        }
+        entry_addr = s->dma_tl_base + index * sizeof(dma_pagetable_entry);
+        /* XXX: not sure; should we really use only the lowest bits? */
+        entry_addr &= 0x7fffffff;
+        cpu_physical_memory_read(entry_addr, &entry, sizeof(entry));
+
+        /* Read/write data at right place */
+        phys_addr = entry.frame + (addr & (DMA_PAGESIZE - 1));
+        cpu_physical_memory_rw(phys_addr, &buf[i], ncpy, is_write);
+
+        i += ncpy;
+        addr += ncpy;
+    }
+}
+
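+/* Perform a transfer on DMA channel n: check that the channel is enabled
+ * and the direction matches, then copy up to the programmed count through
+ * the translation table and flag terminal count.
+ */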
+static void rc4030_do_dma(void *opaque, int n, uint8_t *buf, int len, int is_write)
+{
+    rc4030State *s = opaque;
+    hwaddr dma_addr;
+    int dev_to_mem;
+
+    s->dma_regs[n][DMA_REG_ENABLE] &= ~(DMA_FLAG_TC_INTR | DMA_FLAG_MEM_INTR | DMA_FLAG_ADDR_INTR);
+
+    /* Check DMA channel consistency */
+    dev_to_mem = (s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_MEM_TO_DEV) ? 0 : 1;
+    if (!(s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_ENABLE) ||
+        (is_write != dev_to_mem)) {
+        s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_MEM_INTR;
+        s->nmi_interrupt |= 1 << n;
+        return;
+    }
+
+    /* Get start address and len */
+    if (len > s->dma_regs[n][DMA_REG_COUNT])
+        len = s->dma_regs[n][DMA_REG_COUNT];
+    dma_addr = s->dma_regs[n][DMA_REG_ADDRESS];
+
+    /* Read/write data at right place */
+    rc4030_dma_memory_rw(opaque, dma_addr, buf, len, is_write);
+
+    s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_TC_INTR;
+    s->dma_regs[n][DMA_REG_COUNT] -= len;
+
+#ifdef DEBUG_RC4030_DMA
+    {
+        int i, j;
+        printf("rc4030 dma: Copying %d bytes %s host %p\n",
+            len, is_write ? "from" : "to", buf);
+        for (i = 0; i < len; i += 16) {
+            int n = 16;
+            if (n > len - i) {
+                n = len - i;
+            }
+            for (j = 0; j < n; j++)
+                printf("%02x ", buf[i + j]);
+            while (j++ < 16)
+                printf("   ");
+            printf("| ");
+            for (j = 0; j < n; j++)
+                printf("%c", isprint(buf[i + j]) ? buf[i + j] : '.');
+            printf("\n");
+        }
+    }
+#endif
+}
+
+struct rc4030DMAState {
+    void *opaque;
+    int n;
+};
+
+void rc4030_dma_read(void *dma, uint8_t *buf, int len)
+{
+    rc4030_dma s = dma;
+    rc4030_do_dma(s->opaque, s->n, buf, len, 0);
+}
+
+void rc4030_dma_write(void *dma, uint8_t *buf, int len)
+{
+    rc4030_dma s = dma;
+    rc4030_do_dma(s->opaque, s->n, buf, len, 1);
+}
+
+static rc4030_dma *rc4030_allocate_dmas(void *opaque, int n)
+{
+    rc4030_dma *s;
+    struct rc4030DMAState *p;
+    int i;
+
+    s = (rc4030_dma *)g_malloc0(sizeof(rc4030_dma) * n);
+    p = (struct rc4030DMAState *)g_malloc0(sizeof(struct rc4030DMAState) * n);
+    for (i = 0; i < n; i++) {
+        p->opaque = opaque;
+        p->n = i;
+        s[i] = p;
+        p++;
+    }
+    return s;
+}
+
+void *rc4030_init(qemu_irq timer, qemu_irq jazz_bus,
+                  qemu_irq **irqs, rc4030_dma **dmas,
+                  MemoryRegion *sysmem)
+{
+    rc4030State *s;
+
+    s = g_malloc0(sizeof(rc4030State));
+
+    *irqs = qemu_allocate_irqs(rc4030_irq_jazz_request, s, 16);
+    *dmas = rc4030_allocate_dmas(s, 4);
+
+    s->periodic_timer = qemu_new_timer_ns(vm_clock, rc4030_periodic_timer, s);
+    s->timer_irq = timer;
+    s->jazz_bus_irq = jazz_bus;
+
+    qemu_register_reset(rc4030_reset, s);
+    register_savevm(NULL, "rc4030", 0, 2, rc4030_save, rc4030_load, s);
+    rc4030_reset(s);
+
+    memory_region_init_io(&s->iomem_chipset, &rc4030_ops, s,
+                          "rc4030.chipset", 0x300);
+    memory_region_add_subregion(sysmem, 0x80000000, &s->iomem_chipset);
+    memory_region_init_io(&s->iomem_jazzio, &jazzio_ops, s,
+                          "rc4030.jazzio", 0x00001000);
+    memory_region_add_subregion(sysmem, 0xf0000000, &s->iomem_jazzio);
+
+    return s;
+}
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
new file mode 100644
index 0000000000..8db1a74acf
--- /dev/null
+++ b/hw/dma/xilinx_axidma.c
@@ -0,0 +1,523 @@
+/*
+ * QEMU model of Xilinx AXI-DMA block.
+ *
+ * Copyright (c) 2011 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "hw/ptimer.h"
+#include "qemu/log.h"
+#include "hw/qdev-addr.h"
+
+#include "hw/stream.h"
+
+#define D(x)
+
+#define R_DMACR             (0x00 / 4)
+#define R_DMASR             (0x04 / 4)
+#define R_CURDESC           (0x08 / 4)
+#define R_TAILDESC          (0x10 / 4)
+#define R_MAX               (0x30 / 4)
+
+enum {
+    DMACR_RUNSTOP = 1,
+    DMACR_TAILPTR_MODE = 2,
+    DMACR_RESET = 4
+};
+
+enum {
+    DMASR_HALTED = 1,
+    DMASR_IDLE  = 2,
+    DMASR_IOC_IRQ  = 1 << 12,
+    DMASR_DLY_IRQ  = 1 << 13,
+
+    DMASR_IRQ_MASK = 7 << 12
+};
+
+struct SDesc {
+    uint64_t nxtdesc;
+    uint64_t buffer_address;
+    uint64_t reserved;
+    uint32_t control;
+    uint32_t status;
+    uint32_t app[6];
+};
+
+enum {
+    SDESC_CTRL_EOF = (1 << 26),
+    SDESC_CTRL_SOF = (1 << 27),
+
+    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
+};
+
+enum {
+    SDESC_STATUS_EOF = (1 << 26),
+    SDESC_STATUS_SOF_BIT = 27,
+    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
+    SDESC_STATUS_COMPLETE = (1 << 31)
+};
+
+struct Stream {
+    QEMUBH *bh;
+    ptimer_state *ptimer;
+    qemu_irq irq;
+
+    int nr;
+
+    struct SDesc desc;
+    int pos;
+    unsigned int complete_cnt;
+    uint32_t regs[R_MAX];
+};
+
+struct XilinxAXIDMA {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    uint32_t freqhz;
+    StreamSlave *tx_dev;
+
+    struct Stream streams[2];
+};
+
+/*
+ * Helper calls to extract info from descriptors and other trivial
+ * state from regs.
+ */
+static inline int stream_desc_sof(struct SDesc *d)
+{
+    return d->control & SDESC_CTRL_SOF;
+}
+
+static inline int stream_desc_eof(struct SDesc *d)
+{
+    return d->control & SDESC_CTRL_EOF;
+}
+
+static inline int stream_resetting(struct Stream *s)
+{
+    return !!(s->regs[R_DMACR] & DMACR_RESET);
+}
+
+static inline int stream_running(struct Stream *s)
+{
+    return s->regs[R_DMACR] & DMACR_RUNSTOP;
+}
+
+static inline int stream_halted(struct Stream *s)
+{
+    return s->regs[R_DMASR] & DMASR_HALTED;
+}
+
+static inline int stream_idle(struct Stream *s)
+{
+    return !!(s->regs[R_DMASR] & DMASR_IDLE);
+}
+
+static void stream_reset(struct Stream *s)
+{
+    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted.  */
+    s->regs[R_DMACR] = 1 << 16; /* Starts with a completion threshold of one.  */
+}
+
+/* Map an offset addr into a channel index.  */
+static inline int streamid_from_addr(hwaddr addr)
+{
+    int sid;
+
+    sid = addr / (0x30);
+    sid &= 1;
+    return sid;
+}
+
+#ifdef DEBUG_ENET
+static void stream_desc_show(struct SDesc *d)
+{
+    qemu_log("buffer_addr  = " PRIx64 "\n", d->buffer_address);
+    qemu_log("nxtdesc      = " PRIx64 "\n", d->nxtdesc);
+    qemu_log("control      = %x\n", d->control);
+    qemu_log("status       = %x\n", d->status);
+}
+#endif
+
+static void stream_desc_load(struct Stream *s, hwaddr addr)
+{
+    struct SDesc *d = &s->desc;
+    int i;
+
+    cpu_physical_memory_read(addr, (void *) d, sizeof *d);
+
+    /* Convert from LE into host endianness.  */
+    d->buffer_address = le64_to_cpu(d->buffer_address);
+    d->nxtdesc = le64_to_cpu(d->nxtdesc);
+    d->control = le32_to_cpu(d->control);
+    d->status = le32_to_cpu(d->status);
+    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
+        d->app[i] = le32_to_cpu(d->app[i]);
+    }
+}
+
+static void stream_desc_store(struct Stream *s, hwaddr addr)
+{
+    struct SDesc *d = &s->desc;
+    int i;
+
+    /* Convert from host endianness into LE.  */
+    d->buffer_address = cpu_to_le64(d->buffer_address);
+    d->nxtdesc = cpu_to_le64(d->nxtdesc);
+    d->control = cpu_to_le32(d->control);
+    d->status = cpu_to_le32(d->status);
+    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
+        d->app[i] = cpu_to_le32(d->app[i]);
+    }
+    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
+}
+
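+/* The IRQ status bits in DMASR share their positions with the corresponding
+ * enable bits in DMACR; the channel IRQ is raised only when a pending bit is
+ * also enabled.  */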
+static void stream_update_irq(struct Stream *s)
+{
+    unsigned int pending, mask, irq;
+
+    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
+    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
+
+    irq = pending & mask;
+
+    qemu_set_irq(s->irq, !!irq);
+}
+
+static void stream_reload_complete_cnt(struct Stream *s)
+{
+    unsigned int comp_th;
+    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
+    s->complete_cnt = comp_th;
+}
+
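+/* Completion-delay timer expired: reload the threshold counter and raise the
+ * delayed-completion interrupt.  */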
+static void timer_hit(void *opaque)
+{
+    struct Stream *s = opaque;
+
+    stream_reload_complete_cnt(s);
+    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
+    stream_update_irq(s);
+}
+
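+/* A frame has completed: restart the delay timer if a completion delay is
+ * programmed and count down towards the IOC interrupt threshold.  */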
+static void stream_complete(struct Stream *s)
+{
+    unsigned int comp_delay;
+
+    /* Start the delayed timer.  */
+    comp_delay = s->regs[R_DMACR] >> 24;
+    if (comp_delay) {
+        ptimer_stop(s->ptimer);
+        ptimer_set_count(s->ptimer, comp_delay);
+        ptimer_run(s->ptimer, 1);
+    }
+
+    s->complete_cnt--;
+    if (s->complete_cnt == 0) {
+        /* Raise the IOC irq.  */
+        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
+        stream_reload_complete_cnt(s);
+    }
+}
+
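+/* Walk the MM2S descriptor ring from CURDESC, gathering buffers into txbuf
+ * until EOF, then push the assembled packet to the attached stream slave.  */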
+static void stream_process_mem2s(struct Stream *s,
+                                 StreamSlave *tx_dev)
+{
+    uint32_t prev_d;
+    unsigned char txbuf[16 * 1024];
+    unsigned int txlen;
+    uint32_t app[6];
+
+    if (!stream_running(s) || stream_idle(s)) {
+        return;
+    }
+
+    while (1) {
+        stream_desc_load(s, s->regs[R_CURDESC]);
+
+        if (s->desc.status & SDESC_STATUS_COMPLETE) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+
+        if (stream_desc_sof(&s->desc)) {
+            s->pos = 0;
+            memcpy(app, s->desc.app, sizeof app);
+        }
+
+        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
+        if ((txlen + s->pos) > sizeof txbuf) {
+            hw_error("%s: too small internal txbuf! %d\n", __func__,
+                     txlen + s->pos);
+        }
+
+        cpu_physical_memory_read(s->desc.buffer_address,
+                                 txbuf + s->pos, txlen);
+        s->pos += txlen;
+
+        if (stream_desc_eof(&s->desc)) {
+            stream_push(tx_dev, txbuf, s->pos, app);
+            s->pos = 0;
+            stream_complete(s);
+        }
+
+        /* Update the descriptor.  */
+        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
+        stream_desc_store(s, s->regs[R_CURDESC]);
+
+        /* Advance.  */
+        prev_d = s->regs[R_CURDESC];
+        s->regs[R_CURDESC] = s->desc.nxtdesc;
+        if (prev_d == s->regs[R_TAILDESC]) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+    }
+}
+
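+/* Scatter an incoming stream packet across the S2MM descriptor ring, marking
+ * SOF on the first descriptor used and EOF on the last.  */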
+static void stream_process_s2mem(struct Stream *s,
+                                 unsigned char *buf, size_t len, uint32_t *app)
+{
+    uint32_t prev_d;
+    unsigned int rxlen;
+    int pos = 0;
+    int sof = 1;
+
+    if (!stream_running(s) || stream_idle(s)) {
+        return;
+    }
+
+    while (len) {
+        stream_desc_load(s, s->regs[R_CURDESC]);
+
+        if (s->desc.status & SDESC_STATUS_COMPLETE) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+
+        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
+        if (rxlen > len) {
+            /* The remaining data fits in this descriptor; clamp to it.  */
+            rxlen = len;
+        }
+
+        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
+        len -= rxlen;
+        pos += rxlen;
+
+        /* Update the descriptor.  */
+        if (!len) {
+            int i;
+
+            stream_complete(s);
+            for (i = 0; i < 5; i++) {
+                s->desc.app[i] = app[i];
+            }
+            s->desc.status |= SDESC_STATUS_EOF;
+        }
+
+        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
+        s->desc.status |= SDESC_STATUS_COMPLETE;
+        stream_desc_store(s, s->regs[R_CURDESC]);
+        sof = 0;
+
+        /* Advance.  */
+        prev_d = s->regs[R_CURDESC];
+        s->regs[R_CURDESC] = s->desc.nxtdesc;
+        if (prev_d == s->regs[R_TAILDESC]) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+    }
+}
+
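+/* Data pushed to us over the stream interface is written to guest memory
+ * through the S2MM channel (stream 1).  */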
+static void
+axidma_push(StreamSlave *obj, unsigned char *buf, size_t len, uint32_t *app)
+{
+    struct XilinxAXIDMA *d = FROM_SYSBUS(typeof(*d), SYS_BUS_DEVICE(obj));
+    struct Stream *s = &d->streams[1];
+
+    if (!app) {
+        hw_error("No stream app data!\n");
+    }
+    stream_process_s2mem(s, buf, len, app);
+    stream_update_irq(s);
+}
+
+static uint64_t axidma_read(void *opaque, hwaddr addr,
+                            unsigned size)
+{
+    struct XilinxAXIDMA *d = opaque;
+    struct Stream *s;
+    uint32_t r = 0;
+    int sid;
+
+    sid = streamid_from_addr(addr);
+    s = &d->streams[sid];
+
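+    /* Each channel has a 0x30-byte register window; reduce to a word index.  */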
+    addr = addr % 0x30;
+    addr >>= 2;
+    switch (addr) {
+        case R_DMACR:
+            /* Simulate one cycle of reset delay.  */
+            s->regs[addr] &= ~DMACR_RESET;
+            r = s->regs[addr];
+            break;
+        case R_DMASR:
+            s->regs[addr] &= 0xffff;
+            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
+            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
+            r = s->regs[addr];
+            break;
+        default:
+            r = s->regs[addr];
+            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+                           __func__, sid, addr * 4, r));
+            break;
+    }
+    return r;
+}
+
+static void axidma_write(void *opaque, hwaddr addr,
+                         uint64_t value, unsigned size)
+{
+    struct XilinxAXIDMA *d = opaque;
+    struct Stream *s;
+    int sid;
+
+    sid = streamid_from_addr(addr);
+    s = &d->streams[sid];
+
+    addr = addr % 0x30;
+    addr >>= 2;
+    switch (addr) {
+        case R_DMACR:
+            /* Tailptr mode is always on.  */
+            value |= DMACR_TAILPTR_MODE;
+            /* Remember our previous reset state.  */
+            value |= (s->regs[addr] & DMACR_RESET);
+            s->regs[addr] = value;
+
+            if (value & DMACR_RESET) {
+                stream_reset(s);
+            }
+
+            if ((value & 1) && !stream_resetting(s)) {
+                /* Start processing.  */
+                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
+            }
+            stream_reload_complete_cnt(s);
+            break;
+
+        case R_DMASR:
+            /* Mask away writes to the IRQ status bits (they are cleared,
+               never set, by writes).  */
+            value &= ~(value & DMASR_IRQ_MASK);
+            s->regs[addr] = value;
+            break;
+
+        case R_TAILDESC:
+            s->regs[addr] = value;
+            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
+            if (!sid) {
+                stream_process_mem2s(s, d->tx_dev);
+            }
+            break;
+        default:
+            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+                  __func__, sid, addr * 4, (unsigned)value));
+            s->regs[addr] = value;
+            break;
+    }
+    stream_update_irq(s);
+}
+
+static const MemoryRegionOps axidma_ops = {
+    .read = axidma_read,
+    .write = axidma_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int xilinx_axidma_init(SysBusDevice *dev)
+{
+    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
+    int i;
+
+    sysbus_init_irq(dev, &s->streams[0].irq);
+    sysbus_init_irq(dev, &s->streams[1].irq);
+
+    memory_region_init_io(&s->iomem, &axidma_ops, s,
+                          "xlnx.axi-dma", R_MAX * 4 * 2);
+    sysbus_init_mmio(dev, &s->iomem);
+
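+    /* Stream 0 is the MM2S (TX) channel, stream 1 the S2MM (RX) channel.  */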
+    for (i = 0; i < 2; i++) {
+        stream_reset(&s->streams[i]);
+        s->streams[i].nr = i;
+        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
+        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
+        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
+    }
+    return 0;
+}
+
+static void xilinx_axidma_initfn(Object *obj)
+{
+    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), SYS_BUS_DEVICE(obj));
+
+    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
+                             (Object **) &s->tx_dev, NULL);
+}
+
+static Property axidma_properties[] = {
+    DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void axidma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
+
+    k->init = xilinx_axidma_init;
+    dc->props = axidma_properties;
+    ssc->push = axidma_push;
+}
+
+static const TypeInfo axidma_info = {
+    .name          = "xlnx.axi-dma",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(struct XilinxAXIDMA),
+    .class_init    = axidma_class_init,
+    .instance_init = xilinx_axidma_initfn,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_STREAM_SLAVE },
+        { }
+    }
+};
+
+static void xilinx_axidma_register_types(void)
+{
+    type_register_static(&axidma_info);
+}
+
+type_init(xilinx_axidma_register_types)