summary refs log tree commit diff stats
path: root/hw/dma
diff options
context:
space:
mode:
Diffstat (limited to 'hw/dma')
-rw-r--r--  hw/dma/Makefile.objs  |    6
-rw-r--r--  hw/dma/etraxfs_dma.c  |  781
-rw-r--r--  hw/dma/omap_dma.c     | 2101
-rw-r--r--  hw/dma/pxa2xx_dma.c   |  574
-rw-r--r--  hw/dma/soc_dma.c      |  366
-rw-r--r--  hw/dma/sparc32_dma.c  |  315
-rw-r--r--  hw/dma/sun4m_iommu.c  |  387
7 files changed, 4530 insertions, 0 deletions
diff --git a/hw/dma/Makefile.objs b/hw/dma/Makefile.objs
index bce31cdf87..0e65ed0d74 100644
--- a/hw/dma/Makefile.objs
+++ b/hw/dma/Makefile.objs
@@ -5,3 +5,9 @@ common-obj-$(CONFIG_PL330) += pl330.o
 common-obj-$(CONFIG_I82374) += i82374.o
 common-obj-$(CONFIG_I8257) += i8257.o
 common-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o
+common-obj-$(CONFIG_ETRAXFS) += etraxfs_dma.o
+common-obj-$(CONFIG_STP2000) += sparc32_dma.o
+common-obj-$(CONFIG_SUN4M) += sun4m_iommu.o
+
+obj-$(CONFIG_OMAP) += omap_dma.o soc_dma.o
+obj-$(CONFIG_PXA2XX) += pxa2xx_dma.o
diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c
new file mode 100644
index 0000000000..6a8c222502
--- /dev/null
+++ b/hw/dma/etraxfs_dma.c
@@ -0,0 +1,781 @@
+/*
+ * QEMU ETRAX DMA Controller.
+ *
+ * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <sys/time.h>
+#include "hw/hw.h"
+#include "exec/address-spaces.h"
+#include "qemu-common.h"
+#include "sysemu/sysemu.h"
+
+#include "hw/cris/etraxfs_dma.h"
+
+#define D(x)
+
+#define RW_DATA           (0x0 / 4)
+#define RW_SAVED_DATA     (0x58 / 4)
+#define RW_SAVED_DATA_BUF (0x5c / 4)
+#define RW_GROUP          (0x60 / 4)
+#define RW_GROUP_DOWN     (0x7c / 4)
+#define RW_CMD            (0x80 / 4)
+#define RW_CFG            (0x84 / 4)
+#define RW_STAT           (0x88 / 4)
+#define RW_INTR_MASK      (0x8c / 4)
+#define RW_ACK_INTR       (0x90 / 4)
+#define R_INTR            (0x94 / 4)
+#define R_MASKED_INTR     (0x98 / 4)
+#define RW_STREAM_CMD     (0x9c / 4)
+
+#define DMA_REG_MAX       (0x100 / 4)
+
+/* descriptors */
+
+// ------------------------------------------------------------ dma_descr_group
+/* In-guest-memory layout of a hardware group descriptor.  The bitfield
+   order mirrors the ETRAX FS hardware layout; do not reorder fields.
+   NOTE(review): layout correctness assumes host bitfield packing matches
+   the guest's — the FIXMEs about endianness elsewhere apply here too.  */
+typedef struct dma_descr_group {
+  uint32_t                      next;
+  unsigned                      eol        : 1;
+  unsigned                      tol        : 1;
+  unsigned                      bol        : 1;
+  unsigned                                 : 1;
+  unsigned                      intr       : 1;
+  unsigned                                 : 2;
+  unsigned                      en         : 1;
+  unsigned                                 : 7;
+  unsigned                      dis        : 1;
+  unsigned                      md         : 16;
+  struct dma_descr_group       *up;
+  union {
+    struct dma_descr_context   *context;
+    struct dma_descr_group     *group;
+  }                             down;
+} dma_descr_group;
+
+// ---------------------------------------------------------- dma_descr_context
+/* Context descriptor: carries the saved data-descriptor pointers that
+   channel_load_c() mirrors into RW_SAVED_DATA / RW_SAVED_DATA_BUF.  */
+typedef struct dma_descr_context {
+  uint32_t                      next;
+  unsigned                      eol        : 1;
+  unsigned                                 : 3;
+  unsigned                      intr       : 1;
+  unsigned                                 : 1;
+  unsigned                      store_mode : 1;
+  unsigned                      en         : 1;
+  unsigned                                 : 7;
+  unsigned                      dis        : 1;
+  unsigned                      md0        : 16;
+  unsigned                      md1;
+  unsigned                      md2;
+  unsigned                      md3;
+  unsigned                      md4;
+  uint32_t                      saved_data;
+  uint32_t                      saved_data_buf;
+} dma_descr_context;
+
+// ------------------------------------------------------------- dma_descr_data
+/* Data descriptor: describes one buffer segment [buf, after) of a transfer;
+   `next` links to the following descriptor, `eol` ends the list.  */
+typedef struct dma_descr_data {
+  uint32_t                      next;
+  uint32_t                      buf;
+  unsigned                      eol        : 1;
+  unsigned                                 : 2;
+  unsigned                      out_eop    : 1;
+  unsigned                      intr       : 1;
+  unsigned                      wait       : 1;
+  unsigned                                 : 2;
+  unsigned                                 : 3;
+  unsigned                      in_eop     : 1;
+  unsigned                                 : 4;
+  unsigned                      md         : 16;
+  uint32_t                      after;
+} dma_descr_data;
+
+/* Constants */
+/* Register-field encodings (regk_*) taken from the ETRAX FS hardware
+   register description.  Note several values intentionally alias
+   (e.g. regk_dma_burst == regk_dma_copy_up == 0x20).  */
+enum {
+  regk_dma_ack_pkt                         = 0x00000100,
+  regk_dma_anytime                         = 0x00000001,
+  regk_dma_array                           = 0x00000008,
+  regk_dma_burst                           = 0x00000020,
+  regk_dma_client                          = 0x00000002,
+  regk_dma_copy_next                       = 0x00000010,
+  regk_dma_copy_up                         = 0x00000020,
+  regk_dma_data_at_eol                     = 0x00000001,
+  regk_dma_dis_c                           = 0x00000010,
+  regk_dma_dis_g                           = 0x00000020,
+  regk_dma_idle                            = 0x00000001,
+  regk_dma_intern                          = 0x00000004,
+  regk_dma_load_c                          = 0x00000200,
+  regk_dma_load_c_n                        = 0x00000280,
+  regk_dma_load_c_next                     = 0x00000240,
+  regk_dma_load_d                          = 0x00000140,
+  regk_dma_load_g                          = 0x00000300,
+  regk_dma_load_g_down                     = 0x000003c0,
+  regk_dma_load_g_next                     = 0x00000340,
+  regk_dma_load_g_up                       = 0x00000380,
+  regk_dma_next_en                         = 0x00000010,
+  regk_dma_next_pkt                        = 0x00000010,
+  regk_dma_no                              = 0x00000000,
+  regk_dma_only_at_wait                    = 0x00000000,
+  regk_dma_restore                         = 0x00000020,
+  regk_dma_rst                             = 0x00000001,
+  regk_dma_running                         = 0x00000004,
+  regk_dma_rw_cfg_default                  = 0x00000000,
+  regk_dma_rw_cmd_default                  = 0x00000000,
+  regk_dma_rw_intr_mask_default            = 0x00000000,
+  regk_dma_rw_stat_default                 = 0x00000101,
+  regk_dma_rw_stream_cmd_default           = 0x00000000,
+  regk_dma_save_down                       = 0x00000020,
+  regk_dma_save_up                         = 0x00000020,
+  regk_dma_set_reg                         = 0x00000050,
+  regk_dma_set_w_size1                     = 0x00000190,
+  regk_dma_set_w_size2                     = 0x000001a0,
+  regk_dma_set_w_size4                     = 0x000001c0,
+  regk_dma_stopped                         = 0x00000002,
+  regk_dma_store_c                         = 0x00000002,
+  regk_dma_store_descr                     = 0x00000000,
+  regk_dma_store_g                         = 0x00000004,
+  regk_dma_store_md                        = 0x00000001,
+  regk_dma_sw                              = 0x00000008,
+  regk_dma_update_down                     = 0x00000020,
+  regk_dma_yes                             = 0x00000001
+};
+
+/* Channel state as reported in the low bits of RW_STAT (see dma_read).  */
+enum dma_ch_state
+{
+	RST = 1,
+	STOPPED = 2,
+	RUNNING = 4
+};
+
+/* Per-channel emulation state.  One instance per DMA channel; the
+   channel's guest-visible registers live in regs[].  */
+struct fs_dma_channel
+{
+	qemu_irq irq;
+	struct etraxfs_dma_client *client;
+
+	/* Internal status.  */
+	int stream_cmd_src;
+	enum dma_ch_state state;
+
+	unsigned int input : 1;	/* 1 = input (device->memory) channel.  */
+	unsigned int eol : 1;	/* reached end-of-list on current descr chain.  */
+
+	/* Shadow copies of the most recently loaded descriptors.  */
+	struct dma_descr_group current_g;
+	struct dma_descr_context current_c;
+	struct dma_descr_data current_d;
+
+	/* Control registers.  */
+	uint32_t regs[DMA_REG_MAX];
+};
+
+/* Whole-controller state: the MMIO window and the channel array.  */
+struct fs_dma_ctrl
+{
+	MemoryRegion mmio;
+	int nr_channels;
+	struct fs_dma_channel *channels;
+
+        QEMUBH *bh;	/* bottom half that drives pending transfers.  */
+};
+
+static void DMA_run(void *opaque);
+static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);
+
+/* Read a shadowed channel register.  */
+static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
+{
+	return ctrl->channels[c].regs[reg];
+}
+
+/* Nonzero when the guest has set the "stop" bit in RW_CFG.  */
+static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
+{
+	return channel_reg(ctrl, c, RW_CFG) & 2;
+}
+
+/* Channel is enabled only if RW_CFG bit 0 is set AND a client is attached.  */
+static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
+{
+	return (channel_reg(ctrl, c, RW_CFG) & 1)
+		&& ctrl->channels[c].client;
+}
+
+/* Map an MMIO offset to a channel index.  */
+static inline int fs_channel(hwaddr addr)
+{
+	/* Every channel has a 0x2000 ctrl register map.  */
+	return addr >> 13;
+}
+
+/* Debug-only helpers, compiled out unless USE_THIS_DEAD_CODE is defined.
+   dump_c()/dump_d() are also referenced through the D() macro elsewhere,
+   which expands to nothing, so the references disappear too.  */
+#ifdef USE_THIS_DEAD_CODE
+static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
+{
+	hwaddr addr = channel_reg(ctrl, c, RW_GROUP);
+
+	/* Load and decode. FIXME: handle endianness.  */
+	cpu_physical_memory_read (addr, 
+				  (void *) &ctrl->channels[c].current_g, 
+				  sizeof ctrl->channels[c].current_g);
+}
+
+static void dump_c(int ch, struct dma_descr_context *c)
+{
+	printf("%s ch=%d\n", __func__, ch);
+	printf("next=%x\n", c->next);
+	printf("saved_data=%x\n", c->saved_data);
+	printf("saved_data_buf=%x\n", c->saved_data_buf);
+	printf("eol=%x\n", (uint32_t) c->eol);
+}
+
+static void dump_d(int ch, struct dma_descr_data *d)
+{
+	printf("%s ch=%d\n", __func__, ch);
+	printf("next=%x\n", d->next);
+	printf("buf=%x\n", d->buf);
+	printf("after=%x\n", d->after);
+	printf("intr=%x\n", (uint32_t) d->intr);
+	printf("out_eop=%x\n", (uint32_t) d->out_eop);
+	printf("in_eop=%x\n", (uint32_t) d->in_eop);
+	printf("eol=%x\n", (uint32_t) d->eol);
+}
+#endif
+
+/* Fetch the context descriptor addressed by RW_GROUP_DOWN from guest
+   memory and mirror its saved-data pointers into the channel registers.  */
+static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
+{
+	hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+	/* Load and decode. FIXME: handle endianness.  */
+	cpu_physical_memory_read (addr, 
+				  (void *) &ctrl->channels[c].current_c, 
+				  sizeof ctrl->channels[c].current_c);
+
+	D(dump_c(c, &ctrl->channels[c].current_c));
+	/* I guess this should update the current pos.  */
+	ctrl->channels[c].regs[RW_SAVED_DATA] =
+		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
+	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
+}
+
+/* Fetch the data descriptor addressed by RW_SAVED_DATA from guest memory
+   into current_d, and expose that address through RW_DATA.  */
+static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
+{
+	hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+	/* Load and decode. FIXME: handle endianness.  */
+	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+	cpu_physical_memory_read (addr,
+				  (void *) &ctrl->channels[c].current_d, 
+				  sizeof ctrl->channels[c].current_d);
+
+	D(dump_d(c, &ctrl->channels[c].current_d));
+	ctrl->channels[c].regs[RW_DATA] = addr;
+}
+
+/* Write the shadow context descriptor (current_c) back to guest memory at
+   the address held in RW_GROUP_DOWN.  */
+static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
+{
+	hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+	/* Encode and store. FIXME: handle endianness.  */
+	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+	/* Bug fix: this function stores the CONTEXT descriptor, so the debug
+	   trace must dump current_c via dump_c(), not the data descriptor.  */
+	D(dump_c(c, &ctrl->channels[c].current_c));
+	cpu_physical_memory_write (addr,
+				  (void *) &ctrl->channels[c].current_c,
+				  sizeof ctrl->channels[c].current_c);
+}
+
+/* Write the shadow data descriptor (current_d) back to guest memory at the
+   address held in RW_SAVED_DATA.  */
+static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
+{
+	hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+	/* Encode and store. FIXME: handle endianness.  */
+	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+	cpu_physical_memory_write (addr,
+				  (void *) &ctrl->channels[c].current_d, 
+				  sizeof ctrl->channels[c].current_d);
+}
+
+/* Stop a channel.  Currently a no-op (state/register side effects are
+   handled by the callers); see FIXME.  */
+static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
+{
+	/* FIXME:  */
+}
+
+/* Mark the channel RUNNING and, for output channels, immediately push
+   data to the client.  Always reschedules the controller bottom half so
+   remaining work continues from the main loop.  */
+static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
+{
+	if (ctrl->channels[c].client)
+	{
+		ctrl->channels[c].eol = 0;
+		ctrl->channels[c].state = RUNNING;
+		if (!ctrl->channels[c].input)
+			channel_out_run(ctrl, c);
+	} else
+		printf("WARNING: starting DMA ch %d with no client\n", c);
+
+        qemu_bh_schedule_idle(ctrl->bh);
+}
+
+/* Handle a "continue" command (RW_CMD bit 0): reload the current data
+   descriptor and, if the guest cleared its eol flag after we stopped at
+   end-of-list, step to the next descriptor and restart the channel.  */
+static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
+{
+	if (!channel_en(ctrl, c) 
+	    || channel_stopped(ctrl, c)
+	    || ctrl->channels[c].state != RUNNING
+	    /* Only reload the current data descriptor if it has eol set.  */
+	    || !ctrl->channels[c].current_d.eol) {
+		D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n", 
+			 c, ctrl->channels[c].state,
+			 channel_stopped(ctrl, c),
+			 channel_en(ctrl,c),
+			 ctrl->channels[c].eol));
+		D(dump_d(c, &ctrl->channels[c].current_d));
+		return;
+	}
+
+	/* Reload the current descriptor.  */
+	channel_load_d(ctrl, c);
+
+	/* If the current descriptor cleared the eol flag and we had already
+	   reached eol state, do the continue.  */
+	if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
+		D(printf("continue %d ok %x\n", c,
+			 ctrl->channels[c].current_d.next));
+		ctrl->channels[c].regs[RW_SAVED_DATA] =
+			(uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
+		channel_load_d(ctrl, c);
+		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+			(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+
+		channel_start(ctrl, c);
+	}
+	/* NOTE(review): this also runs after the branch above, overwriting
+	   RW_SAVED_DATA_BUF with whatever channel_start left there — looks
+	   redundant in the taken-branch case, but is kept as-is.  */
+	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+		(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+}
+
+/* Decode a write to RW_STREAM_CMD.  Only the low 10 bits are a command.
+   NOTE(review): the regk_dma_load_* tests are bitmask tests, not equality
+   — e.g. (cmd & regk_dma_load_d) matches any command containing bits
+   0x140; presumably intentional given the encoding, but verify against
+   the hardware spec.  */
+static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
+{
+	unsigned int cmd = v & ((1 << 10) - 1);
+
+	D(printf("%s ch=%d cmd=%x\n",
+		 __func__, c, cmd));
+	if (cmd & regk_dma_load_d) {
+		channel_load_d(ctrl, c);
+		if (cmd & regk_dma_burst)
+			channel_start(ctrl, c);
+	}
+
+	if (cmd & regk_dma_load_c) {
+		channel_load_c(ctrl, c);
+	}
+}
+
+/* Recompute R_INTR/R_MASKED_INTR from the ack and mask registers and
+   drive the channel's IRQ line accordingly.  Writing RW_ACK_INTR clears
+   the acked bits from R_INTR.  */
+static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
+{
+	D(printf("%s %d\n", __func__, c));
+        ctrl->channels[c].regs[R_INTR] &=
+		~(ctrl->channels[c].regs[RW_ACK_INTR]);
+
+        ctrl->channels[c].regs[R_MASKED_INTR] =
+		ctrl->channels[c].regs[R_INTR]
+		& ctrl->channels[c].regs[RW_INTR_MASK];
+
+	D(printf("%s: chan=%d masked_intr=%x\n", __func__, 
+		 c,
+		 ctrl->channels[c].regs[R_MASKED_INTR]));
+
+        qemu_set_irq(ctrl->channels[c].irq,
+		     !!ctrl->channels[c].regs[R_MASKED_INTR]);
+}
+
+/* Drain an output (memory->device) channel: walk the data-descriptor
+   chain, pushing each [buf, after) segment to the attached client in
+   chunks of at most 2 KiB, until the end-of-list descriptor is reached.
+   Returns 1 if any work was done, 0 if the channel was already at eol.
+   Metadata is pushed to the client before the first segment and again
+   after each out_eop descriptor.  */
+static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
+{
+	uint32_t len;
+	uint32_t saved_data_buf;
+	unsigned char buf[2 * 1024];	/* bounce buffer for guest memory.  */
+
+	struct dma_context_metadata meta;
+	bool send_context = true;
+
+	if (ctrl->channels[c].eol)
+		return 0;
+
+	do {
+		bool out_eop;
+		D(printf("ch=%d buf=%x after=%x\n",
+			 c,
+			 (uint32_t)ctrl->channels[c].current_d.buf,
+			 (uint32_t)ctrl->channels[c].current_d.after));
+
+		if (send_context) {
+			if (ctrl->channels[c].client->client.metadata_push) {
+				meta.metadata = ctrl->channels[c].current_d.md;
+				ctrl->channels[c].client->client.metadata_push(
+					ctrl->channels[c].client->client.opaque,
+					&meta);
+			}
+			send_context = false;
+		}
+
+		channel_load_d(ctrl, c);
+		saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+		len = (uint32_t)(unsigned long)
+			ctrl->channels[c].current_d.after;
+		len -= saved_data_buf;
+
+		/* Clamp to the bounce buffer; the loop iterates until the
+		   whole segment has been pushed.  */
+		if (len > sizeof buf)
+			len = sizeof buf;
+		cpu_physical_memory_read (saved_data_buf, buf, len);
+
+		/* Signal end-of-packet only on the chunk that completes a
+		   descriptor with out_eop set.  */
+		out_eop = ((saved_data_buf + len) ==
+		           ctrl->channels[c].current_d.after) &&
+			ctrl->channels[c].current_d.out_eop;
+
+		D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
+		         saved_data_buf, len, out_eop));
+
+		if (ctrl->channels[c].client->client.push)
+			ctrl->channels[c].client->client.push(
+				ctrl->channels[c].client->client.opaque,
+				buf, len, out_eop);
+		else
+			printf("WARNING: DMA ch%d dataloss,"
+			       " no attached client.\n", c);
+
+		saved_data_buf += len;
+
+		if (saved_data_buf == (uint32_t)(unsigned long)
+				ctrl->channels[c].current_d.after) {
+			/* Done. Step to next.  */
+			if (ctrl->channels[c].current_d.out_eop) {
+				send_context = true;
+			}
+			if (ctrl->channels[c].current_d.intr) {
+				/* data intr.  */
+				D(printf("signal intr %d eol=%d\n",
+					len, ctrl->channels[c].current_d.eol));
+				ctrl->channels[c].regs[R_INTR] |= (1 << 2);
+				channel_update_irq(ctrl, c);
+			}
+			channel_store_d(ctrl, c);
+			if (ctrl->channels[c].current_d.eol) {
+				D(printf("channel %d EOL\n", c));
+				ctrl->channels[c].eol = 1;
+
+				/* Mark the context as disabled.  */
+				ctrl->channels[c].current_c.dis = 1;
+				channel_store_c(ctrl, c);
+
+				channel_stop(ctrl, c);
+			} else {
+				ctrl->channels[c].regs[RW_SAVED_DATA] =
+					(uint32_t)(unsigned long)ctrl->
+						channels[c].current_d.next;
+				/* Load new descriptor.  */
+				channel_load_d(ctrl, c);
+				saved_data_buf = (uint32_t)(unsigned long)
+					ctrl->channels[c].current_d.buf;
+			}
+
+			ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+							saved_data_buf;
+			D(dump_d(c, &ctrl->channels[c].current_d));
+		}
+		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+	} while (!ctrl->channels[c].eol);
+	return 1;
+}
+
+/* Accept up to buflen bytes from a client into an input channel: write
+   them to guest memory at the current descriptor position, and when the
+   descriptor fills (or eop is signalled) finalize it — update `after`,
+   raise interrupts, store it back and advance to the next descriptor.
+   Returns the number of bytes consumed, or 0 if the channel is at eol.  */
+static int channel_in_process(struct fs_dma_ctrl *ctrl, int c, 
+			      unsigned char *buf, int buflen, int eop)
+{
+	uint32_t len;
+	uint32_t saved_data_buf;
+
+	if (ctrl->channels[c].eol == 1)
+		return 0;
+
+	channel_load_d(ctrl, c);
+	saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+	len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
+	len -= saved_data_buf;
+	
+	if (len > buflen)
+		len = buflen;
+
+	cpu_physical_memory_write (saved_data_buf, buf, len);
+	saved_data_buf += len;
+
+	if (saved_data_buf ==
+	    (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
+	    || eop) {
+		uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
+
+		D(printf("in dscr end len=%d\n", 
+			 ctrl->channels[c].current_d.after
+			 - ctrl->channels[c].current_d.buf));
+		/* Record actual received length in the descriptor.  */
+		ctrl->channels[c].current_d.after = saved_data_buf;
+
+		/* Done. Step to next.  */
+		if (ctrl->channels[c].current_d.intr) {
+			/* TODO: signal eop to the client.  */
+			/* data intr.  */
+			ctrl->channels[c].regs[R_INTR] |= 3;
+		}
+		if (eop) {
+			ctrl->channels[c].current_d.in_eop = 1;
+			ctrl->channels[c].regs[R_INTR] |= 8;
+		}
+		/* Only touch the IRQ line if some interrupt bit changed.  */
+		if (r_intr != ctrl->channels[c].regs[R_INTR])
+			channel_update_irq(ctrl, c);
+
+		channel_store_d(ctrl, c);
+		D(dump_d(c, &ctrl->channels[c].current_d));
+
+		if (ctrl->channels[c].current_d.eol) {
+			D(printf("channel %d EOL\n", c));
+			ctrl->channels[c].eol = 1;
+
+			/* Mark the context as disabled.  */
+			ctrl->channels[c].current_c.dis = 1;
+			channel_store_c(ctrl, c);
+
+			channel_stop(ctrl, c);
+		} else {
+			ctrl->channels[c].regs[RW_SAVED_DATA] =
+				(uint32_t)(unsigned long)ctrl->
+					channels[c].current_d.next;
+			/* Load new descriptor.  */
+			channel_load_d(ctrl, c);
+			saved_data_buf = (uint32_t)(unsigned long)
+				ctrl->channels[c].current_d.buf;
+		}
+	}
+
+	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+	return len;
+}
+
+/* Ask the client of an input channel to produce data (it will call back
+   into etraxfs_dmac_input).  Returns 1 if a pull hook exists, else 0.  */
+static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
+{
+	if (ctrl->channels[c].client->client.pull) {
+		ctrl->channels[c].client->client.pull(
+			ctrl->channels[c].client->client.opaque);
+		return 1;
+	} else
+		return 0;
+}
+
+/* Fatal-error handler for non-32-bit reads; hw_error does not return.  */
+static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
+{
+        hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
+        return 0;
+}
+
+/* MMIO read handler.  Only 32-bit accesses are supported; RW_STAT is
+   synthesized from internal state, everything else comes from regs[].  */
+static uint64_t
+dma_read(void *opaque, hwaddr addr, unsigned int size)
+{
+        struct fs_dma_ctrl *ctrl = opaque;
+	int c;
+	uint32_t r = 0;
+
+	if (size != 4) {
+		dma_rinvalid(opaque, addr);
+	}
+
+	/* Make addr relative to this channel and bounded to nr regs.  */
+	c = fs_channel(addr);
+	addr &= 0xff;
+	addr >>= 2;
+	switch (addr)
+	{
+		case RW_STAT:
+			/* Compose status: state in bits 0-2, eol in bit 5,
+			   stream command source in bits 8+.  */
+			r = ctrl->channels[c].state & 7;
+			r |= ctrl->channels[c].eol << 5;
+			r |= ctrl->channels[c].stream_cmd_src << 8;
+			break;
+
+		default:
+			r = ctrl->channels[c].regs[addr];
+			D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
+				  __func__, c, addr));
+			break;
+	}
+	return r;
+}
+
+/* Fatal-error handler for non-32-bit writes; hw_error does not return.  */
+static void
+dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
+{
+        hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
+}
+
+/* Derive the channel state from RW_CFG: bit 1 stops the channel, a clear
+   bit 0 resets it.  RUNNING is entered via channel_start, not here.  */
+static void
+dma_update_state(struct fs_dma_ctrl *ctrl, int c)
+{
+	if (ctrl->channels[c].regs[RW_CFG] & 2)
+		ctrl->channels[c].state = STOPPED;
+	if (!(ctrl->channels[c].regs[RW_CFG] & 1))
+		ctrl->channels[c].state = RST;
+}
+
+/* MMIO write handler.  Only 32-bit accesses are supported.  Most
+   registers are plain latches; RW_CFG, RW_CMD, the interrupt registers
+   and RW_STREAM_CMD have side effects handled below.  */
+static void
+dma_write(void *opaque, hwaddr addr,
+	  uint64_t val64, unsigned int size)
+{
+        struct fs_dma_ctrl *ctrl = opaque;
+	uint32_t value = val64;
+	int c;
+
+	if (size != 4) {
+		dma_winvalid(opaque, addr, value);
+	}
+
+        /* Make addr relative to this channel and bounded to nr regs.  */
+	c = fs_channel(addr);
+        addr &= 0xff;
+        addr >>= 2;
+        switch (addr)
+	{
+		case RW_DATA:
+			ctrl->channels[c].regs[addr] = value;
+			break;
+
+		case RW_CFG:
+			ctrl->channels[c].regs[addr] = value;
+			dma_update_state(ctrl, c);
+			break;
+		case RW_CMD:
+			/* continue.  */
+			if (value & ~1)
+				printf("Invalid store to ch=%d RW_CMD %x\n",
+				       c, value);
+			ctrl->channels[c].regs[addr] = value;
+			channel_continue(ctrl, c);
+			break;
+
+		case RW_SAVED_DATA:
+		case RW_SAVED_DATA_BUF:
+		case RW_GROUP:
+		case RW_GROUP_DOWN:
+			ctrl->channels[c].regs[addr] = value;
+			break;
+
+		case RW_ACK_INTR:
+		case RW_INTR_MASK:
+			ctrl->channels[c].regs[addr] = value;
+			channel_update_irq(ctrl, c);
+			/* RW_ACK_INTR is write-one-to-clear: consume the ack
+			   bits after they have been applied.  */
+			if (addr == RW_ACK_INTR)
+				ctrl->channels[c].regs[RW_ACK_INTR] = 0;
+			break;
+
+		case RW_STREAM_CMD:
+			if (value & ~1023)
+				printf("Invalid store to ch=%d "
+				       "RW_STREAMCMD %x\n",
+				       c, value);
+			ctrl->channels[c].regs[addr] = value;
+			D(printf("stream_cmd ch=%d\n", c));
+			channel_stream_cmd(ctrl, c, value);
+			break;
+
+	        default:
+			D(printf ("%s c=%d " TARGET_FMT_plx "\n",
+				__func__, c, addr));
+			break;
+        }
+}
+
+/* MMIO dispatch table; valid sizes 1-4 but the handlers hw_error on
+   anything other than 4-byte accesses.  */
+static const MemoryRegionOps dma_ops = {
+	.read = dma_read,
+	.write = dma_write,
+	.endianness = DEVICE_NATIVE_ENDIAN,
+	.valid = {
+		.min_access_size = 1,
+		.max_access_size = 4
+	}
+};
+
+/* Run every RUNNING channel once (called from the bottom half).
+   Returns the number of channels that did work, so the caller knows
+   whether to reschedule.  */
+static int etraxfs_dmac_run(void *opaque)
+{
+	struct fs_dma_ctrl *ctrl = opaque;
+	int i;
+	int p = 0;
+
+	for (i = 0; 
+	     i < ctrl->nr_channels;
+	     i++)
+	{
+		if (ctrl->channels[i].state == RUNNING)
+		{
+			if (ctrl->channels[i].input) {
+				p += channel_in_run(ctrl, i);
+			} else {
+				p += channel_out_run(ctrl, i);
+			}
+		}
+	}
+	return p;
+}
+
+/* Public entry for clients feeding an input channel; forwards to
+   channel_in_process and returns the number of bytes consumed.  */
+int etraxfs_dmac_input(struct etraxfs_dma_client *client, 
+		       void *buf, int len, int eop)
+{
+	return channel_in_process(client->ctrl, client->channel, 
+				  buf, len, eop);
+}
+
+/* Connect an IRQ line with a channel.  */
+void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
+{
+	struct fs_dma_ctrl *ctrl = opaque;
+	ctrl->channels[c].irq = *line;
+	ctrl->channels[c].input = input;
+}
+
+/* Attach a client to channel c and give the client back-pointers so it
+   can call etraxfs_dmac_input later.  */
+void etraxfs_dmac_connect_client(void *opaque, int c, 
+				 struct etraxfs_dma_client *cl)
+{
+	struct fs_dma_ctrl *ctrl = opaque;
+	cl->ctrl = ctrl;
+	cl->channel = c;
+	ctrl->channels[c].client = cl;
+}
+
+
+/* Bottom-half handler: run the channels while the VM is running and
+   reschedule as long as any channel made progress.  */
+static void DMA_run(void *opaque)
+{
+    struct fs_dma_ctrl *etraxfs_dmac = opaque;
+    int p = 1;
+
+    if (runstate_is_running())
+        p = etraxfs_dmac_run(etraxfs_dmac);
+
+    if (p)
+        qemu_bh_schedule_idle(etraxfs_dmac->bh);
+}
+
+/* Allocate and map the DMA controller: nr_channels register banks of
+   0x2000 bytes each at `base`.  Returns the opaque controller handle
+   used by the etraxfs_dmac_connect* functions.  */
+void *etraxfs_dmac_init(hwaddr base, int nr_channels)
+{
+	struct fs_dma_ctrl *ctrl = NULL;
+
+	ctrl = g_malloc0(sizeof *ctrl);
+
+        ctrl->bh = qemu_bh_new(DMA_run, ctrl);
+
+	ctrl->nr_channels = nr_channels;
+	ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
+
+	memory_region_init_io(&ctrl->mmio, &dma_ops, ctrl, "etraxfs-dma",
+			      nr_channels * 0x2000);
+	memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
+
+	return ctrl;
+}
diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c
new file mode 100644
index 0000000000..184fcee1a7
--- /dev/null
+++ b/hw/dma/omap_dma.c
@@ -0,0 +1,2101 @@
+/*
+ * TI OMAP DMA gigacell.
+ *
+ * Copyright (C) 2006-2008 Andrzej Zaborowski  <balrog@zabor.org>
+ * Copyright (C) 2007-2008 Lauro Ramos Venancio  <lauro.venancio@indt.org.br>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "hw/arm/omap.h"
+#include "hw/irq.h"
+#include "hw/arm/soc_dma.h"
+
+/* Per-channel state of the OMAP DMA gigacell.  Two-element arrays are
+   indexed 0 = source port, 1 = destination port.  */
+struct omap_dma_channel_s {
+    /* transfer data */
+    int burst[2];
+    int pack[2];
+    int endian[2];
+    int endian_lock[2];
+    int translate[2];
+    enum omap_dma_port port[2];
+    hwaddr addr[2];
+    omap_dma_addressing_t mode[2];
+    uint32_t elements;
+    uint16_t frames;
+    int32_t frame_index[2];
+    int16_t element_index[2];
+    int data_type;
+
+    /* transfer type */
+    int transparent_copy;
+    int constant_fill;
+    uint32_t color;
+    int prefetch;
+
+    /* auto init and linked channel data */
+    int end_prog;
+    int repeat;
+    int auto_init;
+    int link_enabled;
+    int link_next_ch;
+
+    /* interruption data */
+    int interrupts;
+    int status;
+    int cstatus;
+
+    /* state data */
+    int active;
+    int enable;
+    int sync;
+    int src_sync;
+    int pending_request;
+    int waiting_end_prog;
+    uint16_t cpc;
+    int set_update;
+
+    /* sync type */
+    int fs;
+    int bs;
+
+    /* compatibility */
+    int omap_3_1_compatible_disable;
+
+    qemu_irq irq;
+    struct omap_dma_channel_s *sibling;
+
+    /* Snapshot of the register set taken when a transfer starts
+       (loaded by omap_dma_channel_load).  */
+    struct omap_dma_reg_set_s {
+        hwaddr src, dest;
+        int frame;
+        int element;
+        int pck_element;
+        int frame_delta[2];
+        int elem_delta[2];
+        int frames;
+        int elements;
+        int pck_elements;
+    } active_set;
+
+    struct soc_dma_ch_s *dma;
+
+    /* unused parameters */
+    int write_mode;
+    int priority;
+    int interleave_disabled;
+    int type;
+    int suspend;
+    int buf_disable;
+};
+
+/* Whole-controller state; intr_update dispatches to the model-specific
+   interrupt-update routine (OMAP 3.1 vs 3.2 mapping).  */
+struct omap_dma_s {
+    struct soc_dma_s *dma;
+    MemoryRegion iomem;
+
+    struct omap_mpu_state_s *mpu;
+    omap_clk clk;
+    qemu_irq irq[4];
+    void (*intr_update)(struct omap_dma_s *s);
+    enum omap_dma_model model;
+    int omap_3_1_mapping_disabled;
+
+    uint32_t gcr;
+    uint32_t ocp;
+    uint32_t caps[5];
+    uint32_t irqen[4];
+    uint32_t irqstat[4];
+
+    int chans;
+    struct omap_dma_channel_s ch[32];
+    struct omap_dma_lcd_channel_s lcd_ch;
+};
+
+/* Interrupts */
+/* Bits of omap_dma_channel_s.status / .interrupts.  */
+#define TIMEOUT_INTR    (1 << 0)
+#define EVENT_DROP_INTR (1 << 1)
+#define HALF_FRAME_INTR (1 << 2)
+#define END_FRAME_INTR  (1 << 3)
+#define LAST_FRAME_INTR (1 << 4)
+#define END_BLOCK_INTR  (1 << 5)
+#define SYNC            (1 << 6)
+#define END_PKT_INTR	(1 << 7)
+#define TRANS_ERR_INTR	(1 << 8)
+#define MISALIGN_INTR	(1 << 11)
+
+/* Dispatch to the model-specific interrupt-update handler.  */
+static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
+{
+    return s->intr_update(s);
+}
+
+/* Latch the channel registers into the active transfer set and compute
+   per-port element/frame address deltas from the addressing mode, then
+   classify each port's access pattern for the soc_dma layer (constant /
+   linear / other) and push the update with soc_dma_ch_update.  */
+static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
+{
+    struct omap_dma_reg_set_s *a = &ch->active_set;
+    int i, normal;
+    int omap_3_1 = !ch->omap_3_1_compatible_disable;
+
+    /*
+     * TODO: verify address ranges and alignment
+     * TODO: port endianness
+     */
+
+    a->src = ch->addr[0];
+    a->dest = ch->addr[1];
+    a->frames = ch->frames;
+    a->elements = ch->elements;
+    a->pck_elements = ch->frame_index[!ch->src_sync];
+    a->frame = 0;
+    a->element = 0;
+    a->pck_element = 0;
+
+    if (unlikely(!ch->elements || !ch->frames)) {
+        printf("%s: bad DMA request\n", __FUNCTION__);
+        return;
+    }
+
+    /* In OMAP 3.1 compatibility mode both ports share index set 0.  */
+    for (i = 0; i < 2; i ++)
+        switch (ch->mode[i]) {
+        case constant:
+            a->elem_delta[i] = 0;
+            a->frame_delta[i] = 0;
+            break;
+        case post_incremented:
+            a->elem_delta[i] = ch->data_type;
+            a->frame_delta[i] = 0;
+            break;
+        case single_index:
+            a->elem_delta[i] = ch->data_type +
+                    ch->element_index[omap_3_1 ? 0 : i] - 1;
+            a->frame_delta[i] = 0;
+            break;
+        case double_index:
+            a->elem_delta[i] = ch->data_type +
+                    ch->element_index[omap_3_1 ? 0 : i] - 1;
+            a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] -
+                    ch->element_index[omap_3_1 ? 0 : i];
+            break;
+        default:
+            break;
+        }
+
+    normal = !ch->transparent_copy && !ch->constant_fill &&
+            /* FIFO is big-endian so either (ch->endian[n] == 1) OR
+             * (ch->endian_lock[n] == 1) mean no endianism conversion.  */
+            (ch->endian[0] | ch->endian_lock[0]) ==
+            (ch->endian[1] | ch->endian_lock[1]);
+    for (i = 0; i < 2; i ++) {
+        /* TODO: for a->frame_delta[i] > 0 still use the fast path, just
+         * limit min_elems in omap_dma_transfer_setup to the nearest frame
+         * end.  */
+        if (!a->elem_delta[i] && normal &&
+                        (a->frames == 1 || !a->frame_delta[i]))
+            ch->dma->type[i] = soc_dma_access_const;
+        else if (a->elem_delta[i] == ch->data_type && normal &&
+                        (a->frames == 1 || !a->frame_delta[i]))
+            ch->dma->type[i] = soc_dma_access_linear;
+        else
+            ch->dma->type[i] = soc_dma_access_other;
+
+        ch->dma->vaddr[i] = ch->addr[i];
+    }
+    soc_dma_ch_update(ch->dma);
+}
+
+/* Transition a channel to active: optionally (re)load the register set,
+   raise the soc_dma request, and set SYNC status for synchronized
+   channels.  No-op if already active.  */
+static void omap_dma_activate_channel(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch)
+{
+    if (!ch->active) {
+        if (ch->set_update) {
+            /* It's not clear when the active set is supposed to be
+             * loaded from registers.  We're already loading it when the
+             * channel is enabled, and for some guests this is not enough
+             * but that may be also because of a race condition (no
+             * delays in qemu) in the guest code, which we're just
+             * working around here.  */
+            omap_dma_channel_load(ch);
+            ch->set_update = 0;
+        }
+
+        ch->active = 1;
+        soc_dma_set_request(ch->dma, 1);
+        if (ch->sync)
+            ch->status |= SYNC;
+    }
+}
+
+/* Try to deactivate a channel; stays active if a request is pending or
+   if it is hardware-synchronized with its DRQ line still asserted.
+   NOTE(review): `1 << ch->sync` is an int shift — if drqbmp is wider
+   than 32 bits and ch->sync can exceed 31, this shift is UB; confirm
+   against struct soc_dma_s (not visible here).  */
+static void omap_dma_deactivate_channel(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch)
+{
+    /* Update cpc */
+    ch->cpc = ch->active_set.dest & 0xffff;
+
+    if (ch->pending_request && !ch->waiting_end_prog && ch->enable) {
+        /* Don't deactivate the channel */
+        ch->pending_request = 0;
+        return;
+    }
+
+    /* Don't deactive the channel if it is synchronized and the DMA request is
+       active */
+    if (ch->sync && ch->enable && (s->dma->drqbmp & (1 << ch->sync)))
+        return;
+
+    if (ch->active) {
+        ch->active = 0;
+        ch->status &= ~SYNC;
+        soc_dma_set_request(ch->dma, 0);
+    }
+}
+
+static void omap_dma_enable_channel(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch)
+{
+    if (!ch->enable) {
+        ch->enable = 1;
+        ch->waiting_end_prog = 0;
+        omap_dma_channel_load(ch);
+        /* TODO: theoretically if ch->sync && ch->prefetch &&
+         * !s->dma->drqbmp[ch->sync], we should also activate and fetch
+         * from source and then stall until signalled.  */
+        if ((!ch->sync) || (s->dma->drqbmp & (1 << ch->sync)))
+            omap_dma_activate_channel(s, ch);
+    }
+}
+
+static void omap_dma_disable_channel(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch)
+{
+    if (ch->enable) {
+        ch->enable = 0;
+        /* Discard any pending request */
+        ch->pending_request = 0;
+        omap_dma_deactivate_channel(s, ch);
+    }
+}
+
+static void omap_dma_channel_end_prog(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch)
+{
+    if (ch->waiting_end_prog) {
+        ch->waiting_end_prog = 0;
+        if (!ch->sync || ch->pending_request) {
+            ch->pending_request = 0;
+            omap_dma_activate_channel(s, ch);
+        }
+    }
+}
+
+static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s)
+{
+    struct omap_dma_channel_s *ch = s->ch;
+
+    /* First three interrupts are shared between two channels each. */
+    if (ch[0].status | ch[6].status)
+        qemu_irq_raise(ch[0].irq);
+    if (ch[1].status | ch[7].status)
+        qemu_irq_raise(ch[1].irq);
+    if (ch[2].status | ch[8].status)
+        qemu_irq_raise(ch[2].irq);
+    if (ch[3].status)
+        qemu_irq_raise(ch[3].irq);
+    if (ch[4].status)
+        qemu_irq_raise(ch[4].irq);
+    if (ch[5].status)
+        qemu_irq_raise(ch[5].irq);
+}
+
+static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s)
+{
+    struct omap_dma_channel_s *ch = s->ch;
+    int i;
+
+    for (i = s->chans; i; ch ++, i --)
+        if (ch->status)
+            qemu_irq_raise(ch->irq);
+}
+
+static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s)
+{
+    s->omap_3_1_mapping_disabled = 0;
+    s->chans = 9;
+    s->intr_update = omap_dma_interrupts_3_1_update;
+}
+
+static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s)
+{
+    s->omap_3_1_mapping_disabled = 1;
+    s->chans = 16;
+    s->intr_update = omap_dma_interrupts_3_2_update;
+}
+
+static void omap_dma_process_request(struct omap_dma_s *s, int request)
+{
+    int channel;
+    int drop_event = 0;
+    struct omap_dma_channel_s *ch = s->ch;
+
+    for (channel = 0; channel < s->chans; channel ++, ch ++) {
+        if (ch->enable && ch->sync == request) {
+            if (!ch->active)
+                omap_dma_activate_channel(s, ch);
+            else if (!ch->pending_request)
+                ch->pending_request = 1;
+            else {
+                /* Request collision */
+                /* Second request received while processing other request */
+                ch->status |= EVENT_DROP_INTR;
+                drop_event = 1;
+            }
+        }
+    }
+
+    if (drop_event)
+        omap_dma_interrupts_update(s);
+}
+
/* Slow-path transfer routine: moves dma->bytes of data one element
 * (ch->data_type bytes) at a time between the physical addresses of
 * the channel's active register set, applying constant-fill and
 * transparent-copy modes.  In the normal (non-MULTI_REQ) build this
 * loop only advances element/frame counters and addresses; interrupt
 * flags and channel deactivation are handled up front in
 * omap_dma_transfer_setup.  */
static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
{
    uint8_t value[4];
    struct omap_dma_channel_s *ch = dma->opaque;
    struct omap_dma_reg_set_s *a = &ch->active_set;
    int bytes = dma->bytes;
#ifdef MULTI_REQ
    uint16_t status = ch->status;
#endif

    do {
        /* Transfer a single element */
        /* FIXME: check the endianness */
        if (!ch->constant_fill)
            cpu_physical_memory_read(a->src, value, ch->data_type);
        else
            *(uint32_t *) value = ch->color;

        /* In transparent-copy mode, elements matching the colour key
         * are read but not written out.  */
        if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
            cpu_physical_memory_write(a->dest, value, ch->data_type);

        a->src += a->elem_delta[0];
        a->dest += a->elem_delta[1];
        a->element ++;

#ifndef MULTI_REQ
        if (a->element == a->elements) {
            /* End of Frame */
            a->element = 0;
            a->src += a->frame_delta[0];
            a->dest += a->frame_delta[1];
            a->frame ++;

            /* If the channel is async, update cpc */
            if (!ch->sync)
                ch->cpc = a->dest & 0xffff;
        }
    } while ((bytes -= ch->data_type));
#else
        /* NOTE(review): this MULTI_REQ variant references 's', which is
         * not declared in this scope, so it does not compile as-is --
         * the code is kept only as documentation of the intended
         * per-element interrupt/deactivation behaviour.  */
        /* If the channel is element synchronized, deactivate it */
        if (ch->sync && !ch->fs && !ch->bs)
            omap_dma_deactivate_channel(s, ch);

        /* If it is the last frame, set the LAST_FRAME interrupt */
        if (a->element == 1 && a->frame == a->frames - 1)
            if (ch->interrupts & LAST_FRAME_INTR)
                ch->status |= LAST_FRAME_INTR;

        /* If the half of the frame was reached, set the HALF_FRAME
           interrupt */
        if (a->element == (a->elements >> 1))
            if (ch->interrupts & HALF_FRAME_INTR)
                ch->status |= HALF_FRAME_INTR;

        if (ch->fs && ch->bs) {
            a->pck_element ++;
            /* Check if a full packet has beed transferred.  */
            if (a->pck_element == a->pck_elements) {
                a->pck_element = 0;

                /* Set the END_PKT interrupt */
                if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
                    ch->status |= END_PKT_INTR;

                /* If the channel is packet-synchronized, deactivate it */
                if (ch->sync)
                    omap_dma_deactivate_channel(s, ch);
            }
        }

        if (a->element == a->elements) {
            /* End of Frame */
            a->element = 0;
            a->src += a->frame_delta[0];
            a->dest += a->frame_delta[1];
            a->frame ++;

            /* If the channel is frame synchronized, deactivate it */
            if (ch->sync && ch->fs && !ch->bs)
                omap_dma_deactivate_channel(s, ch);

            /* If the channel is async, update cpc */
            if (!ch->sync)
                ch->cpc = a->dest & 0xffff;

            /* Set the END_FRAME interrupt */
            if (ch->interrupts & END_FRAME_INTR)
                ch->status |= END_FRAME_INTR;

            if (a->frame == a->frames) {
                /* End of Block */
                /* Disable the channel */

                if (ch->omap_3_1_compatible_disable) {
                    omap_dma_disable_channel(s, ch);
                    if (ch->link_enabled)
                        omap_dma_enable_channel(s,
                                        &s->ch[ch->link_next_ch]);
                } else {
                    if (!ch->auto_init)
                        omap_dma_disable_channel(s, ch);
                    else if (ch->repeat || ch->end_prog)
                        omap_dma_channel_load(ch);
                    else {
                        ch->waiting_end_prog = 1;
                        omap_dma_deactivate_channel(s, ch);
                    }
                }

                if (ch->interrupts & END_BLOCK_INTR)
                    ch->status |= END_BLOCK_INTR;
            }
        }
    } while (status == ch->status && ch->active);

    omap_dma_interrupts_update(s);
#endif
}
+
/* Indices into the elements[] array of omap_dma_transfer_setup: one
 * slot per condition that can terminate or punctuate a transfer run
 * (sync points, frame/packet/block boundaries, enabled interrupts).  */
enum {
    omap_dma_intr_element_sync,
    omap_dma_intr_last_frame,
    omap_dma_intr_half_frame,
    omap_dma_intr_frame,
    omap_dma_intr_frame_sync,
    omap_dma_intr_packet,
    omap_dma_intr_packet_sync,
    omap_dma_intr_block,
    __omap_dma_intr_last,
};
+
+static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
+{
+    struct omap_dma_port_if_s *src_p, *dest_p;
+    struct omap_dma_reg_set_s *a;
+    struct omap_dma_channel_s *ch = dma->opaque;
+    struct omap_dma_s *s = dma->dma->opaque;
+    int frames, min_elems, elements[__omap_dma_intr_last];
+
+    a = &ch->active_set;
+
+    src_p = &s->mpu->port[ch->port[0]];
+    dest_p = &s->mpu->port[ch->port[1]];
+    if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
+                    (!dest_p->addr_valid(s->mpu, a->dest))) {
+#if 0
+        /* Bus time-out */
+        if (ch->interrupts & TIMEOUT_INTR)
+            ch->status |= TIMEOUT_INTR;
+        omap_dma_deactivate_channel(s, ch);
+        continue;
+#endif
+        printf("%s: Bus time-out in DMA%i operation\n",
+                        __FUNCTION__, dma->num);
+    }
+
+    min_elems = INT_MAX;
+
+    /* Check all the conditions that terminate the transfer starting
+     * with those that can occur the soonest.  */
+#define INTR_CHECK(cond, id, nelements)	\
+    if (cond) {			\
+        elements[id] = nelements;	\
+        if (elements[id] < min_elems)	\
+            min_elems = elements[id];	\
+    } else				\
+        elements[id] = INT_MAX;
+
+    /* Elements */
+    INTR_CHECK(
+                    ch->sync && !ch->fs && !ch->bs,
+                    omap_dma_intr_element_sync,
+                    1)
+
+    /* Frames */
+    /* TODO: for transfers where entire frames can be read and written
+     * using memcpy() but a->frame_delta is non-zero, try to still do
+     * transfers using soc_dma but limit min_elems to a->elements - ...
+     * See also the TODO in omap_dma_channel_load.  */
+    INTR_CHECK(
+                    (ch->interrupts & LAST_FRAME_INTR) &&
+                    ((a->frame < a->frames - 1) || !a->element),
+                    omap_dma_intr_last_frame,
+                    (a->frames - a->frame - 2) * a->elements +
+                    (a->elements - a->element + 1))
+    INTR_CHECK(
+                    ch->interrupts & HALF_FRAME_INTR,
+                    omap_dma_intr_half_frame,
+                    (a->elements >> 1) +
+                    (a->element >= (a->elements >> 1) ? a->elements : 0) -
+                    a->element)
+    INTR_CHECK(
+                    ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
+                    omap_dma_intr_frame,
+                    a->elements - a->element)
+    INTR_CHECK(
+                    ch->sync && ch->fs && !ch->bs,
+                    omap_dma_intr_frame_sync,
+                    a->elements - a->element)
+
+    /* Packets */
+    INTR_CHECK(
+                    ch->fs && ch->bs &&
+                    (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
+                    omap_dma_intr_packet,
+                    a->pck_elements - a->pck_element)
+    INTR_CHECK(
+                    ch->fs && ch->bs && ch->sync,
+                    omap_dma_intr_packet_sync,
+                    a->pck_elements - a->pck_element)
+
+    /* Blocks */
+    INTR_CHECK(
+                    1,
+                    omap_dma_intr_block,
+                    (a->frames - a->frame - 1) * a->elements +
+                    (a->elements - a->element))
+
+    dma->bytes = min_elems * ch->data_type;
+
+    /* Set appropriate interrupts and/or deactivate channels */
+
+#ifdef MULTI_REQ
+    /* TODO: should all of this only be done if dma->update, and otherwise
+     * inside omap_dma_transfer_generic below - check what's faster.  */
+    if (dma->update) {
+#endif
+
+        /* If the channel is element synchronized, deactivate it */
+        if (min_elems == elements[omap_dma_intr_element_sync])
+            omap_dma_deactivate_channel(s, ch);
+
+        /* If it is the last frame, set the LAST_FRAME interrupt */
+        if (min_elems == elements[omap_dma_intr_last_frame])
+            ch->status |= LAST_FRAME_INTR;
+
+        /* If exactly half of the frame was reached, set the HALF_FRAME
+           interrupt */
+        if (min_elems == elements[omap_dma_intr_half_frame])
+            ch->status |= HALF_FRAME_INTR;
+
+        /* If a full packet has been transferred, set the END_PKT interrupt */
+        if (min_elems == elements[omap_dma_intr_packet])
+            ch->status |= END_PKT_INTR;
+
+        /* If the channel is packet-synchronized, deactivate it */
+        if (min_elems == elements[omap_dma_intr_packet_sync])
+            omap_dma_deactivate_channel(s, ch);
+
+        /* If the channel is frame synchronized, deactivate it */
+        if (min_elems == elements[omap_dma_intr_frame_sync])
+            omap_dma_deactivate_channel(s, ch);
+
+        /* Set the END_FRAME interrupt */
+        if (min_elems == elements[omap_dma_intr_frame])
+            ch->status |= END_FRAME_INTR;
+
+        if (min_elems == elements[omap_dma_intr_block]) {
+            /* End of Block */
+            /* Disable the channel */
+
+            if (ch->omap_3_1_compatible_disable) {
+                omap_dma_disable_channel(s, ch);
+                if (ch->link_enabled)
+                    omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
+            } else {
+                if (!ch->auto_init)
+                    omap_dma_disable_channel(s, ch);
+                else if (ch->repeat || ch->end_prog)
+                    omap_dma_channel_load(ch);
+                else {
+                    ch->waiting_end_prog = 1;
+                    omap_dma_deactivate_channel(s, ch);
+                }
+            }
+
+            if (ch->interrupts & END_BLOCK_INTR)
+                ch->status |= END_BLOCK_INTR;
+        }
+
+        /* Update packet number */
+        if (ch->fs && ch->bs) {
+            a->pck_element += min_elems;
+            a->pck_element %= a->pck_elements;
+        }
+
+        /* TODO: check if we really need to update anything here or perhaps we
+         * can skip part of this.  */
+#ifndef MULTI_REQ
+        if (dma->update) {
+#endif
+            a->element += min_elems;
+
+            frames = a->element / a->elements;
+            a->element = a->element % a->elements;
+            a->frame += frames;
+            a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
+            a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];
+
+            /* If the channel is async, update cpc */
+            if (!ch->sync && frames)
+                ch->cpc = a->dest & 0xffff;
+
+            /* TODO: if the destination port is IMIF or EMIFF, set the dirty
+             * bits on it.  */
+#ifndef MULTI_REQ
+        }
+#else
+    }
+#endif
+
+    omap_dma_interrupts_update(s);
+}
+
/* Bring the whole DMA controller to its power-on state: the soc-dma
 * layer, the global config registers, the LCD channel, and every DMA
 * channel's programmed and active register sets.  */
void omap_dma_reset(struct soc_dma_s *dma)
{
    int i;
    struct omap_dma_s *s = dma->opaque;

    soc_dma_reset(s->dma);
    /* Reset values of the global registers differ between pre-OMAP4
     * and OMAP4 models.  */
    if (s->model < omap_dma_4)
        s->gcr = 0x0004;
    else
        s->gcr = 0x00010010;
    s->ocp = 0x00000000;
    memset(&s->irqstat, 0, sizeof(s->irqstat));
    memset(&s->irqen, 0, sizeof(s->irqen));
    s->lcd_ch.src = emiff;
    s->lcd_ch.condition = 0;
    s->lcd_ch.interrupts = 0;
    s->lcd_ch.dual = 0;
    /* Pre-OMAP4 controllers come up in the 3.1 compatibility channel
     * mapping (9 channels, shared interrupt lines).  */
    if (s->model < omap_dma_4)
        omap_dma_enable_3_1_mapping(s);
    for (i = 0; i < s->chans; i ++) {
        s->ch[i].suspend = 0;
        s->ch[i].prefetch = 0;
        s->ch[i].buf_disable = 0;
        s->ch[i].src_sync = 0;
        memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst));
        memset(&s->ch[i].port, 0, sizeof(s->ch[i].port));
        memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode));
        memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index));
        memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index));
        memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian));
        memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock));
        memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate));
        s->ch[i].write_mode = 0;
        s->ch[i].data_type = 0;
        s->ch[i].transparent_copy = 0;
        s->ch[i].constant_fill = 0;
        s->ch[i].color = 0x00000000;
        s->ch[i].end_prog = 0;
        s->ch[i].repeat = 0;
        s->ch[i].auto_init = 0;
        s->ch[i].link_enabled = 0;
        /* Per-channel interrupt mask reset value differs per model.  */
        if (s->model < omap_dma_4)
            s->ch[i].interrupts = 0x0003;
        else
            s->ch[i].interrupts = 0x0000;
        s->ch[i].status = 0;
        s->ch[i].cstatus = 0;
        s->ch[i].active = 0;
        s->ch[i].enable = 0;
        s->ch[i].sync = 0;
        s->ch[i].pending_request = 0;
        s->ch[i].waiting_end_prog = 0;
        s->ch[i].cpc = 0x0000;
        s->ch[i].fs = 0;
        s->ch[i].bs = 0;
        s->ch[i].omap_3_1_compatible_disable = 0;
        memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set));
        s->ch[i].priority = 0;
        s->ch[i].interleave_disabled = 0;
        s->ch[i].type = 0;
    }
}
+
/* Read a per-channel register; reg is the byte offset within the
 * channel's register window.  Returns 0 on success with *value filled
 * in, or 1 when the offset does not decode to a register.  Reading
 * CSR (0x06) is destructive: it clears all status bits except SYNC
 * (for both this channel and its 3.1-mapping sibling when that
 * mapping is active) and lowers the interrupt line.  */
static int omap_dma_ch_reg_read(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch, int reg, uint16_t *value)
{
    switch (reg) {
    case 0x00:	/* SYS_DMA_CSDP_CH0 */
        *value = (ch->burst[1] << 14) |
                (ch->pack[1] << 13) |
                (ch->port[1] << 9) |
                (ch->burst[0] << 7) |
                (ch->pack[0] << 6) |
                (ch->port[0] << 2) |
                (ch->data_type >> 1);
        break;

    case 0x02:	/* SYS_DMA_CCR_CH0 */
        if (s->model <= omap_dma_3_1)
            *value = 0 << 10;			/* FIFO_FLUSH reads as 0 */
        else
            *value = ch->omap_3_1_compatible_disable << 10;
        *value |= (ch->mode[1] << 14) |
                (ch->mode[0] << 12) |
                (ch->end_prog << 11) |
                (ch->repeat << 9) |
                (ch->auto_init << 8) |
                (ch->enable << 7) |
                (ch->priority << 6) |
                (ch->fs << 5) | ch->sync;
        break;

    case 0x04:	/* SYS_DMA_CICR_CH0 */
        *value = ch->interrupts;
        break;

    case 0x06:	/* SYS_DMA_CSR_CH0 */
        /* Read-to-clear: everything but the SYNC bit is consumed.  */
        *value = ch->status;
        ch->status &= SYNC;
        if (!ch->omap_3_1_compatible_disable && ch->sibling) {
            /* In the 3.1 mapping the sibling channel's status is
             * mirrored into bits 6..11 and cleared along with ours.  */
            *value |= (ch->sibling->status & 0x3f) << 6;
            ch->sibling->status &= SYNC;
        }
        qemu_irq_lower(ch->irq);
        break;

    case 0x08:	/* SYS_DMA_CSSA_L_CH0 */
        *value = ch->addr[0] & 0x0000ffff;
        break;

    case 0x0a:	/* SYS_DMA_CSSA_U_CH0 */
        *value = ch->addr[0] >> 16;
        break;

    case 0x0c:	/* SYS_DMA_CDSA_L_CH0 */
        *value = ch->addr[1] & 0x0000ffff;
        break;

    case 0x0e:	/* SYS_DMA_CDSA_U_CH0 */
        *value = ch->addr[1] >> 16;
        break;

    case 0x10:	/* SYS_DMA_CEN_CH0 */
        *value = ch->elements;
        break;

    case 0x12:	/* SYS_DMA_CFN_CH0 */
        *value = ch->frames;
        break;

    case 0x14:	/* SYS_DMA_CFI_CH0 */
        *value = ch->frame_index[0];
        break;

    case 0x16:	/* SYS_DMA_CEI_CH0 */
        *value = ch->element_index[0];
        break;

    case 0x18:	/* SYS_DMA_CPC_CH0 or DMA_CSAC */
        if (ch->omap_3_1_compatible_disable)
            *value = ch->active_set.src & 0xffff;	/* CSAC */
        else
            *value = ch->cpc;
        break;

    case 0x1a:	/* DMA_CDAC */
        *value = ch->active_set.dest & 0xffff;	/* CDAC */
        break;

    case 0x1c:	/* DMA_CDEI */
        *value = ch->element_index[1];
        break;

    case 0x1e:	/* DMA_CDFI */
        *value = ch->frame_index[1];
        break;

    case 0x20:	/* DMA_COLOR_L */
        *value = ch->color & 0xffff;
        break;

    case 0x22:	/* DMA_COLOR_U */
        *value = ch->color >> 16;
        break;

    case 0x24:	/* DMA_CCR2 */
        *value = (ch->bs << 2) |
                (ch->transparent_copy << 1) |
                ch->constant_fill;
        break;

    case 0x28:	/* DMA_CLNK_CTRL */
        *value = (ch->link_enabled << 15) |
                (ch->link_next_ch & 0xf);
        break;

    case 0x2a:	/* DMA_LCH_CTRL */
        *value = (ch->interleave_disabled << 15) |
                ch->type;
        break;

    default:
        return 1;
    }
    return 0;
}
+
+static int omap_dma_ch_reg_write(struct omap_dma_s *s,
+                struct omap_dma_channel_s *ch, int reg, uint16_t value)
+{
+    switch (reg) {
+    case 0x00:	/* SYS_DMA_CSDP_CH0 */
+        ch->burst[1] = (value & 0xc000) >> 14;
+        ch->pack[1] = (value & 0x2000) >> 13;
+        ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
+        ch->burst[0] = (value & 0x0180) >> 7;
+        ch->pack[0] = (value & 0x0040) >> 6;
+        ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2);
+        ch->data_type = 1 << (value & 3);
+        if (ch->port[0] >= __omap_dma_port_last)
+            printf("%s: invalid DMA port %i\n", __FUNCTION__,
+                            ch->port[0]);
+        if (ch->port[1] >= __omap_dma_port_last)
+            printf("%s: invalid DMA port %i\n", __FUNCTION__,
+                            ch->port[1]);
+        if ((value & 3) == 3)
+            printf("%s: bad data_type for DMA channel\n", __FUNCTION__);
+        break;
+
+    case 0x02:	/* SYS_DMA_CCR_CH0 */
+        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
+        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
+        ch->end_prog = (value & 0x0800) >> 11;
+        if (s->model >= omap_dma_3_2)
+            ch->omap_3_1_compatible_disable  = (value >> 10) & 0x1;
+        ch->repeat = (value & 0x0200) >> 9;
+        ch->auto_init = (value & 0x0100) >> 8;
+        ch->priority = (value & 0x0040) >> 6;
+        ch->fs = (value & 0x0020) >> 5;
+        ch->sync = value & 0x001f;
+
+        if (value & 0x0080)
+            omap_dma_enable_channel(s, ch);
+        else
+            omap_dma_disable_channel(s, ch);
+
+        if (ch->end_prog)
+            omap_dma_channel_end_prog(s, ch);
+
+        break;
+
+    case 0x04:	/* SYS_DMA_CICR_CH0 */
+        ch->interrupts = value & 0x3f;
+        break;
+
+    case 0x06:	/* SYS_DMA_CSR_CH0 */
+        OMAP_RO_REG((hwaddr) reg);
+        break;
+
+    case 0x08:	/* SYS_DMA_CSSA_L_CH0 */
+        ch->addr[0] &= 0xffff0000;
+        ch->addr[0] |= value;
+        break;
+
+    case 0x0a:	/* SYS_DMA_CSSA_U_CH0 */
+        ch->addr[0] &= 0x0000ffff;
+        ch->addr[0] |= (uint32_t) value << 16;
+        break;
+
+    case 0x0c:	/* SYS_DMA_CDSA_L_CH0 */
+        ch->addr[1] &= 0xffff0000;
+        ch->addr[1] |= value;
+        break;
+
+    case 0x0e:	/* SYS_DMA_CDSA_U_CH0 */
+        ch->addr[1] &= 0x0000ffff;
+        ch->addr[1] |= (uint32_t) value << 16;
+        break;
+
+    case 0x10:	/* SYS_DMA_CEN_CH0 */
+        ch->elements = value;
+        break;
+
+    case 0x12:	/* SYS_DMA_CFN_CH0 */
+        ch->frames = value;
+        break;
+
+    case 0x14:	/* SYS_DMA_CFI_CH0 */
+        ch->frame_index[0] = (int16_t) value;
+        break;
+
+    case 0x16:	/* SYS_DMA_CEI_CH0 */
+        ch->element_index[0] = (int16_t) value;
+        break;
+
+    case 0x18:	/* SYS_DMA_CPC_CH0 or DMA_CSAC */
+        OMAP_RO_REG((hwaddr) reg);
+        break;
+
+    case 0x1c:	/* DMA_CDEI */
+        ch->element_index[1] = (int16_t) value;
+        break;
+
+    case 0x1e:	/* DMA_CDFI */
+        ch->frame_index[1] = (int16_t) value;
+        break;
+
+    case 0x20:	/* DMA_COLOR_L */
+        ch->color &= 0xffff0000;
+        ch->color |= value;
+        break;
+
+    case 0x22:	/* DMA_COLOR_U */
+        ch->color &= 0xffff;
+        ch->color |= value << 16;
+        break;
+
+    case 0x24:	/* DMA_CCR2 */
+        ch->bs = (value >> 2) & 0x1;
+        ch->transparent_copy = (value >> 1) & 0x1;
+        ch->constant_fill = value & 0x1;
+        break;
+
+    case 0x28:	/* DMA_CLNK_CTRL */
+        ch->link_enabled = (value >> 15) & 0x1;
+        if (value & (1 << 14)) {			/* Stop_Lnk */
+            ch->link_enabled = 0;
+            omap_dma_disable_channel(s, ch);
+        }
+        ch->link_next_ch = value & 0x1f;
+        break;
+
+    case 0x2a:	/* DMA_LCH_CTRL */
+        ch->interleave_disabled = (value >> 15) & 0x1;
+        ch->type = value & 0xf;
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
+static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
+                uint16_t value)
+{
+    switch (offset) {
+    case 0xbc0:	/* DMA_LCD_CSDP */
+        s->brust_f2 = (value >> 14) & 0x3;
+        s->pack_f2 = (value >> 13) & 0x1;
+        s->data_type_f2 = (1 << ((value >> 11) & 0x3));
+        s->brust_f1 = (value >> 7) & 0x3;
+        s->pack_f1 = (value >> 6) & 0x1;
+        s->data_type_f1 = (1 << ((value >> 0) & 0x3));
+        break;
+
+    case 0xbc2:	/* DMA_LCD_CCR */
+        s->mode_f2 = (value >> 14) & 0x3;
+        s->mode_f1 = (value >> 12) & 0x3;
+        s->end_prog = (value >> 11) & 0x1;
+        s->omap_3_1_compatible_disable = (value >> 10) & 0x1;
+        s->repeat = (value >> 9) & 0x1;
+        s->auto_init = (value >> 8) & 0x1;
+        s->running = (value >> 7) & 0x1;
+        s->priority = (value >> 6) & 0x1;
+        s->bs = (value >> 4) & 0x1;
+        break;
+
+    case 0xbc4:	/* DMA_LCD_CTRL */
+        s->dst = (value >> 8) & 0x1;
+        s->src = ((value >> 6) & 0x3) << 1;
+        s->condition = 0;
+        /* Assume no bus errors and thus no BUS_ERROR irq bits.  */
+        s->interrupts = (value >> 1) & 1;
+        s->dual = value & 1;
+        break;
+
+    case 0xbc8:	/* TOP_B1_L */
+        s->src_f1_top &= 0xffff0000;
+        s->src_f1_top |= 0x0000ffff & value;
+        break;
+
+    case 0xbca:	/* TOP_B1_U */
+        s->src_f1_top &= 0x0000ffff;
+        s->src_f1_top |= value << 16;
+        break;
+
+    case 0xbcc:	/* BOT_B1_L */
+        s->src_f1_bottom &= 0xffff0000;
+        s->src_f1_bottom |= 0x0000ffff & value;
+        break;
+
+    case 0xbce:	/* BOT_B1_U */
+        s->src_f1_bottom &= 0x0000ffff;
+        s->src_f1_bottom |= (uint32_t) value << 16;
+        break;
+
+    case 0xbd0:	/* TOP_B2_L */
+        s->src_f2_top &= 0xffff0000;
+        s->src_f2_top |= 0x0000ffff & value;
+        break;
+
+    case 0xbd2:	/* TOP_B2_U */
+        s->src_f2_top &= 0x0000ffff;
+        s->src_f2_top |= (uint32_t) value << 16;
+        break;
+
+    case 0xbd4:	/* BOT_B2_L */
+        s->src_f2_bottom &= 0xffff0000;
+        s->src_f2_bottom |= 0x0000ffff & value;
+        break;
+
+    case 0xbd6:	/* BOT_B2_U */
+        s->src_f2_bottom &= 0x0000ffff;
+        s->src_f2_bottom |= (uint32_t) value << 16;
+        break;
+
+    case 0xbd8:	/* DMA_LCD_SRC_EI_B1 */
+        s->element_index_f1 = value;
+        break;
+
+    case 0xbda:	/* DMA_LCD_SRC_FI_B1_L */
+        s->frame_index_f1 &= 0xffff0000;
+        s->frame_index_f1 |= 0x0000ffff & value;
+        break;
+
+    case 0xbf4:	/* DMA_LCD_SRC_FI_B1_U */
+        s->frame_index_f1 &= 0x0000ffff;
+        s->frame_index_f1 |= (uint32_t) value << 16;
+        break;
+
+    case 0xbdc:	/* DMA_LCD_SRC_EI_B2 */
+        s->element_index_f2 = value;
+        break;
+
+    case 0xbde:	/* DMA_LCD_SRC_FI_B2_L */
+        s->frame_index_f2 &= 0xffff0000;
+        s->frame_index_f2 |= 0x0000ffff & value;
+        break;
+
+    case 0xbf6:	/* DMA_LCD_SRC_FI_B2_U */
+        s->frame_index_f2 &= 0x0000ffff;
+        s->frame_index_f2 |= (uint32_t) value << 16;
+        break;
+
+    case 0xbe0:	/* DMA_LCD_SRC_EN_B1 */
+        s->elements_f1 = value;
+        break;
+
+    case 0xbe4:	/* DMA_LCD_SRC_FN_B1 */
+        s->frames_f1 = value;
+        break;
+
+    case 0xbe2:	/* DMA_LCD_SRC_EN_B2 */
+        s->elements_f2 = value;
+        break;
+
+    case 0xbe6:	/* DMA_LCD_SRC_FN_B2 */
+        s->frames_f2 = value;
+        break;
+
+    case 0xbea:	/* DMA_LCD_LCH_CTRL */
+        s->lch_type = value & 0xf;
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
/* Read an OMAP 3.2 LCD DMA channel register; offset is the absolute
 * register offset (0xbc0..).  Returns 0 on success with *ret filled
 * in, or 1 when the offset does not decode to a register.  Reading
 * DMA_LCD_CTRL (0xbc4) lowers the channel's interrupt line.
 * ("brust" matches the [sic] spelling of the struct fields.)  */
static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t *ret)
{
    switch (offset) {
    case 0xbc0:	/* DMA_LCD_CSDP */
        *ret = (s->brust_f2 << 14) |
            (s->pack_f2 << 13) |
            ((s->data_type_f2 >> 1) << 11) |
            (s->brust_f1 << 7) |
            (s->pack_f1 << 6) |
            ((s->data_type_f1 >> 1) << 0);
        break;

    case 0xbc2:	/* DMA_LCD_CCR */
        *ret = (s->mode_f2 << 14) |
            (s->mode_f1 << 12) |
            (s->end_prog << 11) |
            (s->omap_3_1_compatible_disable << 10) |
            (s->repeat << 9) |
            (s->auto_init << 8) |
            (s->running << 7) |
            (s->priority << 6) |
            (s->bs << 4);
        break;

    case 0xbc4:	/* DMA_LCD_CTRL */
        qemu_irq_lower(s->irq);
        *ret = (s->dst << 8) |
            ((s->src & 0x6) << 5) |
            (s->condition << 3) |
            (s->interrupts << 1) |
            s->dual;
        break;

    case 0xbc8:	/* TOP_B1_L */
        *ret = s->src_f1_top & 0xffff;
        break;

    case 0xbca:	/* TOP_B1_U */
        *ret = s->src_f1_top >> 16;
        break;

    case 0xbcc:	/* BOT_B1_L */
        *ret = s->src_f1_bottom & 0xffff;
        break;

    case 0xbce:	/* BOT_B1_U */
        *ret = s->src_f1_bottom >> 16;
        break;

    case 0xbd0:	/* TOP_B2_L */
        *ret = s->src_f2_top & 0xffff;
        break;

    case 0xbd2:	/* TOP_B2_U */
        *ret = s->src_f2_top >> 16;
        break;

    case 0xbd4:	/* BOT_B2_L */
        *ret = s->src_f2_bottom & 0xffff;
        break;

    case 0xbd6:	/* BOT_B2_U */
        *ret = s->src_f2_bottom >> 16;
        break;

    case 0xbd8:	/* DMA_LCD_SRC_EI_B1 */
        *ret = s->element_index_f1;
        break;

    case 0xbda:	/* DMA_LCD_SRC_FI_B1_L */
        *ret = s->frame_index_f1 & 0xffff;
        break;

    case 0xbf4:	/* DMA_LCD_SRC_FI_B1_U */
        *ret = s->frame_index_f1 >> 16;
        break;

    case 0xbdc:	/* DMA_LCD_SRC_EI_B2 */
        *ret = s->element_index_f2;
        break;

    case 0xbde:	/* DMA_LCD_SRC_FI_B2_L */
        *ret = s->frame_index_f2 & 0xffff;
        break;

    case 0xbf6:	/* DMA_LCD_SRC_FI_B2_U */
        *ret = s->frame_index_f2 >> 16;
        break;

    case 0xbe0:	/* DMA_LCD_SRC_EN_B1 */
        *ret = s->elements_f1;
        break;

    case 0xbe4:	/* DMA_LCD_SRC_FN_B1 */
        *ret = s->frames_f1;
        break;

    case 0xbe2:	/* DMA_LCD_SRC_EN_B2 */
        *ret = s->elements_f2;
        break;

    case 0xbe6:	/* DMA_LCD_SRC_FN_B2 */
        *ret = s->frames_f2;
        break;

    case 0xbea:	/* DMA_LCD_LCH_CTRL */
        *ret = s->lch_type;
        break;

    default:
        return 1;
    }
    return 0;
}
+
+/* Write one OMAP DMA 3.1-style LCD channel register (SYS_DMA_LCD_*).
+ * Registers are 16 bits wide; the 32-bit frame addresses are split into
+ * _L (low half) and _U (high half) register pairs.
+ * Returns 0 if the offset was handled, 1 for an unknown offset.  */
+static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
+                uint16_t value)
+{
+    switch (offset) {
+    case 0x300:	/* SYS_DMA_LCD_CTRL */
+        /* Bit 6 selects the source port; bit 1 is the interrupt enable,
+         * bit 0 selects dual-block mode.  Writing clears the latched
+         * condition bits.  */
+        s->src = (value & 0x40) ? imif : emiff;
+        s->condition = 0;
+        /* Assume no bus errors and thus no BUS_ERROR irq bits.  */
+        s->interrupts = (value >> 1) & 1;
+        s->dual = value & 1;
+        break;
+
+    case 0x302:	/* SYS_DMA_LCD_TOP_F1_L */
+        s->src_f1_top &= 0xffff0000;
+        s->src_f1_top |= 0x0000ffff & value;
+        break;
+
+    case 0x304:	/* SYS_DMA_LCD_TOP_F1_U */
+        s->src_f1_top &= 0x0000ffff;
+        s->src_f1_top |= value << 16;
+        break;
+
+    case 0x306:	/* SYS_DMA_LCD_BOT_F1_L */
+        s->src_f1_bottom &= 0xffff0000;
+        s->src_f1_bottom |= 0x0000ffff & value;
+        break;
+
+    case 0x308:	/* SYS_DMA_LCD_BOT_F1_U */
+        s->src_f1_bottom &= 0x0000ffff;
+        s->src_f1_bottom |= value << 16;
+        break;
+
+    case 0x30a:	/* SYS_DMA_LCD_TOP_F2_L */
+        s->src_f2_top &= 0xffff0000;
+        s->src_f2_top |= 0x0000ffff & value;
+        break;
+
+    case 0x30c:	/* SYS_DMA_LCD_TOP_F2_U */
+        s->src_f2_top &= 0x0000ffff;
+        s->src_f2_top |= value << 16;
+        break;
+
+    case 0x30e:	/* SYS_DMA_LCD_BOT_F2_L */
+        s->src_f2_bottom &= 0xffff0000;
+        s->src_f2_bottom |= 0x0000ffff & value;
+        break;
+
+    case 0x310:	/* SYS_DMA_LCD_BOT_F2_U */
+        s->src_f2_bottom &= 0x0000ffff;
+        s->src_f2_bottom |= value << 16;
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
+/* Read one OMAP DMA 3.1-style LCD channel register (SYS_DMA_LCD_*).
+ * Reading SYS_DMA_LCD_CTRL clears the latched condition bits and lowers
+ * the LCD interrupt line (read-to-clear semantics).
+ * Returns 0 if the offset was handled, 1 for an unknown offset.  */
+static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
+                uint16_t *ret)
+{
+    int i;
+
+    switch (offset) {
+    case 0x300:	/* SYS_DMA_LCD_CTRL */
+        /* Latch the condition bits before clearing them so they can be
+         * reported in this read.  */
+        i = s->condition;
+        s->condition = 0;
+        qemu_irq_lower(s->irq);
+        *ret = ((s->src == imif) << 6) | (i << 3) |
+                (s->interrupts << 1) | s->dual;
+        break;
+
+    case 0x302:	/* SYS_DMA_LCD_TOP_F1_L */
+        *ret = s->src_f1_top & 0xffff;
+        break;
+
+    case 0x304:	/* SYS_DMA_LCD_TOP_F1_U */
+        *ret = s->src_f1_top >> 16;
+        break;
+
+    case 0x306:	/* SYS_DMA_LCD_BOT_F1_L */
+        *ret = s->src_f1_bottom & 0xffff;
+        break;
+
+    case 0x308:	/* SYS_DMA_LCD_BOT_F1_U */
+        *ret = s->src_f1_bottom >> 16;
+        break;
+
+    case 0x30a:	/* SYS_DMA_LCD_TOP_F2_L */
+        *ret = s->src_f2_top & 0xffff;
+        break;
+
+    case 0x30c:	/* SYS_DMA_LCD_TOP_F2_U */
+        *ret = s->src_f2_top >> 16;
+        break;
+
+    case 0x30e:	/* SYS_DMA_LCD_BOT_F2_L */
+        *ret = s->src_f2_bottom & 0xffff;
+        break;
+
+    case 0x310:	/* SYS_DMA_LCD_BOT_F2_U */
+        *ret = s->src_f2_bottom >> 16;
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
+/* Write a global (system) DMA register.
+ * Returns 0 if the offset was handled, 1 for an unknown offset.  */
+static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
+{
+    switch (offset) {
+    case 0x400:	/* SYS_DMA_GCR */
+        s->gcr = value;
+        break;
+
+    case 0x404:	/* DMA_GSCR */
+        /* Bit 3 set disables the OMAP 3.1 compatibility register mapping.  */
+        if (value & 0x8)
+            omap_dma_disable_3_1_mapping(s);
+        else
+            omap_dma_enable_3_1_mapping(s);
+        break;
+
+    case 0x408:	/* DMA_GRST */
+        /* Bit 0 triggers a software reset of the whole controller.  */
+        if (value & 0x1)
+            omap_dma_reset(s->dma);
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
+/* Read a global (system) DMA register: GCR/GSCR/GRST, the hardware ID
+ * registers, the capability registers and the (unimplemented) physical
+ * channel status registers.
+ * Returns 0 if the offset was handled, 1 for an unknown offset.  */
+static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
+                uint16_t *ret)
+{
+    switch (offset) {
+    case 0x400:	/* SYS_DMA_GCR */
+        *ret = s->gcr;
+        break;
+
+    case 0x404:	/* DMA_GSCR */
+        *ret = s->omap_3_1_mapping_disabled << 3;
+        break;
+
+    case 0x408:	/* DMA_GRST */
+        /* Reset is self-clearing; always reads back as zero.  */
+        *ret = 0;
+        break;
+
+    case 0x442:	/* DMA_HW_ID */
+    case 0x444:	/* DMA_PCh2_ID */
+    case 0x446:	/* DMA_PCh0_ID */
+    case 0x448:	/* DMA_PCh1_ID */
+    case 0x44a:	/* DMA_PChG_ID */
+    case 0x44c:	/* DMA_PChD_ID */
+        *ret = 1;
+        break;
+
+    case 0x44e:	/* DMA_CAPS_0_U */
+        *ret = (s->caps[0] >> 16) & 0xffff;
+        break;
+    case 0x450:	/* DMA_CAPS_0_L */
+        *ret = (s->caps[0] >>  0) & 0xffff;
+        break;
+
+    case 0x452:	/* DMA_CAPS_1_U */
+        *ret = (s->caps[1] >> 16) & 0xffff;
+        break;
+    case 0x454:	/* DMA_CAPS_1_L */
+        *ret = (s->caps[1] >>  0) & 0xffff;
+        break;
+
+    case 0x456:	/* DMA_CAPS_2 */
+        *ret = s->caps[2];
+        break;
+
+    case 0x458:	/* DMA_CAPS_3 */
+        *ret = s->caps[3];
+        break;
+
+    case 0x45a:	/* DMA_CAPS_4 */
+        *ret = s->caps[4];
+        break;
+
+    case 0x460:	/* DMA_PCh2_SR */
+    case 0x480:	/* DMA_PCh0_SR */
+    case 0x482:	/* DMA_PCh1_SR */
+    case 0x4c0:	/* DMA_PChD_SR_0 */
+        printf("%s: Physical Channel Status Registers not implemented.\n",
+               __FUNCTION__);
+        *ret = 0xff;
+        break;
+
+    default:
+        return 1;
+    }
+    return 0;
+}
+
+/* Read handler for the OMAP1/2 DMA register window (16-bit accesses only).
+ * Dispatches by address range to the 3.1-style LCD bank, the per-channel
+ * bank, the system bank or the 3.2-style LCD bank; falling out of the
+ * switch reports a bad register and returns 0.  */
+static uint64_t omap_dma_read(void *opaque, hwaddr addr,
+                              unsigned size)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    int reg, ch;
+    uint16_t ret;
+
+    if (size != 2) {
+        return omap_badwidth_read16(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0x300 ... 0x3fe:
+        /* With the 3.1 mapping enabled this range is the LCD channel;
+         * otherwise it decodes as ordinary channel registers below.  */
+        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
+            if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret))
+                break;
+            return ret;
+        }
+        /* Fall through. */
+    case 0x000 ... 0x2fe:
+        /* 0x40 bytes of registers per logical channel.  */
+        reg = addr & 0x3f;
+        ch = (addr >> 6) & 0x0f;
+        if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret))
+            break;
+        return ret;
+
+    case 0x404 ... 0x4fe:
+        if (s->model <= omap_dma_3_1)
+            break;
+        /* Fall through. */
+    case 0x400:
+        if (omap_dma_sys_read(s, addr, &ret))
+            break;
+        return ret;
+
+    case 0xb00 ... 0xbfe:
+        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
+            if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret))
+                break;
+            return ret;
+        }
+        break;
+    }
+
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+/* Write handler for the OMAP1/2 DMA register window (16-bit accesses only).
+ * Mirrors the address decode of omap_dma_read(): 3.1-style LCD bank,
+ * per-channel bank, system bank, 3.2-style LCD bank.  Unhandled
+ * addresses fall out of the switch and are reported via OMAP_BAD_REG.
+ *
+ * Fix: the "Fall through" comment for the 0x404...0x4fe -> 0x400 cascade
+ * was misplaced after the 0x400 label (inside its body); it now sits
+ * between the cases, matching omap_dma_read() and the usual annotation
+ * position recognized by fall-through checkers.  */
+static void omap_dma_write(void *opaque, hwaddr addr,
+                           uint64_t value, unsigned size)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    int reg, ch;
+
+    if (size != 2) {
+        return omap_badwidth_write16(opaque, addr, value);
+    }
+
+    switch (addr) {
+    case 0x300 ... 0x3fe:
+        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
+            if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value))
+                break;
+            return;
+        }
+        /* Fall through.  */
+    case 0x000 ... 0x2fe:
+        /* 0x40 bytes of registers per logical channel.  */
+        reg = addr & 0x3f;
+        ch = (addr >> 6) & 0x0f;
+        if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value))
+            break;
+        return;
+
+    case 0x404 ... 0x4fe:
+        if (s->model <= omap_dma_3_1)
+            break;
+        /* Fall through. */
+    case 0x400:
+        if (omap_dma_sys_write(s, addr, value))
+            break;
+        return;
+
+    case 0xb00 ... 0xbfe:
+        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
+            if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value))
+                break;
+            return;
+        }
+        break;
+    }
+
+    OMAP_BAD_REG(addr);
+}
+
+/* Memory region callbacks for the OMAP1/2 DMA register window.  */
+static const MemoryRegionOps omap_dma_ops = {
+    .read = omap_dma_read,
+    .write = omap_dma_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/* DRQ line handler: latch the level of request line @drq in the drqbmp
+ * bitmap and kick processing on a 0->1 transition only.
+ *
+ * Fix: use 1ULL when shifting.  omap_dma4_init() allocates 64 request
+ * lines, so @drq can exceed 31 and "1 << drq" on a 32-bit int would be
+ * undefined behavior (and would corrupt the upper half of the bitmap).  */
+static void omap_dma_request(void *opaque, int drq, int req)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    /* The request pins are level triggered in QEMU.  */
+    if (req) {
+        if (~s->dma->drqbmp & (1ULL << drq)) {
+            s->dma->drqbmp |= 1ULL << drq;
+            omap_dma_process_request(s, drq);
+        }
+    } else {
+        s->dma->drqbmp &= ~(1ULL << drq);
+    }
+}
+
+/* XXX: this won't be needed once soc_dma knows about clocks.  */
+/* Clock-rate change callback: refresh the soc_dma frequency from the
+ * controller clock and (re)assert requests for all active channels.  */
+static void omap_dma_clk_update(void *opaque, int line, int on)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    int i;
+
+    s->dma->freq = omap_clk_getrate(s->clk);
+
+    for (i = 0; i < s->chans; i ++)
+        if (s->ch[i].active)
+            soc_dma_set_request(s->ch[i].dma, on);
+}
+
+/* Initialize the capability registers (caps[0..4]) according to the
+ * modelled controller revision.  DMA 3.1 advertises no capabilities;
+ * DMA 3.2 and DMA 4 share one capability set (per-revision differences
+ * are noted inline).  */
+static void omap_dma_setcaps(struct omap_dma_s *s)
+{
+    switch (s->model) {
+    default:
+    case omap_dma_3_1:
+        break;
+    case omap_dma_3_2:
+    case omap_dma_4:
+        /* XXX Only available for sDMA */
+        s->caps[0] =
+                (1 << 19) |	/* Constant Fill Capability */
+                (1 << 18);	/* Transparent BLT Capability */
+        s->caps[1] =
+                (1 << 1);	/* 1-bit palettized capability (DMA 3.2 only) */
+        s->caps[2] =
+                (1 << 8) |	/* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
+                (1 << 7) |	/* DST_DOUBLE_INDEX_ADRS_CPBLTY */
+                (1 << 6) |	/* DST_SINGLE_INDEX_ADRS_CPBLTY */
+                (1 << 5) |	/* DST_POST_INCRMNT_ADRS_CPBLTY */
+                (1 << 4) |	/* DST_CONST_ADRS_CPBLTY */
+                (1 << 3) |	/* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
+                (1 << 2) |	/* SRC_SINGLE_INDEX_ADRS_CPBLTY */
+                (1 << 1) |	/* SRC_POST_INCRMNT_ADRS_CPBLTY */
+                (1 << 0);	/* SRC_CONST_ADRS_CPBLTY */
+        s->caps[3] =
+                (1 << 6) |	/* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
+                (1 << 7) |	/* PKT_SYNCHR_CPBLTY (DMA 4 only) */
+                (1 << 5) |	/* CHANNEL_CHAINING_CPBLTY */
+                (1 << 4) |	/* LCh_INTERLEAVE_CPBLTY */
+                (1 << 3) |	/* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
+                (1 << 2) |	/* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
+                (1 << 1) |	/* FRAME_SYNCHR_CPBLTY */
+                (1 << 0);	/* ELMNT_SYNCHR_CPBLTY */
+        s->caps[4] =
+                (1 << 7) |	/* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
+                (1 << 6) |	/* SYNC_STATUS_CPBLTY */
+                (1 << 5) |	/* BLOCK_INTERRUPT_CPBLTY */
+                (1 << 4) |	/* LAST_FRAME_INTERRUPT_CPBLTY */
+                (1 << 3) |	/* FRAME_INTERRUPT_CPBLTY */
+                (1 << 2) |	/* HALF_FRAME_INTERRUPT_CPBLTY */
+                (1 << 1) |	/* EVENT_DROP_INTERRUPT_CPBLTY */
+                (1 << 0);	/* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
+        break;
+    }
+}
+
+/* Create and map an OMAP1 (DMA 3.0/3.1/3.2) DMA controller at @base.
+ * @irqs are the per-channel interrupt lines, @lcd_irq the dedicated LCD
+ * channel interrupt.  Returns the underlying soc_dma instance, whose
+ * opaque pointer refers back to the omap_dma_s state.  */
+struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
+                MemoryRegion *sysmem,
+                qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
+                enum omap_dma_model model)
+{
+    int num_irqs, memsize, i;
+    struct omap_dma_s *s = (struct omap_dma_s *)
+            g_malloc0(sizeof(struct omap_dma_s));
+
+    /* Pre-3.2 controllers have 6 channel irqs and a smaller register
+     * window; 3.2 has 16 irqs and an extra 0x400 bytes of registers.  */
+    if (model <= omap_dma_3_1) {
+        num_irqs = 6;
+        memsize = 0x800;
+    } else {
+        num_irqs = 16;
+        memsize = 0xc00;
+    }
+    s->model = model;
+    s->mpu = mpu;
+    s->clk = clk;
+    s->lcd_ch.irq = lcd_irq;
+    s->lcd_ch.mpu = mpu;
+
+    s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
+    s->dma->freq = omap_clk_getrate(clk);
+    s->dma->transfer_fn = omap_dma_transfer_generic;
+    s->dma->setup_fn = omap_dma_transfer_setup;
+    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
+    s->dma->opaque = s;
+
+    while (num_irqs --)
+        s->ch[num_irqs].irq = irqs[num_irqs];
+    /* Channels 0-2 and 6-8 are paired as siblings.  */
+    for (i = 0; i < 3; i ++) {
+        s->ch[i].sibling = &s->ch[i + 6];
+        s->ch[i + 6].sibling = &s->ch[i];
+    }
+    for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) {
+        s->ch[i].dma = &s->dma->ch[i];
+        s->dma->ch[i].opaque = &s->ch[i];
+    }
+
+    omap_dma_setcaps(s);
+    omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
+    omap_dma_reset(s->dma);
+    omap_dma_clk_update(s, 0, 1);
+
+    memory_region_init_io(&s->iomem, &omap_dma_ops, s, "omap.dma", memsize);
+    memory_region_add_subregion(sysmem, base, &s->iomem);
+
+    mpu->drq = s->dma->drq;
+
+    return s->dma;
+}
+
+/* DMA4 interrupt update: collect pending per-channel status into a
+ * 32-bit bitmap (one bit per channel, latched into each channel's
+ * cstatus and cleared from status), then raise each of the four host
+ * interrupt lines whose enable mask selects a pending channel.  */
+static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
+{
+    struct omap_dma_channel_s *ch = s->ch;
+    uint32_t bmp, bit;
+
+    /* The loop terminates after 32 iterations when "bit" shifts out.  */
+    for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
+        if (ch->status) {
+            bmp |= bit;
+            ch->cstatus |= ch->status;
+            ch->status = 0;
+        }
+    if ((s->irqstat[0] |= s->irqen[0] & bmp))
+        qemu_irq_raise(s->irq[0]);
+    if ((s->irqstat[1] |= s->irqen[1] & bmp))
+        qemu_irq_raise(s->irq[1]);
+    if ((s->irqstat[2] |= s->irqen[2] & bmp))
+        qemu_irq_raise(s->irq[2]);
+    if ((s->irqstat[3] |= s->irqen[3] & bmp))
+        qemu_irq_raise(s->irq[3]);
+}
+
+/* Read handler for the OMAP2/3 (DMA4) register window.  Global registers
+ * are decoded in the first switch; addresses 0x80 and up are rebased to
+ * a 0x60-byte per-channel window and handled by the second switch.
+ * The IRQSTATUS/IRQENABLE cases use deliberate fall-through to count up
+ * the line index "irqn" from the case label.  */
+static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
+                               unsigned size)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    int irqn = 0, chnum;
+    struct omap_dma_channel_s *ch;
+
+    if (size == 1) {
+        return omap_badwidth_read16(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0x00:	/* DMA4_REVISION */
+        return 0x40;
+
+    case 0x14:	/* DMA4_IRQSTATUS_L3 */
+        irqn ++;
+        /* fall through */
+    case 0x10:	/* DMA4_IRQSTATUS_L2 */
+        irqn ++;
+        /* fall through */
+    case 0x0c:	/* DMA4_IRQSTATUS_L1 */
+        irqn ++;
+        /* fall through */
+    case 0x08:	/* DMA4_IRQSTATUS_L0 */
+        return s->irqstat[irqn];
+
+    case 0x24:	/* DMA4_IRQENABLE_L3 */
+        irqn ++;
+        /* fall through */
+    case 0x20:	/* DMA4_IRQENABLE_L2 */
+        irqn ++;
+        /* fall through */
+    case 0x1c:	/* DMA4_IRQENABLE_L1 */
+        irqn ++;
+        /* fall through */
+    case 0x18:	/* DMA4_IRQENABLE_L0 */
+        return s->irqen[irqn];
+
+    case 0x28:	/* DMA4_SYSSTATUS */
+        return 1;						/* RESETDONE */
+
+    case 0x2c:	/* DMA4_OCP_SYSCONFIG */
+        return s->ocp;
+
+    case 0x64:	/* DMA4_CAPS_0 */
+        return s->caps[0];
+    case 0x6c:	/* DMA4_CAPS_2 */
+        return s->caps[2];
+    case 0x70:	/* DMA4_CAPS_3 */
+        return s->caps[3];
+    case 0x74:	/* DMA4_CAPS_4 */
+        return s->caps[4];
+
+    case 0x78:	/* DMA4_GCR */
+        return s->gcr;
+
+    case 0x80 ... 0xfff:
+        /* Rebase to a channel-relative offset for the switch below.  */
+        addr -= 0x80;
+        chnum = addr / 0x60;
+        ch = s->ch + chnum;
+        addr -= chnum * 0x60;
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+        return 0;
+    }
+
+    /* Per-channel registers */
+    switch (addr) {
+    case 0x00:	/* DMA4_CCR */
+        return (ch->buf_disable << 25) |
+                (ch->src_sync << 24) |
+                (ch->prefetch << 23) |
+                ((ch->sync & 0x60) << 14) |
+                (ch->bs << 18) |
+                (ch->transparent_copy << 17) |
+                (ch->constant_fill << 16) |
+                (ch->mode[1] << 14) |
+                (ch->mode[0] << 12) |
+                (0 << 10) | (0 << 9) |
+                (ch->suspend << 8) |
+                (ch->enable << 7) |
+                (ch->priority << 6) |
+                (ch->fs << 5) | (ch->sync & 0x1f);
+
+    case 0x04:	/* DMA4_CLNK_CTRL */
+        return (ch->link_enabled << 15) | ch->link_next_ch;
+
+    case 0x08:	/* DMA4_CICR */
+        return ch->interrupts;
+
+    case 0x0c:	/* DMA4_CSR */
+        return ch->cstatus;
+
+    case 0x10:	/* DMA4_CSDP */
+        return (ch->endian[0] << 21) |
+                (ch->endian_lock[0] << 20) |
+                (ch->endian[1] << 19) |
+                (ch->endian_lock[1] << 18) |
+                (ch->write_mode << 16) |
+                (ch->burst[1] << 14) |
+                (ch->pack[1] << 13) |
+                (ch->translate[1] << 9) |
+                (ch->burst[0] << 7) |
+                (ch->pack[0] << 6) |
+                (ch->translate[0] << 2) |
+                (ch->data_type >> 1);
+
+    case 0x14:	/* DMA4_CEN */
+        return ch->elements;
+
+    case 0x18:	/* DMA4_CFN */
+        return ch->frames;
+
+    case 0x1c:	/* DMA4_CSSA */
+        return ch->addr[0];
+
+    case 0x20:	/* DMA4_CDSA */
+        return ch->addr[1];
+
+    case 0x24:	/* DMA4_CSEI */
+        return ch->element_index[0];
+
+    case 0x28:	/* DMA4_CSFI */
+        return ch->frame_index[0];
+
+    case 0x2c:	/* DMA4_CDEI */
+        return ch->element_index[1];
+
+    case 0x30:	/* DMA4_CDFI */
+        return ch->frame_index[1];
+
+    case 0x34:	/* DMA4_CSAC */
+        return ch->active_set.src & 0xffff;
+
+    case 0x38:	/* DMA4_CDAC */
+        return ch->active_set.dest & 0xffff;
+
+    case 0x3c:	/* DMA4_CCEN */
+        return ch->active_set.element;
+
+    case 0x40:	/* DMA4_CCFN */
+        return ch->active_set.frame;
+
+    case 0x44:	/* DMA4_COLOR */
+        /* XXX only in sDMA */
+        return ch->color;
+
+    default:
+        OMAP_BAD_REG(addr);
+        return 0;
+    }
+}
+
+/* Write handler for the OMAP2/3 (DMA4) register window.  Same decoding
+ * scheme as omap_dma4_read(): global registers first (IRQ banks use
+ * fall-through to derive the line index), then addresses 0x80+ rebased
+ * to 0x60-byte per-channel windows.  IRQSTATUS and CSR are
+ * write-one-to-clear; read-only registers report via OMAP_RO_REG.  */
+static void omap_dma4_write(void *opaque, hwaddr addr,
+                            uint64_t value, unsigned size)
+{
+    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+    int chnum, irqn = 0;
+    struct omap_dma_channel_s *ch;
+
+    if (size == 1) {
+        return omap_badwidth_write16(opaque, addr, value);
+    }
+
+    switch (addr) {
+    case 0x14:	/* DMA4_IRQSTATUS_L3 */
+        irqn ++;
+        /* fall through */
+    case 0x10:	/* DMA4_IRQSTATUS_L2 */
+        irqn ++;
+        /* fall through */
+    case 0x0c:	/* DMA4_IRQSTATUS_L1 */
+        irqn ++;
+        /* fall through */
+    case 0x08:	/* DMA4_IRQSTATUS_L0 */
+        /* Write-one-to-clear; drop the line when nothing is pending.  */
+        s->irqstat[irqn] &= ~value;
+        if (!s->irqstat[irqn])
+            qemu_irq_lower(s->irq[irqn]);
+        return;
+
+    case 0x24:	/* DMA4_IRQENABLE_L3 */
+        irqn ++;
+        /* fall through */
+    case 0x20:	/* DMA4_IRQENABLE_L2 */
+        irqn ++;
+        /* fall through */
+    case 0x1c:	/* DMA4_IRQENABLE_L1 */
+        irqn ++;
+        /* fall through */
+    case 0x18:	/* DMA4_IRQENABLE_L0 */
+        s->irqen[irqn] = value;
+        return;
+
+    case 0x2c:	/* DMA4_OCP_SYSCONFIG */
+        if (value & 2)						/* SOFTRESET */
+            omap_dma_reset(s->dma);
+        s->ocp = value & 0x3321;
+        if (((s->ocp >> 12) & 3) == 3)				/* MIDLEMODE */
+            fprintf(stderr, "%s: invalid DMA power mode\n", __FUNCTION__);
+        return;
+
+    case 0x78:	/* DMA4_GCR */
+        s->gcr = value & 0x00ff00ff;
+	if ((value & 0xff) == 0x00)		/* MAX_CHANNEL_FIFO_DEPTH */
+            fprintf(stderr, "%s: wrong FIFO depth in GCR\n", __FUNCTION__);
+        return;
+
+    case 0x80 ... 0xfff:
+        /* Rebase to a channel-relative offset for the switch below.  */
+        addr -= 0x80;
+        chnum = addr / 0x60;
+        ch = s->ch + chnum;
+        addr -= chnum * 0x60;
+        break;
+
+    case 0x00:	/* DMA4_REVISION */
+    case 0x28:	/* DMA4_SYSSTATUS */
+    case 0x64:	/* DMA4_CAPS_0 */
+    case 0x6c:	/* DMA4_CAPS_2 */
+    case 0x70:	/* DMA4_CAPS_3 */
+    case 0x74:	/* DMA4_CAPS_4 */
+        OMAP_RO_REG(addr);
+        return;
+
+    default:
+        OMAP_BAD_REG(addr);
+        return;
+    }
+
+    /* Per-channel registers */
+    switch (addr) {
+    case 0x00:	/* DMA4_CCR */
+        ch->buf_disable = (value >> 25) & 1;
+        ch->src_sync = (value >> 24) & 1;	/* XXX For CamDMA must be 1 */
+        if (ch->buf_disable && !ch->src_sync)
+            fprintf(stderr, "%s: Buffering disable is not allowed in "
+                            "destination synchronised mode\n", __FUNCTION__);
+        ch->prefetch = (value >> 23) & 1;
+        ch->bs = (value >> 18) & 1;
+        ch->transparent_copy = (value >> 17) & 1;
+        ch->constant_fill = (value >> 16) & 1;
+        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
+        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
+        ch->suspend = (value & 0x0100) >> 8;
+        ch->priority = (value & 0x0040) >> 6;
+        ch->fs = (value & 0x0020) >> 5;
+        if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1])
+            fprintf(stderr, "%s: For a packet transfer at least one port "
+                            "must be constant-addressed\n", __FUNCTION__);
+        /* Synchronization control is split across bits 4:0 and 20:19.  */
+        ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
+        /* XXX must be 0x01 for CamDMA */
+
+        /* Bit 7 is the channel enable.  */
+        if (value & 0x0080)
+            omap_dma_enable_channel(s, ch);
+        else
+            omap_dma_disable_channel(s, ch);
+
+        break;
+
+    case 0x04:	/* DMA4_CLNK_CTRL */
+        ch->link_enabled = (value >> 15) & 0x1;
+        ch->link_next_ch = value & 0x1f;
+        break;
+
+    case 0x08:	/* DMA4_CICR */
+        ch->interrupts = value & 0x09be;
+        break;
+
+    case 0x0c:	/* DMA4_CSR */
+        /* Write-one-to-clear channel status.  */
+        ch->cstatus &= ~value;
+        break;
+
+    case 0x10:	/* DMA4_CSDP */
+        ch->endian[0] =(value >> 21) & 1;
+        ch->endian_lock[0] =(value >> 20) & 1;
+        ch->endian[1] =(value >> 19) & 1;
+        ch->endian_lock[1] =(value >> 18) & 1;
+        if (ch->endian[0] != ch->endian[1])
+            fprintf(stderr, "%s: DMA endiannes conversion enable attempt\n",
+                            __FUNCTION__);
+        ch->write_mode = (value >> 16) & 3;
+        ch->burst[1] = (value & 0xc000) >> 14;
+        ch->pack[1] = (value & 0x2000) >> 13;
+        ch->translate[1] = (value & 0x1e00) >> 9;
+        ch->burst[0] = (value & 0x0180) >> 7;
+        ch->pack[0] = (value & 0x0040) >> 6;
+        ch->translate[0] = (value & 0x003c) >> 2;
+        if (ch->translate[0] | ch->translate[1])
+            fprintf(stderr, "%s: bad MReqAddressTranslate sideband signal\n",
+                            __FUNCTION__);
+        /* Element width in bytes: 1, 2 or 4 (encoding 3 is invalid).  */
+        ch->data_type = 1 << (value & 3);
+        if ((value & 3) == 3)
+            printf("%s: bad data_type for DMA channel\n", __FUNCTION__);
+        break;
+
+    case 0x14:	/* DMA4_CEN */
+        ch->set_update = 1;
+        ch->elements = value & 0xffffff;
+        break;
+
+    case 0x18:	/* DMA4_CFN */
+        ch->frames = value & 0xffff;
+        ch->set_update = 1;
+        break;
+
+    case 0x1c:	/* DMA4_CSSA */
+        ch->addr[0] = (hwaddr) (uint32_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x20:	/* DMA4_CDSA */
+        ch->addr[1] = (hwaddr) (uint32_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x24:	/* DMA4_CSEI */
+        ch->element_index[0] = (int16_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x28:	/* DMA4_CSFI */
+        ch->frame_index[0] = (int32_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x2c:	/* DMA4_CDEI */
+        ch->element_index[1] = (int16_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x30:	/* DMA4_CDFI */
+        ch->frame_index[1] = (int32_t) value;
+        ch->set_update = 1;
+        break;
+
+    case 0x44:	/* DMA4_COLOR */
+        /* XXX only in sDMA */
+        ch->color = value;
+        break;
+
+    case 0x34:	/* DMA4_CSAC */
+    case 0x38:	/* DMA4_CDAC */
+    case 0x3c:	/* DMA4_CCEN */
+    case 0x40:	/* DMA4_CCFN */
+        OMAP_RO_REG(addr);
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+    }
+}
+
+/* Memory region callbacks for the OMAP2/3 (DMA4) register window.  */
+static const MemoryRegionOps omap_dma4_ops = {
+    .read = omap_dma4_read,
+    .write = omap_dma4_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/* Create and map an OMAP2/3 (DMA4) controller at @base with @chans
+ * logical channels, clocked by @fclk.  @irqs supplies the four host
+ * interrupt lines copied into s->irq.  Returns the underlying soc_dma
+ * instance.  Note the @fifo and @iclk parameters are currently unused
+ * in this function body.  */
+struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
+                MemoryRegion *sysmem,
+                struct omap_mpu_state_s *mpu, int fifo,
+                int chans, omap_clk iclk, omap_clk fclk)
+{
+    int i;
+    struct omap_dma_s *s = (struct omap_dma_s *)
+            g_malloc0(sizeof(struct omap_dma_s));
+
+    s->model = omap_dma_4;
+    s->chans = chans;
+    s->mpu = mpu;
+    s->clk = fclk;
+
+    s->dma = soc_dma_init(s->chans);
+    s->dma->freq = omap_clk_getrate(fclk);
+    s->dma->transfer_fn = omap_dma_transfer_generic;
+    s->dma->setup_fn = omap_dma_transfer_setup;
+    /* DMA4 exposes 64 request lines.  */
+    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
+    s->dma->opaque = s;
+    for (i = 0; i < s->chans; i ++) {
+        s->ch[i].dma = &s->dma->ch[i];
+        s->dma->ch[i].opaque = &s->ch[i];
+    }
+
+    memcpy(&s->irq, irqs, sizeof(s->irq));
+    s->intr_update = omap_dma_interrupts_4_update;
+
+    omap_dma_setcaps(s);
+    omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
+    omap_dma_reset(s->dma);
+    omap_dma_clk_update(s, 0, !!s->dma->freq);
+
+    memory_region_init_io(&s->iomem, &omap_dma4_ops, s, "omap.dma4", 0x1000);
+    memory_region_add_subregion(sysmem, base, &s->iomem);
+
+    mpu->drq = s->dma->drq;
+
+    return s->dma;
+}
+
+/* Return a pointer to the LCD channel state embedded in the OMAP DMA
+ * controller that owns the given soc_dma instance.  */
+struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
+{
+    struct omap_dma_s *controller = (struct omap_dma_s *) dma->opaque;
+
+    return &controller->lcd_ch;
+}
diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c
new file mode 100644
index 0000000000..6e4c1f6d62
--- /dev/null
+++ b/hw/dma/pxa2xx_dma.c
@@ -0,0 +1,574 @@
+/*
+ * Intel XScale PXA255/270 DMA controller.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Copyright (c) 2006 Thorsten Zitterell
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "hw/hw.h"
+#include "hw/arm/pxa.h"
+#include "hw/sysbus.h"
+
+#define PXA255_DMA_NUM_CHANNELS 16
+#define PXA27X_DMA_NUM_CHANNELS 32
+
+#define PXA2XX_DMA_NUM_REQUESTS 75
+
+/* State of one PXA2xx DMA channel: the four descriptor words plus the
+ * channel's DCSR state and the level of its mapped request line.  */
+typedef struct {
+    uint32_t descr;     /* DDADR: next descriptor address / STOP, BREN bits */
+    uint32_t src;       /* DSADR: source address */
+    uint32_t dest;      /* DTADR: target address */
+    uint32_t cmd;       /* DCMD: length, width, size and flag bits */
+    uint32_t state;     /* DCSR control/status bits */
+    int request;        /* nonzero while the mapped DREQ line is asserted */
+} PXA2xxDMAChannel;
+
+/* Device state for the PXA255/270 DMA controller.  */
+typedef struct PXA2xxDMAState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    qemu_irq irq;
+
+    /* Per-channel pending-interrupt bitmaps, one bit per channel;
+     * their union drives the single interrupt line (see
+     * pxa2xx_dma_update).  */
+    uint32_t stopintr;
+    uint32_t eorintr;
+    uint32_t rasintr;
+    uint32_t startintr;
+    uint32_t endintr;
+
+    uint32_t align;     /* DALGN register value */
+    uint32_t pio;       /* DPCSR register value */
+
+    int channels;       /* 16 on PXA255, 32 on PXA27x */
+    PXA2xxDMAChannel *chan;
+
+    /* DRCMR request-to-channel mapping, indexed by request number.  */
+    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];
+
+    /* Flag to avoid recursive DMA invocations.  */
+    int running;
+} PXA2xxDMAState;
+
+#define DCSR0	0x0000	/* DMA Control / Status register for Channel 0 */
+#define DCSR31	0x007c	/* DMA Control / Status register for Channel 31 */
+#define DALGN	0x00a0	/* DMA Alignment register */
+#define DPCSR	0x00a4	/* DMA Programmed I/O Control Status register */
+#define DRQSR0	0x00e0	/* DMA DREQ<0> Status register */
+#define DRQSR1	0x00e4	/* DMA DREQ<1> Status register */
+#define DRQSR2	0x00e8	/* DMA DREQ<2> Status register */
+#define DINT	0x00f0	/* DMA Interrupt register */
+#define DRCMR0	0x0100	/* Request to Channel Map register 0 */
+#define DRCMR63	0x01fc	/* Request to Channel Map register 63 */
+#define D_CH0	0x0200	/* Channel 0 Descriptor start */
+#define DRCMR64	0x1100	/* Request to Channel Map register 64 */
+#define DRCMR74	0x1128	/* Request to Channel Map register 74 */
+
+/* Per-channel register */
+#define DDADR	0x00
+#define DSADR	0x01
+#define DTADR	0x02
+#define DCMD	0x03
+
+/* Bit-field masks */
+#define DRCMR_CHLNUM		0x1f
+#define DRCMR_MAPVLD		(1 << 7)
+#define DDADR_STOP		(1 << 0)
+#define DDADR_BREN		(1 << 1)
+#define DCMD_LEN		0x1fff
+#define DCMD_WIDTH(x)		(1 << ((((x) >> 14) & 3) - 1))
+#define DCMD_SIZE(x)		(4 << (((x) >> 16) & 3))
+#define DCMD_FLYBYT		(1 << 19)
+#define DCMD_FLYBYS		(1 << 20)
+#define DCMD_ENDIRQEN		(1 << 21)
+#define DCMD_STARTIRQEN		(1 << 22)
+#define DCMD_CMPEN		(1 << 25)
+#define DCMD_FLOWTRG		(1 << 28)
+#define DCMD_FLOWSRC		(1 << 29)
+#define DCMD_INCTRGADDR		(1 << 30)
+#define DCMD_INCSRCADDR		(1 << 31)
+#define DCSR_BUSERRINTR		(1 << 0)
+#define DCSR_STARTINTR		(1 << 1)
+#define DCSR_ENDINTR		(1 << 2)
+#define DCSR_STOPINTR		(1 << 3)
+#define DCSR_RASINTR		(1 << 4)
+#define DCSR_REQPEND		(1 << 8)
+#define DCSR_EORINT		(1 << 9)
+#define DCSR_CMPST		(1 << 10)
+#define DCSR_MASKRUN		(1 << 22)
+#define DCSR_RASIRQEN		(1 << 23)
+#define DCSR_CLRCMPST		(1 << 24)
+#define DCSR_SETCMPST		(1 << 25)
+#define DCSR_EORSTOPEN		(1 << 26)
+#define DCSR_EORJMPEN		(1 << 27)
+#define DCSR_EORIRQEN		(1 << 28)
+#define DCSR_STOPIRQEN		(1 << 29)
+#define DCSR_NODESCFETCH	(1 << 30)
+#define DCSR_RUN		(1 << 31)
+
+/* Recompute the pending-interrupt bitmaps for channel @ch (skipped when
+ * @ch is negative) from its DCSR state, then raise or lower the single
+ * controller interrupt line depending on whether any channel has any
+ * interrupt pending.  */
+static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
+{
+    if (ch >= 0) {
+        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
+                (s->chan[ch].state & DCSR_STOPINTR))
+            s->stopintr |= 1 << ch;
+        else
+            s->stopintr &= ~(1 << ch);
+
+        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
+                (s->chan[ch].state & DCSR_EORINT))
+            s->eorintr |= 1 << ch;
+        else
+            s->eorintr &= ~(1 << ch);
+
+        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
+                (s->chan[ch].state & DCSR_RASINTR))
+            s->rasintr |= 1 << ch;
+        else
+            s->rasintr &= ~(1 << ch);
+
+        /* Start and end interrupts have no separate enable bits.  */
+        if (s->chan[ch].state & DCSR_STARTINTR)
+            s->startintr |= 1 << ch;
+        else
+            s->startintr &= ~(1 << ch);
+
+        if (s->chan[ch].state & DCSR_ENDINTR)
+            s->endintr |= 1 << ch;
+        else
+            s->endintr &= ~(1 << ch);
+    }
+
+    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
+        qemu_irq_raise(s->irq);
+    else
+        qemu_irq_lower(s->irq);
+}
+
+/* Fetch the next 16-byte DMA descriptor for channel @ch from guest
+ * memory (address in the channel's DDADR, low 4 bits masked off) and
+ * load its four words into the channel registers.  With branching
+ * enabled (DDADR_BREN) and the compare status set, the alternate
+ * descriptor 32 bytes further is fetched instead.  */
+static inline void pxa2xx_dma_descriptor_fetch(
+                PXA2xxDMAState *s, int ch)
+{
+    uint32_t desc[4];
+    hwaddr daddr = s->chan[ch].descr & ~0xf;
+    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
+        daddr += 32;
+
+    cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
+    s->chan[ch].descr = desc[DDADR];
+    s->chan[ch].src = desc[DSADR];
+    s->chan[ch].dest = desc[DTADR];
+    s->chan[ch].cmd = desc[DCMD];
+
+    /* Flow-controlled endpoints use word-aligned addresses.  */
+    if (s->chan[ch].cmd & DCMD_FLOWSRC)
+        s->chan[ch].src &= ~3;
+    if (s->chan[ch].cmd & DCMD_FLOWTRG)
+        s->chan[ch].dest &= ~3;
+
+    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
+        printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);
+
+    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
+        s->chan[ch].state |= DCSR_STARTINTR;
+}
+
+/* Main transfer loop: for every running, non-stopped channel, copy data
+ * between src and dest in "size"-byte bursts of "width"-byte accesses,
+ * honouring flow control (pending request required) and raising the
+ * EOR/STOP/END status bits as transfers complete.  The s->running
+ * counter makes the function a no-op when entered recursively (e.g. via
+ * a descriptor fetch triggered mid-transfer).  */
+static void pxa2xx_dma_run(PXA2xxDMAState *s)
+{
+    int c, srcinc, destinc;
+    uint32_t n, size;
+    uint32_t width;
+    uint32_t length;
+    uint8_t buffer[32];
+    PXA2xxDMAChannel *ch;
+
+    /* Already running: remember the nested invocation and bail out.  */
+    if (s->running ++)
+        return;
+
+    while (s->running) {
+        s->running = 1;
+        for (c = 0; c < s->channels; c ++) {
+            ch = &s->chan[c];
+
+            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
+                /* Test for pending requests */
+                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
+                    break;
+
+                length = ch->cmd & DCMD_LEN;
+                size = DCMD_SIZE(ch->cmd);
+                width = DCMD_WIDTH(ch->cmd);
+
+                /* Addresses advance only when increment is enabled.  */
+                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
+                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;
+
+                while (length) {
+                    size = MIN(length, size);
+
+                    for (n = 0; n < size; n += width) {
+                        cpu_physical_memory_read(ch->src, buffer + n, width);
+                        ch->src += srcinc;
+                    }
+
+                    for (n = 0; n < size; n += width) {
+                        cpu_physical_memory_write(ch->dest, buffer + n, width);
+                        ch->dest += destinc;
+                    }
+
+                    length -= size;
+
+                    /* Request line dropped mid-transfer: end of receive.  */
+                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
+                            !ch->request) {
+                        ch->state |= DCSR_EORINT;
+                        if (ch->state & DCSR_EORSTOPEN)
+                            ch->state |= DCSR_STOPINTR;
+                        if ((ch->state & DCSR_EORJMPEN) &&
+                                        !(ch->state & DCSR_NODESCFETCH))
+                            pxa2xx_dma_descriptor_fetch(s, c);
+                        break;
+		    }
+                }
+
+                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;
+
+                /* Is the transfer complete now? */
+                if (!length) {
+                    if (ch->cmd & DCMD_ENDIRQEN)
+                        ch->state |= DCSR_ENDINTR;
+
+                    if ((ch->state & DCSR_NODESCFETCH) ||
+                                (ch->descr & DDADR_STOP) ||
+                                (ch->state & DCSR_EORSTOPEN)) {
+                        ch->state |= DCSR_STOPINTR;
+                        ch->state &= ~DCSR_RUN;
+
+                        break;
+                    }
+
+                    ch->state |= DCSR_STOPINTR;
+                    break;
+                }
+            }
+        }
+
+        /* Rerun the outer loop if a nested invocation arrived meanwhile.  */
+        s->running --;
+    }
+}
+
+/* Read handler for the PXA2xx DMA register window (32-bit accesses
+ * only).  Decodes the DRCMR map registers (the 64..74 bank is folded
+ * onto the 0..63 bank), the DCSR bank, the global registers, and the
+ * per-channel descriptor registers starting at D_CH0.  */
+static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
+                                unsigned size)
+{
+    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
+    unsigned int channel;
+
+    if (size != 4) {
+        hw_error("%s: Bad access width\n", __FUNCTION__);
+        return 5;
+    }
+
+    switch (offset) {
+    case DRCMR64 ... DRCMR74:
+        /* Rebase the high DRCMR bank onto the low one.  */
+        offset -= DRCMR64 - DRCMR0 - (64 << 2);
+        /* Fall through */
+    case DRCMR0 ... DRCMR63:
+        channel = (offset - DRCMR0) >> 2;
+        return s->req[channel];
+
+    case DRQSR0:
+    case DRQSR1:
+    case DRQSR2:
+        return 0;
+
+    case DCSR0 ... DCSR31:
+        channel = offset >> 2;
+        /* REQPEND is synthesized from the live request line.  */
+	if (s->chan[channel].request)
+            return s->chan[channel].state | DCSR_REQPEND;
+        return s->chan[channel].state;
+
+    case DINT:
+        return s->stopintr | s->eorintr | s->rasintr |
+                s->startintr | s->endintr;
+
+    case DALGN:
+        return s->align;
+
+    case DPCSR:
+        return s->pio;
+    }
+
+    /* Per-channel descriptor registers: 16 bytes per channel.  */
+    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
+        channel = (offset - D_CH0) >> 4;
+        switch ((offset & 0x0f) >> 2) {
+        case DDADR:
+            return s->chan[channel].descr;
+        case DSADR:
+            return s->chan[channel].src;
+        case DTADR:
+            return s->chan[channel].dest;
+        case DCMD:
+            return s->chan[channel].cmd;
+        }
+    }
+
+    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
+    return 7;
+}
+
+/* MMIO write dispatch.  Only 32-bit accesses are valid.  DCSR writes can
+ * acknowledge interrupts (write-one-to-clear), set control bits and start
+ * a channel either in descriptor-fetch or no-descriptor-fetch mode.  */
+static void pxa2xx_dma_write(void *opaque, hwaddr offset,
+                             uint64_t value, unsigned size)
+{
+    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
+    unsigned int channel;
+
+    if (size != 4) {
+        hw_error("%s: Bad access width\n", __FUNCTION__);
+        return;
+    }
+
+    switch (offset) {
+    case DRCMR64 ... DRCMR74:
+        /* Fold the second DRCMR bank onto the DRCMR0 index space.  */
+        offset -= DRCMR64 - DRCMR0 - (64 << 2);
+        /* Fall through */
+    case DRCMR0 ... DRCMR63:
+        channel = (offset - DRCMR0) >> 2;
+
+        /* NOTE(review): this looks like an off-by-one — channel numbers are
+         * 0..channels-1, so the check should arguably be >= s->channels;
+         * confirm before relying on it to reject out-of-range mappings.  */
+        if (value & DRCMR_MAPVLD)
+            if ((value & DRCMR_CHLNUM) > s->channels)
+                hw_error("%s: Bad DMA channel %i\n",
+                         __FUNCTION__, (unsigned)value & DRCMR_CHLNUM);
+
+        s->req[channel] = value;
+        break;
+
+    case DRQSR0:
+    case DRQSR1:
+    case DRQSR2:
+        /* Nothing to do */
+        break;
+
+    case DCSR0 ... DCSR31:
+        channel = offset >> 2;
+        /* Keep the read-only status bits (low mask) minus any interrupt
+         * bits the guest acknowledges by writing 1, then latch the
+         * writable control bits from the high part of the value.  */
+        s->chan[channel].state &= 0x0000071f & ~(value &
+                        (DCSR_EORINT | DCSR_ENDINTR |
+                         DCSR_STARTINTR | DCSR_BUSERRINTR));
+        s->chan[channel].state |= value & 0xfc800000;
+
+        if (s->chan[channel].state & DCSR_STOPIRQEN)
+            s->chan[channel].state &= ~DCSR_STOPINTR;
+
+        if (value & DCSR_NODESCFETCH) {
+            /* No-descriptor-fetch mode */
+            if (value & DCSR_RUN) {
+                s->chan[channel].state &= ~DCSR_STOPINTR;
+                pxa2xx_dma_run(s);
+            }
+        } else {
+            /* Descriptor-fetch mode */
+            if (value & DCSR_RUN) {
+                s->chan[channel].state &= ~DCSR_STOPINTR;
+                pxa2xx_dma_descriptor_fetch(s, channel);
+                pxa2xx_dma_run(s);
+            }
+        }
+
+        /* Shouldn't matter as our DMA is synchronous.  */
+        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
+            s->chan[channel].state |= DCSR_STOPINTR;
+
+        if (value & DCSR_CLRCMPST)
+            s->chan[channel].state &= ~DCSR_CMPST;
+        if (value & DCSR_SETCMPST)
+            s->chan[channel].state |= DCSR_CMPST;
+
+        /* Re-evaluate the interrupt line after any state change.  */
+        pxa2xx_dma_update(s, channel);
+        break;
+
+    case DALGN:
+        s->align = value;
+        break;
+
+    case DPCSR:
+        /* Only the branch-status and enable bits are writable.  */
+        s->pio = value & 0x80000001;
+        break;
+
+    default:
+        /* Per-channel descriptor registers, 16 bytes per channel.  */
+        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
+            channel = (offset - D_CH0) >> 4;
+            switch ((offset & 0x0f) >> 2) {
+            case DDADR:
+                s->chan[channel].descr = value;
+                break;
+            case DSADR:
+                s->chan[channel].src = value;
+                break;
+            case DTADR:
+                s->chan[channel].dest = value;
+                break;
+            case DCMD:
+                s->chan[channel].cmd = value;
+                break;
+            default:
+                goto fail;
+            }
+
+            break;
+        }
+    fail:
+        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset);
+    }
+}
+
+/* Register window; access-size checking is done in the handlers.  */
+static const MemoryRegionOps pxa2xx_dma_ops = {
+    .read = pxa2xx_dma_read,
+    .write = pxa2xx_dma_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/* GPIO-in handler: a peripheral raises/lowers DMA request line req_num.
+ * Routes the request to the channel mapped via DRCMR (if valid), updates
+ * the RAS/EOR status bits and kicks the channel when the line goes high.  */
+static void pxa2xx_dma_request(void *opaque, int req_num, int on)
+{
+    PXA2xxDMAState *s = opaque;
+    int ch;
+    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
+        hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);
+
+    /* Ignore requests that the guest has not mapped to a channel.  */
+    if (!(s->req[req_num] & DRCMR_MAPVLD))
+        return;
+    ch = s->req[req_num] & DRCMR_CHLNUM;
+
+    /* Rising edge sets request-after-start; falling edge sets end-of-receive.  */
+    if (!s->chan[ch].request && on)
+        s->chan[ch].state |= DCSR_RASINTR;
+    else
+        s->chan[ch].state &= ~DCSR_RASINTR;
+    if (s->chan[ch].request && !on)
+        s->chan[ch].state |= DCSR_EORINT;
+
+    s->chan[ch].request = on;
+    if (on) {
+        pxa2xx_dma_run(s);
+        pxa2xx_dma_update(s, ch);
+    }
+}
+
+/* qdev init: validate the "channels" property, allocate per-channel state,
+ * and register the 64 KiB MMIO window, the DMA-request GPIO inputs and the
+ * interrupt line.  Returns -1 when "channels" was not set to a positive
+ * value (the property defaults to -1).  */
+static int pxa2xx_dma_init(SysBusDevice *dev)
+{
+    int i;
+    PXA2xxDMAState *s;
+    s = FROM_SYSBUS(PXA2xxDMAState, dev);
+
+    if (s->channels <= 0) {
+        return -1;
+    }
+
+    /* g_malloc0() already zero-fills; the old follow-up memset() of the
+     * same region was redundant and has been dropped.  */
+    s->chan = g_malloc0(sizeof(PXA2xxDMAChannel) * s->channels);
+
+    for (i = 0; i < s->channels; i ++)
+        s->chan[i].state = DCSR_STOPINTR;
+
+    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
+
+    qdev_init_gpio_in(&dev->qdev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
+
+    memory_region_init_io(&s->iomem, &pxa2xx_dma_ops, s,
+                          "pxa2xx.dma", 0x00010000);
+    sysbus_init_mmio(dev, &s->iomem);
+    sysbus_init_irq(dev, &s->irq);
+
+    return 0;
+}
+
+/* Create, map and wire up a PXA27x DMA controller (PXA27x channel count).  */
+DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
+{
+    DeviceState *dma = qdev_create(NULL, "pxa2xx-dma");
+    SysBusDevice *sbd;
+
+    qdev_prop_set_int32(dma, "channels", PXA27X_DMA_NUM_CHANNELS);
+    qdev_init_nofail(dma);
+
+    sbd = SYS_BUS_DEVICE(dma);
+    sysbus_mmio_map(sbd, 0, base);
+    sysbus_connect_irq(sbd, 0, irq);
+
+    return dma;
+}
+
+/* Create, map and wire up a PXA255 DMA controller.  */
+DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
+{
+    DeviceState *dev;
+
+    dev = qdev_create(NULL, "pxa2xx-dma");
+    /* Bug fix: this previously passed PXA27X_DMA_NUM_CHANNELS (copy-paste
+     * from pxa27x_dma_init); the PXA255 has its own channel count.  */
+    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
+    qdev_init_nofail(dev);
+
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+    return dev;
+}
+
+/* vmstate field test: true only when loading a version-0 stream, used to
+ * consume the obsolete 4-byte padding present in old snapshots.  */
+static bool is_version_0(void *opaque, int version_id)
+{
+    return version_id == 0;
+}
+
+/* Migration layout of one DMA channel.  */
+static VMStateDescription vmstate_pxa2xx_dma_chan = {
+    .name = "pxa2xx_dma_chan",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
+        VMSTATE_UINT32(src, PXA2xxDMAChannel),
+        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
+        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
+        VMSTATE_UINT32(state, PXA2xxDMAChannel),
+        VMSTATE_INT32(request, PXA2xxDMAChannel),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
+/* Migration layout of the whole controller; the channel array length is
+ * taken from the "channels" property at load time.  */
+static VMStateDescription vmstate_pxa2xx_dma = {
+    .name = "pxa2xx_dma",
+    .version_id = 1,
+    .minimum_version_id = 0,
+    .minimum_version_id_old = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UNUSED_TEST(is_version_0, 4),
+        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
+        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
+        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
+        VMSTATE_UINT32(startintr, PXA2xxDMAState),
+        VMSTATE_UINT32(endintr, PXA2xxDMAState),
+        VMSTATE_UINT32(align, PXA2xxDMAState),
+        VMSTATE_UINT32(pio, PXA2xxDMAState),
+        VMSTATE_BUFFER(req, PXA2xxDMAState),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
+                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
+/* "channels" must be set by the board code; -1 makes init fail if not.  */
+static Property pxa2xx_dma_properties[] = {
+    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/* QOM class init: hook up init, migration state and properties.  */
+static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+    k->init = pxa2xx_dma_init;
+    dc->desc = "PXA2xx DMA controller";
+    dc->vmsd = &vmstate_pxa2xx_dma;
+    dc->props = pxa2xx_dma_properties;
+}
+
+static const TypeInfo pxa2xx_dma_info = {
+    .name          = "pxa2xx-dma",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(PXA2xxDMAState),
+    .class_init    = pxa2xx_dma_class_init,
+};
+
+static void pxa2xx_dma_register_types(void)
+{
+    type_register_static(&pxa2xx_dma_info);
+}
+
+type_init(pxa2xx_dma_register_types)
diff --git a/hw/dma/soc_dma.c b/hw/dma/soc_dma.c
new file mode 100644
index 0000000000..5e3491d373
--- /dev/null
+++ b/hw/dma/soc_dma.c
@@ -0,0 +1,366 @@
+/*
+ * On-chip DMA controller framework.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "hw/arm/soc_dma.h"
+
+/* Fast path for RAM-to-RAM transfers.  By the port convention used by the
+ * other transfer helpers (transfer_mem2fifo reads paddr[0],
+ * transfer_fifo2mem writes paddr[1]), port 0 is the source and port 1 the
+ * destination.  Bug fix: the memcpy() arguments were swapped, copying
+ * destination memory over the source.  */
+static void transfer_mem2mem(struct soc_dma_ch_s *ch)
+{
+    memcpy(ch->paddr[1], ch->paddr[0], ch->bytes);
+    ch->paddr[0] += ch->bytes;
+    ch->paddr[1] += ch->bytes;
+}
+
+/* Fast path: read linear memory at paddr[0], feed the destination FIFO.  */
+static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
+{
+    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
+    ch->paddr[0] += ch->bytes;
+}
+
+/* Fast path: drain the source FIFO into linear memory at paddr[1].  */
+static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
+{
+    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
+    ch->paddr[1] += ch->bytes;
+}
+
+/* This is further optimisable but isn't very important because often
+ * DMA peripherals forbid this kind of transfer and even when they don't,
+ * operating systems may not need to use them.  */
+/* Shared bounce buffer for FIFO-to-FIFO transfers, grown on demand and
+ * reused across all channels (DMA runs synchronously, so no reentrancy).  */
+static void *fifo_buf;
+static int fifo_size;
+static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
+{
+    if (ch->bytes > fifo_size)
+        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);
+
+    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
+    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
+    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
+}
+
+/* Concrete controller state behind the public struct soc_dma_s handle.  */
+struct dma_s {
+    struct soc_dma_s soc;    /* public part; must remain the first member */
+    int chnum;               /* number of channels in ch[] */
+    uint64_t ch_enable_mask; /* one bit per currently enabled channel */
+    int64_t channel_freq;    /* bandwidth share of each enabled channel */
+    int enabled_count;       /* channels with an active request */
+
+    /* Address-sorted table of known FIFO and RAM ports, searched by
+     * soc_dma_lookup() to pick a fast transfer path.  */
+    struct memmap_entry_s {
+        enum soc_dma_port_type type;
+        hwaddr addr;
+        union {
+           struct {
+               void *opaque;
+               soc_dma_io_t fn;
+               int out;
+           } fifo;
+           struct {
+               void *base;
+               size_t size;
+           } mem;
+        } u;
+    } *memmap;
+    int memmap_size;
+
+    /* C99 flexible array member (was the GNU zero-length array ch[0]);
+     * allocated as sizeof(struct dma_s) + chnum * sizeof(ch[0]).  */
+    struct soc_dma_ch_s ch[];
+};
+
+/* Arm the channel timer to model the time taken to move delay_bytes at the
+ * channel's share of the controller bandwidth.
+ * NOTE(review): the expiry is computed as bytes / freq without a
+ * nanoseconds-per-second scale factor — confirm the intended units.  */
+static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
+{
+    int64_t now = qemu_get_clock_ns(vm_clock);
+    struct dma_s *dma = (struct dma_s *) ch->dma;
+
+    qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
+}
+
+/* Timer callback: let the device-specific setup_fn refresh the channel
+ * parameters, perform one burst via the selected transfer_fn, then
+ * reschedule if the channel is still enabled.  */
+static void soc_dma_ch_run(void *opaque)
+{
+    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
+
+    ch->running = 1;
+    ch->dma->setup_fn(ch);
+    ch->transfer_fn(ch);
+    ch->running = 0;
+
+    if (ch->enable)
+        soc_dma_ch_schedule(ch, ch->bytes);
+    ch->bytes = 0;
+}
+
+/* Binary search over the address-sorted memmap: returns the last entry
+ * with entry->addr <= addr, or the first entry when addr precedes them
+ * all.  The caller must check the returned entry actually covers addr.  */
+static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
+                hwaddr addr)
+{
+    struct memmap_entry_s *lo;
+    int hi;
+
+    lo = dma->memmap;
+    hi = dma->memmap_size;
+
+    while (hi > 1) {
+        hi /= 2;
+        if (lo[hi].addr <= addr)
+            lo += hi;
+    }
+
+    return lo;
+}
+
+/* Classify one end of a channel's transfer (port 0 = source, 1 =
+ * destination) by looking its virtual address up in the port map, caching
+ * the fast-path state (io_fn/io_opaque for FIFOs, host paddr for RAM) in
+ * the channel.  Returns soc_dma_port_other when the generic per-device
+ * transfer_fn must be used instead.  */
+static inline enum soc_dma_port_type soc_dma_ch_update_type(
+                struct soc_dma_ch_s *ch, int port)
+{
+    struct dma_s *dma = (struct dma_s *) ch->dma;
+    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
+
+    if (entry->type == soc_dma_port_fifo) {
+        /* Several FIFOs may share an address; find the one registered for
+         * this transfer direction.  */
+        while (entry < dma->memmap + dma->memmap_size &&
+                        entry->u.fifo.out != port)
+            entry ++;
+        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
+            return soc_dma_port_other;
+
+        /* A FIFO is a single register: the access pattern must be const.  */
+        if (ch->type[port] != soc_dma_access_const)
+            return soc_dma_port_other;
+
+        ch->io_fn[port] = entry->u.fifo.fn;
+        ch->io_opaque[port] = entry->u.fifo.opaque;
+        return soc_dma_port_fifo;
+    } else if (entry->type == soc_dma_port_mem) {
+        if (entry->addr > ch->vaddr[port] ||
+                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
+            return soc_dma_port_other;
+
+        /* TODO: support constant memory address for source port as used for
+         * drawing solid rectangles by PalmOS(R).  */
+        /* Bug fix: the RAM fast path needs linear addressing; testing for
+         * soc_dma_access_const here made this branch unreachable for the
+         * normal linear channels (and contradicted the TODO above).  */
+        if (ch->type[port] != soc_dma_access_linear)
+            return soc_dma_port_other;
+
+        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
+                (ch->vaddr[port] - entry->addr);
+        /* TODO: save bytes left to the end of the mapping somewhere so we
+         * can check we're not reading beyond it.  */
+        return soc_dma_port_mem;
+    } else
+        return soc_dma_port_other;
+}
+
+/* Re-select the channel's transfer function after its addresses or access
+ * types changed.  Falls back to the device's generic transfer_fn when
+ * either end cannot use a fast path; ch->update records whether the cached
+ * classification is valid.  */
+void soc_dma_ch_update(struct soc_dma_ch_s *ch)
+{
+    enum soc_dma_port_type src, dst;
+
+    src = soc_dma_ch_update_type(ch, 0);
+    if (src == soc_dma_port_other) {
+        ch->update = 0;
+        ch->transfer_fn = ch->dma->transfer_fn;
+        return;
+    }
+    dst = soc_dma_ch_update_type(ch, 1);
+
+    /* TODO: use src and dst as array indices.  */
+    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
+        ch->transfer_fn = transfer_mem2mem;
+    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
+        ch->transfer_fn = transfer_mem2fifo;
+    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
+        ch->transfer_fn = transfer_fifo2mem;
+    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
+        ch->transfer_fn = transfer_fifo2fifo;
+    else
+        ch->transfer_fn = ch->dma->transfer_fn;
+
+    ch->update = (dst != soc_dma_port_other);
+}
+
+/* Divide the controller bandwidth evenly among the enabled channels.  */
+static void soc_dma_ch_freq_update(struct dma_s *s)
+{
+    if (s->enabled_count)
+        /* We completely ignore channel priorities and stuff */
+        s->channel_freq = s->soc.freq / s->enabled_count;
+    else {
+        /* TODO: Signal that we want to disable the functional clock and let
+         * the platform code decide what to do with it, i.e. check that
+         * auto-idle is enabled in the clock controller and if we are stopping
+         * the clock, do the same with any parent clocks that had only one
+         * user keeping them on and auto-idle enabled.  */
+    }
+}
+
+/* Assert or deassert a channel's DMA request line.  Updates the enabled
+ * channel bookkeeping, rebalances the per-channel bandwidth and starts,
+ * reschedules or stops the channel timer as appropriate.  */
+void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
+{
+    struct dma_s *dma = (struct dma_s *) ch->dma;
+
+    dma->enabled_count += level - ch->enable;
+
+    /* Bug fix: ch_enable_mask is 64 bits wide — shift a 64-bit constant so
+     * channel numbers >= 31 don't hit undefined behaviour in a 32-bit
+     * signed shift.  */
+    if (level)
+        dma->ch_enable_mask |= 1ULL << ch->num;
+    else
+        dma->ch_enable_mask &= ~(1ULL << ch->num);
+
+    if (level != ch->enable) {
+        soc_dma_ch_freq_update(dma);
+        ch->enable = level;
+
+        if (!ch->enable)
+            qemu_del_timer(ch->timer);
+        else if (!ch->running)
+            soc_dma_ch_run(ch);
+        else
+            soc_dma_ch_schedule(ch, 1);
+    }
+}
+
+/* Drop all request lines and recompute per-channel bandwidth.  */
+void soc_dma_reset(struct soc_dma_s *soc)
+{
+    struct dma_s *s = (struct dma_s *) soc;
+
+    s->soc.drqbmp = 0;
+    s->ch_enable_mask = 0;
+    s->enabled_count = 0;
+    soc_dma_ch_freq_update(s);
+}
+
+/* TODO: take a functional-clock argument */
+/* Allocate a controller with n channels (channel array is appended to the
+ * allocation) and give each channel its own timer.  */
+struct soc_dma_s *soc_dma_init(int n)
+{
+    int i;
+    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));
+
+    s->chnum = n;
+    s->soc.ch = s->ch;
+    for (i = 0; i < n; i ++) {
+        s->ch[i].dma = &s->soc;
+        s->ch[i].num = i;
+        s->ch[i].timer = qemu_new_timer_ns(vm_clock, soc_dma_ch_run, &s->ch[i]);
+    }
+
+    soc_dma_reset(&s->soc);
+    fifo_size = 0;
+
+    return &s->soc;
+}
+
+/* Register a FIFO port at virt_base for direction `out` (0 = readable
+ * source, 1 = writable destination).  Inserts into the address-sorted
+ * memmap, aborting on a collision with an existing RAM region or an
+ * identical FIFO registration.  */
+void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
+                soc_dma_io_t fn, void *opaque, int out)
+{
+    struct memmap_entry_s *entry;
+    struct dma_s *dma = (struct dma_s *) soc;
+
+    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
+                    (dma->memmap_size + 1));
+    entry = soc_dma_lookup(dma, virt_base);
+
+    if (dma->memmap_size) {
+        if (entry->type == soc_dma_port_mem) {
+            /* FIFOs may not fall inside an existing RAM mapping.  */
+            if (entry->addr <= virt_base &&
+                            entry->addr + entry->u.mem.size > virt_base) {
+                fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
+                                " collides with RAM region at " TARGET_FMT_lx
+                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
+                                (target_ulong) virt_base,
+                                (target_ulong) entry->addr, (target_ulong)
+                                (entry->addr + entry->u.mem.size));
+                exit(-1);
+            }
+
+            if (entry->addr <= virt_base)
+                entry ++;
+        } else
+            /* Skip over FIFOs at lower (or equal) addresses; the same
+             * address may host one FIFO per direction but not two alike.  */
+            while (entry < dma->memmap + dma->memmap_size &&
+                            entry->addr <= virt_base) {
+                if (entry->addr == virt_base && entry->u.fifo.out == out) {
+                    fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
+                                    " collides FIFO at " TARGET_FMT_lx "\n",
+                                    __FUNCTION__, (target_ulong) virt_base,
+                                    (target_ulong) entry->addr);
+                    exit(-1);
+                }
+
+                entry ++;
+            }
+
+        /* Shift the tail up one slot to open a gap at the insert point.  */
+        memmove(entry + 1, entry,
+                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
+                        (uint8_t *) entry);
+    } else
+        dma->memmap_size ++;
+
+    entry->addr          = virt_base;
+    entry->type          = soc_dma_port_fifo;
+    entry->u.fifo.fn     = fn;
+    entry->u.fifo.opaque = opaque;
+    entry->u.fifo.out    = out;
+}
+
+/* Register a RAM port: guest range [virt_base, virt_base + size) backed by
+ * host memory at phys_base.  Inserts into the address-sorted memmap,
+ * aborting on overlap with any existing RAM region or FIFO.  */
+void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
+                hwaddr virt_base, size_t size)
+{
+    struct memmap_entry_s *entry;
+    struct dma_s *dma = (struct dma_s *) soc;
+
+    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
+                    (dma->memmap_size + 1));
+    entry = soc_dma_lookup(dma, virt_base);
+
+    if (dma->memmap_size) {
+        if (entry->type == soc_dma_port_mem) {
+            /* Reject any overlap with the neighbouring RAM region.  */
+            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
+                            (entry->addr <= virt_base &&
+                             entry->addr + entry->u.mem.size > virt_base)) {
+                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
+                                " collides with RAM region at " TARGET_FMT_lx
+                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
+                                (target_ulong) virt_base,
+                                (target_ulong) (virt_base + size),
+                                (target_ulong) entry->addr, (target_ulong)
+                                (entry->addr + entry->u.mem.size));
+                exit(-1);
+            }
+
+            if (entry->addr <= virt_base)
+                entry ++;
+        } else {
+            /* Reject a FIFO falling inside the new RAM range.  */
+            if (entry->addr >= virt_base &&
+                            entry->addr < virt_base + size) {
+                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
+                                " collides with FIFO at " TARGET_FMT_lx
+                                "\n", __FUNCTION__,
+                                (target_ulong) virt_base,
+                                (target_ulong) (virt_base + size),
+                                (target_ulong) entry->addr);
+                exit(-1);
+            }
+
+            while (entry < dma->memmap + dma->memmap_size &&
+                            entry->addr <= virt_base)
+                entry ++;
+	}
+
+        /* Shift the tail up one slot to open a gap at the insert point.  */
+        memmove(entry + 1, entry,
+                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
+                        (uint8_t *) entry);
+    } else
+        dma->memmap_size ++;
+
+    entry->addr          = virt_base;
+    entry->type          = soc_dma_port_mem;
+    entry->u.mem.base    = phys_base;
+    entry->u.mem.size    = size;
+}
+
+/* TODO: port removal for ports like PCMCIA memory */
diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
new file mode 100644
index 0000000000..fd21533f15
--- /dev/null
+++ b/hw/dma/sparc32_dma.c
@@ -0,0 +1,315 @@
+/*
+ * QEMU Sparc32 DMA controller emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * Modifications:
+ *  2010-Feb-14 Artyom Tarasenko : reworked irq generation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/sparc/sparc32_dma.h"
+#include "hw/sparc/sun4m.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+
+/*
+ * This is the DMA controller part of chip STP2000 (Master I/O), also
+ * produced as NCR89C100. See
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
+ * and
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/DMA2.txt
+ */
+
+#define DMA_REGS 4
+#define DMA_SIZE (4 * sizeof(uint32_t))
+/* We need the mask, because one instance of the device is not page
+   aligned (ledma, start address 0x0010) */
+#define DMA_MASK (DMA_SIZE - 1)
+/* OBP says 0x20 bytes for ledma, the extras are aliased to espdma */
+#define DMA_ETH_SIZE (8 * sizeof(uint32_t))
+#define DMA_MAX_REG_OFFSET (2 * DMA_SIZE - 1)
+
+#define DMA_VER 0xa0000000
+#define DMA_INTR 1
+#define DMA_INTREN 0x10
+#define DMA_WRITE_MEM 0x100
+#define DMA_EN 0x200
+#define DMA_LOADED 0x04000000
+#define DMA_DRAIN_FIFO 0x40
+#define DMA_RESET 0x80
+
+/* XXX SCSI and ethernet should have different read-only bit masks */
+#define DMA_CSR_RO_MASK 0xfe000007
+
+typedef struct DMAState DMAState;
+
+/* Per-instance state; one instance serves either the ESP (SCSI) or the
+ * Lance (ethernet) DMA engine, selected by the is_ledma property.  */
+struct DMAState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+    uint32_t dmaregs[DMA_REGS]; /* CSR, address, byte count, test/base */
+    qemu_irq irq;               /* CPU interrupt */
+    void *iommu;                /* opaque handle for sparc_iommu_memory_rw */
+    qemu_irq gpio[2];           /* outputs to the client device, see below */
+    uint32_t is_ledma;          /* nonzero: ethernet variant (larger window) */
+};
+
+/* Indices into gpio[]: device reset pulse and DMA-enable level.  */
+enum {
+    GPIO_RESET = 0,
+    GPIO_DMA,
+};
+
+/* Note: on sparc, the lance 16 bit bus is swapped */
+/* Note: on sparc, the lance 16 bit bus is swapped */
+/* DMA read on behalf of the Lance: translate through the IOMMU (the base
+ * in dmaregs[3] is OR-ed into the address) and byte-swap each 16-bit word
+ * unless the caller wants raw (do_bswap) data.  */
+void ledma_memory_read(void *opaque, hwaddr addr,
+                       uint8_t *buf, int len, int do_bswap)
+{
+    DMAState *s = opaque;
+    int i;
+
+    addr |= s->dmaregs[3];
+    trace_ledma_memory_read(addr);
+    if (do_bswap) {
+        sparc_iommu_memory_read(s->iommu, addr, buf, len);
+    } else {
+        /* 16-bit bus: force even address and length, then swap in place.  */
+        addr &= ~1;
+        len &= ~1;
+        sparc_iommu_memory_read(s->iommu, addr, buf, len);
+        for(i = 0; i < len; i += 2) {
+            bswap16s((uint16_t *)(buf + i));
+        }
+    }
+}
+
+/* DMA write on behalf of the Lance: mirror of ledma_memory_read, swapping
+ * 16-bit words through a small bounce buffer before the IOMMU write.  */
+void ledma_memory_write(void *opaque, hwaddr addr,
+                        uint8_t *buf, int len, int do_bswap)
+{
+    DMAState *s = opaque;
+    int l, i;
+    uint16_t tmp_buf[32];
+
+    addr |= s->dmaregs[3];
+    trace_ledma_memory_write(addr);
+    if (do_bswap) {
+        sparc_iommu_memory_write(s->iommu, addr, buf, len);
+    } else {
+        addr &= ~1;
+        len &= ~1;
+        /* Swap and write in tmp_buf-sized chunks.  */
+        while (len > 0) {
+            l = len;
+            if (l > sizeof(tmp_buf))
+                l = sizeof(tmp_buf);
+            for(i = 0; i < l; i += 2) {
+                tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+            }
+            sparc_iommu_memory_write(s->iommu, addr, (uint8_t *)tmp_buf, l);
+            len -= l;
+            buf += l;
+            addr += l;
+        }
+    }
+}
+
+/* GPIO-in handler from the client device: latch DMA_INTR in the CSR and
+ * forward the level to the CPU only while interrupts are enabled.  */
+static void dma_set_irq(void *opaque, int irq, int level)
+{
+    DMAState *s = opaque;
+    if (level) {
+        s->dmaregs[0] |= DMA_INTR;
+        if (s->dmaregs[0] & DMA_INTREN) {
+            trace_sparc32_dma_set_irq_raise();
+            qemu_irq_raise(s->irq);
+        }
+    } else {
+        if (s->dmaregs[0] & DMA_INTR) {
+            s->dmaregs[0] &= ~DMA_INTR;
+            if (s->dmaregs[0] & DMA_INTREN) {
+                trace_sparc32_dma_set_irq_lower();
+                qemu_irq_lower(s->irq);
+            }
+        }
+    }
+}
+
+/* DMA read on behalf of the ESP: transfer via the IOMMU from the current
+ * address register (dmaregs[1]), which advances past the data.  */
+void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+{
+    DMAState *s = opaque;
+
+    trace_espdma_memory_read(s->dmaregs[1]);
+    sparc_iommu_memory_read(s->iommu, s->dmaregs[1], buf, len);
+    s->dmaregs[1] += len;
+}
+
+/* DMA write on behalf of the ESP; same address-register semantics.  */
+void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+{
+    DMAState *s = opaque;
+
+    trace_espdma_memory_write(s->dmaregs[1]);
+    sparc_iommu_memory_write(s->iommu, s->dmaregs[1], buf, len);
+    s->dmaregs[1] += len;
+}
+
+/* Register read; DMA_MASK compensates for the non-page-aligned ledma
+ * instance, and reads beyond the register window (ledma only) return 0.  */
+static uint64_t dma_mem_read(void *opaque, hwaddr addr,
+                             unsigned size)
+{
+    DMAState *s = opaque;
+    uint32_t saddr;
+
+    if (s->is_ledma && (addr > DMA_MAX_REG_OFFSET)) {
+        /* aliased to espdma, but we can't get there from here */
+        /* buggy driver if using undocumented behavior, just return 0 */
+        trace_sparc32_dma_mem_readl(addr, 0);
+        return 0;
+    }
+    saddr = (addr & DMA_MASK) >> 2;
+    trace_sparc32_dma_mem_readl(addr, s->dmaregs[saddr]);
+    return s->dmaregs[saddr];
+}
+
+/* Register write.  Writing the CSR (reg 0) drives the interrupt line,
+ * the client reset/enable GPIOs and the FIFO-drain handshake; writing the
+ * address register (reg 1) sets the LOADED status bit.  */
+static void dma_mem_write(void *opaque, hwaddr addr,
+                          uint64_t val, unsigned size)
+{
+    DMAState *s = opaque;
+    uint32_t saddr;
+
+    if (s->is_ledma && (addr > DMA_MAX_REG_OFFSET)) {
+        /* aliased to espdma, but we can't get there from here */
+        trace_sparc32_dma_mem_writel(addr, 0, val);
+        return;
+    }
+    saddr = (addr & DMA_MASK) >> 2;
+    trace_sparc32_dma_mem_writel(addr, s->dmaregs[saddr], val);
+    switch (saddr) {
+    case 0:
+        /* Propagate a pending interrupt when enabling, mask when disabling.  */
+        if (val & DMA_INTREN) {
+            if (s->dmaregs[0] & DMA_INTR) {
+                trace_sparc32_dma_set_irq_raise();
+                qemu_irq_raise(s->irq);
+            }
+        } else {
+            if (s->dmaregs[0] & (DMA_INTR | DMA_INTREN)) {
+                trace_sparc32_dma_set_irq_lower();
+                qemu_irq_lower(s->irq);
+            }
+        }
+        if (val & DMA_RESET) {
+            /* Pulse the client device's reset line.  */
+            qemu_irq_raise(s->gpio[GPIO_RESET]);
+            qemu_irq_lower(s->gpio[GPIO_RESET]);
+        } else if (val & DMA_DRAIN_FIFO) {
+            /* FIFO drain completes instantly in this model.  */
+            val &= ~DMA_DRAIN_FIFO;
+        } else if (val == 0)
+            val = DMA_DRAIN_FIFO;
+
+        /* Signal DMA enable edges to the client device.  */
+        if (val & DMA_EN && !(s->dmaregs[0] & DMA_EN)) {
+            trace_sparc32_dma_enable_raise();
+            qemu_irq_raise(s->gpio[GPIO_DMA]);
+        } else if (!(val & DMA_EN) && !!(s->dmaregs[0] & DMA_EN)) {
+            trace_sparc32_dma_enable_lower();
+            qemu_irq_lower(s->gpio[GPIO_DMA]);
+        }
+
+        /* Preserve the read-only bits and keep the version field fixed.  */
+        val &= ~DMA_CSR_RO_MASK;
+        val |= DMA_VER;
+        s->dmaregs[0] = (s->dmaregs[0] & DMA_CSR_RO_MASK) | val;
+        break;
+    case 1:
+        s->dmaregs[0] |= DMA_LOADED;
+        /* fall through */
+    default:
+        s->dmaregs[saddr] = val;
+        break;
+    }
+}
+
+/* 32-bit-only register window.  */
+static const MemoryRegionOps dma_mem_ops = {
+    .read = dma_mem_read,
+    .write = dma_mem_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+/* Device reset: clear all registers, keep the hardware version field.  */
+static void dma_reset(DeviceState *d)
+{
+    DMAState *s = container_of(d, DMAState, busdev.qdev);
+
+    memset(s->dmaregs, 0, DMA_SIZE);
+    s->dmaregs[0] = DMA_VER;
+}
+
+/* Only the raw register file needs to be migrated.  */
+static const VMStateDescription vmstate_dma = {
+    .name ="sparc32_dma",
+    .version_id = 2,
+    .minimum_version_id = 2,
+    .minimum_version_id_old = 2,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT32_ARRAY(dmaregs, DMAState, DMA_REGS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* qdev init: size the MMIO window per variant (ledma exposes the larger
+ * aliased window), and wire the GPIO in (client interrupt) and outs
+ * (client reset and DMA enable).  */
+static int sparc32_dma_init1(SysBusDevice *dev)
+{
+    DMAState *s = FROM_SYSBUS(DMAState, dev);
+    int reg_size;
+
+    sysbus_init_irq(dev, &s->irq);
+
+    reg_size = s->is_ledma ? DMA_ETH_SIZE : DMA_SIZE;
+    memory_region_init_io(&s->iomem, &dma_mem_ops, s, "dma", reg_size);
+    sysbus_init_mmio(dev, &s->iomem);
+
+    qdev_init_gpio_in(&dev->qdev, dma_set_irq, 1);
+    qdev_init_gpio_out(&dev->qdev, s->gpio, 2);
+
+    return 0;
+}
+
+/* Board code supplies the IOMMU handle and selects the ESP/Lance variant.  */
+static Property sparc32_dma_properties[] = {
+    DEFINE_PROP_PTR("iommu_opaque", DMAState, iommu),
+    DEFINE_PROP_UINT32("is_ledma", DMAState, is_ledma, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sparc32_dma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+    k->init = sparc32_dma_init1;
+    dc->reset = dma_reset;
+    dc->vmsd = &vmstate_dma;
+    dc->props = sparc32_dma_properties;
+}
+
+static const TypeInfo sparc32_dma_info = {
+    .name          = "sparc32_dma",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(DMAState),
+    .class_init    = sparc32_dma_class_init,
+};
+
+static void sparc32_dma_register_types(void)
+{
+    type_register_static(&sparc32_dma_info);
+}
+
+type_init(sparc32_dma_register_types)
diff --git a/hw/dma/sun4m_iommu.c b/hw/dma/sun4m_iommu.c
new file mode 100644
index 0000000000..8312bff5fe
--- /dev/null
+++ b/hw/dma/sun4m_iommu.c
@@ -0,0 +1,387 @@
+/*
+ * QEMU Sun4m iommu emulation
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sparc/sun4m.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+
+/*
+ * I/O MMU used by Sun4m systems
+ *
+ * Chipset docs:
+ * "Sun-4M System Architecture (revision 2.0) by Chuck Narad", 950-1373-01,
+ * http://mediacast.sun.com/users/Barton808/media/Sun4M_SystemArchitecture_edited2.pdf
+ */
+
+#define IOMMU_NREGS         (4*4096/4)
+#define IOMMU_CTRL          (0x0000 >> 2)
+#define IOMMU_CTRL_IMPL     0xf0000000 /* Implementation */
+#define IOMMU_CTRL_VERS     0x0f000000 /* Version */
+#define IOMMU_CTRL_RNGE     0x0000001c /* Mapping RANGE */
+#define IOMMU_RNGE_16MB     0x00000000 /* 0xff000000 -> 0xffffffff */
+#define IOMMU_RNGE_32MB     0x00000004 /* 0xfe000000 -> 0xffffffff */
+#define IOMMU_RNGE_64MB     0x00000008 /* 0xfc000000 -> 0xffffffff */
+#define IOMMU_RNGE_128MB    0x0000000c /* 0xf8000000 -> 0xffffffff */
+#define IOMMU_RNGE_256MB    0x00000010 /* 0xf0000000 -> 0xffffffff */
+#define IOMMU_RNGE_512MB    0x00000014 /* 0xe0000000 -> 0xffffffff */
+#define IOMMU_RNGE_1GB      0x00000018 /* 0xc0000000 -> 0xffffffff */
+#define IOMMU_RNGE_2GB      0x0000001c /* 0x80000000 -> 0xffffffff */
+#define IOMMU_CTRL_ENAB     0x00000001 /* IOMMU Enable */
+#define IOMMU_CTRL_MASK     0x0000001d
+
+#define IOMMU_BASE          (0x0004 >> 2)
+#define IOMMU_BASE_MASK     0x07fffc00
+
+#define IOMMU_TLBFLUSH      (0x0014 >> 2)
+#define IOMMU_TLBFLUSH_MASK 0xffffffff
+
+#define IOMMU_PGFLUSH       (0x0018 >> 2)
+#define IOMMU_PGFLUSH_MASK  0xffffffff
+
+#define IOMMU_AFSR          (0x1000 >> 2)
+#define IOMMU_AFSR_ERR      0x80000000 /* LE, TO, or BE asserted */
+#define IOMMU_AFSR_LE       0x40000000 /* SBUS reports error after
+                                          transaction */
+#define IOMMU_AFSR_TO       0x20000000 /* Write access took more than
+                                          12.8 us. */
+#define IOMMU_AFSR_BE       0x10000000 /* Write access received error
+                                          acknowledge */
+#define IOMMU_AFSR_SIZE     0x0e000000 /* Size of transaction causing error */
+#define IOMMU_AFSR_S        0x01000000 /* Sparc was in supervisor mode */
+#define IOMMU_AFSR_RESV     0x00800000 /* Reserved, forced to 0x8 by
+                                          hardware */
+#define IOMMU_AFSR_ME       0x00080000 /* Multiple errors occurred */
+#define IOMMU_AFSR_RD       0x00040000 /* A read operation was in progress */
+#define IOMMU_AFSR_FAV      0x00020000 /* IOMMU afar has valid contents */
+#define IOMMU_AFSR_MASK     0xff0fffff
+
+#define IOMMU_AFAR          (0x1004 >> 2)
+
+#define IOMMU_AER           (0x1008 >> 2) /* Arbiter Enable Register */
+#define IOMMU_AER_EN_P0_ARB 0x00000001    /* MBus master 0x8 (Always 1) */
+#define IOMMU_AER_EN_P1_ARB 0x00000002    /* MBus master 0x9 */
+#define IOMMU_AER_EN_P2_ARB 0x00000004    /* MBus master 0xa */
+#define IOMMU_AER_EN_P3_ARB 0x00000008    /* MBus master 0xb */
+#define IOMMU_AER_EN_0      0x00010000    /* SBus slot 0 */
+#define IOMMU_AER_EN_1      0x00020000    /* SBus slot 1 */
+#define IOMMU_AER_EN_2      0x00040000    /* SBus slot 2 */
+#define IOMMU_AER_EN_3      0x00080000    /* SBus slot 3 */
+#define IOMMU_AER_EN_F      0x00100000    /* SBus on-board */
+#define IOMMU_AER_SBW       0x80000000    /* S-to-M asynchronous writes */
+#define IOMMU_AER_MASK      0x801f000f
+
+#define IOMMU_SBCFG0        (0x1010 >> 2) /* SBUS configuration per-slot */
+#define IOMMU_SBCFG1        (0x1014 >> 2) /* SBUS configuration per-slot */
+#define IOMMU_SBCFG2        (0x1018 >> 2) /* SBUS configuration per-slot */
+#define IOMMU_SBCFG3        (0x101c >> 2) /* SBUS configuration per-slot */
+#define IOMMU_SBCFG_SAB30   0x00010000 /* Phys-address bit 30 when
+                                          bypass enabled */
+#define IOMMU_SBCFG_BA16    0x00000004 /* Slave supports 16 byte bursts */
+#define IOMMU_SBCFG_BA8     0x00000002 /* Slave supports 8 byte bursts */
+#define IOMMU_SBCFG_BYPASS  0x00000001 /* Bypass IOMMU, treat all addresses
+                                          produced by this device as pure
+                                          physical. */
+#define IOMMU_SBCFG_MASK    0x00010003
+
+#define IOMMU_ARBEN         (0x2000 >> 2) /* SBUS arbitration enable */
+#define IOMMU_ARBEN_MASK    0x001f0000
+#define IOMMU_MID           0x00000008
+
+#define IOMMU_MASK_ID       (0x3018 >> 2) /* Mask ID */
+#define IOMMU_MASK_ID_MASK  0x00ffffff
+
+#define IOMMU_MSII_MASK     0x26000000 /* microSPARC II mask number */
+#define IOMMU_TS_MASK       0x23000000 /* turboSPARC mask number */
+
+/* The format of an iopte in the page tables */
+#define IOPTE_PAGE          0xffffff00 /* Physical page number (PA[35:12]) */
+#define IOPTE_CACHE         0x00000080 /* Cached (in vme IOCACHE or
+                                          Viking/MXCC) */
+#define IOPTE_WRITE         0x00000004 /* Writable */
+#define IOPTE_VALID         0x00000002 /* IOPTE is valid */
+#define IOPTE_WAZ           0x00000001 /* Write as zeros */
+
+#define IOMMU_PAGE_SHIFT    12
+#define IOMMU_PAGE_SIZE     (1 << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK     ~(IOMMU_PAGE_SIZE - 1)
+
+/* Per-device state of the Sun4m I/O MMU. */
+typedef struct IOMMUState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;          /* MMIO register bank */
+    uint32_t regs[IOMMU_NREGS];  /* raw register file */
+    hwaddr iostart;              /* base of the DVMA window (from CTRL range) */
+    qemu_irq irq;                /* fault interrupt line */
+    uint32_t version;            /* impl/version bits, set via "version" property */
+} IOMMUState;
+
+/* MMIO read: 32-bit access into the register bank.  Reading either
+ * fault register (AFAR/AFSR) also retires a pending fault IRQ. */
+static uint64_t iommu_mem_read(void *opaque, hwaddr addr,
+                               unsigned size)
+{
+    IOMMUState *s = opaque;
+    hwaddr reg = addr >> 2;
+    uint32_t val = s->regs[reg];
+
+    if (reg == IOMMU_AFAR || reg == IOMMU_AFSR) {
+        /* The fault has been observed by the guest; drop the line. */
+        qemu_irq_lower(s->irq);
+    }
+    trace_sun4m_iommu_mem_readl(reg, val);
+    return val;
+}
+
+/* MMIO write: 32-bit access into the register bank.  Most registers
+ * apply a per-register writable-bit mask; writes to the fault
+ * registers (AFAR/AFSR) also retire a pending fault IRQ. */
+static void iommu_mem_write(void *opaque, hwaddr addr,
+                            uint64_t val, unsigned size)
+{
+    IOMMUState *s = opaque;
+    hwaddr saddr;
+
+    saddr = addr >> 2;
+    trace_sun4m_iommu_mem_writel(saddr, val);
+    switch (saddr) {
+    case IOMMU_CTRL:
+        /* The RNGE field selects the DVMA window size; iostart is the
+         * window base, kept sign-extended to 64 bits. */
+        switch (val & IOMMU_CTRL_RNGE) {
+        case IOMMU_RNGE_16MB:
+            s->iostart = 0xffffffffff000000ULL;
+            break;
+        case IOMMU_RNGE_32MB:
+            s->iostart = 0xfffffffffe000000ULL;
+            break;
+        case IOMMU_RNGE_64MB:
+            s->iostart = 0xfffffffffc000000ULL;
+            break;
+        case IOMMU_RNGE_128MB:
+            s->iostart = 0xfffffffff8000000ULL;
+            break;
+        case IOMMU_RNGE_256MB:
+            s->iostart = 0xfffffffff0000000ULL;
+            break;
+        case IOMMU_RNGE_512MB:
+            s->iostart = 0xffffffffe0000000ULL;
+            break;
+        case IOMMU_RNGE_1GB:
+            s->iostart = 0xffffffffc0000000ULL;
+            break;
+        default:
+        case IOMMU_RNGE_2GB:
+            s->iostart = 0xffffffff80000000ULL;
+            break;
+        }
+        trace_sun4m_iommu_mem_writel_ctrl(s->iostart);
+        /* Impl/version bits are read-only; re-merge them on each write. */
+        s->regs[saddr] = ((val & IOMMU_CTRL_MASK) | s->version);
+        break;
+    case IOMMU_BASE:
+        s->regs[saddr] = val & IOMMU_BASE_MASK;
+        break;
+    case IOMMU_TLBFLUSH:
+        /* No TLB is modelled, so a flush only latches the value. */
+        trace_sun4m_iommu_mem_writel_tlbflush(val);
+        s->regs[saddr] = val & IOMMU_TLBFLUSH_MASK;
+        break;
+    case IOMMU_PGFLUSH:
+        trace_sun4m_iommu_mem_writel_pgflush(val);
+        s->regs[saddr] = val & IOMMU_PGFLUSH_MASK;
+        break;
+    case IOMMU_AFAR:
+        s->regs[saddr] = val;
+        qemu_irq_lower(s->irq);
+        break;
+    case IOMMU_AER:
+        /* MBus master 0 arbitration is always enabled. */
+        s->regs[saddr] = (val & IOMMU_AER_MASK) | IOMMU_AER_EN_P0_ARB;
+        break;
+    case IOMMU_AFSR:
+        /* The reserved bit reads back as set. */
+        s->regs[saddr] = (val & IOMMU_AFSR_MASK) | IOMMU_AFSR_RESV;
+        qemu_irq_lower(s->irq);
+        break;
+    case IOMMU_SBCFG0:
+    case IOMMU_SBCFG1:
+    case IOMMU_SBCFG2:
+    case IOMMU_SBCFG3:
+        s->regs[saddr] = val & IOMMU_SBCFG_MASK;
+        break;
+    case IOMMU_ARBEN:
+        /* XXX implement SBus probing: fault when reading unmapped
+         * addresses, fault cause and address stored to MMU/IOMMU */
+        s->regs[saddr] = (val & IOMMU_ARBEN_MASK) | IOMMU_MID;
+        break;
+    case IOMMU_MASK_ID:
+        /* OR-only: mask ID bits can be set by the guest but never
+         * cleared through this register. */
+        s->regs[saddr] |= val & IOMMU_MASK_ID_MASK;
+        break;
+    default:
+        s->regs[saddr] = val;
+        break;
+    }
+}
+
+/* Register bank access ops: only aligned 32-bit accesses are valid. */
+static const MemoryRegionOps iommu_mem_ops = {
+    .read = iommu_mem_read,
+    .write = iommu_mem_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+/* Fetch the 32-bit IOPTE mapping the given DVMA address.  The page
+ * table base comes from IOMMU_BASE (the << 4 scales the register
+ * value to a physical address) and the table holds one 4-byte entry
+ * per IOMMU page within the window selected by iostart. */
+static uint32_t iommu_page_get_flags(IOMMUState *s, hwaddr addr)
+{
+    uint32_t ret;
+    hwaddr iopte;
+    hwaddr pa = addr;    /* keep the untranslated address for tracing */
+
+    iopte = s->regs[IOMMU_BASE] << 4;
+    addr &= ~s->iostart; /* offset within the DVMA window */
+    /* page number * 4 bytes per entry, kept 4-byte aligned */
+    iopte += (addr >> (IOMMU_PAGE_SHIFT - 2)) & ~3;
+    ret = ldl_be_phys(iopte);
+    trace_sun4m_iommu_page_get_flags(pa, iopte, ret);
+    return ret;
+}
+
+/* Combine the physical page number held in an IOPTE (scaled by 16)
+ * with the offset inside the IOMMU page to form the final physical
+ * address. */
+static hwaddr iommu_translate_pa(hwaddr addr,
+                                             uint32_t pte)
+{
+    hwaddr page_off = addr & ~IOMMU_PAGE_MASK;
+    hwaddr pa = ((pte & IOPTE_PAGE) << 4) + page_off;
+
+    trace_sun4m_iommu_translate_pa(addr, pa, pte);
+    return pa;
+}
+
+/* Record an IOMMU fault: latch the status bits and faulting address
+ * into AFSR/AFAR and raise the fault interrupt.  Failed reads
+ * additionally set the RD bit. */
+static void iommu_bad_addr(IOMMUState *s, hwaddr addr,
+                           int is_write)
+{
+    uint32_t afsr = IOMMU_AFSR_ERR | IOMMU_AFSR_LE | IOMMU_AFSR_RESV |
+                    IOMMU_AFSR_FAV;
+
+    trace_sun4m_iommu_bad_addr(addr);
+    if (!is_write) {
+        afsr |= IOMMU_AFSR_RD;
+    }
+    s->regs[IOMMU_AFSR] = afsr;
+    s->regs[IOMMU_AFAR] = addr;
+    qemu_irq_raise(s->irq);
+}
+
+/* DMA helper for devices behind the IOMMU: translate and copy 'len'
+ * bytes page by page.  On an invalid mapping, or a write through a
+ * read-only mapping, the transfer stops and a fault is latched via
+ * iommu_bad_addr(). */
+void sparc_iommu_memory_rw(void *opaque, hwaddr addr,
+                           uint8_t *buf, int len, int is_write)
+{
+    IOMMUState *s = opaque;
+    hwaddr page, phys;
+    uint32_t pte;
+    int chunk;
+
+    while (len > 0) {
+        page = addr & IOMMU_PAGE_MASK;
+        /* Never cross an IOMMU page boundary in one copy. */
+        chunk = (page + IOMMU_PAGE_SIZE) - addr;
+        if (chunk > len) {
+            chunk = len;
+        }
+        pte = iommu_page_get_flags(s, page);
+        if (!(pte & IOPTE_VALID)) {
+            iommu_bad_addr(s, page, is_write);
+            return;
+        }
+        phys = iommu_translate_pa(addr, pte);
+        if (is_write) {
+            if (!(pte & IOPTE_WRITE)) {
+                iommu_bad_addr(s, page, is_write);
+                return;
+            }
+            cpu_physical_memory_write(phys, buf, chunk);
+        } else {
+            cpu_physical_memory_read(phys, buf, chunk);
+        }
+        len -= chunk;
+        buf += chunk;
+        addr += chunk;
+    }
+}
+
+/* Migration state: the raw register file plus the derived DVMA
+ * window base (iostart). */
+static const VMStateDescription vmstate_iommu = {
+    .name ="iommu",
+    .version_id = 2,
+    .minimum_version_id = 2,
+    .minimum_version_id_old = 2,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS),
+        VMSTATE_UINT64(iostart, IOMMUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* Device reset: clear everything, then restore the registers with
+ * non-zero hardware defaults (version bits, arbiter enables, the
+ * AFSR reserved bit and the turboSPARC mask ID). */
+static void iommu_reset(DeviceState *d)
+{
+    IOMMUState *s = container_of(d, IOMMUState, busdev.qdev);
+
+    memset(s->regs, 0, IOMMU_NREGS * 4);
+    s->iostart = 0;
+    s->regs[IOMMU_CTRL] = s->version;
+    s->regs[IOMMU_ARBEN] = IOMMU_MID;
+    s->regs[IOMMU_AFSR] = IOMMU_AFSR_RESV;
+    s->regs[IOMMU_AER] = IOMMU_AER_EN_P0_ARB | IOMMU_AER_EN_P1_ARB;
+    s->regs[IOMMU_MASK_ID] = IOMMU_TS_MASK;
+}
+
+/* SysBus init: wire up the fault IRQ and the MMIO window covering
+ * the whole register bank (IOMMU_NREGS 32-bit registers).
+ * Returns 0 on success (old-style SysBusDeviceClass::init). */
+static int iommu_init1(SysBusDevice *dev)
+{
+    IOMMUState *s = FROM_SYSBUS(IOMMUState, dev);
+
+    sysbus_init_irq(dev, &s->irq);
+
+    memory_region_init_io(&s->iomem, &iommu_mem_ops, s, "iommu",
+                          IOMMU_NREGS * sizeof(uint32_t));
+    sysbus_init_mmio(dev, &s->iomem);
+
+    return 0;
+}
+
+static Property iommu_properties[] = {
+    /* Implementation/version bits merged into IOMMU_CTRL reads. */
+    DEFINE_PROP_HEX32("version", IOMMUState, version, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/* QOM class init: hook up realize (old-style init), reset, migration
+ * state and the qdev properties. */
+static void iommu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+    k->init = iommu_init1;
+    dc->reset = iommu_reset;
+    dc->vmsd = &vmstate_iommu;
+    dc->props = iommu_properties;
+}
+
+/* QOM type registration record for the "iommu" sysbus device. */
+static const TypeInfo iommu_info = {
+    .name          = "iommu",
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IOMMUState),
+    .class_init    = iommu_class_init,
+};
+
+/* Register the device type with QOM at module init time. */
+static void iommu_register_types(void)
+{
+    type_register_static(&iommu_info);
+}
+
+type_init(iommu_register_types)