Diffstat (limited to 'target/sparc')
-rw-r--r--   target/sparc/Makefile.objs      7
-rw-r--r--   target/sparc/TODO              88
-rw-r--r--   target/sparc/asi.h            311
-rw-r--r--   target/sparc/cc_helper.c      471
-rw-r--r--   target/sparc/cpu-qom.h         56
-rw-r--r--   target/sparc/cpu.c            895
-rw-r--r--   target/sparc/cpu.h            779
-rw-r--r--   target/sparc/fop_helper.c     400
-rw-r--r--   target/sparc/gdbstub.c        209
-rw-r--r--   target/sparc/helper.c         257
-rw-r--r--   target/sparc/helper.h         168
-rw-r--r--   target/sparc/int32_helper.c   175
-rw-r--r--   target/sparc/int64_helper.c   205
-rw-r--r--   target/sparc/ldst_helper.c   1709
-rw-r--r--   target/sparc/machine.c        194
-rw-r--r--   target/sparc/mmu_helper.c     880
-rw-r--r--   target/sparc/monitor.c        159
-rw-r--r--   target/sparc/trace-events      28
-rw-r--r--   target/sparc/translate.c     5924
-rw-r--r--   target/sparc/vis_helper.c     490
-rw-r--r--   target/sparc/win_helper.c     400
21 files changed, 13805 insertions(+), 0 deletions(-)
diff --git a/target/sparc/Makefile.objs b/target/sparc/Makefile.objs
new file mode 100644
index 0000000000..ec905698c5
--- /dev/null
+++ b/target/sparc/Makefile.objs
@@ -0,0 +1,7 @@
+obj-$(CONFIG_SOFTMMU) += machine.o monitor.o
+obj-y += translate.o helper.o cpu.o
+obj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o
+obj-$(TARGET_SPARC) += int32_helper.o
+obj-$(TARGET_SPARC64) += int64_helper.o
+obj-$(TARGET_SPARC64) += vis_helper.o
+obj-y += gdbstub.o
diff --git a/target/sparc/TODO b/target/sparc/TODO
new file mode 100644
index 0000000000..b8c727e858
--- /dev/null
+++ b/target/sparc/TODO
@@ -0,0 +1,88 @@
+TODO-list:
+
+CPU common:
+- Unimplemented features/bugs:
+ - Delay slot handling may fail sometimes (branch at the end of a page,
+   delay slot on the next page)
+ - Atomic instructions
+ - CPU features should match real CPUs (also ASI selection)
+- Optimizations/improvements:
+ - Condition code/branch handling like x86, also for FPU?
+ - Remove remaining explicit alignment checks
+ - Global register for regwptr, so that windowed registers can be
+ accessed directly
+ - Improve Sparc32plus addressing
+ - NPC/PC static optimisations (use JUMP_TB when possible)? (Is this
+ obsolete?)
+ - Synthetic instructions
+ - MMU model dependent on CPU model
+ - Select ASI helper at translation time (on V9 only if known)
+ - KQemu/KVM support for VM only
+ - Hardware breakpoint/watchpoint support
+ - Cache emulation mode
+ - Reverse-endian pages
+ - Faster FPU emulation
+ - Busy loop detection
+
+Sparc32 CPUs:
+- Unimplemented features/bugs:
+ - Sun4/Sun4c MMUs
+ - Some V8 ASIs
+
+Sparc64 CPUs:
+- Unimplemented features/bugs:
+ - Interrupt handling
+ - Secondary address space, other MMU functions
+ - Many V9/UA2005/UA2007 ASIs
+ - Rest of V9 instructions, missing VIS instructions
+ - IG/MG/AG vs. UA2007 globals
+ - Full hypervisor support
+ - SMP/CMT
+ - Sun4v CPUs
+
+Sun4:
+- To be added
+
+Sun4c:
+- A lot of unimplemented features
+- Maybe split from Sun4m
+
+Sun4m:
+- Unimplemented features/bugs:
+ - Hardware devices do not match real boards
+ - Floppy does not work
+ - CS4231: merge with cs4231a, add DMA
+ - Add cg6, bwtwo
+ - Arbitrary resolution support
+ - PCI for MicroSparc-IIe
+ - JavaStation machines
+ - SBus slot probing, FCode ROM support
+ - SMP probing support
+ - Interrupt routing does not match real HW
+ - SuSE 7.3 keyboard sometimes unresponsive
+ - Gentoo 2004.1 SMP does not work
+ - SS600MP ledma -> lebuffer
+ - Type 5 keyboard
+ - Less fixed hardware choices
+ - DBRI audio (Am7930)
+ - BPP parallel
+ - Diagnostic switch
+ - ESP PIO mode
+
+Sun4d:
+- A lot of unimplemented features:
+ - SBI
+ - IO-unit
+- Maybe split from Sun4m
+
+Sun4u:
+- Unimplemented features/bugs:
+ - Interrupt controller
+ - PCI/IOMMU support (Simba, JIO, Tomatillo, Psycho, Schizo, Safari...)
+ - SMP
+ - Happy Meal Ethernet, flash, I2C, GPIO
+ - A lot of real machine types
+
+Sun4v:
+- A lot of unimplemented features
+- A lot of real machine types
diff --git a/target/sparc/asi.h b/target/sparc/asi.h
new file mode 100644
index 0000000000..c9a1849600
--- /dev/null
+++ b/target/sparc/asi.h
@@ -0,0 +1,311 @@
+#ifndef _SPARC_ASI_H
+#define _SPARC_ASI_H
+
+/* asi.h:  Address Space Identifier values for the sparc.
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au)
+ * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su>
+ */
+
+/* The first batch are for the sun4c. */
+
+#define ASI_NULL1           0x00
+#define ASI_NULL2           0x01
+
+/* sun4c and sun4 control registers and mmu/vac ops */
+#define ASI_CONTROL         0x02
+#define ASI_SEGMAP          0x03
+#define ASI_PTE             0x04
+#define ASI_HWFLUSHSEG      0x05
+#define ASI_HWFLUSHPAGE     0x06
+#define ASI_REGMAP          0x06
+#define ASI_HWFLUSHCONTEXT  0x07
+
+#define ASI_USERTXT         0x08
+#define ASI_KERNELTXT       0x09
+#define ASI_USERDATA        0x0a
+#define ASI_KERNELDATA      0x0b
+
+/* VAC Cache flushing on sun4c and sun4 */
+#define ASI_FLUSHSEG        0x0c
+#define ASI_FLUSHPG         0x0d
+#define ASI_FLUSHCTX        0x0e
+
+/* SPARCstation-5: only 6 bits are decoded. */
+/* wo = Write Only, rw = Read Write;        */
+/* ss = Single Size, as = All Sizes;        */
+#define ASI_M_RES00         0x00   /* Don't touch... */
+#define ASI_M_UNA01         0x01   /* Same here... */
+#define ASI_M_MXCC          0x02   /* Access to TI VIKING MXCC registers */
+#define ASI_M_FLUSH_PROBE   0x03   /* Reference MMU Flush/Probe; rw, ss */
+#define ASI_M_MMUREGS       0x04   /* MMU Registers; rw, ss */
+#define ASI_M_TLBDIAG       0x05   /* MMU TLB only Diagnostics */
+#define ASI_M_DIAGS         0x06   /* Reference MMU Diagnostics */
+#define ASI_M_IODIAG        0x07   /* MMU I/O TLB only Diagnostics */
+#define ASI_M_USERTXT       0x08   /* Same as ASI_USERTXT; rw, as */
+#define ASI_M_KERNELTXT     0x09   /* Same as ASI_KERNELTXT; rw, as */
+#define ASI_M_USERDATA      0x0A   /* Same as ASI_USERDATA; rw, as */
+#define ASI_M_KERNELDATA    0x0B   /* Same as ASI_KERNELDATA; rw, as */
+#define ASI_M_TXTC_TAG      0x0C   /* Instruction Cache Tag; rw, ss */
+#define ASI_M_TXTC_DATA     0x0D   /* Instruction Cache Data; rw, ss */
+#define ASI_M_DATAC_TAG     0x0E   /* Data Cache Tag; rw, ss */
+#define ASI_M_DATAC_DATA    0x0F   /* Data Cache Data; rw, ss */
+
+/* The following cache flushing ASIs work only with the 'sta'
+ * instruction. Results are unpredictable for 'swap' and 'ldstuba',
+ * so don't do it.
+ */
+
+/* These ASI flushes affect external caches too. */
+#define ASI_M_FLUSH_PAGE    0x10   /* Flush I&D Cache Line (page); wo, ss */
+#define ASI_M_FLUSH_SEG     0x11   /* Flush I&D Cache Line (seg); wo, ss */
+#define ASI_M_FLUSH_REGION  0x12   /* Flush I&D Cache Line (region); wo, ss */
+#define ASI_M_FLUSH_CTX     0x13   /* Flush I&D Cache Line (context); wo, ss */
+#define ASI_M_FLUSH_USER    0x14   /* Flush I&D Cache Line (user); wo, ss */
+
+/* Block-copy operations are available only on certain V8 cpus. */
+#define ASI_M_BCOPY         0x17   /* Block copy */
+
+/* These affect only the ICACHE and are Ross HyperSparc and TurboSparc specific. */
+#define ASI_M_IFLUSH_PAGE   0x18   /* Flush I Cache Line (page); wo, ss */
+#define ASI_M_IFLUSH_SEG    0x19   /* Flush I Cache Line (seg); wo, ss */
+#define ASI_M_IFLUSH_REGION 0x1A   /* Flush I Cache Line (region); wo, ss */
+#define ASI_M_IFLUSH_CTX    0x1B   /* Flush I Cache Line (context); wo, ss */
+#define ASI_M_IFLUSH_USER   0x1C   /* Flush I Cache Line (user); wo, ss */
+
+/* Block-fill operations are available on certain V8 cpus */
+#define ASI_M_BFILL         0x1F
+
+/* This allows direct access to main memory; actually 0x20 to 0x2f are
+ * the available ASIs for physical ram pass-through, but I don't have
+ * any idea what the other ones do....
+ */
+
+#define ASI_M_BYPASS       0x20   /* Reference MMU bypass; rw, as */
+#define ASI_M_FBMEM        0x29   /* Graphics card frame buffer access */
+#define ASI_M_VMEUS        0x2A   /* VME user 16-bit access */
+#define ASI_M_VMEPS        0x2B   /* VME priv 16-bit access */
+#define ASI_M_VMEUT        0x2C   /* VME user 32-bit access */
+#define ASI_M_VMEPT        0x2D   /* VME priv 32-bit access */
+#define ASI_M_SBUS         0x2E   /* Direct SBus access */
+#define ASI_M_CTL          0x2F   /* Control Space (ECC and MXCC are here) */
+
+
+/* This is ROSS HyperSparc only. */
+#define ASI_M_FLUSH_IWHOLE 0x31   /* Flush entire ICACHE; wo, ss */
+
+/* Tsunami/Viking/TurboSparc i/d cache flash clear. */
+#define ASI_M_IC_FLCLEAR   0x36
+#define ASI_M_DC_FLCLEAR   0x37
+
+#define ASI_M_DCDR         0x39   /* Data Cache Diagnostics Register rw, ss */
+
+#define ASI_M_VIKING_TMP1  0x40	  /* Emulation temporary 1 on Viking */
+/* only available on SuperSparc I */
+/* #define ASI_M_VIKING_TMP2  0x41 */  /* Emulation temporary 2 on Viking */
+
+#define ASI_M_ACTION       0x4c   /* Breakpoint Action Register (GNU/Viking) */
+
+/* LEON ASI */
+#define ASI_LEON_NOCACHE        0x01
+
+#define ASI_LEON_DCACHE_MISS    0x01
+
+#define ASI_LEON_CACHEREGS      0x02
+#define ASI_LEON_IFLUSH         0x10
+#define ASI_LEON_DFLUSH         0x11
+
+#define ASI_LEON_MMUFLUSH       0x18
+#define ASI_LEON_MMUREGS        0x19
+#define ASI_LEON_BYPASS         0x1c
+#define ASI_LEON_FLUSH_PAGE     0x10
+
+/* V9 Architecture mandatory ASIs. */
+#define ASI_N			0x04 /* Nucleus				*/
+#define ASI_NL			0x0c /* Nucleus, little endian		*/
+#define ASI_AIUP		0x10 /* Primary, user			*/
+#define ASI_AIUS		0x11 /* Secondary, user			*/
+#define ASI_AIUPL		0x18 /* Primary, user, little endian	*/
+#define ASI_AIUSL		0x19 /* Secondary, user, little endian	*/
+#define ASI_P			0x80 /* Primary, implicit		*/
+#define ASI_S			0x81 /* Secondary, implicit		*/
+#define ASI_PNF			0x82 /* Primary, no fault		*/
+#define ASI_SNF			0x83 /* Secondary, no fault		*/
+#define ASI_PL			0x88 /* Primary, implicit, l-endian	*/
+#define ASI_SL			0x89 /* Secondary, implicit, l-endian	*/
+#define ASI_PNFL		0x8a /* Primary, no fault, l-endian	*/
+#define ASI_SNFL		0x8b /* Secondary, no fault, l-endian	*/
+
+/* SpitFire and later extended ASIs.  The "(III)" marker designates
+ * UltraSparc-III and later specific ASIs.  The "(CMT)" marker designates
+ * Chip Multi Threading specific ASIs.  "(NG)" designates Niagara specific
+ * ASIs, "(4V)" designates SUN4V specific ASIs.  "(NG4)" designates SPARC-T4
+ * and later ASIs.
+ */
+#define ASI_REAL                0x14 /* Real address, cachable          */
+#define ASI_PHYS_USE_EC		0x14 /* PADDR, E-cachable		*/
+#define ASI_REAL_IO             0x15 /* Real address, non-cachable      */
+#define ASI_PHYS_BYPASS_EC_E	0x15 /* PADDR, E-bit			*/
+#define ASI_BLK_AIUP_4V		0x16 /* (4V) Prim, user, block ld/st	*/
+#define ASI_BLK_AIUS_4V		0x17 /* (4V) Sec, user, block ld/st	*/
+#define ASI_REAL_L              0x1c /* Real address, cachable, LE      */
+#define ASI_PHYS_USE_EC_L	0x1c /* PADDR, E-cachable, little endian*/
+#define ASI_REAL_IO_L           0x1d /* Real address, non-cachable, LE  */
+#define ASI_PHYS_BYPASS_EC_E_L	0x1d /* PADDR, E-bit, little endian	*/
+#define ASI_BLK_AIUP_L_4V	0x1e /* (4V) Prim, user, block, l-endian*/
+#define ASI_BLK_AIUS_L_4V	0x1f /* (4V) Sec, user, block, l-endian	*/
+#define ASI_SCRATCHPAD		0x20 /* (4V) Scratch Pad Registers	*/
+#define ASI_MMU			0x21 /* (4V) MMU Context Registers	*/
+#define ASI_TWINX_AIUP          0x22 /* twin load, primary user         */
+#define ASI_TWINX_AIUS          0x23 /* twin load, secondary user       */
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
+					 * secondary, user
+					 */
+#define ASI_NUCLEUS_QUAD_LDD	0x24 /* Cachable, qword load		*/
+#define ASI_QUEUE		0x25 /* (4V) Interrupt Queue Registers	*/
+#define ASI_TWINX_REAL          0x26 /* twin load, real, cachable       */
+#define ASI_QUAD_LDD_PHYS_4V	0x26 /* (4V) Physical, qword load	*/
+#define ASI_TWINX_N             0x27 /* twin load, nucleus              */
+#define ASI_TWINX_AIUP_L        0x2a /* twin load, primary user, LE     */
+#define ASI_TWINX_AIUS_L        0x2b /* twin load, secondary user, LE   */
+#define ASI_NUCLEUS_QUAD_LDD_L	0x2c /* Cachable, qword load, l-endian 	*/
+#define ASI_TWINX_REAL_L        0x2e /* twin load, real, cachable, LE   */
+#define ASI_QUAD_LDD_PHYS_L_4V	0x2e /* (4V) Phys, qword load, l-endian	*/
+#define ASI_TWINX_NL            0x2f /* twin load, nucleus, LE          */
+#define ASI_PCACHE_DATA_STATUS	0x30 /* (III) PCache data stat RAM diag	*/
+#define ASI_PCACHE_DATA		0x31 /* (III) PCache data RAM diag	*/
+#define ASI_PCACHE_TAG		0x32 /* (III) PCache tag RAM diag	*/
+#define ASI_PCACHE_SNOOP_TAG	0x33 /* (III) PCache snoop tag RAM diag	*/
+#define ASI_QUAD_LDD_PHYS	0x34 /* (III+) PADDR, qword load	*/
+#define ASI_WCACHE_VALID_BITS	0x38 /* (III) WCache Valid Bits diag	*/
+#define ASI_WCACHE_DATA		0x39 /* (III) WCache data RAM diag	*/
+#define ASI_WCACHE_TAG		0x3a /* (III) WCache tag RAM diag	*/
+#define ASI_WCACHE_SNOOP_TAG	0x3b /* (III) WCache snoop tag RAM diag	*/
+#define ASI_QUAD_LDD_PHYS_L	0x3c /* (III+) PADDR, qw-load, l-endian	*/
+#define ASI_SRAM_FAST_INIT	0x40 /* (III+) Fast SRAM init		*/
+#define ASI_CORE_AVAILABLE	0x41 /* (CMT) LP Available		*/
+#define ASI_CORE_ENABLE_STAT	0x41 /* (CMT) LP Enable Status		*/
+#define ASI_CORE_ENABLE		0x41 /* (CMT) LP Enable RW		*/
+#define ASI_XIR_STEERING	0x41 /* (CMT) XIR Steering RW		*/
+#define ASI_CORE_RUNNING_RW	0x41 /* (CMT) LP Running RW		*/
+#define ASI_CORE_RUNNING_W1S	0x41 /* (CMT) LP Running Write-One Set	*/
+#define ASI_CORE_RUNNING_W1C	0x41 /* (CMT) LP Running Write-One Clr	*/
+#define ASI_CORE_RUNNING_STAT	0x41 /* (CMT) LP Running Status		*/
+#define ASI_CMT_ERROR_STEERING	0x41 /* (CMT) Error Steering RW		*/
+#define ASI_DCACHE_INVALIDATE	0x42 /* (III) DCache Invalidate diag	*/
+#define ASI_DCACHE_UTAG		0x43 /* (III) DCache uTag diag		*/
+#define ASI_DCACHE_SNOOP_TAG	0x44 /* (III) DCache snoop tag RAM diag	*/
+#define ASI_LSU_CONTROL		0x45 /* Load-store control unit		*/
+#define ASI_DCU_CONTROL_REG	0x45 /* (III) DCache Unit Control reg	*/
+#define ASI_DCACHE_DATA		0x46 /* DCache data-ram diag access	*/
+#define ASI_DCACHE_TAG		0x47 /* Dcache tag/valid ram diag access*/
+#define ASI_INTR_DISPATCH_STAT	0x48 /* IRQ vector dispatch status	*/
+#define ASI_INTR_RECEIVE	0x49 /* IRQ vector receive status	*/
+#define ASI_UPA_CONFIG		0x4a /* UPA config space		*/
+#define ASI_JBUS_CONFIG		0x4a /* (IIIi) JBUS Config Register	*/
+#define ASI_SAFARI_CONFIG	0x4a /* (III) Safari Config Register	*/
+#define ASI_SAFARI_ADDRESS	0x4a /* (III) Safari Address Register	*/
+#define ASI_ESTATE_ERROR_EN	0x4b /* E-cache error enable space	*/
+#define ASI_AFSR		0x4c /* Async fault status register	*/
+#define ASI_AFAR		0x4d /* Async fault address register	*/
+#define ASI_EC_TAG_DATA		0x4e /* E-cache tag/valid ram diag acc	*/
+#define ASI_IMMU		0x50 /* Insn-MMU main register space	*/
+#define ASI_IMMU_TSB_8KB_PTR	0x51 /* Insn-MMU 8KB TSB pointer reg	*/
+#define ASI_IMMU_TSB_64KB_PTR	0x52 /* Insn-MMU 64KB TSB pointer reg	*/
+#define ASI_ITLB_DATA_IN	0x54 /* Insn-MMU TLB data in reg	*/
+#define ASI_ITLB_DATA_ACCESS	0x55 /* Insn-MMU TLB data access reg	*/
+#define ASI_ITLB_TAG_READ	0x56 /* Insn-MMU TLB tag read reg	*/
+#define ASI_IMMU_DEMAP		0x57 /* Insn-MMU TLB demap		*/
+#define ASI_DMMU		0x58 /* Data-MMU main register space	*/
+#define ASI_DMMU_TSB_8KB_PTR	0x59 /* Data-MMU 8KB TSB pointer reg	*/
+#define ASI_DMMU_TSB_64KB_PTR	0x5a /* Data-MMU 64KB TSB pointer reg	*/
+#define ASI_DMMU_TSB_DIRECT_PTR	0x5b /* Data-MMU TSB direct pointer reg	*/
+#define ASI_DTLB_DATA_IN	0x5c /* Data-MMU TLB data in reg	*/
+#define ASI_DTLB_DATA_ACCESS	0x5d /* Data-MMU TLB data access reg	*/
+#define ASI_DTLB_TAG_READ	0x5e /* Data-MMU TLB tag read reg	*/
+#define ASI_DMMU_DEMAP		0x5f /* Data-MMU TLB demap		*/
+#define ASI_IIU_INST_TRAP	0x60 /* (III) Instruction Breakpoint	*/
+#define ASI_INTR_ID		0x63 /* (CMT) Interrupt ID register	*/
+#define ASI_CORE_ID		0x63 /* (CMT) LP ID register		*/
+#define ASI_CESR_ID		0x63 /* (CMT) CESR ID register		*/
+#define ASI_IC_INSTR		0x66 /* Insn cache instruction ram diag	*/
+#define ASI_IC_TAG		0x67 /* Insn cache tag/valid ram diag 	*/
+#define ASI_IC_STAG		0x68 /* (III) Insn cache snoop tag ram	*/
+#define ASI_IC_PRE_DECODE	0x6e /* Insn cache pre-decode ram diag	*/
+#define ASI_IC_NEXT_FIELD	0x6f /* Insn cache next-field ram diag	*/
+#define ASI_BRPRED_ARRAY	0x6f /* (III) Branch Prediction RAM diag*/
+#define ASI_BLK_AIUP		0x70 /* Primary, user, block load/store	*/
+#define ASI_BLK_AIUS		0x71 /* Secondary, user, block ld/st	*/
+#define ASI_MCU_CTRL_REG	0x72 /* (III) Memory controller regs	*/
+#define ASI_EC_DATA		0x74 /* (III) E-cache data staging reg	*/
+#define ASI_EC_CTRL		0x75 /* (III) E-cache control reg	*/
+#define ASI_EC_W		0x76 /* E-cache diag write access	*/
+#define ASI_UDB_ERROR_W		0x77 /* External UDB error regs W	*/
+#define ASI_UDB_CONTROL_W	0x77 /* External UDB control regs W	*/
+#define ASI_INTR_W		0x77 /* IRQ vector dispatch write	*/
+#define ASI_INTR_DATAN_W	0x77 /* (III) Out irq vector data reg N	*/
+#define ASI_INTR_DISPATCH_W	0x77 /* (III) Interrupt vector dispatch	*/
+#define ASI_BLK_AIUPL		0x78 /* Primary, user, little, blk ld/st*/
+#define ASI_BLK_AIUSL		0x79 /* Secondary, user, little, blk ld/st*/
+#define ASI_EC_R		0x7e /* E-cache diag read access	*/
+#define ASI_UDBH_ERROR_R	0x7f /* External UDB error regs rd hi	*/
+#define ASI_UDBL_ERROR_R	0x7f /* External UDB error regs rd low	*/
+#define ASI_UDBH_CONTROL_R	0x7f /* External UDB control regs rd hi	*/
+#define ASI_UDBL_CONTROL_R	0x7f /* External UDB control regs rd low*/
+#define ASI_INTR_R		0x7f /* IRQ vector dispatch read	*/
+#define ASI_INTR_DATAN_R	0x7f /* (III) In irq vector data reg N	*/
+#define ASI_PIC			0xb0 /* (NG4) PIC registers		*/
+#define ASI_PST8_P		0xc0 /* Primary, 8 8-bit, partial	*/
+#define ASI_PST8_S		0xc1 /* Secondary, 8 8-bit, partial	*/
+#define ASI_PST16_P		0xc2 /* Primary, 4 16-bit, partial	*/
+#define ASI_PST16_S		0xc3 /* Secondary, 4 16-bit, partial	*/
+#define ASI_PST32_P		0xc4 /* Primary, 2 32-bit, partial	*/
+#define ASI_PST32_S		0xc5 /* Secondary, 2 32-bit, partial	*/
+#define ASI_PST8_PL		0xc8 /* Primary, 8 8-bit, partial, L	*/
+#define ASI_PST8_SL		0xc9 /* Secondary, 8 8-bit, partial, L	*/
+#define ASI_PST16_PL		0xca /* Primary, 4 16-bit, partial, L	*/
+#define ASI_PST16_SL		0xcb /* Secondary, 4 16-bit, partial, L	*/
+#define ASI_PST32_PL		0xcc /* Primary, 2 32-bit, partial, L	*/
+#define ASI_PST32_SL		0xcd /* Secondary, 2 32-bit, partial, L	*/
+#define ASI_FL8_P		0xd0 /* Primary, 1 8-bit, fpu ld/st	*/
+#define ASI_FL8_S		0xd1 /* Secondary, 1 8-bit, fpu ld/st	*/
+#define ASI_FL16_P		0xd2 /* Primary, 1 16-bit, fpu ld/st	*/
+#define ASI_FL16_S		0xd3 /* Secondary, 1 16-bit, fpu ld/st	*/
+#define ASI_FL8_PL		0xd8 /* Primary, 1 8-bit, fpu ld/st, L	*/
+#define ASI_FL8_SL		0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/
+#define ASI_FL16_PL		0xda /* Primary, 1 16-bit, fpu ld/st, L	*/
+#define ASI_FL16_SL		0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
+#define ASI_BLK_COMMIT_P	0xe0 /* Primary, blk store commit	*/
+#define ASI_BLK_COMMIT_S	0xe1 /* Secondary, blk store commit	*/
+#define ASI_TWINX_P             0xe2 /* twin load, primary implicit     */
+#define ASI_BLK_INIT_QUAD_LDD_P	0xe2 /* (NG) init-store, twin load,
+				      * primary, implicit */
+#define ASI_TWINX_S             0xe3 /* twin load, secondary implicit   */
+#define ASI_BLK_INIT_QUAD_LDD_S	0xe3 /* (NG) init-store, twin load,
+				      * secondary, implicit */
+#define ASI_TWINX_PL            0xea /* twin load, primary implicit, LE */
+#define ASI_TWINX_SL            0xeb /* twin load, secondary implicit, LE */
+#define ASI_BLK_P		0xf0 /* Primary, blk ld/st		*/
+#define ASI_BLK_S		0xf1 /* Secondary, blk ld/st		*/
+#define ASI_ST_BLKINIT_MRU_P	0xf2 /* (NG4) init-store, twin load,
+				      * Most-Recently-Used, primary,
+				      * implicit
+				      */
+#define ASI_ST_BLKINIT_MRU_S	0xf2 /* (NG4) init-store, twin load,
+				      * Most-Recently-Used, secondary,
+				      * implicit
+				      */
+#define ASI_BLK_PL		0xf8 /* Primary, blk ld/st, little	*/
+#define ASI_BLK_SL		0xf9 /* Secondary, blk ld/st, little	*/
+#define ASI_ST_BLKINIT_MRU_PL	0xfa /* (NG4) init-store, twin load,
+				      * Most-Recently-Used, primary,
+				      * implicit, little-endian
+				      */
+#define ASI_ST_BLKINIT_MRU_SL	0xfb /* (NG4) init-store, twin load,
+				      * Most-Recently-Used, secondary,
+				      * implicit, little-endian
+				      */
+
+#endif /* _SPARC_ASI_H */
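The V9 tables above pair each big-endian ASI with a little-endian twin at
base+8 (ASI_P 0x80 vs ASI_PL 0x88, ASI_N 0x04 vs ASI_NL 0x0c, and so on).
A minimal host-side C sketch of that observation, illustrative only and not
part of the patch:

/* Hypothetical: within the V9 ASI families defined in asi.h above,
 * bit 3 distinguishes the little-endian variant. */
#include <stdio.h>
#include "asi.h"

static int asi_v9_is_little_endian(int asi)
{
    return (asi & 0x08) != 0;   /* e.g. ASI_PL, ASI_NL, ASI_AIUPL */
}

int main(void)
{
    printf("ASI_P  (0x%02x): %s\n", ASI_P,
           asi_v9_is_little_endian(ASI_P) ? "LE" : "BE");
    printf("ASI_PL (0x%02x): %s\n", ASI_PL,
           asi_v9_is_little_endian(ASI_PL) ? "LE" : "BE");
    return 0;
}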
diff --git a/target/sparc/cc_helper.c b/target/sparc/cc_helper.c
new file mode 100644
index 0000000000..a410a0b98f
--- /dev/null
+++ b/target/sparc/cc_helper.c
@@ -0,0 +1,471 @@
+/*
+ * Helpers for lazy condition code handling
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+static uint32_t compute_all_flags(CPUSPARCState *env)
+{
+    return env->psr & PSR_ICC;
+}
+
+static uint32_t compute_C_flags(CPUSPARCState *env)
+{
+    return env->psr & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_icc(int32_t dst)
+{
+    uint32_t ret = 0;
+
+    if (dst == 0) {
+        ret = PSR_ZERO;
+    } else if (dst < 0) {
+        ret = PSR_NEG;
+    }
+    return ret;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_flags_xcc(CPUSPARCState *env)
+{
+    return env->xcc & PSR_ICC;
+}
+
+static uint32_t compute_C_flags_xcc(CPUSPARCState *env)
+{
+    return env->xcc & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_xcc(target_long dst)
+{
+    uint32_t ret = 0;
+
+    if (!dst) {
+        ret = PSR_ZERO;
+    } else if (dst < 0) {
+        ret = PSR_NEG;
+    }
+    return ret;
+}
+#endif
+
+static inline uint32_t get_V_div_icc(target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (src2 != 0) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+static uint32_t compute_all_div(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_V_div_icc(CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_div(CPUSPARCState *env)
+{
+    return 0;
+}
+
+static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
+{
+    uint32_t ret = 0;
+
+    if (dst < src1) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
+                                      uint32_t src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
+                                     uint32_t src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
+{
+    uint32_t ret = 0;
+
+    if (dst < src1) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
+                                      target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
+                                     target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+static uint32_t compute_all_add_xcc(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_xcc(CC_DST);
+    ret |= get_C_add_xcc(CC_DST, CC_SRC);
+    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_add_xcc(CPUSPARCState *env)
+{
+    return get_C_add_xcc(CC_DST, CC_SRC);
+}
+#endif
+
+static uint32_t compute_all_add(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_add_icc(CC_DST, CC_SRC);
+    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_add(CPUSPARCState *env)
+{
+    return get_C_add_icc(CC_DST, CC_SRC);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_addx_xcc(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_xcc(CC_DST);
+    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_addx_xcc(CPUSPARCState *env)
+{
+    return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_addx(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_addx(CPUSPARCState *env)
+{
+    return get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
+static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if ((src1 | src2) & 0x3) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+static uint32_t compute_all_tadd(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_add_icc(CC_DST, CC_SRC);
+    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_all_taddtv(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_add_icc(CC_DST, CC_SRC);
+    return ret;
+}
+
+static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
+{
+    uint32_t ret = 0;
+
+    if (src1 < src2) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
+                                      uint32_t src2)
+{
+    uint32_t ret = 0;
+
+    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
+                                     uint32_t src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (src1 < src2) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
+                                      target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
+        ret = PSR_CARRY;
+    }
+    return ret;
+}
+
+static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
+                                     target_ulong src2)
+{
+    uint32_t ret = 0;
+
+    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
+        ret = PSR_OVF;
+    }
+    return ret;
+}
+
+static uint32_t compute_all_sub_xcc(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_xcc(CC_DST);
+    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
+    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_sub_xcc(CPUSPARCState *env)
+{
+    return get_C_sub_xcc(CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_sub(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_sub(CPUSPARCState *env)
+{
+    return get_C_sub_icc(CC_SRC, CC_SRC2);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_subx_xcc(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_xcc(CC_DST);
+    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_subx_xcc(CPUSPARCState *env)
+{
+    return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_subx(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_C_subx(CPUSPARCState *env)
+{
+    return get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
+static uint32_t compute_all_tsub(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_all_tsubtv(CPUSPARCState *env)
+{
+    uint32_t ret;
+
+    ret = get_NZ_icc(CC_DST);
+    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+    return ret;
+}
+
+static uint32_t compute_all_logic(CPUSPARCState *env)
+{
+    return get_NZ_icc(CC_DST);
+}
+
+static uint32_t compute_C_logic(CPUSPARCState *env)
+{
+    return 0;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_logic_xcc(CPUSPARCState *env)
+{
+    return get_NZ_xcc(CC_DST);
+}
+#endif
+
+typedef struct CCTable {
+    uint32_t (*compute_all)(CPUSPARCState *env); /* return all the flags */
+    uint32_t (*compute_c)(CPUSPARCState *env);  /* return the C flag */
+} CCTable;
+
+static const CCTable icc_table[CC_OP_NB] = {
+    /* CC_OP_DYNAMIC should never happen */
+    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
+    [CC_OP_DIV] = { compute_all_div, compute_C_div },
+    [CC_OP_ADD] = { compute_all_add, compute_C_add },
+    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
+    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
+    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
+    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
+    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
+    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
+    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
+    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
+};
+
+#ifdef TARGET_SPARC64
+static const CCTable xcc_table[CC_OP_NB] = {
+    /* CC_OP_DYNAMIC should never happen */
+    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
+    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
+    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
+    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
+    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
+    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
+    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
+    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
+    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
+};
+#endif
+
+void helper_compute_psr(CPUSPARCState *env)
+{
+    uint32_t new_psr;
+
+    new_psr = icc_table[CC_OP].compute_all(env);
+    env->psr = new_psr;
+#ifdef TARGET_SPARC64
+    new_psr = xcc_table[CC_OP].compute_all(env);
+    env->xcc = new_psr;
+#endif
+    CC_OP = CC_OP_FLAGS;
+}
+
+uint32_t helper_compute_C_icc(CPUSPARCState *env)
+{
+    return icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT;
+}
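The tables above implement QEMU's lazy condition codes: instead of updating
PSR after every arithmetic instruction, the translator records an operation
code (CC_OP) plus the operands (CC_SRC, CC_SRC2, CC_DST), and the flags are
only materialized when helper_compute_psr() or helper_compute_C_icc() runs.
A standalone sketch of the idea (hypothetical, simplified flag layout, not
QEMU code):

/* Lazy flags in miniature: remember what happened, fold to flags on demand. */
#include <stdint.h>
#include <stdio.h>

enum cc_op { OP_LOGIC, OP_ADD };

static enum cc_op lazy_op;
static uint32_t lazy_src, lazy_dst;

static uint32_t compute_flags(void)
{
    uint32_t f = 0;
    if (lazy_dst == 0)         f |= 1u << 0;     /* Z */
    if ((int32_t)lazy_dst < 0) f |= 1u << 1;     /* N */
    if (lazy_op == OP_ADD && lazy_dst < lazy_src) {
        f |= 1u << 2;          /* C, the same test as get_C_add_icc() */
    }
    return f;
}

int main(void)
{
    lazy_op  = OP_ADD;
    lazy_src = 0xffffffffu;
    lazy_dst = lazy_src + 1;   /* wraps to 0, so expect Z and C */
    printf("flags = 0x%x\n", compute_flags());
    return 0;
}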
diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h
new file mode 100644
index 0000000000..f63af728ee
--- /dev/null
+++ b/target/sparc/cpu-qom.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU SPARC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_SPARC_CPU_QOM_H
+#define QEMU_SPARC_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#ifdef TARGET_SPARC64
+#define TYPE_SPARC_CPU "sparc64-cpu"
+#else
+#define TYPE_SPARC_CPU "sparc-cpu"
+#endif
+
+#define SPARC_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(SPARCCPUClass, (klass), TYPE_SPARC_CPU)
+#define SPARC_CPU(obj) \
+    OBJECT_CHECK(SPARCCPU, (obj), TYPE_SPARC_CPU)
+#define SPARC_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(SPARCCPUClass, (obj), TYPE_SPARC_CPU)
+
+/**
+ * SPARCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A SPARC CPU model.
+ */
+typedef struct SPARCCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+} SPARCCPUClass;
+
+typedef struct SPARCCPU SPARCCPU;
+
+#endif
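The cast macros follow the standard QOM pattern: they type-check the object
at runtime before downcasting. A hypothetical usage sketch (assumes a QEMU
target build for the full SPARCCPU definition; mirrors how cpu.c below
chains to the parent reset handler):

/* Hypothetical sketch, not part of the patch. */
static void example_reset(CPUState *cs)
{
    SPARCCPU *cpu = SPARC_CPU(cs);                  /* checked downcast */
    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);  /* per-model class */

    scc->parent_reset(cs);  /* chain to the parent class' handler, as
                             * sparc_cpu_reset() does in cpu.c */
    (void)cpu;
}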
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
new file mode 100644
index 0000000000..4e07b92fbd
--- /dev/null
+++ b/target/sparc/cpu.c
@@ -0,0 +1,895 @@
+/*
+ * Sparc CPU init helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "exec/exec-all.h"
+
+//#define DEBUG_FEATURES
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model);
+
+/* CPUClass::reset() */
+static void sparc_cpu_reset(CPUState *s)
+{
+    SPARCCPU *cpu = SPARC_CPU(s);
+    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);
+    CPUSPARCState *env = &cpu->env;
+
+    scc->parent_reset(s);
+
+    memset(env, 0, offsetof(CPUSPARCState, version));
+    tlb_flush(s, 1);
+    env->cwp = 0;
+#ifndef TARGET_SPARC64
+    env->wim = 1;
+#endif
+    env->regwptr = env->regbase + (env->cwp * 16);
+    CC_OP = CC_OP_FLAGS;
+#if defined(CONFIG_USER_ONLY)
+#ifdef TARGET_SPARC64
+    env->cleanwin = env->nwindows - 2;
+    env->cansave = env->nwindows - 2;
+    env->pstate = PS_RMO | PS_PEF | PS_IE;
+    env->asi = 0x82; /* Primary no-fault */
+#endif
+#else
+#if !defined(TARGET_SPARC64)
+    env->psret = 0;
+    env->psrs = 1;
+    env->psrps = 1;
+#endif
+#ifdef TARGET_SPARC64
+    env->pstate = PS_PRIV | PS_RED | PS_PEF | PS_AG;
+    env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
+    env->tl = env->maxtl;
+    cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
+    env->lsu = 0;
+#else
+    env->mmuregs[0] &= ~(MMU_E | MMU_NF);
+    env->mmuregs[0] |= env->def->mmu_bm;
+#endif
+    env->pc = 0;
+    env->npc = env->pc + 4;
+#endif
+    env->cache_control = 0;
+}
+
+static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        SPARCCPU *cpu = SPARC_CPU(cs);
+        CPUSPARCState *env = &cpu->env;
+
+        if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) {
+            int pil = env->interrupt_index & 0xf;
+            int type = env->interrupt_index & 0xf0;
+
+            if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) {
+                cs->exception_index = env->interrupt_index;
+                sparc_cpu_do_interrupt(cs);
+                return true;
+            }
+        }
+    }
+    return false;
+}
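env->interrupt_index packs the trap type into the high nibble and the
pending interrupt level (PIL) into the low nibble, which is what the two
masks above pull apart. A tiny standalone decode sketch (the 0x12 sample
value assumes sparc32's TT_EXTINT of 0x10 with PIL 2):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t interrupt_index = 0x12;   /* hypothetical: TT_EXTINT | 2 */
    printf("type=0x%02x pil=%d\n",
           interrupt_index & 0xf0,     /* trap type */
           interrupt_index & 0x0f);    /* interrupt level */
    return 0;
}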
+
+static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+    info->print_insn = print_insn_sparc;
+#ifdef TARGET_SPARC64
+    info->mach = bfd_mach_sparc_v9b;
+#endif
+}
+
+static void sparc_cpu_parse_features(CPUState *cs, char *features,
+                                     Error **errp);
+
+static int cpu_sparc_register(SPARCCPU *cpu, const char *cpu_model)
+{
+    CPUSPARCState *env = &cpu->env;
+    char *s = g_strdup(cpu_model);
+    char *featurestr, *name = strtok(s, ",");
+    sparc_def_t def1, *def = &def1;
+    Error *err = NULL;
+
+    if (cpu_sparc_find_by_name(def, name) < 0) {
+        g_free(s);
+        return -1;
+    }
+
+    env->def = g_memdup(def, sizeof(*def));
+
+    featurestr = strtok(NULL, ",");
+    sparc_cpu_parse_features(CPU(cpu), featurestr, &err);
+    g_free(s);
+    if (err) {
+        error_report_err(err);
+        return -1;
+    }
+
+    env->version = def->iu_version;
+    env->fsr = def->fpu_version;
+    env->nwindows = def->nwindows;
+#if !defined(TARGET_SPARC64)
+    env->mmuregs[0] |= def->mmu_version;
+    cpu_sparc_set_id(env, 0);
+    env->mxccregs[7] |= def->mxcc_version;
+#else
+    env->mmu_version = def->mmu_version;
+    env->maxtl = def->maxtl;
+    env->version |= def->maxtl << 8;
+    env->version |= def->nwindows - 1;
+#endif
+    return 0;
+}
+
+SPARCCPU *cpu_sparc_init(const char *cpu_model)
+{
+    SPARCCPU *cpu;
+
+    cpu = SPARC_CPU(object_new(TYPE_SPARC_CPU));
+
+    if (cpu_sparc_register(cpu, cpu_model) < 0) {
+        object_unref(OBJECT(cpu));
+        return NULL;
+    }
+
+    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+    return cpu;
+}
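Board code creates CPUs through this helper by model name. A hypothetical
caller sketch (the model string must match an entry in the sparc_defs[]
table below; error_report() is assumed from qemu/error-report.h):

/* Hypothetical sketch, not part of the patch. */
static SPARCCPU *example_create_cpu(void)
{
    SPARCCPU *cpu = cpu_sparc_init("TI SuperSparc II");

    if (cpu == NULL) {
        error_report("Unable to find Sparc CPU definition");
    }
    return cpu;
}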
+
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu)
+{
+#if !defined(TARGET_SPARC64)
+    env->mxccregs[7] = ((cpu + 8) & 0xf) << 24;
+#endif
+}
+
+static const sparc_def_t sparc_defs[] = {
+#ifdef TARGET_SPARC64
+    {
+        .name = "Fujitsu Sparc64",
+        .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 4,
+        .maxtl = 4,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Fujitsu Sparc64 III",
+        .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 5,
+        .maxtl = 4,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Fujitsu Sparc64 IV",
+        .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Fujitsu Sparc64 V",
+        .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI UltraSparc I",
+        .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI UltraSparc II",
+        .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI UltraSparc IIi",
+        .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI UltraSparc IIe",
+        .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc III",
+        .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc III Cu",
+        .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_3,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc IIIi",
+        .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc IV",
+        .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_4,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc IV+",
+        .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT,
+    },
+    {
+        .name = "Sun UltraSparc IIIi+",
+        .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_3,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Sun UltraSparc T1",
+        /* defined in sparc_ifu_fdp.v and ctu.h */
+        .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_sun4v,
+        .nwindows = 8,
+        .maxtl = 6,
+        .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+        | CPU_FEATURE_GL,
+    },
+    {
+        .name = "Sun UltraSparc T2",
+        /* defined in tlu_asi_ctl.v and n2_revid_cust.v */
+        .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_sun4v,
+        .nwindows = 8,
+        .maxtl = 6,
+        .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+        | CPU_FEATURE_GL,
+    },
+    {
+        .name = "NEC UltraSparc I",
+        .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+        .fpu_version = 0x00000000,
+        .mmu_version = mmu_us_12,
+        .nwindows = 8,
+        .maxtl = 5,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+#else
+    {
+        .name = "Fujitsu MB86904",
+        .iu_version = 0x04 << 24, /* Impl 0, ver 4 */
+        .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+        .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0x00ffffc0,
+        .mmu_cxr_mask = 0x000000ff,
+        .mmu_sfsr_mask = 0x00016fff,
+        .mmu_trcr_mask = 0x00ffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "Fujitsu MB86907",
+        .iu_version = 0x05 << 24, /* Impl 0, ver 5 */
+        .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+        .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x000000ff,
+        .mmu_sfsr_mask = 0x00016fff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI MicroSparc I",
+        .iu_version = 0x41000000,
+        .fpu_version = 4 << 17,
+        .mmu_version = 0x41000000,
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0x007ffff0,
+        .mmu_cxr_mask = 0x0000003f,
+        .mmu_sfsr_mask = 0x00016fff,
+        .mmu_trcr_mask = 0x0000003f,
+        .nwindows = 7,
+        .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
+        CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
+        CPU_FEATURE_FMUL,
+    },
+    {
+        .name = "TI MicroSparc II",
+        .iu_version = 0x42000000,
+        .fpu_version = 4 << 17,
+        .mmu_version = 0x02000000,
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0x00ffffc0,
+        .mmu_cxr_mask = 0x000000ff,
+        .mmu_sfsr_mask = 0x00016fff,
+        .mmu_trcr_mask = 0x00ffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI MicroSparc IIep",
+        .iu_version = 0x42000000,
+        .fpu_version = 4 << 17,
+        .mmu_version = 0x04000000,
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0x00ffffc0,
+        .mmu_cxr_mask = 0x000000ff,
+        .mmu_sfsr_mask = 0x00016bff,
+        .mmu_trcr_mask = 0x00ffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc 40", /* STP1020NPGA */
+        .iu_version = 0x41000000, /* SuperSPARC 2.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc 50", /* STP1020PGA */
+        .iu_version = 0x40000000, /* SuperSPARC 3.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc 51",
+        .iu_version = 0x40000000, /* SuperSPARC 3.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .mxcc_version = 0x00000104,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc 60", /* STP1020APGA */
+        .iu_version = 0x40000000, /* SuperSPARC 3.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc 61",
+        .iu_version = 0x44000000, /* SuperSPARC 3.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .mxcc_version = 0x00000104,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "TI SuperSparc II",
+        .iu_version = 0x40000000, /* SuperSPARC II 1.x */
+        .fpu_version = 0 << 17,
+        .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */
+        .mmu_bm = 0x00002000,
+        .mmu_ctpr_mask = 0xffffffc0,
+        .mmu_cxr_mask = 0x0000ffff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .mxcc_version = 0x00000104,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES,
+    },
+    {
+        .name = "LEON2",
+        .iu_version = 0xf2000000,
+        .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+        .mmu_version = 0xf2000000,
+        .mmu_bm = 0x00004000,
+        .mmu_ctpr_mask = 0x007ffff0,
+        .mmu_cxr_mask = 0x0000003f,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN,
+    },
+    {
+        .name = "LEON3",
+        .iu_version = 0xf3000000,
+        .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+        .mmu_version = 0xf3000000,
+        .mmu_bm = 0x00000000,
+        .mmu_ctpr_mask = 0xfffffffc,
+        .mmu_cxr_mask = 0x000000ff,
+        .mmu_sfsr_mask = 0xffffffff,
+        .mmu_trcr_mask = 0xffffffff,
+        .nwindows = 8,
+        .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN |
+        CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN |
+        CPU_FEATURE_CASA,
+    },
+#endif
+};
+
+static const char * const feature_name[] = {
+    "float",
+    "float128",
+    "swap",
+    "mul",
+    "div",
+    "flush",
+    "fsqrt",
+    "fmul",
+    "vis1",
+    "vis2",
+    "fsmuld",
+    "hypv",
+    "cmt",
+    "gl",
+};
+
+static void print_features(FILE *f, fprintf_function cpu_fprintf,
+                           uint32_t features, const char *prefix)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(feature_name); i++) {
+        if (feature_name[i] && (features & (1 << i))) {
+            if (prefix) {
+                (*cpu_fprintf)(f, "%s", prefix);
+            }
+            (*cpu_fprintf)(f, "%s ", feature_name[i]);
+        }
+    }
+}
+
+static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(feature_name); i++) {
+        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
+            *features |= 1 << i;
+            return;
+        }
+    }
+    error_report("CPU feature %s not found", flagname);
+}
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *name)
+{
+    unsigned int i;
+    const sparc_def_t *def = NULL;
+
+    for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+        if (strcasecmp(name, sparc_defs[i].name) == 0) {
+            def = &sparc_defs[i];
+        }
+    }
+    if (!def) {
+        return -1;
+    }
+    memcpy(cpu_def, def, sizeof(*def));
+    return 0;
+}
+
+static void sparc_cpu_parse_features(CPUState *cs, char *features,
+                                     Error **errp)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    sparc_def_t *cpu_def = cpu->env.def;
+    char *featurestr;
+    uint32_t plus_features = 0;
+    uint32_t minus_features = 0;
+    uint64_t iu_version;
+    uint32_t fpu_version, mmu_version, nwindows;
+
+    featurestr = features ? strtok(features, ",") : NULL;
+    while (featurestr) {
+        char *val;
+
+        if (featurestr[0] == '+') {
+            add_flagname_to_bitmaps(featurestr + 1, &plus_features);
+        } else if (featurestr[0] == '-') {
+            add_flagname_to_bitmaps(featurestr + 1, &minus_features);
+        } else if ((val = strchr(featurestr, '='))) {
+            *val = 0; val++;
+            if (!strcmp(featurestr, "iu_version")) {
+                char *err;
+
+                iu_version = strtoll(val, &err, 0);
+                if (!*val || *err) {
+                    error_setg(errp, "bad numerical value %s", val);
+                    return;
+                }
+                cpu_def->iu_version = iu_version;
+#ifdef DEBUG_FEATURES
+                fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version);
+#endif
+            } else if (!strcmp(featurestr, "fpu_version")) {
+                char *err;
+
+                fpu_version = strtol(val, &err, 0);
+                if (!*val || *err) {
+                    error_setg(errp, "bad numerical value %s", val);
+                    return;
+                }
+                cpu_def->fpu_version = fpu_version;
+#ifdef DEBUG_FEATURES
+                fprintf(stderr, "fpu_version %x\n", fpu_version);
+#endif
+            } else if (!strcmp(featurestr, "mmu_version")) {
+                char *err;
+
+                mmu_version = strtol(val, &err, 0);
+                if (!*val || *err) {
+                    error_setg(errp, "bad numerical value %s", val);
+                    return;
+                }
+                cpu_def->mmu_version = mmu_version;
+#ifdef DEBUG_FEATURES
+                fprintf(stderr, "mmu_version %x\n", mmu_version);
+#endif
+            } else if (!strcmp(featurestr, "nwindows")) {
+                char *err;
+
+                nwindows = strtol(val, &err, 0);
+                if (!*val || *err || nwindows > MAX_NWINDOWS ||
+                    nwindows < MIN_NWINDOWS) {
+                    error_setg(errp, "bad numerical value %s", val);
+                    return;
+                }
+                cpu_def->nwindows = nwindows;
+#ifdef DEBUG_FEATURES
+                fprintf(stderr, "nwindows %d\n", nwindows);
+#endif
+            } else {
+                error_setg(errp, "unrecognized feature %s", featurestr);
+                return;
+            }
+        } else {
+            error_setg(errp, "feature string `%s' not in format "
+                             "(+feature|-feature|feature=xyz)", featurestr);
+            return;
+        }
+        featurestr = strtok(NULL, ",");
+    }
+    cpu_def->features |= plus_features;
+    cpu_def->features &= ~minus_features;
+#ifdef DEBUG_FEATURES
+    print_features(stderr, fprintf, cpu_def->features, NULL);
+#endif
+}
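The grammar accepted above is the model name first, then comma-separated
+feature / -feature toggles and key=value numeric overrides. A hypothetical
example string (feature names come from the feature_name[] table above):

static const char *example_cpu_model =
    "TI MicroSparc I,+fsmuld,-fmul,nwindows=7,iu_version=0x41000000";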
+
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+        (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx
+                       " FPU %08x MMU %08x NWINS %d ",
+                       sparc_defs[i].name,
+                       sparc_defs[i].iu_version,
+                       sparc_defs[i].fpu_version,
+                       sparc_defs[i].mmu_version,
+                       sparc_defs[i].nwindows);
+        print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES &
+                       ~sparc_defs[i].features, "-");
+        print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES &
+                       sparc_defs[i].features, "+");
+        (*cpu_fprintf)(f, "\n");
+    }
+    (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): ");
+    print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL);
+    (*cpu_fprintf)(f, "\n");
+    (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): ");
+    print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL);
+    (*cpu_fprintf)(f, "\n");
+    (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version "
+                   "fpu_version mmu_version nwindows\n");
+}
+
+static void cpu_print_cc(FILE *f, fprintf_function cpu_fprintf,
+                         uint32_t cc)
+{
+    cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG ? 'N' : '-',
+                cc & PSR_ZERO ? 'Z' : '-', cc & PSR_OVF ? 'V' : '-',
+                cc & PSR_CARRY ? 'C' : '-');
+}
+
+#ifdef TARGET_SPARC64
+#define REGS_PER_LINE 4
+#else
+#define REGS_PER_LINE 8
+#endif
+
+void sparc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                          int flags)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    int i, x;
+
+    cpu_fprintf(f, "pc: " TARGET_FMT_lx "  npc: " TARGET_FMT_lx "\n", env->pc,
+                env->npc);
+
+    for (i = 0; i < 8; i++) {
+        if (i % REGS_PER_LINE == 0) {
+            cpu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1);
+        }
+        cpu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]);
+        if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+    for (x = 0; x < 3; x++) {
+        for (i = 0; i < 8; i++) {
+            if (i % REGS_PER_LINE == 0) {
+                cpu_fprintf(f, "%%%c%d-%d: ",
+                            x == 0 ? 'o' : (x == 1 ? 'l' : 'i'),
+                            i, i + REGS_PER_LINE - 1);
+            }
+            cpu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]);
+            if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+                cpu_fprintf(f, "\n");
+            }
+        }
+    }
+
+    for (i = 0; i < TARGET_DPREGS; i++) {
+        if ((i & 3) == 0) {
+            cpu_fprintf(f, "%%f%02d: ", i * 2);
+        }
+        cpu_fprintf(f, " %016" PRIx64, env->fpr[i].ll);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+#ifdef TARGET_SPARC64
+    cpu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate,
+                (unsigned)cpu_get_ccr(env));
+    cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT);
+    cpu_fprintf(f, " xcc: ");
+    cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4));
+    cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl,
+                env->psrpil);
+    cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d "
+                "cleanwin: %d cwp: %d\n",
+                env->cansave, env->canrestore, env->otherwin, env->wstate,
+                env->cleanwin, env->nwindows - 1 - env->cwp);
+    cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: "
+                TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs);
+#else
+    cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env));
+    cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env));
+    cpu_fprintf(f, " SPE: %c%c%c) wim: %08x\n", env->psrs ? 'S' : '-',
+                env->psrps ? 'P' : '-', env->psret ? 'E' : '-',
+                env->wim);
+    cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n",
+                env->fsr, env->y);
+#endif
+    cpu_fprintf(f, "\n");
+}
+
+static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+
+    cpu->env.pc = value;
+    cpu->env.npc = value + 4;
+}
+
+static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+
+    cpu->env.pc = tb->pc;
+    cpu->env.npc = tb->cs_base;
+}
+
+static bool sparc_cpu_has_work(CPUState *cs)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+
+    return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+           cpu_interrupts_enabled(env);
+}
+
+static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(dev);
+    Error *local_err = NULL;
+#if defined(CONFIG_USER_ONLY)
+    SPARCCPU *cpu = SPARC_CPU(dev);
+    CPUSPARCState *env = &cpu->env;
+
+    if ((env->def->features & CPU_FEATURE_FLOAT)) {
+        env->def->features |= CPU_FEATURE_FLOAT128;
+    }
+#endif
+
+    cpu_exec_realizefn(cs, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    qemu_init_vcpu(cs);
+
+    scc->parent_realize(dev, errp);
+}
+
+static void sparc_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    SPARCCPU *cpu = SPARC_CPU(obj);
+    CPUSPARCState *env = &cpu->env;
+
+    cs->env_ptr = env;
+
+    if (tcg_enabled()) {
+        gen_intermediate_code_init(env);
+    }
+}
+
+static void sparc_cpu_uninitfn(Object *obj)
+{
+    SPARCCPU *cpu = SPARC_CPU(obj);
+    CPUSPARCState *env = &cpu->env;
+
+    g_free(env->def);
+}
+
+static void sparc_cpu_class_init(ObjectClass *oc, void *data)
+{
+    SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
+    CPUClass *cc = CPU_CLASS(oc);
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    scc->parent_realize = dc->realize;
+    dc->realize = sparc_cpu_realizefn;
+
+    scc->parent_reset = cc->reset;
+    cc->reset = sparc_cpu_reset;
+
+    cc->has_work = sparc_cpu_has_work;
+    cc->do_interrupt = sparc_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
+    cc->dump_state = sparc_cpu_dump_state;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+    cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
+#endif
+    cc->set_pc = sparc_cpu_set_pc;
+    cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
+    cc->gdb_read_register = sparc_cpu_gdb_read_register;
+    cc->gdb_write_register = sparc_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
+#else
+    cc->do_unassigned_access = sparc_cpu_unassigned_access;
+    cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
+    cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
+    cc->vmsd = &vmstate_sparc_cpu;
+#endif
+    cc->disas_set_info = cpu_sparc_disas_set_info;
+
+#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+    cc->gdb_num_core_regs = 86;
+#else
+    cc->gdb_num_core_regs = 72;
+#endif
+}
+
+static const TypeInfo sparc_cpu_type_info = {
+    .name = TYPE_SPARC_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(SPARCCPU),
+    .instance_init = sparc_cpu_initfn,
+    .instance_finalize = sparc_cpu_uninitfn,
+    .abstract = false,
+    .class_size = sizeof(SPARCCPUClass),
+    .class_init = sparc_cpu_class_init,
+};
+
+static void sparc_cpu_register_types(void)
+{
+    type_register_static(&sparc_cpu_type_info);
+}
+
+type_init(sparc_cpu_register_types)
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
new file mode 100644
index 0000000000..5fb0ed1aad
--- /dev/null
+++ b/target/sparc/cpu.h
@@ -0,0 +1,779 @@
+#ifndef SPARC_CPU_H
+#define SPARC_CPU_H
+
+#include "qemu-common.h"
+#include "qemu/bswap.h"
+#include "cpu-qom.h"
+
+#define ALIGNED_ONLY
+
+#if !defined(TARGET_SPARC64)
+#define TARGET_LONG_BITS 32
+#define TARGET_DPREGS 16
+#define TARGET_PAGE_BITS 12 /* 4k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+#define TARGET_LONG_BITS 64
+#define TARGET_DPREGS 32
+#define TARGET_PAGE_BITS 13 /* 8k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 41
+# ifdef TARGET_ABI32
+#  define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+#  define TARGET_VIRT_ADDR_SPACE_BITS 44
+# endif
+#endif
+
+#define CPUArchState struct CPUSPARCState
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+/*#define EXCP_INTERRUPT 0x100*/
+
+/* trap definitions */
+#ifndef TARGET_SPARC64
+#define TT_TFAULT   0x01
+#define TT_ILL_INSN 0x02
+#define TT_PRIV_INSN 0x03
+#define TT_NFPU_INSN 0x04
+#define TT_WIN_OVF  0x05
+#define TT_WIN_UNF  0x06
+#define TT_UNALIGNED 0x07
+#define TT_FP_EXCP  0x08
+#define TT_DFAULT   0x09
+#define TT_TOVF     0x0a
+#define TT_EXTINT   0x10
+#define TT_CODE_ACCESS 0x21
+#define TT_UNIMP_FLUSH 0x25
+#define TT_DATA_ACCESS 0x29
+#define TT_DIV_ZERO 0x2a
+#define TT_NCP_INSN 0x24
+#define TT_TRAP     0x80
+#else
+#define TT_POWER_ON_RESET 0x01
+#define TT_TFAULT   0x08
+#define TT_CODE_ACCESS 0x0a
+#define TT_ILL_INSN 0x10
+#define TT_UNIMP_FLUSH TT_ILL_INSN
+#define TT_PRIV_INSN 0x11
+#define TT_NFPU_INSN 0x20
+#define TT_FP_EXCP  0x21
+#define TT_TOVF     0x23
+#define TT_CLRWIN   0x24
+#define TT_DIV_ZERO 0x28
+#define TT_DFAULT   0x30
+#define TT_DATA_ACCESS 0x32
+#define TT_UNALIGNED 0x34
+#define TT_PRIV_ACT 0x37
+#define TT_EXTINT   0x40
+#define TT_IVEC     0x60
+#define TT_TMISS    0x64
+#define TT_DMISS    0x68
+#define TT_DPROT    0x6c
+#define TT_SPILL    0x80
+#define TT_FILL     0xc0
+#define TT_WOTHER   (1 << 5)
+#define TT_TRAP     0x100
+#endif
+
+#define PSR_NEG_SHIFT 23
+#define PSR_NEG   (1 << PSR_NEG_SHIFT)
+#define PSR_ZERO_SHIFT 22
+#define PSR_ZERO  (1 << PSR_ZERO_SHIFT)
+#define PSR_OVF_SHIFT 21
+#define PSR_OVF   (1 << PSR_OVF_SHIFT)
+#define PSR_CARRY_SHIFT 20
+#define PSR_CARRY (1 << PSR_CARRY_SHIFT)
+#define PSR_ICC   (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY)
+#if !defined(TARGET_SPARC64)
+#define PSR_EF    (1<<12)
+#define PSR_PIL   0xf00
+#define PSR_S     (1<<7)
+#define PSR_PS    (1<<6)
+#define PSR_ET    (1<<5)
+#define PSR_CWP   0x1f
+#endif
+
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_DST (env->cc_dst)
+#define CC_OP  (env->cc_op)
+
+/* Even though lazy evaluation of CPU condition codes tends to be less
+ * important on RISC systems, where condition codes are only updated
+ * when explicitly requested, this port uses it to compute both the
+ * 32-bit (icc) and 64-bit (xcc) condition codes.
+ */
+enum {
+    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+    CC_OP_FLAGS,   /* all cc are back in status register */
+    CC_OP_DIV,     /* modify N, Z and V, C = 0 */
+    CC_OP_ADD,     /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_ADDX,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_TADD,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_TADDTV,  /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+    CC_OP_SUB,     /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_SUBX,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_TSUB,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
+    CC_OP_TSUBTV,  /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+    CC_OP_LOGIC,   /* modify N and Z, C = V = 0, CC_DST = res */
+    CC_OP_NB,
+};
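+
+/* Illustrative sketch (not part of the original sources): with lazy
+ * evaluation the arithmetic helpers store the operands and result in
+ * CC_SRC/CC_SRC2/CC_DST plus the CC_OP_* code, and the flags are only
+ * materialised on demand.  For CC_OP_ADD the icc bits could be
+ * recovered roughly like this (example_icc_from_add is hypothetical):
+ *
+ *   static inline uint32_t example_icc_from_add(uint32_t src1,
+ *                                               uint32_t src2,
+ *                                               uint32_t dst)
+ *   {
+ *       uint32_t icc = 0;
+ *       if ((int32_t)dst < 0) {
+ *           icc |= PSR_NEG;
+ *       }
+ *       if (dst == 0) {
+ *           icc |= PSR_ZERO;
+ *       }
+ *       if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+ *           icc |= PSR_OVF;
+ *       }
+ *       if (dst < src1) {
+ *           icc |= PSR_CARRY;
+ *       }
+ *       return icc;
+ *   }
+ */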
+
+/* Trap base register */
+#define TBR_BASE_MASK 0xfffff000
+
+#if defined(TARGET_SPARC64)
+#define PS_TCT   (1<<12) /* UA2007, impl.dep. trap on control transfer */
+#define PS_IG    (1<<11) /* v9, zero on UA2007 */
+#define PS_MG    (1<<10) /* v9, zero on UA2007 */
+#define PS_CLE   (1<<9) /* UA2007 */
+#define PS_TLE   (1<<8) /* UA2007 */
+#define PS_RMO   (1<<7)
+#define PS_RED   (1<<5) /* v9, zero on UA2007 */
+#define PS_PEF   (1<<4) /* enable fpu */
+#define PS_AM    (1<<3) /* address mask */
+#define PS_PRIV  (1<<2)
+#define PS_IE    (1<<1)
+#define PS_AG    (1<<0) /* v9, zero on UA2007 */
+
+#define FPRS_FEF (1<<2)
+
+#define HS_PRIV  (1<<2)
+#endif
+
+/* Fcc */
+#define FSR_RD1        (1ULL << 31)
+#define FSR_RD0        (1ULL << 30)
+#define FSR_RD_MASK    (FSR_RD1 | FSR_RD0)
+#define FSR_RD_NEAREST 0
+#define FSR_RD_ZERO    FSR_RD0
+#define FSR_RD_POS     FSR_RD1
+#define FSR_RD_NEG     (FSR_RD1 | FSR_RD0)
+
+#define FSR_NVM   (1ULL << 27)
+#define FSR_OFM   (1ULL << 26)
+#define FSR_UFM   (1ULL << 25)
+#define FSR_DZM   (1ULL << 24)
+#define FSR_NXM   (1ULL << 23)
+#define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM)
+
+#define FSR_NVA   (1ULL << 9)
+#define FSR_OFA   (1ULL << 8)
+#define FSR_UFA   (1ULL << 7)
+#define FSR_DZA   (1ULL << 6)
+#define FSR_NXA   (1ULL << 5)
+#define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+#define FSR_NVC   (1ULL << 4)
+#define FSR_OFC   (1ULL << 3)
+#define FSR_UFC   (1ULL << 2)
+#define FSR_DZC   (1ULL << 1)
+#define FSR_NXC   (1ULL << 0)
+#define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC)
+
+#define FSR_FTT2   (1ULL << 16)
+#define FSR_FTT1   (1ULL << 15)
+#define FSR_FTT0   (1ULL << 14)
+/* gcc warns about constant overflow for ~FSR_FTT_MASK, hence the
+   explicit FSR_FTT_NMASK constants below */
+/* #define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) */
+#ifdef TARGET_SPARC64
+#define FSR_FTT_NMASK      0xfffffffffffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
+#define FSR_LDFSR_OLDMASK  0x0000003f000fc000ULL
+#define FSR_LDXFSR_MASK    0x0000003fcfc00fffULL
+#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL
+#else
+#define FSR_FTT_NMASK      0xfffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL
+#define FSR_LDFSR_OLDMASK  0x000fc000ULL
+#endif
+#define FSR_LDFSR_MASK     0xcfc00fffULL
+#define FSR_FTT_IEEE_EXCP (1ULL << 14)
+#define FSR_FTT_UNIMPFPOP (3ULL << 14)
+#define FSR_FTT_SEQ_ERROR (4ULL << 14)
+#define FSR_FTT_INVAL_FPR (6ULL << 14)
+
+#define FSR_FCC1_SHIFT 11
+#define FSR_FCC1  (1ULL << FSR_FCC1_SHIFT)
+#define FSR_FCC0_SHIFT 10
+#define FSR_FCC0  (1ULL << FSR_FCC0_SHIFT)
+
+/* MMU */
+#define MMU_E     (1<<0)
+#define MMU_NF    (1<<1)
+
+#define PTE_ENTRYTYPE_MASK 3
+#define PTE_ACCESS_MASK    0x1c
+#define PTE_ACCESS_SHIFT   2
+#define PTE_PPN_SHIFT      7
+#define PTE_ADDR_MASK      0xffffff00
+
+#define PG_ACCESSED_BIT 5
+#define PG_MODIFIED_BIT 6
+#define PG_CACHE_BIT    7
+
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
+#define PG_CACHE_MASK    (1 << PG_CACHE_BIT)
+
+/* 3 <= NWINDOWS <= 32. */
+#define MIN_NWINDOWS 3
+#define MAX_NWINDOWS 32
+
+#if !defined(TARGET_SPARC64)
+#define NB_MMU_MODES 3
+#else
+#define NB_MMU_MODES 7
+typedef struct trap_state {
+    uint64_t tpc;
+    uint64_t tnpc;
+    uint64_t tstate;
+    uint32_t tt;
+} trap_state;
+#endif
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+typedef struct sparc_def_t {
+    const char *name;
+    target_ulong iu_version;
+    uint32_t fpu_version;
+    uint32_t mmu_version;
+    uint32_t mmu_bm;
+    uint32_t mmu_ctpr_mask;
+    uint32_t mmu_cxr_mask;
+    uint32_t mmu_sfsr_mask;
+    uint32_t mmu_trcr_mask;
+    uint32_t mxcc_version;
+    uint32_t features;
+    uint32_t nwindows;
+    uint32_t maxtl;
+} sparc_def_t;
+
+#define CPU_FEATURE_FLOAT        (1 << 0)
+#define CPU_FEATURE_FLOAT128     (1 << 1)
+#define CPU_FEATURE_SWAP         (1 << 2)
+#define CPU_FEATURE_MUL          (1 << 3)
+#define CPU_FEATURE_DIV          (1 << 4)
+#define CPU_FEATURE_FLUSH        (1 << 5)
+#define CPU_FEATURE_FSQRT        (1 << 6)
+#define CPU_FEATURE_FMUL         (1 << 7)
+#define CPU_FEATURE_VIS1         (1 << 8)
+#define CPU_FEATURE_VIS2         (1 << 9)
+#define CPU_FEATURE_FSMULD       (1 << 10)
+#define CPU_FEATURE_HYPV         (1 << 11)
+#define CPU_FEATURE_CMT          (1 << 12)
+#define CPU_FEATURE_GL           (1 << 13)
+#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
+#define CPU_FEATURE_ASR17        (1 << 15)
+#define CPU_FEATURE_CACHE_CTRL   (1 << 16)
+#define CPU_FEATURE_POWERDOWN    (1 << 17)
+#define CPU_FEATURE_CASA         (1 << 18)
+
+#ifndef TARGET_SPARC64
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP |  \
+                              CPU_FEATURE_MUL | CPU_FEATURE_DIV |     \
+                              CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+                              CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
+#else
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP |  \
+                              CPU_FEATURE_MUL | CPU_FEATURE_DIV |     \
+                              CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+                              CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 |   \
+                              CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \
+                              CPU_FEATURE_CASA)
+enum {
+    mmu_us_12, // UltraSPARC < III (64 entry TLB)
+    mmu_us_3,  // UltraSPARC III (512 entry TLB)
+    mmu_us_4,  // UltraSPARC IV (several TLBs, 32 and 256MB pages)
+    mmu_sun4v, // T1, T2
+};
+#endif
+
+#define TTE_VALID_BIT       (1ULL << 63)
+#define TTE_NFO_BIT         (1ULL << 60)
+#define TTE_USED_BIT        (1ULL << 41)
+#define TTE_LOCKED_BIT      (1ULL <<  6)
+#define TTE_SIDEEFFECT_BIT  (1ULL <<  3)
+#define TTE_PRIV_BIT        (1ULL <<  2)
+#define TTE_W_OK_BIT        (1ULL <<  1)
+#define TTE_GLOBAL_BIT      (1ULL <<  0)
+
+#define TTE_IS_VALID(tte)   ((tte) & TTE_VALID_BIT)
+#define TTE_IS_NFO(tte)     ((tte) & TTE_NFO_BIT)
+#define TTE_IS_USED(tte)    ((tte) & TTE_USED_BIT)
+#define TTE_IS_LOCKED(tte)  ((tte) & TTE_LOCKED_BIT)
+#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
+#define TTE_IS_PRIV(tte)    ((tte) & TTE_PRIV_BIT)
+#define TTE_IS_W_OK(tte)    ((tte) & TTE_W_OK_BIT)
+#define TTE_IS_GLOBAL(tte)  ((tte) & TTE_GLOBAL_BIT)
+
+#define TTE_SET_USED(tte)   ((tte) |= TTE_USED_BIT)
+#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
+
+#define TTE_PGSIZE(tte)     (((tte) >> 61) & 3ULL)
+#define TTE_PA(tte)         ((tte) & 0x1ffffffe000ULL)
+
+#define SFSR_NF_BIT         (1ULL << 24)   /* JPS1 NoFault */
+#define SFSR_TM_BIT         (1ULL << 15)   /* JPS1 TLB Miss */
+#define SFSR_FT_VA_IMMU_BIT (1ULL << 13)   /* USIIi VA out of range (IMMU) */
+#define SFSR_FT_VA_DMMU_BIT (1ULL << 12)   /* USIIi VA out of range (DMMU) */
+#define SFSR_FT_NFO_BIT     (1ULL << 11)   /* NFO page access */
+#define SFSR_FT_ILL_BIT     (1ULL << 10)   /* illegal LDA/STA ASI */
+#define SFSR_FT_ATOMIC_BIT  (1ULL <<  9)   /* atomic op on noncacheable area */
+#define SFSR_FT_NF_E_BIT    (1ULL <<  8)   /* NF access on side effect area */
+#define SFSR_FT_PRIV_BIT    (1ULL <<  7)   /* privilege violation */
+#define SFSR_PR_BIT         (1ULL <<  3)   /* privilege mode */
+#define SFSR_WRITE_BIT      (1ULL <<  2)   /* write access mode */
+#define SFSR_OW_BIT         (1ULL <<  1)   /* status overwritten */
+#define SFSR_VALID_BIT      (1ULL <<  0)   /* status valid */
+
+#define SFSR_ASI_SHIFT      16             /* 23:16 ASI value */
+#define SFSR_ASI_MASK       (0xffULL << SFSR_ASI_SHIFT)
+#define SFSR_CT_PRIMARY     (0ULL <<  4)   /* 5:4 context type */
+#define SFSR_CT_SECONDARY   (1ULL <<  4)
+#define SFSR_CT_NUCLEUS     (2ULL <<  4)
+#define SFSR_CT_NOTRANS     (3ULL <<  4)
+#define SFSR_CT_MASK        (3ULL <<  4)
+
+/* Leon3 cache control */
+
+/* Cache control: emulate the behavior of the cache control registers but
+   without any effect on the emulated memory accesses (the caches
+   themselves are not modelled) */
+
+#define CACHE_STATE_MASK 0x3
+#define CACHE_DISABLED   0x0
+#define CACHE_FROZEN     0x1
+#define CACHE_ENABLED    0x3
+
+/* Cache Control register fields */
+
+#define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
+#define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
+#define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
+#define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
+#define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
+#define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
+#define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
+#define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */
+
+typedef struct SparcTLBEntry {
+    uint64_t tag;
+    uint64_t tte;
+} SparcTLBEntry;
+
+struct CPUTimer
+{
+    const char *name;
+    uint32_t    frequency;
+    uint32_t    disabled;
+    uint64_t    disabled_mask;
+    uint32_t    npt;
+    uint64_t    npt_mask;
+    int64_t     clock_offset;
+    QEMUTimer  *qtimer;
+};
+
+typedef struct CPUTimer CPUTimer;
+
+typedef struct CPUSPARCState CPUSPARCState;
+
+struct CPUSPARCState {
+    target_ulong gregs[8]; /* general registers */
+    target_ulong *regwptr; /* pointer to current register window */
+    target_ulong pc;       /* program counter */
+    target_ulong npc;      /* next program counter */
+    target_ulong y;        /* multiply/divide register */
+
+    /* emulator internal flags handling */
+    target_ulong cc_src, cc_src2;
+    target_ulong cc_dst;
+    uint32_t cc_op;
+
+    target_ulong cond; /* conditional branch result (XXX: save it in a
+                          temporary register when possible) */
+
+    uint32_t psr;      /* processor state register */
+    target_ulong fsr;      /* FPU state register */
+    CPU_DoubleU fpr[TARGET_DPREGS];  /* floating point registers */
+    uint32_t cwp;      /* index of current register window (extracted
+                          from PSR) */
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
+    uint32_t wim;      /* window invalid mask */
+#endif
+    target_ulong tbr;  /* trap base register */
+#if !defined(TARGET_SPARC64)
+    int      psrs;     /* supervisor mode (extracted from PSR) */
+    int      psrps;    /* previous supervisor mode */
+    int      psret;    /* enable traps */
+#endif
+    uint32_t psrpil;   /* interrupt blocking level */
+    uint32_t pil_in;   /* incoming interrupt level bitmap */
+#if !defined(TARGET_SPARC64)
+    int      psref;    /* enable fpu */
+#endif
+    int interrupt_index;
+    /* NOTE: we allow 8 more registers to handle wrapping */
+    target_ulong regbase[MAX_NWINDOWS * 16 + 8];
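+    /* regwptr above points at regbase + cwp * 16, so %o/%l/%i accesses
+       need no window lookup (see cpu_set_cwp() in win_helper.c). */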
+
+    CPU_COMMON
+
+    /* Fields from here on are preserved across CPU reset. */
+    target_ulong version;
+    uint32_t nwindows;
+
+    /* MMU regs */
+#if defined(TARGET_SPARC64)
+    uint64_t lsu;
+#define DMMU_E 0x8
+#define IMMU_E 0x4
+    //typedef struct SparcMMU
+    union {
+        uint64_t immuregs[16];
+        struct {
+            uint64_t tsb_tag_target;
+            uint64_t unused_mmu_primary_context;   // use DMMU
+            uint64_t unused_mmu_secondary_context; // use DMMU
+            uint64_t sfsr;
+            uint64_t sfar;
+            uint64_t tsb;
+            uint64_t tag_access;
+        } immu;
+    };
+    union {
+        uint64_t dmmuregs[16];
+        struct {
+            uint64_t tsb_tag_target;
+            uint64_t mmu_primary_context;
+            uint64_t mmu_secondary_context;
+            uint64_t sfsr;
+            uint64_t sfar;
+            uint64_t tsb;
+            uint64_t tag_access;
+        } dmmu;
+    };
+    SparcTLBEntry itlb[64];
+    SparcTLBEntry dtlb[64];
+    uint32_t mmu_version;
+#else
+    uint32_t mmuregs[32];
+    uint64_t mxccdata[4];
+    uint64_t mxccregs[8];
+    uint32_t mmubpctrv, mmubpctrc, mmubpctrs;
+    uint64_t mmubpaction;
+    uint64_t mmubpregs[4];
+    uint64_t prom_addr;
+#endif
+    /* temporary float registers */
+    float128 qt0, qt1;
+    float_status fp_status;
+#if defined(TARGET_SPARC64)
+#define MAXTL_MAX 8
+#define MAXTL_MASK (MAXTL_MAX - 1)
+    trap_state ts[MAXTL_MAX];
+    uint32_t xcc;               /* Extended integer condition codes */
+    uint32_t asi;
+    uint32_t pstate;
+    uint32_t tl;
+    uint32_t maxtl;
+    uint32_t cansave, canrestore, otherwin, wstate, cleanwin;
+    uint64_t agregs[8]; /* alternate general registers */
+    uint64_t bgregs[8]; /* backup for normal global registers */
+    uint64_t igregs[8]; /* interrupt general registers */
+    uint64_t mgregs[8]; /* mmu general registers */
+    uint64_t fprs;
+    uint64_t tick_cmpr, stick_cmpr;
+    CPUTimer *tick, *stick;
+#define TICK_NPT_MASK        0x8000000000000000ULL
+#define TICK_INT_DIS         0x8000000000000000ULL
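+/* Both masks name bit 63: NPT in the TICK/STICK counter registers,
+   INT_DIS in the corresponding *_cmpr compare registers. */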
+    uint64_t gsr;
+    uint32_t gl; // UA2005
+    /* UA 2005 hyperprivileged registers */
+    uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
+    CPUTimer *hstick; // UA 2005
+    /* Interrupt vector registers */
+    uint64_t ivec_status;
+    uint64_t ivec_data[3];
+    uint32_t softint;
+#define SOFTINT_TIMER   1
+#define SOFTINT_STIMER  (1 << 16)
+#define SOFTINT_INTRMASK (0xFFFE)
+#define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
+#endif
+    sparc_def_t *def;
+
+    void *irq_manager;
+    void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno);
+
+    /* Leon3 cache control */
+    uint32_t cache_control;
+};
+
+/**
+ * SPARCCPU:
+ * @env: #CPUSPARCState
+ *
+ * A SPARC CPU.
+ */
+struct SPARCCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUSPARCState env;
+};
+
+static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env)
+{
+    return container_of(env, SPARCCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(SPARCCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_sparc_cpu;
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cpu);
+void sparc_cpu_dump_state(CPUState *cpu, FILE *f,
+                          fprintf_function cpu_fprintf, int flags);
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                                 MMUAccessType access_type,
+                                                 int mmu_idx,
+                                                 uintptr_t retaddr);
+void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
+
+#ifndef NO_CPU_IO_DEFS
+/* cpu_init.c */
+SPARCCPU *cpu_sparc_init(const char *cpu_model);
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+/* mmu_helper.c */
+int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                               int mmu_idx);
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+                              uint8_t *buf, int len, bool is_write);
+#endif
+
+/* translate.c */
+void gen_intermediate_code_init(CPUSPARCState *env);
+
+/* cpu-exec.c */
+
+/* win_helper.c */
+target_ulong cpu_get_psr(CPUSPARCState *env1);
+void cpu_put_psr(CPUSPARCState *env1, target_ulong val);
+void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val);
+#ifdef TARGET_SPARC64
+target_ulong cpu_get_ccr(CPUSPARCState *env1);
+void cpu_put_ccr(CPUSPARCState *env1, target_ulong val);
+target_ulong cpu_get_cwp64(CPUSPARCState *env1);
+void cpu_put_cwp64(CPUSPARCState *env1, int cwp);
+void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate);
+#endif
+int cpu_cwp_inc(CPUSPARCState *env1, int cwp);
+int cpu_cwp_dec(CPUSPARCState *env1, int cwp);
+void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
+
+/* int_helper.c */
+void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno);
+
+/* sun4m.c, sun4u.c */
+void cpu_check_irqs(CPUSPARCState *env);
+
+/* leon3.c */
+void leon3_irq_ack(void *irq_manager, int intno);
+
+#if defined (TARGET_SPARC64)
+
+static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
+{
+    return (x & mask) == (y & mask);
+}
+
+#define MMU_CONTEXT_BITS 13
+#define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1)
+
+static inline int tlb_compare_context(const SparcTLBEntry *tlb,
+                                      uint64_t context)
+{
+    return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
+}
+
+#endif
+#endif
+
+/* cpu-exec.c */
+#if !defined(CONFIG_USER_ONLY)
+void sparc_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+                                 bool is_write, bool is_exec, int is_asi,
+                                 unsigned size);
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+                                           int mmu_idx);
+#endif
+#endif
+int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#ifndef NO_CPU_IO_DEFS
+#define cpu_init(cpu_model) CPU(cpu_sparc_init(cpu_model))
+#endif
+
+#define cpu_signal_handler cpu_sparc_signal_handler
+#define cpu_list sparc_cpu_list
+
+/* MMU modes definitions */
+#if defined (TARGET_SPARC64)
+#define MMU_USER_IDX   0
+#define MMU_USER_SECONDARY_IDX   1
+#define MMU_KERNEL_IDX 2
+#define MMU_KERNEL_SECONDARY_IDX 3
+#define MMU_NUCLEUS_IDX 4
+#define MMU_HYPV_IDX   5
+#define MMU_PHYS_IDX   6
+#else
+#define MMU_USER_IDX   0
+#define MMU_KERNEL_IDX 1
+#define MMU_PHYS_IDX   2
+#endif
+
+#if defined (TARGET_SPARC64)
+static inline int cpu_has_hypervisor(CPUSPARCState *env1)
+{
+    return env1->def->features & CPU_FEATURE_HYPV;
+}
+
+static inline int cpu_hypervisor_mode(CPUSPARCState *env1)
+{
+    return cpu_has_hypervisor(env1) && (env1->hpstate & HS_PRIV);
+}
+
+static inline int cpu_supervisor_mode(CPUSPARCState *env1)
+{
+    return env1->pstate & PS_PRIV;
+}
+#endif
+
+static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
+{
+#if defined(CONFIG_USER_ONLY)
+    return MMU_USER_IDX;
+#elif !defined(TARGET_SPARC64)
+    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+        return MMU_PHYS_IDX;
+    } else {
+        return env->psrs;
+    }
+#else
+    /* IMMU disabled (or in RED state) for ifetch, DMMU disabled for data.  */
+    if (ifetch
+        ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
+        : (env->lsu & DMMU_E) == 0) {
+        return MMU_PHYS_IDX;
+    } else if (env->tl > 0) {
+        return MMU_NUCLEUS_IDX;
+    } else if (cpu_hypervisor_mode(env)) {
+        return MMU_HYPV_IDX;
+    } else if (cpu_supervisor_mode(env)) {
+        return MMU_KERNEL_IDX;
+    } else {
+        return MMU_USER_IDX;
+    }
+#endif
+}
+
+static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
+{
+#if !defined (TARGET_SPARC64)
+    if (env1->psret != 0)
+        return 1;
+#else
+    if (env1->pstate & PS_IE)
+        return 1;
+#endif
+
+    return 0;
+}
+
+static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
+{
+#if !defined(TARGET_SPARC64)
+    /* level 15 is non-maskable on sparc v8 */
+    return pil == 15 || pil > env1->psrpil;
+#else
+    return pil > env1->psrpil;
+#endif
+}
+
+#include "exec/cpu-all.h"
+
+#ifdef TARGET_SPARC64
+/* sun4u.c */
+void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
+uint64_t cpu_tick_get_count(CPUTimer *timer);
+void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
+trap_state* cpu_tsptr(CPUSPARCState* env);
+#endif
+
+#define TB_FLAG_MMU_MASK     7
+#define TB_FLAG_FPU_ENABLED  (1 << 4)
+#define TB_FLAG_AM_ENABLED   (1 << 5)
+#define TB_FLAG_ASI_SHIFT    24
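+/* TB flag layout, as assembled by cpu_get_tb_cpu_state() below: bits 2:0
+   hold the MMU index, bit 4 the FPU-enabled flag, bit 5 the PS_AM address
+   mask flag and, on sparc64 only, bits 31:24 the current ASI. */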
+
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *pflags)
+{
+    uint32_t flags;
+    *pc = env->pc;
+    *cs_base = env->npc;
+    flags = cpu_mmu_index(env, false);
+#ifdef TARGET_SPARC64
+    if (env->pstate & PS_AM) {
+        flags |= TB_FLAG_AM_ENABLED;
+    }
+    if ((env->def->features & CPU_FEATURE_FLOAT)
+        && (env->pstate & PS_PEF)
+        && (env->fprs & FPRS_FEF)) {
+        flags |= TB_FLAG_FPU_ENABLED;
+    }
+    flags |= env->asi << TB_FLAG_ASI_SHIFT;
+#else
+    if ((env->def->features & CPU_FEATURE_FLOAT) && env->psref) {
+        flags |= TB_FLAG_FPU_ENABLED;
+    }
+#endif
+    *pflags = flags;
+}
+
+static inline bool tb_fpu_enabled(int tb_flags)
+{
+#if defined(CONFIG_USER_ONLY)
+    return true;
+#else
+    return tb_flags & TB_FLAG_FPU_ENABLED;
+#endif
+}
+
+static inline bool tb_am_enabled(int tb_flags)
+{
+#ifndef TARGET_SPARC64
+    return false;
+#else
+    return tb_flags & TB_FLAG_AM_ENABLED;
+#endif
+}
+
+#endif
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
new file mode 100644
index 0000000000..c7fb176e4c
--- /dev/null
+++ b/target/sparc/fop_helper.c
@@ -0,0 +1,400 @@
+/*
+ * FPU op helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra)
+{
+    target_ulong status = get_float_exception_flags(&env->fp_status);
+    target_ulong fsr = env->fsr;
+
+    if (unlikely(status)) {
+        /* Keep exception flags clear for next time.  */
+        set_float_exception_flags(0, &env->fp_status);
+
+        /* Copy IEEE 754 flags into FSR */
+        if (status & float_flag_invalid) {
+            fsr |= FSR_NVC;
+        }
+        if (status & float_flag_overflow) {
+            fsr |= FSR_OFC;
+        }
+        if (status & float_flag_underflow) {
+            fsr |= FSR_UFC;
+        }
+        if (status & float_flag_divbyzero) {
+            fsr |= FSR_DZC;
+        }
+        if (status & float_flag_inexact) {
+            fsr |= FSR_NXC;
+        }
+
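+        /* TEM (bits 27:23) lines up with CEXC (bits 4:0) after the
+           shift by 23, so this tests for an exception that was both
+           raised and unmasked.  */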
+        if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) {
+            CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+            /* Unmasked exception, generate a trap.  Note that while
+               the helper is marked as NO_WG, we can get away with
+               writing to cpu state along the exception path, since
+               TCG generated code will never see the write.  */
+            env->fsr = fsr | FSR_FTT_IEEE_EXCP;
+            cs->exception_index = TT_FP_EXCP;
+            cpu_loop_exit_restore(cs, ra);
+        } else {
+            /* Accumulate exceptions */
+            fsr |= (fsr & FSR_CEXC_MASK) << 5;
+        }
+    }
+
+    return fsr;
+}
+
+target_ulong helper_check_ieee_exceptions(CPUSPARCState *env)
+{
+    return do_check_ieee_exceptions(env, GETPC());
+}
+
+#define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env)
+
+#define F_BINOP(name)                                           \
+    float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \
+                                   float32 src2)                \
+    {                                                           \
+        return float32_ ## name (src1, src2, &env->fp_status);  \
+    }                                                           \
+    float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\
+                                   float64 src2)                \
+    {                                                           \
+        return float64_ ## name (src1, src2, &env->fp_status);  \
+    }                                                           \
+    F_HELPER(name, q)                                           \
+    {                                                           \
+        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
+    }
+
+F_BINOP(add);
+F_BINOP(sub);
+F_BINOP(mul);
+F_BINOP(div);
+#undef F_BINOP
+
+float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2)
+{
+    return float64_mul(float32_to_float64(src1, &env->fp_status),
+                       float32_to_float64(src2, &env->fp_status),
+                       &env->fp_status);
+}
+
+void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2)
+{
+    QT0 = float128_mul(float64_to_float128(src1, &env->fp_status),
+                       float64_to_float128(src2, &env->fp_status),
+                       &env->fp_status);
+}
+
+float32 helper_fnegs(float32 src)
+{
+    return float32_chs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fnegd(float64 src)
+{
+    return float64_chs(src);
+}
+
+F_HELPER(neg, q)
+{
+    QT0 = float128_chs(QT1);
+}
+#endif
+
+/* Integer to float conversion.  */
+float32 helper_fitos(CPUSPARCState *env, int32_t src)
+{
+    return int32_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fitod(CPUSPARCState *env, int32_t src)
+{
+    return int32_to_float64(src, &env->fp_status);
+}
+
+void helper_fitoq(CPUSPARCState *env, int32_t src)
+{
+    QT0 = int32_to_float128(src, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+float32 helper_fxtos(CPUSPARCState *env, int64_t src)
+{
+    return int64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fxtod(CPUSPARCState *env, int64_t src)
+{
+    return int64_to_float64(src, &env->fp_status);
+}
+
+void helper_fxtoq(CPUSPARCState *env, int64_t src)
+{
+    QT0 = int64_to_float128(src, &env->fp_status);
+}
+#endif
+#undef F_HELPER
+
+/* floating point conversion */
+float32 helper_fdtos(CPUSPARCState *env, float64 src)
+{
+    return float64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fstod(CPUSPARCState *env, float32 src)
+{
+    return float32_to_float64(src, &env->fp_status);
+}
+
+float32 helper_fqtos(CPUSPARCState *env)
+{
+    return float128_to_float32(QT1, &env->fp_status);
+}
+
+void helper_fstoq(CPUSPARCState *env, float32 src)
+{
+    QT0 = float32_to_float128(src, &env->fp_status);
+}
+
+float64 helper_fqtod(CPUSPARCState *env)
+{
+    return float128_to_float64(QT1, &env->fp_status);
+}
+
+void helper_fdtoq(CPUSPARCState *env, float64 src)
+{
+    QT0 = float64_to_float128(src, &env->fp_status);
+}
+
+/* Float to integer conversion.  */
+int32_t helper_fstoi(CPUSPARCState *env, float32 src)
+{
+    return float32_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fdtoi(CPUSPARCState *env, float64 src)
+{
+    return float64_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fqtoi(CPUSPARCState *env)
+{
+    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_fstox(CPUSPARCState *env, float32 src)
+{
+    return float32_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fdtox(CPUSPARCState *env, float64 src)
+{
+    return float64_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fqtox(CPUSPARCState *env)
+{
+    return float128_to_int64_round_to_zero(QT1, &env->fp_status);
+}
+#endif
+
+float32 helper_fabss(float32 src)
+{
+    return float32_abs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fabsd(float64 src)
+{
+    return float64_abs(src);
+}
+
+void helper_fabsq(CPUSPARCState *env)
+{
+    QT0 = float128_abs(QT1);
+}
+#endif
+
+float32 helper_fsqrts(CPUSPARCState *env, float32 src)
+{
+    return float32_sqrt(src, &env->fp_status);
+}
+
+float64 helper_fsqrtd(CPUSPARCState *env, float64 src)
+{
+    return float64_sqrt(src, &env->fp_status);
+}
+
+void helper_fsqrtq(CPUSPARCState *env)
+{
+    QT0 = float128_sqrt(QT1, &env->fp_status);
+}
+
+#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
+    target_ulong glue(helper_, name) (CPUSPARCState *env)               \
+    {                                                                   \
+        int ret;                                                        \
+        target_ulong fsr;                                               \
+        if (E) {                                                        \
+            ret = glue(size, _compare)(reg1, reg2, &env->fp_status);    \
+        } else {                                                        \
+            ret = glue(size, _compare_quiet)(reg1, reg2,                \
+                                             &env->fp_status);          \
+        }                                                               \
+        fsr = do_check_ieee_exceptions(env, GETPC());                   \
+        switch (ret) {                                                  \
+        case float_relation_unordered:                                  \
+            fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                         \
+            fsr |= FSR_NVA;                                             \
+            break;                                                      \
+        case float_relation_less:                                       \
+            fsr &= ~(FSR_FCC1 << FS);                                   \
+            fsr |= FSR_FCC0 << FS;                                      \
+            break;                                                      \
+        case float_relation_greater:                                    \
+            fsr &= ~(FSR_FCC0 << FS);                                   \
+            fsr |= FSR_FCC1 << FS;                                      \
+            break;                                                      \
+        default:                                                        \
+            fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                      \
+            break;                                                      \
+        }                                                               \
+        return fsr;                                                     \
+    }
+#define GEN_FCMP_T(name, size, FS, E)                                   \
+    target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\
+    {                                                                   \
+        int ret;                                                        \
+        target_ulong fsr;                                               \
+        if (E) {                                                        \
+            ret = glue(size, _compare)(src1, src2, &env->fp_status);    \
+        } else {                                                        \
+            ret = glue(size, _compare_quiet)(src1, src2,                \
+                                             &env->fp_status);          \
+        }                                                               \
+        fsr = do_check_ieee_exceptions(env, GETPC());                   \
+        switch (ret) {                                                  \
+        case float_relation_unordered:                                  \
+            fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                         \
+            break;                                                      \
+        case float_relation_less:                                       \
+            fsr &= ~(FSR_FCC1 << FS);                                   \
+            fsr |= FSR_FCC0 << FS;                                      \
+            break;                                                      \
+        case float_relation_greater:                                    \
+            fsr &= ~(FSR_FCC0 << FS);                                   \
+            fsr |= FSR_FCC1 << FS;                                      \
+            break;                                                      \
+        default:                                                        \
+            fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                      \
+            break;                                                      \
+        }                                                               \
+        return fsr;                                                     \
+    }
+
+GEN_FCMP_T(fcmps, float32, 0, 0);
+GEN_FCMP_T(fcmpd, float64, 0, 0);
+
+GEN_FCMP_T(fcmpes, float32, 0, 1);
+GEN_FCMP_T(fcmped, float64, 0, 1);
+
+GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
+GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
+
+#ifdef TARGET_SPARC64
+GEN_FCMP_T(fcmps_fcc1, float32, 22, 0);
+GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0);
+GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
+
+GEN_FCMP_T(fcmps_fcc2, float32, 24, 0);
+GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0);
+GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
+
+GEN_FCMP_T(fcmps_fcc3, float32, 26, 0);
+GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0);
+GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
+
+GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1);
+GEN_FCMP_T(fcmped_fcc1, float64, 22, 1);
+GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
+
+GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1);
+GEN_FCMP_T(fcmped_fcc2, float64, 24, 1);
+GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
+
+GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1);
+GEN_FCMP_T(fcmped_fcc3, float64, 26, 1);
+GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
+#endif
+#undef GEN_FCMP_T
+#undef GEN_FCMP
+
+static void set_fsr(CPUSPARCState *env, target_ulong fsr)
+{
+    int rnd_mode;
+
+    switch (fsr & FSR_RD_MASK) {
+    case FSR_RD_NEAREST:
+        rnd_mode = float_round_nearest_even;
+        break;
+    default:
+    case FSR_RD_ZERO:
+        rnd_mode = float_round_to_zero;
+        break;
+    case FSR_RD_POS:
+        rnd_mode = float_round_up;
+        break;
+    case FSR_RD_NEG:
+        rnd_mode = float_round_down;
+        break;
+    }
+    set_float_rounding_mode(rnd_mode, &env->fp_status);
+}
+
+target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr,
+                          uint32_t new_fsr)
+{
+    old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK);
+    set_fsr(env, old_fsr);
+    return old_fsr;
+}
+
+#ifdef TARGET_SPARC64
+target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr,
+                           uint64_t new_fsr)
+{
+    old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK);
+    set_fsr(env, old_fsr);
+    return old_fsr;
+}
+#endif
diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
new file mode 100644
index 0000000000..ffc2baa2e7
--- /dev/null
+++ b/target/sparc/gdbstub.c
@@ -0,0 +1,209 @@
+/*
+ * SPARC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_ABI32
+#define gdb_get_rega(buf, val) gdb_get_reg32(buf, val)
+#else
+#define gdb_get_rega(buf, val) gdb_get_regl(buf, val)
+#endif
+
+int sparc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+
+    if (n < 8) {
+        /* g0..g7 */
+        return gdb_get_rega(mem_buf, env->gregs[n]);
+    }
+    if (n < 32) {
+        /* register window */
+        return gdb_get_rega(mem_buf, env->regwptr[n - 8]);
+    }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+    if (n < 64) {
+        /* fprs */
+        if (n & 1) {
+            return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+        } else {
+            return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+        }
+    }
+    /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+    switch (n) {
+    case 64:
+        return gdb_get_rega(mem_buf, env->y);
+    case 65:
+        return gdb_get_rega(mem_buf, cpu_get_psr(env));
+    case 66:
+        return gdb_get_rega(mem_buf, env->wim);
+    case 67:
+        return gdb_get_rega(mem_buf, env->tbr);
+    case 68:
+        return gdb_get_rega(mem_buf, env->pc);
+    case 69:
+        return gdb_get_rega(mem_buf, env->npc);
+    case 70:
+        return gdb_get_rega(mem_buf, env->fsr);
+    case 71:
+        return gdb_get_rega(mem_buf, 0); /* csr */
+    default:
+        return gdb_get_rega(mem_buf, 0);
+    }
+#else
+    if (n < 64) {
+        /* f0-f31 */
+        if (n & 1) {
+            return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+        } else {
+            return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+        }
+    }
+    if (n < 80) {
+        /* f32-f62 (double width, even numbers only) */
+        return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+    }
+    switch (n) {
+    case 80:
+        return gdb_get_regl(mem_buf, env->pc);
+    case 81:
+        return gdb_get_regl(mem_buf, env->npc);
+    case 82:
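+        /* Packed in the same layout as the V9 TSTATE register: CCR in
+           bits 39:32, ASI in 31:24, PSTATE in 19:8, CWP in 4:0.  */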
+        return gdb_get_regl(mem_buf, (cpu_get_ccr(env) << 32) |
+                                     ((env->asi & 0xff) << 24) |
+                                     ((env->pstate & 0xfff) << 8) |
+                                     cpu_get_cwp64(env));
+    case 83:
+        return gdb_get_regl(mem_buf, env->fsr);
+    case 84:
+        return gdb_get_regl(mem_buf, env->fprs);
+    case 85:
+        return gdb_get_regl(mem_buf, env->y);
+    }
+#endif
+    return 0;
+}
+
+int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+#if defined(TARGET_ABI32)
+    abi_ulong tmp;
+
+    tmp = ldl_p(mem_buf);
+#else
+    target_ulong tmp;
+
+    tmp = ldtul_p(mem_buf);
+#endif
+
+    if (n < 8) {
+        /* g0..g7 */
+        env->gregs[n] = tmp;
+    } else if (n < 32) {
+        /* register window */
+        env->regwptr[n - 8] = tmp;
+    }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+    else if (n < 64) {
+        /* fprs */
+        /* f0-f31 */
+        if (n & 1) {
+            env->fpr[(n - 32) / 2].l.lower = tmp;
+        } else {
+            env->fpr[(n - 32) / 2].l.upper = tmp;
+        }
+    } else {
+        /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+        switch (n) {
+        case 64:
+            env->y = tmp;
+            break;
+        case 65:
+            cpu_put_psr(env, tmp);
+            break;
+        case 66:
+            env->wim = tmp;
+            break;
+        case 67:
+            env->tbr = tmp;
+            break;
+        case 68:
+            env->pc = tmp;
+            break;
+        case 69:
+            env->npc = tmp;
+            break;
+        case 70:
+            env->fsr = tmp;
+            break;
+        default:
+            return 0;
+        }
+    }
+    return 4;
+#else
+    else if (n < 64) {
+        /* f0-f31 */
+        tmp = ldl_p(mem_buf);
+        if (n & 1) {
+            env->fpr[(n - 32) / 2].l.lower = tmp;
+        } else {
+            env->fpr[(n - 32) / 2].l.upper = tmp;
+        }
+        return 4;
+    } else if (n < 80) {
+        /* f32-f62 (double width, even numbers only) */
+        env->fpr[(n - 32) / 2].ll = tmp;
+    } else {
+        switch (n) {
+        case 80:
+            env->pc = tmp;
+            break;
+        case 81:
+            env->npc = tmp;
+            break;
+        case 82:
+            cpu_put_ccr(env, tmp >> 32);
+            env->asi = (tmp >> 24) & 0xff;
+            env->pstate = (tmp >> 8) & 0xfff;
+            cpu_put_cwp64(env, tmp & 0xff);
+            break;
+        case 83:
+            env->fsr = tmp;
+            break;
+        case 84:
+            env->fprs = tmp;
+            break;
+        case 85:
+            env->y = tmp;
+            break;
+        default:
+            return 0;
+        }
+    }
+    return 8;
+#endif
+}
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
new file mode 100644
index 0000000000..359b0b15ed
--- /dev/null
+++ b/target/sparc/helper.c
@@ -0,0 +1,257 @@
+/*
+ *  Misc Sparc helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "sysemu/sysemu.h"
+
+void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+    cs->exception_index = tt;
+    cpu_loop_exit_restore(cs, ra);
+}
+
+void helper_raise_exception(CPUSPARCState *env, int tt)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+    cs->exception_index = tt;
+    cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUSPARCState *env)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+    cs->exception_index = EXCP_DEBUG;
+    cpu_loop_exit(cs);
+}
+
+#ifdef TARGET_SPARC64
+target_ulong helper_popc(target_ulong val)
+{
+    return ctpop64(val);
+}
+
+void helper_tick_set_count(void *opaque, uint64_t count)
+{
+#if !defined(CONFIG_USER_ONLY)
+    cpu_tick_set_count(opaque, count);
+#endif
+}
+
+uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx)
+{
+#if !defined(CONFIG_USER_ONLY)
+    CPUTimer *timer = opaque;
+
+    if (timer->npt && mem_idx < MMU_KERNEL_IDX) {
+        cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC());
+    }
+
+    return cpu_tick_get_count(timer);
+#else
+    return 0;
+#endif
+}
+
+void helper_tick_set_limit(void *opaque, uint64_t limit)
+{
+#if !defined(CONFIG_USER_ONLY)
+    cpu_tick_set_limit(opaque, limit);
+#endif
+}
+#endif
+
+static target_ulong do_udiv(CPUSPARCState *env, target_ulong a,
+                            target_ulong b, int cc, uintptr_t ra)
+{
+    int overflow = 0;
+    uint64_t x0;
+    uint32_t x1;
+
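+    /* SPARC V8 UDIV divides the 64-bit value formed by Y:rs1 by the
+       low 32 bits of rs2.  */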
+    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+    x1 = (b & 0xffffffff);
+
+    if (x1 == 0) {
+        cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+    }
+
+    x0 = x0 / x1;
+    if (x0 > UINT32_MAX) {
+        x0 = UINT32_MAX;
+        overflow = 1;
+    }
+
+    if (cc) {
+        env->cc_dst = x0;
+        env->cc_src2 = overflow;
+        env->cc_op = CC_OP_DIV;
+    }
+    return x0;
+}
+
+target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+    return do_udiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+    return do_udiv(env, a, b, 1, GETPC());
+}
+
+static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a,
+                            target_ulong b, int cc, uintptr_t ra)
+{
+    int overflow = 0;
+    int64_t x0;
+    int32_t x1;
+
+    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+    x1 = (b & 0xffffffff);
+
+    if (x1 == 0) {
+        cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+    } else if (x1 == -1 && x0 == INT64_MIN) {
+        x0 = INT32_MAX;
+        overflow = 1;
+    } else {
+        x0 = x0 / x1;
+        if ((int32_t) x0 != x0) {
+            x0 = x0 < 0 ? INT32_MIN : INT32_MAX;
+            overflow = 1;
+        }
+    }
+
+    if (cc) {
+        env->cc_dst = x0;
+        env->cc_src2 = overflow;
+        env->cc_op = CC_OP_DIV;
+    }
+    return x0;
+}
+
+target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+    return do_sdiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+    return do_sdiv(env, a, b, 1, GETPC());
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
+{
+    if (b == 0) {
+        /* Raise divide by zero trap.  */
+        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+    } else if (b == -1) {
+        /* Avoid overflow trap with i386 divide insn.  */
+        return -a;
+    } else {
+        return a / b;
+    }
+}
+
+uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
+{
+    if (b == 0) {
+        /* Raise divide by zero trap.  */
+        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+    }
+    return a / b;
+}
+#endif
+
+target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
+                             target_ulong src2)
+{
+    target_ulong dst;
+
+    /* Tag overflow occurs if either input has bits 0 or 1 set.  */
+    if ((src1 | src2) & 3) {
+        goto tag_overflow;
+    }
+
+    dst = src1 + src2;
+
+    /* Tag overflow occurs if the addition overflows.  */
+    if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+        goto tag_overflow;
+    }
+
+    /* Only modify the CC after any exceptions have been generated.  */
+    env->cc_op = CC_OP_TADDTV;
+    env->cc_src = src1;
+    env->cc_src2 = src2;
+    env->cc_dst = dst;
+    return dst;
+
+ tag_overflow:
+    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
+                             target_ulong src2)
+{
+    target_ulong dst;
+
+    /* Tag overflow occurs if either input has bits 0 or 1 set.  */
+    if ((src1 | src2) & 3) {
+        goto tag_overflow;
+    }
+
+    dst = src1 - src2;
+
+    /* Tag overflow occurs if the subtraction overflows.  */
+    if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+        goto tag_overflow;
+    }
+
+    /* Only modify the CC after any exceptions have been generated.  */
+    env->cc_op = CC_OP_TSUBTV;
+    env->cc_src = src1;
+    env->cc_src2 = src2;
+    env->cc_dst = dst;
+    return dst;
+
+ tag_overflow:
+    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+#ifndef TARGET_SPARC64
+void helper_power_down(CPUSPARCState *env)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+    cs->halted = 1;
+    cs->exception_index = EXCP_HLT;
+    env->pc = env->npc;
+    env->npc = env->pc + 4;
+    cpu_loop_exit(cs);
+}
+#endif
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
new file mode 100644
index 0000000000..0cf1bfb73a
--- /dev/null
+++ b/target/sparc/helper.h
@@ -0,0 +1,168 @@
+#ifndef TARGET_SPARC64
+DEF_HELPER_1(rett, void, env)
+DEF_HELPER_2(wrpsr, void, env, tl)
+DEF_HELPER_1(rdpsr, tl, env)
+DEF_HELPER_1(power_down, void, env)
+#else
+DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(wrpstate, void, env, tl)
+DEF_HELPER_1(done, void, env)
+DEF_HELPER_1(retry, void, env)
+DEF_HELPER_FLAGS_1(flushw, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_1(saved, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(restored, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_1(rdccr, tl, env)
+DEF_HELPER_2(wrccr, void, env, tl)
+DEF_HELPER_1(rdcwp, tl, env)
+DEF_HELPER_2(wrcwp, void, env, tl)
+DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(popc, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64)
+DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int)
+DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64)
+#endif
+DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(save, void, env)
+DEF_HELPER_1(restore, void, env)
+DEF_HELPER_3(udiv, tl, env, tl, tl)
+DEF_HELPER_3(udiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(sdiv, tl, env, tl, tl)
+DEF_HELPER_3(sdiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(taddcctv, tl, env, tl, tl)
+DEF_HELPER_3(tsubcctv, tl, env, tl, tl)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64)
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
+DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
+#endif
+DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32)
+DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64)
+DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env)
+#endif
+DEF_HELPER_2(raise_exception, noreturn, env, int)
+#define F_HELPER_0_1(name) \
+  DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+F_HELPER_0_1(addq)
+F_HELPER_0_1(subq)
+F_HELPER_0_1(mulq)
+F_HELPER_0_1(divq)
+
+DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+
+DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64)
+
+DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32)
+DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32)
+
+DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32)
+
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64)
+DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64)
+DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64)
+#endif
+DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64)
+DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32)
+DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env)
+DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32)
+DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env)
+DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64)
+DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64)
+DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env)
+
+DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+#define VIS_HELPER(name)                                                 \
+    DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE,  \
+                       i64, i64, i64)                                    \
+    DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \
+                       i32, i32, i32)                                    \
+    DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE,  \
+                       i64, i64, i64)                                    \
+    DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
+                       i32, i32, i32)
+
+VIS_HELPER(padd)
+VIS_HELPER(psub)
+#define VIS_CMPHELPER(name)                                              \
+    DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE,      \
+                       i64, i64, i64)                                    \
+    DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE,      \
+                       i64, i64, i64)
+VIS_CMPHELPER(cmpgt)
+VIS_CMPHELPER(cmpeq)
+VIS_CMPHELPER(cmple)
+VIS_CMPHELPER(cmpne)
+#endif
+#undef F_HELPER_0_1
+#undef VIS_HELPER
+#undef VIS_CMPHELPER
+DEF_HELPER_1(compute_psr, void, env)
+DEF_HELPER_FLAGS_1(compute_C_icc, TCG_CALL_NO_WG_SE, i32, env)
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
new file mode 100644
index 0000000000..09afe136e5
--- /dev/null
+++ b/target/sparc/int32_helper.c
@@ -0,0 +1,175 @@
+/*
+ * Sparc32 interrupt helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "trace.h"
+#include "sysemu/sysemu.h"
+#include "exec/log.h"
+
+#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+    [TT_TFAULT] = "Instruction Access Fault",
+    [TT_ILL_INSN] = "Illegal Instruction",
+    [TT_PRIV_INSN] = "Privileged Instruction",
+    [TT_NFPU_INSN] = "FPU Disabled",
+    [TT_WIN_OVF] = "Window Overflow",
+    [TT_WIN_UNF] = "Window Underflow",
+    [TT_UNALIGNED] = "Unaligned Memory Access",
+    [TT_FP_EXCP] = "FPU Exception",
+    [TT_DFAULT] = "Data Access Fault",
+    [TT_TOVF] = "Tag Overflow",
+    [TT_EXTINT | 0x1] = "External Interrupt 1",
+    [TT_EXTINT | 0x2] = "External Interrupt 2",
+    [TT_EXTINT | 0x3] = "External Interrupt 3",
+    [TT_EXTINT | 0x4] = "External Interrupt 4",
+    [TT_EXTINT | 0x5] = "External Interrupt 5",
+    [TT_EXTINT | 0x6] = "External Interrupt 6",
+    [TT_EXTINT | 0x7] = "External Interrupt 7",
+    [TT_EXTINT | 0x8] = "External Interrupt 8",
+    [TT_EXTINT | 0x9] = "External Interrupt 9",
+    [TT_EXTINT | 0xa] = "External Interrupt 10",
+    [TT_EXTINT | 0xb] = "External Interrupt 11",
+    [TT_EXTINT | 0xc] = "External Interrupt 12",
+    [TT_EXTINT | 0xd] = "External Interrupt 13",
+    [TT_EXTINT | 0xe] = "External Interrupt 14",
+    [TT_EXTINT | 0xf] = "External Interrupt 15",
+    [TT_TOVF] = "Tag Overflow",
+    [TT_CODE_ACCESS] = "Instruction Access Error",
+    [TT_DATA_ACCESS] = "Data Access Error",
+    [TT_DIV_ZERO] = "Division By Zero",
+    [TT_NCP_INSN] = "Coprocessor Disabled",
+};
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    int cwp, intno = cs->exception_index;
+
+    /* Compute PSR before exposing state.  */
+    if (env->cc_op != CC_OP_FLAGS) {
+        cpu_get_psr(env);
+    }
+
+#ifdef DEBUG_PCALL
+    if (qemu_loglevel_mask(CPU_LOG_INT)) {
+        static int count;
+        const char *name;
+
+        if (intno < 0 || intno >= 0x100) {
+            name = "Unknown";
+        } else if (intno >= 0x80) {
+            name = "Trap Instruction";
+        } else {
+            name = excp_names[intno];
+            if (!name) {
+                name = "Unknown";
+            }
+        }
+
+        qemu_log("%6d: %s (v=%02x)\n", count, name, intno);
+        log_cpu_state(cs, 0);
+#if 0
+        {
+            int i;
+            uint8_t *ptr;
+
+            qemu_log("       code=");
+            ptr = (uint8_t *)env->pc;
+            for (i = 0; i < 16; i++) {
+                qemu_log(" %02x", ldub(ptr + i));
+            }
+            qemu_log("\n");
+        }
+#endif
+        count++;
+    }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+    if (env->psret == 0) {
+        if (cs->exception_index == 0x80 &&
+            env->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
+            qemu_system_shutdown_request();
+        } else {
+            cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state",
+                      cs->exception_index);
+        }
+        return;
+    }
+#endif
+    env->psret = 0;
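+    /* Enter the trap: rotate to a new register window and save
+       PC/nPC in the new window's %l1 and %l2.  */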
+    cwp = cpu_cwp_dec(env, env->cwp - 1);
+    cpu_set_cwp(env, cwp);
+    env->regwptr[9] = env->pc;
+    env->regwptr[10] = env->npc;
+    env->psrps = env->psrs;
+    env->psrs = 1;
+    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
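+    /* Vector through TBR: trap base in bits 31:12, trap type in 11:4. */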
+    env->pc = env->tbr;
+    env->npc = env->pc + 4;
+    cs->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
+    /* IRQ acknowledgment */
+    if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
+        env->qemu_irq_ack(env, env->irq_manager, intno);
+    }
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void leon3_cache_control_int(CPUSPARCState *env)
+{
+    uint32_t state = 0;
+
+    if (env->cache_control & CACHE_CTRL_IF) {
+        /* Instruction cache state */
+        state = env->cache_control & CACHE_STATE_MASK;
+        if (state == CACHE_ENABLED) {
+            state = CACHE_FROZEN;
+            trace_int_helper_icache_freeze();
+        }
+
+        env->cache_control &= ~CACHE_STATE_MASK;
+        env->cache_control |= state;
+    }
+
+    if (env->cache_control & CACHE_CTRL_DF) {
+        /* Data cache state */
+        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
+        if (state == CACHE_ENABLED) {
+            state = CACHE_FROZEN;
+            trace_int_helper_dcache_freeze();
+        }
+
+        env->cache_control &= ~(CACHE_STATE_MASK << 2);
+        env->cache_control |= (state << 2);
+    }
+}
+
+void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno)
+{
+    leon3_irq_ack(irq_manager, intno);
+    leon3_cache_control_int(env);
+}
+#endif
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
new file mode 100644
index 0000000000..29360fa5fe
--- /dev/null
+++ b/target/sparc/int64_helper.c
@@ -0,0 +1,205 @@
+/*
+ * Sparc64 interrupt helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+#include "trace.h"
+
+#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+    [TT_TFAULT] = "Instruction Access Fault",
+    [TT_TMISS] = "Instruction Access MMU Miss",
+    [TT_CODE_ACCESS] = "Instruction Access Error",
+    [TT_ILL_INSN] = "Illegal Instruction",
+    [TT_PRIV_INSN] = "Privileged Instruction",
+    [TT_NFPU_INSN] = "FPU Disabled",
+    [TT_FP_EXCP] = "FPU Exception",
+    [TT_TOVF] = "Tag Overflow",
+    [TT_CLRWIN] = "Clean Windows",
+    [TT_DIV_ZERO] = "Division By Zero",
+    [TT_DFAULT] = "Data Access Fault",
+    [TT_DMISS] = "Data Access MMU Miss",
+    [TT_DATA_ACCESS] = "Data Access Error",
+    [TT_DPROT] = "Data Protection Error",
+    [TT_UNALIGNED] = "Unaligned Memory Access",
+    [TT_PRIV_ACT] = "Privileged Action",
+    [TT_EXTINT | 0x1] = "External Interrupt 1",
+    [TT_EXTINT | 0x2] = "External Interrupt 2",
+    [TT_EXTINT | 0x3] = "External Interrupt 3",
+    [TT_EXTINT | 0x4] = "External Interrupt 4",
+    [TT_EXTINT | 0x5] = "External Interrupt 5",
+    [TT_EXTINT | 0x6] = "External Interrupt 6",
+    [TT_EXTINT | 0x7] = "External Interrupt 7",
+    [TT_EXTINT | 0x8] = "External Interrupt 8",
+    [TT_EXTINT | 0x9] = "External Interrupt 9",
+    [TT_EXTINT | 0xa] = "External Interrupt 10",
+    [TT_EXTINT | 0xb] = "External Interrupt 11",
+    [TT_EXTINT | 0xc] = "External Interrupt 12",
+    [TT_EXTINT | 0xd] = "External Interrupt 13",
+    [TT_EXTINT | 0xe] = "External Interrupt 14",
+    [TT_EXTINT | 0xf] = "External Interrupt 15",
+};
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    int intno = cs->exception_index;
+    trap_state *tsptr;
+
+    /* Compute PSR before exposing state.  */
+    if (env->cc_op != CC_OP_FLAGS) {
+        cpu_get_psr(env);
+    }
+
+#ifdef DEBUG_PCALL
+    if (qemu_loglevel_mask(CPU_LOG_INT)) {
+        static int count;
+        const char *name;
+
+        if (intno < 0 || intno >= 0x180) {
+            name = "Unknown";
+        } else if (intno >= 0x100) {
+            name = "Trap Instruction";
+        } else if (intno >= 0xc0) {
+            name = "Window Fill";
+        } else if (intno >= 0x80) {
+            name = "Window Spill";
+        } else {
+            name = excp_names[intno];
+            if (!name) {
+                name = "Unknown";
+            }
+        }
+
+        qemu_log("%6d: %s (v=%04x)\n", count, name, intno);
+        log_cpu_state(cs, 0);
+#if 0
+        {
+            int i;
+            uint8_t *ptr;
+
+            qemu_log("       code=");
+            ptr = (uint8_t *)env->pc;
+            for (i = 0; i < 16; i++) {
+                qemu_log(" %02x", ldub(ptr + i));
+            }
+            qemu_log("\n");
+        }
+#endif
+        count++;
+    }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+    if (env->tl >= env->maxtl) {
+        cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
+                  " Error state", cs->exception_index, env->tl, env->maxtl);
+        return;
+    }
+#endif
+    if (env->tl < env->maxtl - 1) {
+        env->tl++;
+    } else {
+        env->pstate |= PS_RED;
+        if (env->tl < env->maxtl) {
+            env->tl++;
+        }
+    }
+    tsptr = cpu_tsptr(env);
+
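+    /* TSTATE layout: CCR in bits 39:32, ASI in 31:24, PSTATE in 19:8,
+       CWP in bits 4:0.  */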
+    tsptr->tstate = (cpu_get_ccr(env) << 32) |
+        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
+        cpu_get_cwp64(env);
+    tsptr->tpc = env->pc;
+    tsptr->tnpc = env->npc;
+    tsptr->tt = intno;
+
+    switch (intno) {
+    case TT_IVEC:
+        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
+        break;
+    case TT_TFAULT:
+    case TT_DFAULT:
+    case TT_TMISS ... TT_TMISS + 3:
+    case TT_DMISS ... TT_DMISS + 3:
+    case TT_DPROT ... TT_DPROT + 3:
+        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
+        break;
+    default:
+        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
+        break;
+    }
+
+    if (intno == TT_CLRWIN) {
+        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
+    } else if ((intno & 0x1c0) == TT_SPILL) {
+        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
+    } else if ((intno & 0x1c0) == TT_FILL) {
+        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
+    }
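+    /* Trap vector: TBA<63:15>, bit 14 set when trapping from TL > 0,
+       TT << 5 (each table entry holds eight instructions).  */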
+    env->pc = env->tbr  & ~0x7fffULL;
+    env->pc |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+    env->npc = env->pc + 4;
+    cs->exception_index = -1;
+}
+
+trap_state *cpu_tsptr(CPUSPARCState* env)
+{
+    return &env->ts[env->tl & MAXTL_MASK];
+}
+
+static bool do_modify_softint(CPUSPARCState *env, uint32_t value)
+{
+    if (env->softint != value) {
+        env->softint = value;
+#if !defined(CONFIG_USER_ONLY)
+        if (cpu_interrupts_enabled(env)) {
+            cpu_check_irqs(env);
+        }
+#endif
+        return true;
+    }
+    return false;
+}
+
+void helper_set_softint(CPUSPARCState *env, uint64_t value)
+{
+    if (do_modify_softint(env, env->softint | (uint32_t)value)) {
+        trace_int_helper_set_softint(env->softint);
+    }
+}
+
+void helper_clear_softint(CPUSPARCState *env, uint64_t value)
+{
+    if (do_modify_softint(env, env->softint & (uint32_t)~value)) {
+        trace_int_helper_clear_softint(env->softint);
+    }
+}
+
+void helper_write_softint(CPUSPARCState *env, uint64_t value)
+{
+    if (do_modify_softint(env, (uint32_t)value)) {
+        trace_int_helper_write_softint(env->softint);
+    }
+}
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
new file mode 100644
index 0000000000..de7d53ae20
--- /dev/null
+++ b/target/sparc/ldst_helper.c
@@ -0,0 +1,1709 @@
+/*
+ * Helpers for loads and stores
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "asi.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_MXCC
+//#define DEBUG_UNALIGNED
+//#define DEBUG_UNASSIGNED
+//#define DEBUG_ASI
+//#define DEBUG_CACHE_CONTROL
+
+#ifdef DEBUG_MMU
+#define DPRINTF_MMU(fmt, ...)                                   \
+    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MMU(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_MXCC
+#define DPRINTF_MXCC(fmt, ...)                                  \
+    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MXCC(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_ASI
+#define DPRINTF_ASI(fmt, ...)                                   \
+    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
+#endif
+
+#ifdef DEBUG_CACHE_CONTROL
+#define DPRINTF_CACHE_CONTROL(fmt, ...)                                 \
+    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
+#else
+#define AM_CHECK(env1) (1)
+#endif
+#endif
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+/* Calculates TSB pointer value for fault page size 8k or 64k */
+static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
+                                       uint64_t tag_access_register,
+                                       int page_size)
+{
+    uint64_t tsb_base = tsb_register & ~0x1fffULL;
+    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
+    int tsb_size  = tsb_register & 0xf;
+
+    /* discard lower 13 bits which hold tag access context */
+    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
+
+    /* now reorder bits */
+    uint64_t tsb_base_mask = ~0x1fffULL;
+    uint64_t va = tag_access_va;
+
+    /* move va bits to correct position */
+    if (page_size == 8*1024) {
+        va >>= 9;
+    } else if (page_size == 64*1024) {
+        va >>= 12;
+    }
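+    /* Each TSB entry is 16 bytes, so the lowest VA bit of the index
+       (bit 13 for 8k pages, bit 16 for 64k) must land at offset bit 4,
+       hence the shifts by 9 and 12 above.  */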
+
+    if (tsb_size) {
+        tsb_base_mask <<= tsb_size;
+    }
+
+    /* calculate tsb_base mask and adjust va if split is in use */
+    if (tsb_split) {
+        if (page_size == 8*1024) {
+            va &= ~(1ULL << (13 + tsb_size));
+        } else if (page_size == 64*1024) {
+            va |= (1ULL << (13 + tsb_size));
+        }
+        tsb_base_mask <<= 1;
+    }
+
+    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
+}
+
+/* Calculates tag target register value by reordering bits
+   in tag access register */
+static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
+{
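+    /* Tag Target layout: context in bits 60:48, VA<63:22> in bits 41:0. */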
+    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
+}
+
+static void replace_tlb_entry(SparcTLBEntry *tlb,
+                              uint64_t tlb_tag, uint64_t tlb_tte,
+                              CPUSPARCState *env1)
+{
+    target_ulong mask, size, va, offset;
+
+    /* flush page range if translation is valid */
+    if (TTE_IS_VALID(tlb->tte)) {
+        CPUState *cs = CPU(sparc_env_get_cpu(env1));
+
+        mask = 0xffffffffffffe000ULL;
+        mask <<= 3 * ((tlb->tte >> 61) & 3);
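+        /* TTE size field (bits 62:61) selects 8k, 64k, 512k or 4M pages;
+           each step widens the page mask by 3 bits.  */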
+        size = ~mask + 1;
+
+        va = tlb->tag & mask;
+
+        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
+            tlb_flush_page(cs, va + offset);
+        }
+    }
+
+    tlb->tag = tlb_tag;
+    tlb->tte = tlb_tte;
+}
+
+static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
+                      const char *strmmu, CPUSPARCState *env1)
+{
+    unsigned int i;
+    target_ulong mask;
+    uint64_t context;
+
+    int is_demap_context = (demap_addr >> 6) & 1;
+
+    /* demap context */
+    switch ((demap_addr >> 4) & 3) {
+    case 0: /* primary */
+        context = env1->dmmu.mmu_primary_context;
+        break;
+    case 1: /* secondary */
+        context = env1->dmmu.mmu_secondary_context;
+        break;
+    case 2: /* nucleus */
+        context = 0;
+        break;
+    case 3: /* reserved */
+    default:
+        return;
+    }
+
+    for (i = 0; i < 64; i++) {
+        if (TTE_IS_VALID(tlb[i].tte)) {
+
+            if (is_demap_context) {
+                /* will remove non-global entries matching context value */
+                if (TTE_IS_GLOBAL(tlb[i].tte) ||
+                    !tlb_compare_context(&tlb[i], context)) {
+                    continue;
+                }
+            } else {
+                /* demap page
+                   will remove any entry matching VA */
+                mask = 0xffffffffffffe000ULL;
+                mask <<= 3 * ((tlb[i].tte >> 61) & 3);
+
+                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
+                    continue;
+                }
+
+                /* entry should be global or matching context value */
+                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
+                    !tlb_compare_context(&tlb[i], context)) {
+                    continue;
+                }
+            }
+
+            replace_tlb_entry(&tlb[i], 0, 0, env1);
+#ifdef DEBUG_MMU
+            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
+            dump_mmu(stdout, fprintf, env1);
+#endif
+        }
+    }
+}
+
+static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
+                                 uint64_t tlb_tag, uint64_t tlb_tte,
+                                 const char *strmmu, CPUSPARCState *env1)
+{
+    unsigned int i, replace_used;
+
+    /* Try replacing invalid entry */
+    for (i = 0; i < 64; i++) {
+        if (!TTE_IS_VALID(tlb[i].tte)) {
+            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
+            dump_mmu(stdout, fprintf, env1);
+#endif
+            return;
+        }
+    }
+
+    /* All entries are valid, try replacing unlocked entry */
+
+    for (replace_used = 0; replace_used < 2; ++replace_used) {
+
+        /* Used entries are not replaced on first pass */
+
+        for (i = 0; i < 64; i++) {
+            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
+
+                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
+                            strmmu, (replace_used ? "used" : "unused"), i);
+                dump_mmu(stdout, fprintf, env1);
+#endif
+                return;
+            }
+        }
+
+        /* Now reset used bit and search for unused entries again */
+
+        for (i = 0; i < 64; i++) {
+            TTE_SET_UNUSED(tlb[i].tte);
+        }
+    }
+
+#ifdef DEBUG_MMU
+    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
+#endif
+    /* error state? */
+}
+
+#endif
+
+#ifdef TARGET_SPARC64
+/* Returns true if an access using this ASI has its address translated by
+   the MMU; otherwise the access goes to the raw physical address. */
+/* TODO: check sparc32 bits */
+static inline int is_translating_asi(int asi)
+{
+    /* UltraSPARC IIi translating ASIs
+       - note this list is defined by the CPU implementation
+    */
+    switch (asi) {
+    case 0x04 ... 0x11:
+    case 0x16 ... 0x19:
+    case 0x1E ... 0x1F:
+    case 0x24 ... 0x2C:
+    case 0x70 ... 0x73:
+    case 0x78 ... 0x79:
+    case 0x80 ... 0xFF:
+        return 1;
+
+    default:
+        return 0;
+    }
+}
+
+static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
+{
+    if (AM_CHECK(env1)) {
+        addr &= 0xffffffffULL;
+    }
+    return addr;
+}
+
+static inline target_ulong asi_address_mask(CPUSPARCState *env,
+                                            int asi, target_ulong addr)
+{
+    if (is_translating_asi(asi)) {
+        addr = address_mask(env, addr);
+    }
+    return addr;
+}
+#endif
+
+static void do_check_align(CPUSPARCState *env, target_ulong addr,
+                           uint32_t align, uintptr_t ra)
+{
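+    /* 'align' is a mask of the low address bits that must be zero,
+       e.g. size - 1 for a naturally aligned access.  */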
+    if (addr & align) {
+#ifdef DEBUG_UNALIGNED
+        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+               "\n", addr, env->pc);
+#endif
+        cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
+    }
+}
+
+void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
+{
+    do_check_align(env, addr, align, GETPC());
+}
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) &&   \
+    defined(DEBUG_MXCC)
+static void dump_mxcc(CPUSPARCState *env)
+{
+    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+           "\n",
+           env->mxccdata[0], env->mxccdata[1],
+           env->mxccdata[2], env->mxccdata[3]);
+    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+           "\n"
+           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+           "\n",
+           env->mxccregs[0], env->mxccregs[1],
+           env->mxccregs[2], env->mxccregs[3],
+           env->mxccregs[4], env->mxccregs[5],
+           env->mxccregs[6], env->mxccregs[7]);
+}
+#endif
+
+#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY))     \
+    && defined(DEBUG_ASI)
+static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
+                     uint64_t r1)
+{
+    switch (size) {
+    case 1:
+        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
+                    addr, asi, r1 & 0xff);
+        break;
+    case 2:
+        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
+                    addr, asi, r1 & 0xffff);
+        break;
+    case 4:
+        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
+                    addr, asi, r1 & 0xffffffff);
+        break;
+    case 8:
+        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
+                    addr, asi, r1);
+        break;
+    }
+}
+#endif
+
+#ifndef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+
+
+/* Leon3 cache control */
+
+static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
+                                   uint64_t val, int size)
+{
+    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
+                          addr, val, size);
+
+    if (size != 4) {
+        DPRINTF_CACHE_CONTROL("32bits only\n");
+        return;
+    }
+
+    switch (addr) {
+    case 0x00:              /* Cache control */
+
+        /* These values must always be read as zeros */
+        val &= ~CACHE_CTRL_FD;
+        val &= ~CACHE_CTRL_FI;
+        val &= ~CACHE_CTRL_IB;
+        val &= ~CACHE_CTRL_IP;
+        val &= ~CACHE_CTRL_DP;
+
+        env->cache_control = val;
+        break;
+    case 0x04:              /* Instruction cache configuration */
+    case 0x08:              /* Data cache configuration */
+        /* Read Only */
+        break;
+    default:
+        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
+        break;
+    }
+}
+
+static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
+                                       int size)
+{
+    uint64_t ret = 0;
+
+    if (size != 4) {
+        DPRINTF_CACHE_CONTROL("32bits only\n");
+        return 0;
+    }
+
+    switch (addr) {
+    case 0x00:              /* Cache control */
+        ret = env->cache_control;
+        break;
+
+        /* Configuration registers are read-only and always keep
+           their predefined values */
+
+    case 0x04:              /* Instruction cache configuration */
+        ret = 0x10220000;
+        break;
+    case 0x08:              /* Data cache configuration */
+        ret = 0x18220000;
+        break;
+    default:
+        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
+        break;
+    }
+    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
+                          addr, ret, size);
+    return ret;
+}
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+                       int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+    int sign = memop & MO_SIGN;
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    uint64_t ret = 0;
+#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
+    uint32_t last_addr = addr;
+#endif
+
+    do_check_align(env, addr, size - 1, GETPC());
+    switch (asi) {
+    case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+    /* case ASI_LEON_CACHEREGS:  Leon3 cache control */
+        switch (addr) {
+        case 0x00:          /* Leon3 Cache Control */
+        case 0x08:          /* Leon3 Instruction Cache config */
+        case 0x0C:          /* Leon3 Data Cache config */
+            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+                ret = leon3_cache_control_ld(env, addr, size);
+            }
+            break;
+        case 0x01c00a00: /* MXCC control register */
+            if (size == 8) {
+                ret = env->mxccregs[3];
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00a04: /* MXCC control register */
+            if (size == 4) {
+                ret = env->mxccregs[3];
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00c00: /* Module reset register */
+            if (size == 8) {
+                ret = env->mxccregs[5];
+                /* should we do something here? */
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00f00: /* MBus port address register */
+            if (size == 8) {
+                ret = env->mxccregs[7];
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        default:
+            qemu_log_mask(LOG_UNIMP,
+                          "%08x: unimplemented address, size: %d\n", addr,
+                          size);
+            break;
+        }
+        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
+                     "addr = %08x -> ret = %" PRIx64 ","
+                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
+#ifdef DEBUG_MXCC
+        dump_mxcc(env);
+#endif
+        break;
+    case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
+    case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
+        {
+            int mmulev;
+
+            mmulev = (addr >> 8) & 15;
+            if (mmulev > 4) {
+                ret = 0;
+            } else {
+                ret = mmu_probe(env, addr, mmulev);
+            }
+            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
+                        addr, mmulev, ret);
+        }
+        break;
+    case ASI_M_MMUREGS: /* SuperSparc MMU regs */
+    case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
+        {
+            int reg = (addr >> 8) & 0x1f;
+
+            ret = env->mmuregs[reg];
+            if (reg == 3) { /* Fault status cleared on read */
+                env->mmuregs[3] = 0;
+            } else if (reg == 0x13) { /* Fault status read */
+                ret = env->mmuregs[3];
+            } else if (reg == 0x14) { /* Fault address read */
+                ret = env->mmuregs[4];
+            }
+            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
+        }
+        break;
+    case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+    case ASI_M_DIAGS:   /* Turbosparc DTLB Diagnostic */
+    case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
+        break;
+    case ASI_KERNELTXT: /* Supervisor code access */
+        switch (size) {
+        case 1:
+            ret = cpu_ldub_code(env, addr);
+            break;
+        case 2:
+            ret = cpu_lduw_code(env, addr);
+            break;
+        default:
+        case 4:
+            ret = cpu_ldl_code(env, addr);
+            break;
+        case 8:
+            ret = cpu_ldq_code(env, addr);
+            break;
+        }
+        break;
+    case ASI_M_TXTC_TAG:   /* SparcStation 5 I-cache tag */
+    case ASI_M_TXTC_DATA:  /* SparcStation 5 I-cache data */
+    case ASI_M_DATAC_TAG:  /* SparcStation 5 D-cache tag */
+    case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
+        break;
+    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+        switch (size) {
+        case 1:
+            ret = ldub_phys(cs->as, (hwaddr)addr
+                            | ((hwaddr)(asi & 0xf) << 32));
+            break;
+        case 2:
+            ret = lduw_phys(cs->as, (hwaddr)addr
+                            | ((hwaddr)(asi & 0xf) << 32));
+            break;
+        default:
+        case 4:
+            ret = ldl_phys(cs->as, (hwaddr)addr
+                           | ((hwaddr)(asi & 0xf) << 32));
+            break;
+        case 8:
+            ret = ldq_phys(cs->as, (hwaddr)addr
+                           | ((hwaddr)(asi & 0xf) << 32));
+            break;
+        }
+        break;
+    case 0x30: /* Turbosparc secondary cache diagnostic */
+    case 0x31: /* Turbosparc RAM snoop */
+    case 0x32: /* Turbosparc page table descriptor diagnostic */
+    case 0x39: /* data cache diagnostic register */
+        ret = 0;
+        break;
+    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
+        {
+            int reg = (addr >> 8) & 3;
+
+            switch (reg) {
+            case 0: /* Breakpoint Value (Addr) */
+                ret = env->mmubpregs[reg];
+                break;
+            case 1: /* Breakpoint Mask */
+                ret = env->mmubpregs[reg];
+                break;
+            case 2: /* Breakpoint Control */
+                ret = env->mmubpregs[reg];
+                break;
+            case 3: /* Breakpoint Status */
+                ret = env->mmubpregs[reg];
+                env->mmubpregs[reg] = 0ULL;
+                break;
+            }
+            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
+                        ret);
+        }
+        break;
+    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+        ret = env->mmubpctrv;
+        break;
+    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+        ret = env->mmubpctrc;
+        break;
+    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+        ret = env->mmubpctrs;
+        break;
+    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+        ret = env->mmubpaction;
+        break;
+    case ASI_USERTXT: /* User code access, XXX */
+    default:
+        cpu_unassigned_access(cs, addr, false, false, asi, size);
+        ret = 0;
+        break;
+
+    case ASI_USERDATA: /* User data access */
+    case ASI_KERNELDATA: /* Supervisor data access */
+    case ASI_P: /* Implicit primary context data access (v9 only?) */
+    case ASI_M_BYPASS:    /* MMU passthrough */
+    case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+    }
+    if (sign) {
+        switch (size) {
+        case 1:
+            ret = (int8_t) ret;
+            break;
+        case 2:
+            ret = (int16_t) ret;
+            break;
+        case 4:
+            ret = (int32_t) ret;
+            break;
+        default:
+            break;
+        }
+    }
+#ifdef DEBUG_ASI
+    dump_asi("read ", last_addr, asi, size, ret);
+#endif
+    return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
+                   int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+    SPARCCPU *cpu = sparc_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    do_check_align(env, addr, size - 1, GETPC());
+    switch (asi) {
+    case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+    /* case ASI_LEON_CACHEREGS:  Leon3 cache control */
+        switch (addr) {
+        case 0x00:          /* Leon3 Cache Control */
+        case 0x08:          /* Leon3 Instruction Cache config */
+        case 0x0C:          /* Leon3 Data Cache config */
+            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+                leon3_cache_control_st(env, addr, val, size);
+            }
+            break;
+
+        case 0x01c00000: /* MXCC stream data register 0 */
+            if (size == 8) {
+                env->mxccdata[0] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00008: /* MXCC stream data register 1 */
+            if (size == 8) {
+                env->mxccdata[1] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00010: /* MXCC stream data register 2 */
+            if (size == 8) {
+                env->mxccdata[2] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00018: /* MXCC stream data register 3 */
+            if (size == 8) {
+                env->mxccdata[3] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00100: /* MXCC stream source */
+            if (size == 8) {
+                env->mxccregs[0] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
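+            /* Writing the stream source triggers a 32-byte fetch into
+               the four stream data registers.  */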
+            env->mxccdata[0] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
+                                        0);
+            env->mxccdata[1] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
+                                        8);
+            env->mxccdata[2] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
+                                        16);
+            env->mxccdata[3] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
+                                        24);
+            break;
+        case 0x01c00200: /* MXCC stream destination */
+            if (size == 8) {
+                env->mxccregs[1] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) +  0,
+                     env->mxccdata[0]);
+            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) +  8,
+                     env->mxccdata[1]);
+            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
+                     env->mxccdata[2]);
+            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
+                     env->mxccdata[3]);
+            break;
+        case 0x01c00a00: /* MXCC control register */
+            if (size == 8) {
+                env->mxccregs[3] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00a04: /* MXCC control register */
+            if (size == 4) {
+                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
+                    | val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00e00: /* MXCC error register  */
+            /* writing a 1 bit clears the error */
+            if (size == 8) {
+                env->mxccregs[6] &= ~val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        case 0x01c00f00: /* MBus port address register */
+            if (size == 8) {
+                env->mxccregs[7] = val;
+            } else {
+                qemu_log_mask(LOG_UNIMP,
+                              "%08x: unimplemented access size: %d\n", addr,
+                              size);
+            }
+            break;
+        default:
+            qemu_log_mask(LOG_UNIMP,
+                          "%08x: unimplemented address, size: %d\n", addr,
+                          size);
+            break;
+        }
+        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
+                     asi, size, addr, val);
+#ifdef DEBUG_MXCC
+        dump_mxcc(env);
+#endif
+        break;
+    case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
+    case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
+        {
+            int mmulev;
+
+            mmulev = (addr >> 8) & 15;
+            DPRINTF_MMU("mmu flush level %d\n", mmulev);
+            switch (mmulev) {
+            case 0: /* flush page */
+                tlb_flush_page(CPU(cpu), addr & 0xfffff000);
+                break;
+            case 1: /* flush segment (256k) */
+            case 2: /* flush region (16M) */
+            case 3: /* flush context (4G) */
+            case 4: /* flush entire */
+                tlb_flush(CPU(cpu), 1);
+                break;
+            default:
+                break;
+            }
+#ifdef DEBUG_MMU
+            dump_mmu(stdout, fprintf, env);
+#endif
+        }
+        break;
+    case ASI_M_MMUREGS: /* write MMU regs */
+    case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
+        {
+            int reg = (addr >> 8) & 0x1f;
+            uint32_t oldreg;
+
+            oldreg = env->mmuregs[reg];
+            switch (reg) {
+            case 0: /* Control Register */
+                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
+                    (val & 0x00ffffff);
+                /* Mappings generated during no-fault mode
+                   are invalid in normal mode.  */
+                if ((oldreg ^ env->mmuregs[reg])
+                    & (MMU_NF | env->def->mmu_bm)) {
+                    tlb_flush(CPU(cpu), 1);
+                }
+                break;
+            case 1: /* Context Table Pointer Register */
+                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
+                break;
+            case 2: /* Context Register */
+                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
+                if (oldreg != env->mmuregs[reg]) {
+                    /* we flush when the MMU context changes because
+                       QEMU has no MMU context support */
+                    tlb_flush(CPU(cpu), 1);
+                }
+                break;
+            case 3: /* Synchronous Fault Status Register with Clear */
+            case 4: /* Synchronous Fault Address Register */
+                break;
+            case 0x10: /* TLB Replacement Control Register */
+                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
+                break;
+            case 0x13: /* Synchronous Fault Status Register with Read
+                          and Clear */
+                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
+                break;
+            case 0x14: /* Synchronous Fault Address Register */
+                env->mmuregs[4] = val;
+                break;
+            default:
+                env->mmuregs[reg] = val;
+                break;
+            }
+            if (oldreg != env->mmuregs[reg]) {
+                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
+                            reg, oldreg, env->mmuregs[reg]);
+            }
+#ifdef DEBUG_MMU
+            dump_mmu(stdout, fprintf, env);
+#endif
+        }
+        break;
+    case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+    case ASI_M_DIAGS:   /* Turbosparc DTLB Diagnostic */
+    case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
+        break;
+    case ASI_M_TXTC_TAG:   /* I-cache tag */
+    case ASI_M_TXTC_DATA:  /* I-cache data */
+    case ASI_M_DATAC_TAG:  /* D-cache tag */
+    case ASI_M_DATAC_DATA: /* D-cache data */
+    case ASI_M_FLUSH_PAGE:   /* I/D-cache flush page */
+    case ASI_M_FLUSH_SEG:    /* I/D-cache flush segment */
+    case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
+    case ASI_M_FLUSH_CTX:    /* I/D-cache flush context */
+    case ASI_M_FLUSH_USER:   /* I/D-cache flush user */
+        break;
+    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+        {
+            switch (size) {
+            case 1:
+                stb_phys(cs->as, (hwaddr)addr
+                         | ((hwaddr)(asi & 0xf) << 32), val);
+                break;
+            case 2:
+                stw_phys(cs->as, (hwaddr)addr
+                         | ((hwaddr)(asi & 0xf) << 32), val);
+                break;
+            case 4:
+            default:
+                stl_phys(cs->as, (hwaddr)addr
+                         | ((hwaddr)(asi & 0xf) << 32), val);
+                break;
+            case 8:
+                stq_phys(cs->as, (hwaddr)addr
+                         | ((hwaddr)(asi & 0xf) << 32), val);
+                break;
+            }
+        }
+        break;
+    case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
+    case 0x31: /* store buffer data, Ross RT620 I-cache flush or
+                  Turbosparc snoop RAM */
+    case 0x32: /* store buffer control or Turbosparc page table
+                  descriptor diagnostic */
+    case 0x36: /* I-cache flash clear */
+    case 0x37: /* D-cache flash clear */
+        break;
+    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
+        {
+            int reg = (addr >> 8) & 3;
+
+            switch (reg) {
+            case 0: /* Breakpoint Value (Addr) */
+                env->mmubpregs[reg] = (val & 0xfffffffffULL);
+                break;
+            case 1: /* Breakpoint Mask */
+                env->mmubpregs[reg] = (val & 0xfffffffffULL);
+                break;
+            case 2: /* Breakpoint Control */
+                env->mmubpregs[reg] = (val & 0x7fULL);
+                break;
+            case 3: /* Breakpoint Status */
+                env->mmubpregs[reg] = (val & 0xfULL);
+                break;
+            }
+            DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
+                        env->mmuregs[reg]);
+        }
+        break;
+    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+        env->mmubpctrv = val & 0xffffffff;
+        break;
+    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+        env->mmubpctrc = val & 0x3;
+        break;
+    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+        env->mmubpctrs = val & 0x3;
+        break;
+    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+        env->mmubpaction = val & 0x1fff;
+        break;
+    case ASI_USERTXT: /* User code access, XXX */
+    case ASI_KERNELTXT: /* Supervisor code access, XXX */
+    default:
+        cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
+                              addr, true, false, asi, size);
+        break;
+
+    case ASI_USERDATA: /* User data access */
+    case ASI_KERNELDATA: /* Supervisor data access */
+    case ASI_P:
+    case ASI_M_BYPASS:    /* MMU passthrough */
+    case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+    case ASI_M_BCOPY: /* Block copy, sta access */
+    case ASI_M_BFILL: /* Block fill, stda access */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+    }
+#ifdef DEBUG_ASI
+    dump_asi("write", addr, asi, size, val);
+#endif
+}
+
+#endif /* CONFIG_USER_ONLY */
+#else /* TARGET_SPARC64 */
+
+#ifdef CONFIG_USER_ONLY
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+                       int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+    int sign = memop & MO_SIGN;
+    uint64_t ret = 0;
+
+    if (asi < 0x80) {
+        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+    }
+    do_check_align(env, addr, size - 1, GETPC());
+    addr = asi_address_mask(env, asi, addr);
+
+    switch (asi) {
+    case ASI_PNF:  /* Primary no-fault */
+    case ASI_PNFL: /* Primary no-fault LE */
+    case ASI_SNF:  /* Secondary no-fault */
+    case ASI_SNFL: /* Secondary no-fault LE */
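+        /* No-fault loads return zero rather than faulting when the
+           page is unmapped.  */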
+        if (page_check_range(addr, size, PAGE_READ) == -1) {
+            ret = 0;
+            break;
+        }
+        switch (size) {
+        case 1:
+            ret = cpu_ldub_data(env, addr);
+            break;
+        case 2:
+            ret = cpu_lduw_data(env, addr);
+            break;
+        case 4:
+            ret = cpu_ldl_data(env, addr);
+            break;
+        case 8:
+            ret = cpu_ldq_data(env, addr);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case ASI_P: /* Primary */
+    case ASI_PL: /* Primary LE */
+    case ASI_S:  /* Secondary */
+    case ASI_SL: /* Secondary LE */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+
+    default:
+        cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+    }
+
+    /* Convert from little endian */
+    switch (asi) {
+    case ASI_PNFL: /* Primary no-fault LE */
+    case ASI_SNFL: /* Secondary no-fault LE */
+        switch (size) {
+        case 2:
+            ret = bswap16(ret);
+            break;
+        case 4:
+            ret = bswap32(ret);
+            break;
+        case 8:
+            ret = bswap64(ret);
+            break;
+        }
+    }
+
+    /* Convert to signed number */
+    if (sign) {
+        switch (size) {
+        case 1:
+            ret = (int8_t) ret;
+            break;
+        case 2:
+            ret = (int16_t) ret;
+            break;
+        case 4:
+            ret = (int32_t) ret;
+            break;
+        }
+    }
+#ifdef DEBUG_ASI
+    dump_asi("read", addr, asi, size, ret);
+#endif
+    return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+                   int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+#ifdef DEBUG_ASI
+    dump_asi("write", addr, asi, size, val);
+#endif
+    if (asi < 0x80) {
+        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+    }
+    do_check_align(env, addr, size - 1, GETPC());
+
+    switch (asi) {
+    case ASI_P:  /* Primary */
+    case ASI_PL: /* Primary LE */
+    case ASI_S:  /* Secondary */
+    case ASI_SL: /* Secondary LE */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+
+    case ASI_PNF:  /* Primary no-fault, RO */
+    case ASI_SNF:  /* Secondary no-fault, RO */
+    case ASI_PNFL: /* Primary no-fault LE, RO */
+    case ASI_SNFL: /* Secondary no-fault LE, RO */
+    default:
+        cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+    }
+}
+
+#else /* CONFIG_USER_ONLY */
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+                       int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+    int sign = memop & MO_SIGN;
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    uint64_t ret = 0;
+#if defined(DEBUG_ASI)
+    target_ulong last_addr = addr;
+#endif
+
+    asi &= 0xff;
+
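+    /* ASIs below 0x80 are privileged; with a hypervisor present, the
+       0x30-0x7f range additionally requires hyperprivileged state. */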
+    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+        || (cpu_has_hypervisor(env)
+            && asi >= 0x30 && asi < 0x80
+            && !(env->hpstate & HS_PRIV))) {
+        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+    }
+
+    do_check_align(env, addr, size - 1, GETPC());
+    addr = asi_address_mask(env, asi, addr);
+
+    switch (asi) {
+    case ASI_PNF:
+    case ASI_PNFL:
+    case ASI_SNF:
+    case ASI_SNFL:
+        {
+            TCGMemOpIdx oi;
+            int idx = (env->pstate & PS_PRIV
+                       ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
+                       : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
+
+            if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
+#ifdef DEBUG_ASI
+                dump_asi("read ", last_addr, asi, size, ret);
+#endif
+                /* exception_index is set in get_physical_address_data. */
+                cpu_raise_exception_ra(env, cs->exception_index, GETPC());
+            }
+            oi = make_memop_idx(memop, idx);
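+            /* ASI bit 3 distinguishes the little-endian variants
+               (ASI_PNFL/ASI_SNFL) from the big-endian ones. */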
+            switch (size) {
+            case 1:
+                ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
+                break;
+            case 2:
+                if (asi & 8) {
+                    ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
+                } else {
+                    ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
+                }
+                break;
+            case 4:
+                if (asi & 8) {
+                    ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
+                } else {
+                    ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
+                }
+                break;
+            case 8:
+                if (asi & 8) {
+                    ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
+                } else {
+                    ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
+                }
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+        break;
+
+    case ASI_AIUP:  /* As if user primary */
+    case ASI_AIUS:  /* As if user secondary */
+    case ASI_AIUPL: /* As if user primary LE */
+    case ASI_AIUSL: /* As if user secondary LE */
+    case ASI_P:  /* Primary */
+    case ASI_S:  /* Secondary */
+    case ASI_PL: /* Primary LE */
+    case ASI_SL: /* Secondary LE */
+    case ASI_REAL:      /* Bypass */
+    case ASI_REAL_IO:   /* Bypass, non-cacheable */
+    case ASI_REAL_L:    /* Bypass LE */
+    case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+    case ASI_N:  /* Nucleus */
+    case ASI_NL: /* Nucleus Little Endian (LE) */
+    case ASI_NUCLEUS_QUAD_LDD:   /* Nucleus quad LDD 128 bit atomic */
+    case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+    case ASI_TWINX_AIUP:   /* As if user primary, twinx */
+    case ASI_TWINX_AIUS:   /* As if user secondary, twinx */
+    case ASI_TWINX_REAL:   /* Real address, twinx */
+    case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+    case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+    case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+    case ASI_TWINX_N:  /* Nucleus, twinx */
+    case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+    /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+    case ASI_TWINX_P:  /* Primary, twinx */
+    case ASI_TWINX_PL: /* Primary, twinx, LE */
+    case ASI_TWINX_S:  /* Secondary, twinx */
+    case ASI_TWINX_SL: /* Secondary, twinx, LE */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+
+    case ASI_UPA_CONFIG: /* UPA config */
+        /* XXX */
+        break;
+    case ASI_LSU_CONTROL: /* LSU */
+        ret = env->lsu;
+        break;
+    case ASI_IMMU: /* I-MMU regs */
+        {
+            int reg = (addr >> 3) & 0xf;
+
+            if (reg == 0) {
+                /* I-TSB Tag Target register */
+                ret = ultrasparc_tag_target(env->immu.tag_access);
+            } else {
+                ret = env->immuregs[reg];
+            }
+
+            break;
+        }
+    case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
+        {
+            /* env->immuregs[5] holds I-MMU TSB register value
+               env->immuregs[6] holds I-MMU Tag Access register value */
+            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+                                         8*1024);
+            break;
+        }
+    case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
+        {
+            /* env->immuregs[5] holds I-MMU TSB register value
+               env->immuregs[6] holds I-MMU Tag Access register value */
+            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+                                         64*1024);
+            break;
+        }
+    case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+        {
+            int reg = (addr >> 3) & 0x3f;
+
+            ret = env->itlb[reg].tte;
+            break;
+        }
+    case ASI_ITLB_TAG_READ: /* I-MMU tag read */
+        {
+            int reg = (addr >> 3) & 0x3f;
+
+            ret = env->itlb[reg].tag;
+            break;
+        }
+    case ASI_DMMU: /* D-MMU regs */
+        {
+            int reg = (addr >> 3) & 0xf;
+
+            if (reg == 0) {
+                /* D-TSB Tag Target register */
+                ret = ultrasparc_tag_target(env->dmmu.tag_access);
+            } else {
+                ret = env->dmmuregs[reg];
+            }
+            break;
+        }
+    case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
+        {
+            /* env->dmmuregs[5] holds D-MMU TSB register value
+               env->dmmuregs[6] holds D-MMU Tag Access register value */
+            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+                                         8*1024);
+            break;
+        }
+    case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
+        {
+            /* env->dmmuregs[5] holds D-MMU TSB register value
+               env->dmmuregs[6] holds D-MMU Tag Access register value */
+            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+                                         64*1024);
+            break;
+        }
+    case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+        {
+            int reg = (addr >> 3) & 0x3f;
+
+            ret = env->dtlb[reg].tte;
+            break;
+        }
+    case ASI_DTLB_TAG_READ: /* D-MMU tag read */
+        {
+            int reg = (addr >> 3) & 0x3f;
+
+            ret = env->dtlb[reg].tag;
+            break;
+        }
+    case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+        break;
+    case ASI_INTR_RECEIVE: /* Interrupt data receive */
+        ret = env->ivec_status;
+        break;
+    case ASI_INTR_R: /* Incoming interrupt vector, RO */
+        {
+            int reg = (addr >> 4) & 0x3;
+            if (reg < 3) {
+                ret = env->ivec_data[reg];
+            }
+            break;
+        }
+    case ASI_DCACHE_DATA:     /* D-cache data */
+    case ASI_DCACHE_TAG:      /* D-cache tag access */
+    case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+    case ASI_AFSR:            /* E-cache asynchronous fault status */
+    case ASI_AFAR:            /* E-cache asynchronous fault address */
+    case ASI_EC_TAG_DATA:     /* E-cache tag data */
+    case ASI_IC_INSTR:        /* I-cache instruction access */
+    case ASI_IC_TAG:          /* I-cache tag access */
+    case ASI_IC_PRE_DECODE:   /* I-cache predecode */
+    case ASI_IC_NEXT_FIELD:   /* I-cache LRU etc. */
+    case ASI_EC_W:            /* E-cache tag */
+    case ASI_EC_R:            /* E-cache tag */
+        break;
+    case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
+    case ASI_ITLB_DATA_IN:        /* I-MMU data in, WO */
+    case ASI_IMMU_DEMAP:          /* I-MMU demap, WO */
+    case ASI_DTLB_DATA_IN:        /* D-MMU data in, WO */
+    case ASI_DMMU_DEMAP:          /* D-MMU demap, WO */
+    case ASI_INTR_W:              /* Interrupt vector, WO */
+    default:
+        cpu_unassigned_access(cs, addr, false, false, 1, size);
+        ret = 0;
+        break;
+    }
+
+    /* Convert to signed number */
+    if (sign) {
+        switch (size) {
+        case 1:
+            ret = (int8_t) ret;
+            break;
+        case 2:
+            ret = (int16_t) ret;
+            break;
+        case 4:
+            ret = (int32_t) ret;
+            break;
+        default:
+            break;
+        }
+    }
+#ifdef DEBUG_ASI
+    dump_asi("read ", last_addr, asi, size, ret);
+#endif
+    return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+                   int asi, uint32_t memop)
+{
+    int size = 1 << (memop & MO_SIZE);
+    SPARCCPU *cpu = sparc_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+#ifdef DEBUG_ASI
+    dump_asi("write", addr, asi, size, val);
+#endif
+
+    asi &= 0xff;
+
+    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+        || (cpu_has_hypervisor(env)
+            && asi >= 0x30 && asi < 0x80
+            && !(env->hpstate & HS_PRIV))) {
+        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+    }
+
+    do_check_align(env, addr, size - 1, GETPC());
+    addr = asi_address_mask(env, asi, addr);
+
+    switch (asi) {
+    case ASI_AIUP:  /* As if user primary */
+    case ASI_AIUS:  /* As if user secondary */
+    case ASI_AIUPL: /* As if user primary LE */
+    case ASI_AIUSL: /* As if user secondary LE */
+    case ASI_P:  /* Primary */
+    case ASI_S:  /* Secondary */
+    case ASI_PL: /* Primary LE */
+    case ASI_SL: /* Secondary LE */
+    case ASI_REAL:      /* Bypass */
+    case ASI_REAL_IO:   /* Bypass, non-cacheable */
+    case ASI_REAL_L:    /* Bypass LE */
+    case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+    case ASI_N:  /* Nucleus */
+    case ASI_NL: /* Nucleus Little Endian (LE) */
+    case ASI_NUCLEUS_QUAD_LDD:   /* Nucleus quad LDD 128 bit atomic */
+    case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+    case ASI_TWINX_AIUP:   /* As if user primary, twinx */
+    case ASI_TWINX_AIUS:   /* As if user secondary, twinx */
+    case ASI_TWINX_REAL:   /* Real address, twinx */
+    case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+    case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+    case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+    case ASI_TWINX_N:  /* Nucleus, twinx */
+    case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+    /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+    case ASI_TWINX_P:  /* Primary, twinx */
+    case ASI_TWINX_PL: /* Primary, twinx, LE */
+    case ASI_TWINX_S:  /* Secondary, twinx */
+    case ASI_TWINX_SL: /* Secondary, twinx, LE */
+        /* These are always handled inline.  */
+        g_assert_not_reached();
+
+    case ASI_UPA_CONFIG: /* UPA config */
+        /* XXX */
+        return;
+    case ASI_LSU_CONTROL: /* LSU */
+        env->lsu = val & (DMMU_E | IMMU_E);
+        return;
+    case ASI_IMMU: /* I-MMU regs */
+        {
+            int reg = (addr >> 3) & 0xf;
+            uint64_t oldreg;
+
+            oldreg = env->immuregs[reg];
+            switch (reg) {
+            case 0: /* RO */
+                return;
+            case 1: /* Not in I-MMU */
+            case 2:
+                return;
+            case 3: /* SFSR */
+                if ((val & 1) == 0) {
+                    val = 0; /* Clear SFSR */
+                }
+                env->immu.sfsr = val;
+                break;
+            case 4: /* RO */
+                return;
+            case 5: /* TSB access */
+                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
+                            PRIx64 "\n", env->immu.tsb, val);
+                env->immu.tsb = val;
+                break;
+            case 6: /* Tag access */
+                env->immu.tag_access = val;
+                break;
+            case 7:
+            case 8:
+                return;
+            default:
+                break;
+            }
+
+            if (oldreg != env->immuregs[reg]) {
+                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
+            }
+#ifdef DEBUG_MMU
+            dump_mmu(stdout, fprintf, env);
+#endif
+            return;
+        }
+    case ASI_ITLB_DATA_IN: /* I-MMU data in */
+        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
+        return;
+    case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+        {
+            /* TODO: auto demap */
+
+            unsigned int i = (addr >> 3) & 0x3f;
+
+            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
+            dump_mmu(stdout, fprintf, env);
+#endif
+            return;
+        }
+    case ASI_IMMU_DEMAP: /* I-MMU demap */
+        demap_tlb(env->itlb, addr, "immu", env);
+        return;
+    case ASI_DMMU: /* D-MMU regs */
+        {
+            int reg = (addr >> 3) & 0xf;
+            uint64_t oldreg;
+
+            oldreg = env->dmmuregs[reg];
+            switch (reg) {
+            case 0: /* RO */
+            case 4:
+                return;
+            case 3: /* SFSR */
+                if ((val & 1) == 0) {
+                    val = 0; /* Clear SFSR, Fault address */
+                    env->dmmu.sfar = 0;
+                }
+                env->dmmu.sfsr = val;
+                break;
+            case 1: /* Primary context */
+                env->dmmu.mmu_primary_context = val;
+                /* can be optimized to only flush MMU_USER_IDX
+                   and MMU_KERNEL_IDX entries */
+                tlb_flush(CPU(cpu), 1);
+                break;
+            case 2: /* Secondary context */
+                env->dmmu.mmu_secondary_context = val;
+                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
+                   and MMU_KERNEL_SECONDARY_IDX entries */
+                tlb_flush(CPU(cpu), 1);
+                break;
+            case 5: /* TSB access */
+                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
+                            PRIx64 "\n", env->dmmu.tsb, val);
+                env->dmmu.tsb = val;
+                break;
+            case 6: /* Tag access */
+                env->dmmu.tag_access = val;
+                break;
+            case 7: /* Virtual Watchpoint */
+            case 8: /* Physical Watchpoint */
+            default:
+                env->dmmuregs[reg] = val;
+                break;
+            }
+
+            if (oldreg != env->dmmuregs[reg]) {
+                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
+            }
+#ifdef DEBUG_MMU
+            dump_mmu(stdout, fprintf, env);
+#endif
+            return;
+        }
+    case ASI_DTLB_DATA_IN: /* D-MMU data in */
+        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
+        return;
+    case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+        {
+            unsigned int i = (addr >> 3) & 0x3f;
+
+            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
+            dump_mmu(stdout, fprintf, env);
+#endif
+            return;
+        }
+    case ASI_DMMU_DEMAP: /* D-MMU demap */
+        demap_tlb(env->dtlb, addr, "dmmu", env);
+        return;
+    case ASI_INTR_RECEIVE: /* Interrupt data receive */
+        env->ivec_status = val & 0x20;
+        return;
+    case ASI_DCACHE_DATA: /* D-cache data */
+    case ASI_DCACHE_TAG: /* D-cache tag access */
+    case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+    case ASI_AFSR: /* E-cache asynchronous fault status */
+    case ASI_AFAR: /* E-cache asynchronous fault address */
+    case ASI_EC_TAG_DATA: /* E-cache tag data */
+    case ASI_IC_INSTR: /* I-cache instruction access */
+    case ASI_IC_TAG: /* I-cache tag access */
+    case ASI_IC_PRE_DECODE: /* I-cache predecode */
+    case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
+    case ASI_EC_W: /* E-cache tag */
+    case ASI_EC_R: /* E-cache tag */
+        return;
+    case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
+    case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
+    case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
+    case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
+    case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
+    case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
+    case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
+    case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+    case ASI_INTR_R: /* Incoming interrupt vector, RO */
+    case ASI_PNF: /* Primary no-fault, RO */
+    case ASI_SNF: /* Secondary no-fault, RO */
+    case ASI_PNFL: /* Primary no-fault LE, RO */
+    case ASI_SNFL: /* Secondary no-fault LE, RO */
+    default:
+        cpu_unassigned_access(cs, addr, true, false, 1, size);
+        return;
+    }
+}
+#endif /* CONFIG_USER_ONLY */
+#endif /* TARGET_SPARC64 */
+
+#if !defined(CONFIG_USER_ONLY)
+#ifndef TARGET_SPARC64
+void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+                                 bool is_write, bool is_exec, int is_asi,
+                                 unsigned size)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    int fault_type;
+
+#ifdef DEBUG_UNASSIGNED
+    if (is_asi) {
+        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+               " asi 0x%02x from " TARGET_FMT_lx "\n",
+               is_exec ? "exec" : is_write ? "write" : "read", size,
+               size == 1 ? "" : "s", addr, is_asi, env->pc);
+    } else {
+        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+               " from " TARGET_FMT_lx "\n",
+               is_exec ? "exec" : is_write ? "write" : "read", size,
+               size == 1 ? "" : "s", addr, env->pc);
+    }
+#endif
+    /* Don't overwrite translation and access faults */
+    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
+    if ((fault_type > 4) || (fault_type == 0)) {
+        env->mmuregs[3] = 0; /* Fault status register */
+        if (is_asi) {
+            env->mmuregs[3] |= 1 << 16;
+        }
+        if (env->psrs) {
+            env->mmuregs[3] |= 1 << 5;
+        }
+        if (is_exec) {
+            env->mmuregs[3] |= 1 << 6;
+        }
+        if (is_write) {
+            env->mmuregs[3] |= 1 << 7;
+        }
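+        /* FT = 5 (access bus error), FAV = 1 (fault address valid) */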
+        env->mmuregs[3] |= (5 << 2) | 2;
+        /* SuperSPARC will never place instruction fault addresses in the FAR */
+        if (!is_exec) {
+            env->mmuregs[4] = addr; /* Fault address register */
+        }
+    }
+    /* overflow (same type fault was not read before another fault) */
+    if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
+        env->mmuregs[3] |= 1;
+    }
+
+    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
+        int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
+        cpu_raise_exception_ra(env, tt, GETPC());
+    }
+
+    /* Flush neverland mappings created during no-fault mode, so that
+       subsequent MMU faults report proper fault types. */
+    if (env->mmuregs[0] & MMU_NF) {
+        tlb_flush(cs, 1);
+    }
+}
+#else
+void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+                                 bool is_write, bool is_exec, int is_asi,
+                                 unsigned size)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
+
+#ifdef DEBUG_UNASSIGNED
+    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
+           "\n", addr, env->pc);
+#endif
+
+    cpu_raise_exception_ra(env, tt, GETPC());
+}
+#endif
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                                 MMUAccessType access_type,
+                                                 int mmu_idx,
+                                                 uintptr_t retaddr)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+
+#ifdef DEBUG_UNALIGNED
+    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+           "\n", addr, env->pc);
+#endif
+    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
+}
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+   0, it means that the function was called from C code (i.e. not
+   from generated code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+              int mmu_idx, uintptr_t retaddr)
+{
+    int ret;
+
+    ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+    if (ret) {
+        cpu_loop_exit_restore(cs, retaddr);
+    }
+}
+#endif
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
new file mode 100644
index 0000000000..aea6397861
--- /dev/null
+++ b/target/sparc/machine.c
@@ -0,0 +1,194 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "qemu/timer.h"
+
+#include "migration/cpu.h"
+
+#ifdef TARGET_SPARC64
+static const VMStateDescription vmstate_cpu_timer = {
+    .name = "cpu_timer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(frequency, CPUTimer),
+        VMSTATE_UINT32(disabled, CPUTimer),
+        VMSTATE_UINT64(disabled_mask, CPUTimer),
+        VMSTATE_UINT32(npt, CPUTimer),
+        VMSTATE_UINT64(npt_mask, CPUTimer),
+        VMSTATE_INT64(clock_offset, CPUTimer),
+        VMSTATE_TIMER_PTR(qtimer, CPUTimer),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_CPU_TIMER(_f, _s)                             \
+    VMSTATE_STRUCT_POINTER(_f, _s, vmstate_cpu_timer, CPUTimer)
+
+static const VMStateDescription vmstate_trap_state = {
+    .name = "trap_state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(tpc, trap_state),
+        VMSTATE_UINT64(tnpc, trap_state),
+        VMSTATE_UINT64(tstate, trap_state),
+        VMSTATE_UINT32(tt, trap_state),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_tlb_entry = {
+    .name = "tlb_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(tag, SparcTLBEntry),
+        VMSTATE_UINT64(tte, SparcTLBEntry),
+        VMSTATE_END_OF_LIST()
+    }
+};
+#endif
+
+static int get_psr(QEMUFile *f, void *opaque, size_t size)
+{
+    SPARCCPU *cpu = opaque;
+    CPUSPARCState *env = &cpu->env;
+    uint32_t val = qemu_get_be32(f);
+
+    /* needed to ensure that the wrapping registers are correctly updated */
+    env->cwp = 0;
+    cpu_put_psr_raw(env, val);
+
+    return 0;
+}
+
+static void put_psr(QEMUFile *f, void *opaque, size_t size)
+{
+    SPARCCPU *cpu = opaque;
+    CPUSPARCState *env = &cpu->env;
+    uint32_t val;
+
+    val = cpu_get_psr(env);
+
+    qemu_put_be32(f, val);
+}
+
+static const VMStateInfo vmstate_psr = {
+    .name = "psr",
+    .get = get_psr,
+    .put = put_psr,
+};
+
+static void cpu_pre_save(void *opaque)
+{
+    SPARCCPU *cpu = opaque;
+    CPUSPARCState *env = &cpu->env;
+
+    /* if env->cwp == env->nwindows - 1, this will set the ins of the last
+     * window as the outs of the first window
+     */
+    cpu_set_cwp(env, env->cwp);
+}
+
+/* 32-bit SPARC retains migration compatibility with older versions
+ * of QEMU; 64-bit SPARC has had a migration break since then, so the
+ * versions are different.
+ */
+#ifndef TARGET_SPARC64
+#define SPARC_VMSTATE_VER 7
+#else
+#define SPARC_VMSTATE_VER 9
+#endif
+
+const VMStateDescription vmstate_sparc_cpu = {
+    .name = "cpu",
+    .version_id = SPARC_VMSTATE_VER,
+    .minimum_version_id = SPARC_VMSTATE_VER,
+    .minimum_version_id_old = SPARC_VMSTATE_VER,
+    .pre_save = cpu_pre_save,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8),
+        VMSTATE_UINT32(env.nwindows, SPARCCPU),
+        VMSTATE_VARRAY_MULTIPLY(env.regbase, SPARCCPU, env.nwindows, 16,
+                                vmstate_info_uinttl, target_ulong),
+        VMSTATE_CPUDOUBLE_ARRAY(env.fpr, SPARCCPU, TARGET_DPREGS),
+        VMSTATE_UINTTL(env.pc, SPARCCPU),
+        VMSTATE_UINTTL(env.npc, SPARCCPU),
+        VMSTATE_UINTTL(env.y, SPARCCPU),
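+        /* The PSR is migrated through the custom vmstate_psr handlers so
+           that window state is recomputed from CWP on load. */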
+        {
+            .name = "psr",
+            .version_id = 0,
+            .size = sizeof(uint32_t),
+            .info = &vmstate_psr,
+            .flags = VMS_SINGLE,
+            .offset = 0,
+        },
+        VMSTATE_UINTTL(env.fsr, SPARCCPU),
+        VMSTATE_UINTTL(env.tbr, SPARCCPU),
+        VMSTATE_INT32(env.interrupt_index, SPARCCPU),
+        VMSTATE_UINT32(env.pil_in, SPARCCPU),
+#ifndef TARGET_SPARC64
+        /* MMU */
+        VMSTATE_UINT32(env.wim, SPARCCPU),
+        VMSTATE_UINT32_ARRAY(env.mmuregs, SPARCCPU, 32),
+        VMSTATE_UINT64_ARRAY(env.mxccdata, SPARCCPU, 4),
+        VMSTATE_UINT64_ARRAY(env.mxccregs, SPARCCPU, 8),
+        VMSTATE_UINT32(env.mmubpctrv, SPARCCPU),
+        VMSTATE_UINT32(env.mmubpctrc, SPARCCPU),
+        VMSTATE_UINT32(env.mmubpctrs, SPARCCPU),
+        VMSTATE_UINT64(env.mmubpaction, SPARCCPU),
+        VMSTATE_UINT64_ARRAY(env.mmubpregs, SPARCCPU, 4),
+#else
+        VMSTATE_UINT64(env.lsu, SPARCCPU),
+        VMSTATE_UINT64_ARRAY(env.immuregs, SPARCCPU, 16),
+        VMSTATE_UINT64_ARRAY(env.dmmuregs, SPARCCPU, 16),
+        VMSTATE_STRUCT_ARRAY(env.itlb, SPARCCPU, 64, 0,
+                             vmstate_tlb_entry, SparcTLBEntry),
+        VMSTATE_STRUCT_ARRAY(env.dtlb, SPARCCPU, 64, 0,
+                             vmstate_tlb_entry, SparcTLBEntry),
+        VMSTATE_UINT32(env.mmu_version, SPARCCPU),
+        VMSTATE_STRUCT_ARRAY(env.ts, SPARCCPU, MAXTL_MAX, 0,
+                             vmstate_trap_state, trap_state),
+        VMSTATE_UINT32(env.xcc, SPARCCPU),
+        VMSTATE_UINT32(env.asi, SPARCCPU),
+        VMSTATE_UINT32(env.pstate, SPARCCPU),
+        VMSTATE_UINT32(env.tl, SPARCCPU),
+        VMSTATE_UINT32(env.cansave, SPARCCPU),
+        VMSTATE_UINT32(env.canrestore, SPARCCPU),
+        VMSTATE_UINT32(env.otherwin, SPARCCPU),
+        VMSTATE_UINT32(env.wstate, SPARCCPU),
+        VMSTATE_UINT32(env.cleanwin, SPARCCPU),
+        VMSTATE_UINT64_ARRAY(env.agregs, SPARCCPU, 8),
+        VMSTATE_UINT64_ARRAY(env.bgregs, SPARCCPU, 8),
+        VMSTATE_UINT64_ARRAY(env.igregs, SPARCCPU, 8),
+        VMSTATE_UINT64_ARRAY(env.mgregs, SPARCCPU, 8),
+        VMSTATE_UINT64(env.fprs, SPARCCPU),
+        VMSTATE_UINT64(env.tick_cmpr, SPARCCPU),
+        VMSTATE_UINT64(env.stick_cmpr, SPARCCPU),
+        VMSTATE_CPU_TIMER(env.tick, SPARCCPU),
+        VMSTATE_CPU_TIMER(env.stick, SPARCCPU),
+        VMSTATE_UINT64(env.gsr, SPARCCPU),
+        VMSTATE_UINT32(env.gl, SPARCCPU),
+        VMSTATE_UINT64(env.hpstate, SPARCCPU),
+        VMSTATE_UINT64_ARRAY(env.htstate, SPARCCPU, MAXTL_MAX),
+        VMSTATE_UINT64(env.hintp, SPARCCPU),
+        VMSTATE_UINT64(env.htba, SPARCCPU),
+        VMSTATE_UINT64(env.hver, SPARCCPU),
+        VMSTATE_UINT64(env.hstick_cmpr, SPARCCPU),
+        VMSTATE_UINT64(env.ssr, SPARCCPU),
+        VMSTATE_CPU_TIMER(env.hstick, SPARCCPU),
+        /* On SPARC32 env.psrpil and env.cwp are migrated as part of the PSR */
+        VMSTATE_UINT32(env.psrpil, SPARCCPU),
+        VMSTATE_UINT32(env.cwp, SPARCCPU),
+#endif
+        VMSTATE_END_OF_LIST()
+    },
+};
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
new file mode 100644
index 0000000000..044e88c4c5
--- /dev/null
+++ b/target/sparc/mmu_helper.c
@@ -0,0 +1,880 @@
+/*
+ *  Sparc MMU helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "trace.h"
+#include "exec/address-spaces.h"
+
+/* Sparc MMU emulation */
+
+#if defined(CONFIG_USER_ONLY)
+
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                               int mmu_idx)
+{
+    if (rw & 2) {
+        cs->exception_index = TT_TFAULT;
+    } else {
+        cs->exception_index = TT_DFAULT;
+    }
+    return 1;
+}
+
+#else
+
+#ifndef TARGET_SPARC64
+/*
+ * Sparc V8 Reference MMU (SRMMU)
+ */
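+/* access_table[access_index][ACC]: 0 = OK; nonzero values are SRMMU fault
+   types pre-shifted into the FSR FT field (8 = protection error,
+   12 = privilege violation). */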
+static const int access_table[8][8] = {
+    { 0, 0, 0, 0, 8, 0, 12, 12 },
+    { 0, 0, 0, 0, 8, 0, 0, 0 },
+    { 8, 8, 0, 0, 0, 8, 12, 12 },
+    { 8, 8, 0, 0, 0, 8, 0, 0 },
+    { 8, 0, 8, 0, 8, 8, 12, 12 },
+    { 8, 0, 8, 0, 8, 0, 8, 0 },
+    { 8, 8, 8, 0, 8, 8, 12, 12 },
+    { 8, 8, 8, 0, 8, 8, 8, 0 }
+};
+
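+/* perm_table[is_user][ACC]: page protections granted to supervisor (row 0)
+   and user (row 1) accesses for each PTE ACC value. */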
+static const int perm_table[2][8] = {
+    {
+        PAGE_READ,
+        PAGE_READ | PAGE_WRITE,
+        PAGE_READ | PAGE_EXEC,
+        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+        PAGE_EXEC,
+        PAGE_READ | PAGE_WRITE,
+        PAGE_READ | PAGE_EXEC,
+        PAGE_READ | PAGE_WRITE | PAGE_EXEC
+    },
+    {
+        PAGE_READ,
+        PAGE_READ | PAGE_WRITE,
+        PAGE_READ | PAGE_EXEC,
+        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+        PAGE_EXEC,
+        PAGE_READ,
+        0,
+        0,
+    }
+};
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+                                int *prot, int *access_index,
+                                target_ulong address, int rw, int mmu_idx,
+                                target_ulong *page_size)
+{
+    int access_perms = 0;
+    hwaddr pde_ptr;
+    uint32_t pde;
+    int error_code = 0, is_dirty, is_user;
+    unsigned long page_offset;
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+    is_user = mmu_idx == MMU_USER_IDX;
+
+    if (mmu_idx == MMU_PHYS_IDX) {
+        *page_size = TARGET_PAGE_SIZE;
+        /* Boot mode: instruction fetches are taken from PROM */
+        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
+            *physical = env->prom_addr | (address & 0x7ffffULL);
+            *prot = PAGE_READ | PAGE_EXEC;
+            return 0;
+        }
+        *physical = address;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return 0;
+    }
+
+    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
+    *physical = 0xffffffffffff0000ULL;
+
+    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
+    /* Context base + context number */
+    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+    pde = ldl_phys(cs->as, pde_ptr);
+
+    /* Ctx pde */
+    switch (pde & PTE_ENTRYTYPE_MASK) {
+    default:
+    case 0: /* Invalid */
+        return 1 << 2;
+    case 2: /* L0 PTE, maybe should not happen? */
+    case 3: /* Reserved */
+        return 4 << 2;
+    case 1: /* L0 PDE */
+        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+        pde = ldl_phys(cs->as, pde_ptr);
+
+        switch (pde & PTE_ENTRYTYPE_MASK) {
+        default:
+        case 0: /* Invalid */
+            return (1 << 8) | (1 << 2);
+        case 3: /* Reserved */
+            return (1 << 8) | (4 << 2);
+        case 1: /* L1 PDE */
+            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+            pde = ldl_phys(cs->as, pde_ptr);
+
+            switch (pde & PTE_ENTRYTYPE_MASK) {
+            default:
+            case 0: /* Invalid */
+                return (2 << 8) | (1 << 2);
+            case 3: /* Reserved */
+                return (2 << 8) | (4 << 2);
+            case 1: /* L2 PDE */
+                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+                pde = ldl_phys(cs->as, pde_ptr);
+
+                switch (pde & PTE_ENTRYTYPE_MASK) {
+                default:
+                case 0: /* Invalid */
+                    return (3 << 8) | (1 << 2);
+                case 1: /* PDE, should not happen */
+                case 3: /* Reserved */
+                    return (3 << 8) | (4 << 2);
+                case 2: /* L3 PTE */
+                    page_offset = 0;
+                }
+                *page_size = TARGET_PAGE_SIZE;
+                break;
+            case 2: /* L2 PTE */
+                page_offset = address & 0x3f000;
+                *page_size = 0x40000;
+            }
+            break;
+        case 2: /* L1 PTE */
+            page_offset = address & 0xfff000;
+            *page_size = 0x1000000;
+        }
+    }
+
+    /* check access */
+    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
+    error_code = access_table[*access_index][access_perms];
+    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
+        return error_code;
+    }
+
+    /* update page modified and dirty bits */
+    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
+    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
+        pde |= PG_ACCESSED_MASK;
+        if (is_dirty) {
+            pde |= PG_MODIFIED_MASK;
+        }
+        stl_phys_notdirty(cs->as, pde_ptr, pde);
+    }
+
+    /* the page can be put in the TLB */
+    *prot = perm_table[is_user][access_perms];
+    if (!(pde & PG_MODIFIED_MASK)) {
+        /* only set write access if already dirty... otherwise wait
+           for dirty access */
+        *prot &= ~PAGE_WRITE;
+    }
+
+    /* Even for large PTEs, we map only one 4KB page in the TLB cache to
+       avoid filling it too fast. */
+    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
+    return error_code;
+}
+
+/* Perform address translation */
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                               int mmu_idx)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    hwaddr paddr;
+    target_ulong vaddr;
+    target_ulong page_size;
+    int error_code = 0, prot, access_index;
+
+    address &= TARGET_PAGE_MASK;
+    error_code = get_physical_address(env, &paddr, &prot, &access_index,
+                                      address, rw, mmu_idx, &page_size);
+    vaddr = address;
+    if (error_code == 0) {
+        qemu_log_mask(CPU_LOG_MMU,
+                "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
+                TARGET_FMT_lx "\n", address, paddr, vaddr);
+        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
+        return 0;
+    }
+
+    if (env->mmuregs[3]) { /* Fault status register */
+        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
+    }
+    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
+    env->mmuregs[4] = address; /* Fault address register */
+
+    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0)  {
+        /* No fault mode: if a mapping is available, just override
+           permissions. If no mapping is available, redirect accesses to
+           neverland. Fake/overridden mappings will be flushed when
+           switching to normal mode. */
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+        return 0;
+    } else {
+        if (rw & 2) {
+            cs->exception_index = TT_TFAULT;
+        } else {
+            cs->exception_index = TT_DFAULT;
+        }
+        return 1;
+    }
+}
+
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    hwaddr pde_ptr;
+    uint32_t pde;
+
+    /* Context base + context number */
+    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
+        (env->mmuregs[2] << 2);
+    pde = ldl_phys(cs->as, pde_ptr);
+
+    switch (pde & PTE_ENTRYTYPE_MASK) {
+    default:
+    case 0: /* Invalid */
+    case 2: /* PTE, maybe should not happen? */
+    case 3: /* Reserved */
+        return 0;
+    case 1: /* L1 PDE */
+        if (mmulev == 3) {
+            return pde;
+        }
+        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+        pde = ldl_phys(cs->as, pde_ptr);
+
+        switch (pde & PTE_ENTRYTYPE_MASK) {
+        default:
+        case 0: /* Invalid */
+        case 3: /* Reserved */
+            return 0;
+        case 2: /* L1 PTE */
+            return pde;
+        case 1: /* L2 PDE */
+            if (mmulev == 2) {
+                return pde;
+            }
+            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+            pde = ldl_phys(cs->as, pde_ptr);
+
+            switch (pde & PTE_ENTRYTYPE_MASK) {
+            default:
+            case 0: /* Invalid */
+            case 3: /* Reserved */
+                return 0;
+            case 2: /* L2 PTE */
+                return pde;
+            case 1: /* L3 PDE */
+                if (mmulev == 1) {
+                    return pde;
+                }
+                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+                pde = ldl_phys(cs->as, pde_ptr);
+
+                switch (pde & PTE_ENTRYTYPE_MASK) {
+                default:
+                case 0: /* Invalid */
+                case 1: /* PDE, should not happen */
+                case 3: /* Reserved */
+                    return 0;
+                case 2: /* L3 PTE */
+                    return pde;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    target_ulong va, va1, va2;
+    unsigned int n, m, o;
+    hwaddr pde_ptr, pa;
+    uint32_t pde;
+
+    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+    pde = ldl_phys(cs->as, pde_ptr);
+    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
+                   (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
+    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
+        pde = mmu_probe(env, va, 2);
+        if (pde) {
+            pa = cpu_get_phys_page_debug(cs, va);
+            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
+                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
+            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
+                pde = mmu_probe(env, va1, 1);
+                if (pde) {
+                    pa = cpu_get_phys_page_debug(cs, va1);
+                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
+                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
+                                   va1, pa, pde);
+                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
+                        pde = mmu_probe(env, va2, 0);
+                        if (pde) {
+                            pa = cpu_get_phys_page_debug(cs, va2);
+                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
+                                           TARGET_FMT_plx " PTE: "
+                                           TARGET_FMT_lx "\n",
+                                           va2, pa, pde);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/* GDB expects all register windows to be flushed to RAM. This function
+ * handles reads (and only reads) in stack frames as if the windows were
+ * flushed. We assume that the SPARC ABI is followed.
+ */
+int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
+                              uint8_t *buf, int len, bool is_write)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    target_ulong addr = address;
+    int i;
+    int len1;
+    int cwp = env->cwp;
+
+    if (!is_write) {
+        for (i = 0; i < env->nwindows; i++) {
+            int off;
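+            /* regbase[cwp * 16 + 22] is %i6, the frame pointer of the
+               current window. */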
+            target_ulong fp = env->regbase[cwp * 16 + 22];
+
+            /* Assume fp == 0 means end of frame.  */
+            if (fp == 0) {
+                break;
+            }
+
+            cwp = cpu_cwp_inc(env, cwp + 1);
+
+            /* Invalid window? */
+            if (env->wim & (1 << cwp)) {
+                break;
+            }
+
+            /* According to the ABI, the stack is growing downward.  */
+            if (addr + len < fp) {
+                break;
+            }
+
+            /* Not in this frame.  */
+            if (addr > fp + 64) {
+                continue;
+            }
+
+            /* Handle access before this window.  */
+            if (addr < fp) {
+                len1 = fp - addr;
+                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
+                    return -1;
+                }
+                addr += len1;
+                len -= len1;
+                buf += len1;
+            }
+
+            /* Access byte per byte to registers. Not very efficient but speed
+             * is not critical.
+             */
+            off = addr - fp;
+            len1 = 64 - off;
+
+            if (len1 > len) {
+                len1 = len;
+            }
+
+            for (; len1; len1--) {
+                int reg = cwp * 16 + 8 + (off >> 2);
+                union {
+                    uint32_t v;
+                    uint8_t c[4];
+                } u;
+                u.v = cpu_to_be32(env->regbase[reg]);
+                *buf++ = u.c[off & 3];
+                addr++;
+                len--;
+                off++;
+            }
+
+            if (len == 0) {
+                return 0;
+            }
+        }
+    }
+    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
+}
+
+#else /* !TARGET_SPARC64 */
+
+/* 41 bit physical address space */
+static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
+{
+    return x & 0x1ffffffffffULL;
+}
+
+/*
+ * UltraSparc IIi I/DMMUs
+ */
+
+/* Returns true if the TTE tag is valid and matches the virtual address in
+   the given context; the comparison mask is derived from the TTE entry's
+   page size. */
+static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
+                                       uint64_t address, uint64_t context,
+                                       hwaddr *physical)
+{
+    uint64_t mask;
+
+    switch (TTE_PGSIZE(tlb->tte)) {
+    default:
+    case 0x0: /* 8k */
+        mask = 0xffffffffffffe000ULL;
+        break;
+    case 0x1: /* 64k */
+        mask = 0xffffffffffff0000ULL;
+        break;
+    case 0x2: /* 512k */
+        mask = 0xfffffffffff80000ULL;
+        break;
+    case 0x3: /* 4M */
+        mask = 0xffffffffffc00000ULL;
+        break;
+    }
+
+    /* valid, context match, virtual address match? */
+    if (TTE_IS_VALID(tlb->tte) &&
+        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
+        && compare_masked(address, tlb->tag, mask)) {
+        /* decode physical address */
+        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
+        return 1;
+    }
+
+    return 0;
+}
+
+static int get_physical_address_data(CPUSPARCState *env,
+                                     hwaddr *physical, int *prot,
+                                     target_ulong address, int rw, int mmu_idx)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    unsigned int i;
+    uint64_t context;
+    uint64_t sfsr = 0;
+    bool is_user = false;
+
+    switch (mmu_idx) {
+    case MMU_PHYS_IDX:
+        g_assert_not_reached();
+    case MMU_USER_IDX:
+        is_user = true;
+        /* fallthru */
+    case MMU_KERNEL_IDX:
+        context = env->dmmu.mmu_primary_context & 0x1fff;
+        sfsr |= SFSR_CT_PRIMARY;
+        break;
+    case MMU_USER_SECONDARY_IDX:
+        is_user = true;
+        /* fallthru */
+    case MMU_KERNEL_SECONDARY_IDX:
+        context = env->dmmu.mmu_secondary_context & 0x1fff;
+        sfsr |= SFSR_CT_SECONDARY;
+        break;
+    case MMU_NUCLEUS_IDX:
+        sfsr |= SFSR_CT_NUCLEUS;
+        /* FALLTHRU */
+    default:
+        context = 0;
+        break;
+    }
+
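+    /* rw encoding here: 0 = read, 1 = write, 4 = no-fault read. */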
+    if (rw == 1) {
+        sfsr |= SFSR_WRITE_BIT;
+    } else if (rw == 4) {
+        sfsr |= SFSR_NF_BIT;
+    }
+
+    for (i = 0; i < 64; i++) {
+        /* ctx match, vaddr match, valid? */
+        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
+            int do_fault = 0;
+
+            /* access ok? */
+            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
+            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
+                do_fault = 1;
+                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
+                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
+            }
+            if (rw == 4) {
+                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
+                    do_fault = 1;
+                    sfsr |= SFSR_FT_NF_E_BIT;
+                }
+            } else {
+                if (TTE_IS_NFO(env->dtlb[i].tte)) {
+                    do_fault = 1;
+                    sfsr |= SFSR_FT_NFO_BIT;
+                }
+            }
+
+            if (do_fault) {
+                /* faults above are reported with TT_DFAULT. */
+                cs->exception_index = TT_DFAULT;
+            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
+                do_fault = 1;
+                cs->exception_index = TT_DPROT;
+
+                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
+            }
+
+            if (!do_fault) {
+                *prot = PAGE_READ;
+                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
+                    *prot |= PAGE_WRITE;
+                }
+
+                TTE_SET_USED(env->dtlb[i].tte);
+
+                return 0;
+            }
+
+            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+                sfsr |= SFSR_OW_BIT; /* overflow (not read before
+                                        another fault) */
+            }
+
+            if (env->pstate & PS_PRIV) {
+                sfsr |= SFSR_PR_BIT;
+            }
+
+            /* FIXME: ASI field in SFSR must be set */
+            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
+
+            env->dmmu.sfar = address; /* Fault address register */
+
+            env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+
+            return 1;
+        }
+    }
+
+    trace_mmu_helper_dmiss(address, context);
+
+    /*
+     * On MMU misses:
+     * - UltraSPARC IIi: SFSR and SFAR unmodified
+     * - JPS1: SFAR updated and some fields of SFSR updated
+     */
+    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+    cs->exception_index = TT_DMISS;
+    return 1;
+}
+
+static int get_physical_address_code(CPUSPARCState *env,
+                                     hwaddr *physical, int *prot,
+                                     target_ulong address, int mmu_idx)
+{
+    CPUState *cs = CPU(sparc_env_get_cpu(env));
+    unsigned int i;
+    uint64_t context;
+    bool is_user = false;
+
+    switch (mmu_idx) {
+    case MMU_PHYS_IDX:
+    case MMU_USER_SECONDARY_IDX:
+    case MMU_KERNEL_SECONDARY_IDX:
+        g_assert_not_reached();
+    case MMU_USER_IDX:
+        is_user = true;
+        /* fallthru */
+    case MMU_KERNEL_IDX:
+        context = env->dmmu.mmu_primary_context & 0x1fff;
+        break;
+    default:
+        context = 0;
+        break;
+    }
+
+    if (env->tl == 0) {
+        /* PRIMARY context */
+        context = env->dmmu.mmu_primary_context & 0x1fff;
+    } else {
+        /* NUCLEUS context */
+        context = 0;
+    }
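+    /* Note: the trap-level check above overrides the context derived from
+       mmu_idx; at TL > 0 instruction fetches use the NUCLEUS context. */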
+
+    for (i = 0; i < 64; i++) {
+        /* ctx match, vaddr match, valid? */
+        if (ultrasparc_tag_match(&env->itlb[i],
+                                 address, context, physical)) {
+            /* access ok? */
+            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
+                /* Fault status register */
+                if (env->immu.sfsr & SFSR_VALID_BIT) {
+                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
+                                                     another fault) */
+                } else {
+                    env->immu.sfsr = 0;
+                }
+                if (env->pstate & PS_PRIV) {
+                    env->immu.sfsr |= SFSR_PR_BIT;
+                }
+                if (env->tl > 0) {
+                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
+                }
+
+                /* FIXME: ASI field in SFSR must be set */
+                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
+                cs->exception_index = TT_TFAULT;
+
+                env->immu.tag_access = (address & ~0x1fffULL) | context;
+
+                trace_mmu_helper_tfault(address, context);
+
+                return 1;
+            }
+            *prot = PAGE_EXEC;
+            TTE_SET_USED(env->itlb[i].tte);
+            return 0;
+        }
+    }
+
+    trace_mmu_helper_tmiss(address, context);
+
+    /* The context is taken from the DMMU (mmu_primary_context) even for
+       the IMMU. */
+    env->immu.tag_access = (address & ~0x1fffULL) | context;
+    cs->exception_index = TT_TMISS;
+    return 1;
+}
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+                                int *prot, int *access_index,
+                                target_ulong address, int rw, int mmu_idx,
+                                target_ulong *page_size)
+{
+    /* ??? We treat everything as a small page, then explicitly flush
+       everything when an entry is evicted.  */
+    *page_size = TARGET_PAGE_SIZE;
+
+    /* safety net to catch wrong softmmu index use from dynamic code */
+    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
+        if (rw == 2) {
+            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
+                                                env->dmmu.mmu_primary_context,
+                                                env->dmmu.mmu_secondary_context,
+                                                address);
+        } else {
+            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
+                                                env->dmmu.mmu_primary_context,
+                                                env->dmmu.mmu_secondary_context,
+                                                address);
+        }
+    }
+
+    if (mmu_idx == MMU_PHYS_IDX) {
+        *physical = ultrasparc_truncate_physical(address);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return 0;
+    }
+
+    if (rw == 2) {
+        return get_physical_address_code(env, physical, prot, address,
+                                         mmu_idx);
+    } else {
+        return get_physical_address_data(env, physical, prot, address, rw,
+                                         mmu_idx);
+    }
+}
+
+/* Perform address translation */
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                               int mmu_idx)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    target_ulong vaddr;
+    hwaddr paddr;
+    target_ulong page_size;
+    int error_code = 0, prot, access_index;
+
+    address &= TARGET_PAGE_MASK;
+    error_code = get_physical_address(env, &paddr, &prot, &access_index,
+                                      address, rw, mmu_idx, &page_size);
+    if (error_code == 0) {
+        vaddr = address;
+
+        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
+                                   env->dmmu.mmu_primary_context,
+                                   env->dmmu.mmu_secondary_context);
+
+        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
+        return 0;
+    }
+    /* XXX */
+    return 1;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
+{
+    unsigned int i;
+    const char *mask;
+
+    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
+                   PRId64 "\n",
+                   env->dmmu.mmu_primary_context,
+                   env->dmmu.mmu_secondary_context);
+    if ((env->lsu & DMMU_E) == 0) {
+        (*cpu_fprintf)(f, "DMMU disabled\n");
+    } else {
+        (*cpu_fprintf)(f, "DMMU dump\n");
+        for (i = 0; i < 64; i++) {
+            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
+            default:
+            case 0x0:
+                mask = "  8k";
+                break;
+            case 0x1:
+                mask = " 64k";
+                break;
+            case 0x2:
+                mask = "512k";
+                break;
+            case 0x3:
+                mask = "  4M";
+                break;
+            }
+            if (TTE_IS_VALID(env->dtlb[i].tte)) {
+                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
+                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
+                               i,
+                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
+                               TTE_PA(env->dtlb[i].tte),
+                               mask,
+                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
+                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
+                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
+                               "locked" : "unlocked",
+                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
+                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
+                               "global" : "local");
+            }
+        }
+    }
+    if ((env->lsu & IMMU_E) == 0) {
+        (*cpu_fprintf)(f, "IMMU disabled\n");
+    } else {
+        (*cpu_fprintf)(f, "IMMU dump\n");
+        for (i = 0; i < 64; i++) {
+            switch (TTE_PGSIZE(env->itlb[i].tte)) {
+            default:
+            case 0x0:
+                mask = "  8k";
+                break;
+            case 0x1:
+                mask = " 64k";
+                break;
+            case 0x2:
+                mask = "512k";
+                break;
+            case 0x3:
+                mask = "  4M";
+                break;
+            }
+            if (TTE_IS_VALID(env->itlb[i].tte)) {
+                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %" PRIx64
+                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
+                               i,
+                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
+                               TTE_PA(env->itlb[i].tte),
+                               mask,
+                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
+                               TTE_IS_LOCKED(env->itlb[i].tte) ?
+                               "locked" : "unlocked",
+                               env->itlb[i].tag & (uint64_t)0x1fffULL,
+                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
+                               "global" : "local");
+            }
+        }
+    }
+}
+
+#endif /* TARGET_SPARC64 */
+
+static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
+                                   target_ulong addr, int rw, int mmu_idx)
+{
+    target_ulong page_size;
+    int prot, access_index;
+
+    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
+                                mmu_idx, &page_size);
+}
+
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+                                           int mmu_idx)
+{
+    hwaddr phys_addr;
+
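+    /* An rw value of 4 requests the no-fault variant of the data
+       translation, as the function name suggests.  */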
+    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
+        return -1;
+    }
+    return phys_addr;
+}
+#endif
+
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+    hwaddr phys_addr;
+    int mmu_idx = cpu_mmu_index(env, false);
+    MemoryRegionSection section;
+
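+    /* Try an instruction-fetch translation (rw == 2) first, then fall
+       back to a data read (rw == 0).  */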
+    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
+        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
+            return -1;
+        }
+    }
+    section = memory_region_find(get_system_memory(), phys_addr, 1);
+    memory_region_unref(section.mr);
+    if (!int128_nz(section.size)) {
+        return -1;
+    }
+    return phys_addr;
+}
+#endif
diff --git a/target/sparc/monitor.c b/target/sparc/monitor.c
new file mode 100644
index 0000000000..7cc1b0f87f
--- /dev/null
+++ b/target/sparc/monitor.c
@@ -0,0 +1,159 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+
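+/* dump_mmu() expects a (FILE *, fprintf_function) pair; the Monitor
+   pointer stands in for the FILE handle, with monitor_printf() cast to
+   the matching signature.  */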
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+    CPUArchState *env1 = mon_get_cpu_env();
+
+    dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
+
+#ifndef TARGET_SPARC64
+static target_long monitor_get_psr(const struct MonitorDef *md, int val)
+{
+    CPUArchState *env = mon_get_cpu_env();
+
+    return cpu_get_psr(env);
+}
+#endif
+
+static target_long monitor_get_reg(const struct MonitorDef *md, int val)
+{
+    CPUArchState *env = mon_get_cpu_env();
+    return env->regwptr[val];
+}
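+
+/* regwptr points at the current register window, so indices 0-7 are
+   %o0-%o7, 8-15 are %l0-%l7 and 16-23 are %i0-%i7, matching the
+   entries below.  */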
+
+const MonitorDef monitor_defs[] = {
+    { "g0", offsetof(CPUSPARCState, gregs[0]) },
+    { "g1", offsetof(CPUSPARCState, gregs[1]) },
+    { "g2", offsetof(CPUSPARCState, gregs[2]) },
+    { "g3", offsetof(CPUSPARCState, gregs[3]) },
+    { "g4", offsetof(CPUSPARCState, gregs[4]) },
+    { "g5", offsetof(CPUSPARCState, gregs[5]) },
+    { "g6", offsetof(CPUSPARCState, gregs[6]) },
+    { "g7", offsetof(CPUSPARCState, gregs[7]) },
+    { "o0", 0, monitor_get_reg },
+    { "o1", 1, monitor_get_reg },
+    { "o2", 2, monitor_get_reg },
+    { "o3", 3, monitor_get_reg },
+    { "o4", 4, monitor_get_reg },
+    { "o5", 5, monitor_get_reg },
+    { "o6", 6, monitor_get_reg },
+    { "o7", 7, monitor_get_reg },
+    { "l0", 8, monitor_get_reg },
+    { "l1", 9, monitor_get_reg },
+    { "l2", 10, monitor_get_reg },
+    { "l3", 11, monitor_get_reg },
+    { "l4", 12, monitor_get_reg },
+    { "l5", 13, monitor_get_reg },
+    { "l6", 14, monitor_get_reg },
+    { "l7", 15, monitor_get_reg },
+    { "i0", 16, monitor_get_reg },
+    { "i1", 17, monitor_get_reg },
+    { "i2", 18, monitor_get_reg },
+    { "i3", 19, monitor_get_reg },
+    { "i4", 20, monitor_get_reg },
+    { "i5", 21, monitor_get_reg },
+    { "i6", 22, monitor_get_reg },
+    { "i7", 23, monitor_get_reg },
+    { "pc", offsetof(CPUSPARCState, pc) },
+    { "npc", offsetof(CPUSPARCState, npc) },
+    { "y", offsetof(CPUSPARCState, y) },
+#ifndef TARGET_SPARC64
+    { "psr", 0, monitor_get_psr },
+    { "wim", offsetof(CPUSPARCState, wim) },
+#endif
+    { "tbr", offsetof(CPUSPARCState, tbr) },
+    { "fsr", offsetof(CPUSPARCState, fsr) },
+    { "f0", offsetof(CPUSPARCState, fpr[0].l.upper) },
+    { "f1", offsetof(CPUSPARCState, fpr[0].l.lower) },
+    { "f2", offsetof(CPUSPARCState, fpr[1].l.upper) },
+    { "f3", offsetof(CPUSPARCState, fpr[1].l.lower) },
+    { "f4", offsetof(CPUSPARCState, fpr[2].l.upper) },
+    { "f5", offsetof(CPUSPARCState, fpr[2].l.lower) },
+    { "f6", offsetof(CPUSPARCState, fpr[3].l.upper) },
+    { "f7", offsetof(CPUSPARCState, fpr[3].l.lower) },
+    { "f8", offsetof(CPUSPARCState, fpr[4].l.upper) },
+    { "f9", offsetof(CPUSPARCState, fpr[4].l.lower) },
+    { "f10", offsetof(CPUSPARCState, fpr[5].l.upper) },
+    { "f11", offsetof(CPUSPARCState, fpr[5].l.lower) },
+    { "f12", offsetof(CPUSPARCState, fpr[6].l.upper) },
+    { "f13", offsetof(CPUSPARCState, fpr[6].l.lower) },
+    { "f14", offsetof(CPUSPARCState, fpr[7].l.upper) },
+    { "f15", offsetof(CPUSPARCState, fpr[7].l.lower) },
+    { "f16", offsetof(CPUSPARCState, fpr[8].l.upper) },
+    { "f17", offsetof(CPUSPARCState, fpr[8].l.lower) },
+    { "f18", offsetof(CPUSPARCState, fpr[9].l.upper) },
+    { "f19", offsetof(CPUSPARCState, fpr[9].l.lower) },
+    { "f20", offsetof(CPUSPARCState, fpr[10].l.upper) },
+    { "f21", offsetof(CPUSPARCState, fpr[10].l.lower) },
+    { "f22", offsetof(CPUSPARCState, fpr[11].l.upper) },
+    { "f23", offsetof(CPUSPARCState, fpr[11].l.lower) },
+    { "f24", offsetof(CPUSPARCState, fpr[12].l.upper) },
+    { "f25", offsetof(CPUSPARCState, fpr[12].l.lower) },
+    { "f26", offsetof(CPUSPARCState, fpr[13].l.upper) },
+    { "f27", offsetof(CPUSPARCState, fpr[13].l.lower) },
+    { "f28", offsetof(CPUSPARCState, fpr[14].l.upper) },
+    { "f29", offsetof(CPUSPARCState, fpr[14].l.lower) },
+    { "f30", offsetof(CPUSPARCState, fpr[15].l.upper) },
+    { "f31", offsetof(CPUSPARCState, fpr[15].l.lower) },
+#ifdef TARGET_SPARC64
+    { "f32", offsetof(CPUSPARCState, fpr[16]) },
+    { "f34", offsetof(CPUSPARCState, fpr[17]) },
+    { "f36", offsetof(CPUSPARCState, fpr[18]) },
+    { "f38", offsetof(CPUSPARCState, fpr[19]) },
+    { "f40", offsetof(CPUSPARCState, fpr[20]) },
+    { "f42", offsetof(CPUSPARCState, fpr[21]) },
+    { "f44", offsetof(CPUSPARCState, fpr[22]) },
+    { "f46", offsetof(CPUSPARCState, fpr[23]) },
+    { "f48", offsetof(CPUSPARCState, fpr[24]) },
+    { "f50", offsetof(CPUSPARCState, fpr[25]) },
+    { "f52", offsetof(CPUSPARCState, fpr[26]) },
+    { "f54", offsetof(CPUSPARCState, fpr[27]) },
+    { "f56", offsetof(CPUSPARCState, fpr[28]) },
+    { "f58", offsetof(CPUSPARCState, fpr[29]) },
+    { "f60", offsetof(CPUSPARCState, fpr[30]) },
+    { "f62", offsetof(CPUSPARCState, fpr[31]) },
+    { "asi", offsetof(CPUSPARCState, asi) },
+    { "pstate", offsetof(CPUSPARCState, pstate) },
+    { "cansave", offsetof(CPUSPARCState, cansave) },
+    { "canrestore", offsetof(CPUSPARCState, canrestore) },
+    { "otherwin", offsetof(CPUSPARCState, otherwin) },
+    { "wstate", offsetof(CPUSPARCState, wstate) },
+    { "cleanwin", offsetof(CPUSPARCState, cleanwin) },
+    { "fprs", offsetof(CPUSPARCState, fprs) },
+#endif
+    { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+    return monitor_defs;
+}
diff --git a/target/sparc/trace-events b/target/sparc/trace-events
new file mode 100644
index 0000000000..8df178a347
--- /dev/null
+++ b/target/sparc/trace-events
@@ -0,0 +1,28 @@
+# See docs/tracing.txt for syntax documentation.
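+#
+# These can be enabled at run time, e.g. with
+#   -trace enable=mmu_helper_dmiss
+# assuming a suitable trace backend (such as "log") is built in.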
+
+# target/sparc/mmu_helper.c
+mmu_helper_dfault(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DFAULT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dprot(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DPROT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dmiss(uint64_t address, uint64_t context) "DMISS at %"PRIx64" context %"PRIx64
+mmu_helper_tfault(uint64_t address, uint64_t context) "TFAULT at %"PRIx64" context %"PRIx64
+mmu_helper_tmiss(uint64_t address, uint64_t context) "TMISS at %"PRIx64" context %"PRIx64
+mmu_helper_get_phys_addr_code(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64
+mmu_helper_get_phys_addr_data(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64
+mmu_helper_mmu_fault(uint64_t address, uint64_t paddr, int mmu_idx, uint32_t tl, uint64_t prim_context, uint64_t sec_context) "Translate at %"PRIx64" -> %"PRIx64", mmu_idx=%d tl=%d primary context=%"PRIx64" secondary context=%"PRIx64
+
+# target/sparc/int64_helper.c
+int_helper_set_softint(uint32_t softint) "new %08x"
+int_helper_clear_softint(uint32_t softint) "new %08x"
+int_helper_write_softint(uint32_t softint) "new %08x"
+
+# target/sparc/int32_helper.c
+int_helper_icache_freeze(void) "Instruction cache: freeze"
+int_helper_dcache_freeze(void) "Data cache: freeze"
+
+# target/sparc/win_helper.c
+win_helper_gregset_error(uint32_t pstate) "ERROR in get_gregset: active pstate bits=%x"
+win_helper_switch_pstate(uint32_t pstate_regs, uint32_t new_pstate_regs) "change_pstate: switching regs old=%x new=%x"
+win_helper_no_switch_pstate(uint32_t new_pstate_regs) "change_pstate: regs new=%x (unchanged)"
+win_helper_wrpil(uint32_t psrpil, uint32_t new_pil) "old=%x new=%x"
+win_helper_done(uint32_t tl) "tl=%d"
+win_helper_retry(uint32_t tl) "tl=%d"
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
new file mode 100644
index 0000000000..2205f89837
--- /dev/null
+++ b/target/sparc/translate.c
@@ -0,0 +1,5924 @@
+/*
+   SPARC translation
+
+   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
+   Copyright (C) 2003-2005 Fabrice Bellard
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+#include "asi.h"
+
+
+#define DEBUG_DISAS
+
+#define DYNAMIC_PC  1 /* dynamic pc value */
+#define JUMP_PC     2 /* dynamic pc value which takes only two values
+                         according to jump_pc[T2] */
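+/* Because of delay slots every SPARC instruction has a current PC and a
+   next PC (NPC); dc->pc and dc->npc track them during translation,
+   using the sentinels above when a value is only known at run time.  */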
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv_ptr cpu_regwptr;
+static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
+static TCGv_i32 cpu_cc_op;
+static TCGv_i32 cpu_psr;
+static TCGv cpu_fsr, cpu_pc, cpu_npc;
+static TCGv cpu_regs[32];
+static TCGv cpu_y;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_tbr;
+#endif
+static TCGv cpu_cond;
+#ifdef TARGET_SPARC64
+static TCGv_i32 cpu_xcc, cpu_fprs;
+static TCGv cpu_gsr;
+static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
+static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
+#else
+static TCGv cpu_wim;
+#endif
+/* Floating point registers */
+static TCGv_i64 cpu_fpr[TARGET_DPREGS];
+
+#include "exec/gen-icount.h"
+
+typedef struct DisasContext {
+    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
+    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
+    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
+    int is_br;
+    int mem_idx;
+    int fpu_enabled;
+    int address_mask_32bit;
+    int singlestep;
+    uint32_t cc_op;  /* current CC operation */
+    struct TranslationBlock *tb;
+    sparc_def_t *def;
+    TCGv_i32 t32[3];
+    TCGv ttl[5];
+    int n_t32;
+    int n_ttl;
+#ifdef TARGET_SPARC64
+    int fprs_dirty;
+    int asi;
+#endif
+} DisasContext;
+
+typedef struct {
+    TCGCond cond;
+    bool is_bool;
+    bool g1, g2;
+    TCGv c1, c2;
+} DisasCompare;
+
+// This macro uses non-native bit order, i.e. bit 0 is the MSB (bit 31)
+#define GET_FIELD(X, FROM, TO)                                  \
+    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
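+// e.g. GET_FIELD(insn, 3, 6) is (insn >> 25) & 0xf, i.e. bits 28..25 in
+// the usual LSB-0 numbering.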
+
+// This macro uses the order in the manuals, i.e. bit 0 is 2^0
+#define GET_FIELD_SP(X, FROM, TO)               \
+    GET_FIELD(X, 31 - (TO), 31 - (FROM))
+
+#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
+#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
+
+#ifdef TARGET_SPARC64
+#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
+#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
+#else
+#define DFPREG(r) (r & 0x1e)
+#define QFPREG(r) (r & 0x1c)
+#endif
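+// On SPARC64, bit 0 of the encoded register number supplies bit 5 of the
+// real one: e.g. DFPREG(3) == 34, selecting %f34.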
+
+#define UA2005_HTRAP_MASK 0xff
+#define V8_TRAP_MASK 0x7f
+
+static int sign_extend(int x, int len)
+{
+    len = 32 - len;
+    return (x << len) >> len;
+}
+
+#define IS_IMM (insn & (1<<13))
+
+static inline TCGv_i32 get_temp_i32(DisasContext *dc)
+{
+    TCGv_i32 t;
+    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
+    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
+    return t;
+}
+
+static inline TCGv get_temp_tl(DisasContext *dc)
+{
+    TCGv t;
+    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
+    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
+    return t;
+}
+
+static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
+{
+#if defined(TARGET_SPARC64)
+    int bit = (rd < 32) ? 1 : 2;
+    /* If we know we've already set this bit within the TB,
+       we can avoid setting it again.  */
+    if (!(dc->fprs_dirty & bit)) {
+        dc->fprs_dirty |= bit;
+        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
+    }
+#endif
+}
+
+/* floating point registers moves */
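+/* The 32 single-precision registers are the halves of the 16 TCGv_i64
+   values in cpu_fpr[]: even numbers map to the high half, odd numbers
+   to the low half.  */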
+static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
+{
+#if TCG_TARGET_REG_BITS == 32
+    if (src & 1) {
+        return TCGV_LOW(cpu_fpr[src / 2]);
+    } else {
+        return TCGV_HIGH(cpu_fpr[src / 2]);
+    }
+#else
+    if (src & 1) {
+        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
+    } else {
+        TCGv_i32 ret = get_temp_i32(dc);
+        TCGv_i64 t = tcg_temp_new_i64();
+
+        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
+        tcg_gen_extrl_i64_i32(ret, t);
+        tcg_temp_free_i64(t);
+
+        return ret;
+    }
+#endif
+}
+
+static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
+{
+#if TCG_TARGET_REG_BITS == 32
+    if (dst & 1) {
+        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
+    } else {
+        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
+    }
+#else
+    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
+    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
+                        (dst & 1 ? 0 : 32), 32);
+#endif
+    gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
+{
+    return get_temp_i32(dc);
+}
+
+static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
+{
+    src = DFPREG(src);
+    return cpu_fpr[src / 2];
+}
+
+static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
+{
+    dst = DFPREG(dst);
+    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
+    gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
+{
+    return cpu_fpr[DFPREG(dst) / 2];
+}
+
+static void gen_op_load_fpr_QT0(unsigned int src)
+{
+    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+                   offsetof(CPU_QuadU, ll.upper));
+    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+                   offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_load_fpr_QT1(unsigned int src)
+{
+    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
+                   offsetof(CPU_QuadU, ll.upper));
+    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
+                   offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_store_QT0_fpr(unsigned int dst)
+{
+    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+                   offsetof(CPU_QuadU, ll.upper));
+    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+                   offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
+                            TCGv_i64 v1, TCGv_i64 v2)
+{
+    dst = QFPREG(dst);
+
+    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
+    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
+    gen_update_fprs_dirty(dc, dst);
+}
+
+#ifdef TARGET_SPARC64
+static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
+{
+    src = QFPREG(src);
+    return cpu_fpr[src / 2];
+}
+
+static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
+{
+    src = QFPREG(src);
+    return cpu_fpr[src / 2 + 1];
+}
+
+static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
+{
+    rd = QFPREG(rd);
+    rs = QFPREG(rs);
+
+    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+    gen_update_fprs_dirty(dc, rd);
+}
+#endif
+
+/* moves */
+#ifdef CONFIG_USER_ONLY
+#define supervisor(dc) 0
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) 0
+#endif
+#else
+#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
+#endif
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(dc) ((dc)->address_mask_32bit)
+#else
+#define AM_CHECK(dc) (1)
+#endif
+#endif
+
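+/* When PSTATE.AM is in effect (or under a 32-bit ABI), a 64-bit CPU
+   truncates virtual addresses to 32 bits.  */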
+static inline void gen_address_mask(DisasContext *dc, TCGv addr)
+{
+#ifdef TARGET_SPARC64
+    if (AM_CHECK(dc)) {
+        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
+    }
+#endif
+}
+
+static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
+{
+    if (reg > 0) {
+        assert(reg < 32);
+        return cpu_regs[reg];
+    } else {
+        TCGv t = get_temp_tl(dc);
+        tcg_gen_movi_tl(t, 0);
+        return t;
+    }
+}
+
+static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
+{
+    if (reg > 0) {
+        assert(reg < 32);
+        tcg_gen_mov_tl(cpu_regs[reg], v);
+    }
+}
+
+static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
+{
+    if (reg > 0) {
+        assert(reg < 32);
+        return cpu_regs[reg];
+    } else {
+        return get_temp_tl(dc);
+    }
+}
+
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
+                               target_ulong npc)
+{
+    if (unlikely(s->singlestep)) {
+        return false;
+    }
+
+#ifndef CONFIG_USER_ONLY
+    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
+           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
+#else
+    return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num,
+                               target_ulong pc, target_ulong npc)
+{
+    if (use_goto_tb(s, pc, npc))  {
+        /* jump to same page: we can use a direct jump */
+        tcg_gen_goto_tb(tb_num);
+        tcg_gen_movi_tl(cpu_pc, pc);
+        tcg_gen_movi_tl(cpu_npc, npc);
+        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+    } else {
+        /* jump to another page: currently not optimized */
+        tcg_gen_movi_tl(cpu_pc, pc);
+        tcg_gen_movi_tl(cpu_npc, npc);
+        tcg_gen_exit_tb(0);
+    }
+}
+
+// XXX suboptimal
+static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
+{
+    tcg_gen_extu_i32_tl(reg, src);
+    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
+{
+    tcg_gen_extu_i32_tl(reg, src);
+    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
+{
+    tcg_gen_extu_i32_tl(reg, src);
+    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
+{
+    tcg_gen_extu_i32_tl(reg, src);
+    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+    tcg_gen_mov_tl(cpu_cc_src, src1);
+    tcg_gen_mov_tl(cpu_cc_src2, src2);
+    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+    tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static TCGv_i32 gen_add32_carry32(void)
+{
+    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+    /* Carry is computed from a previous add: (dst < src)  */
+#if TARGET_LONG_BITS == 64
+    cc_src1_32 = tcg_temp_new_i32();
+    cc_src2_32 = tcg_temp_new_i32();
+    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
+#else
+    cc_src1_32 = cpu_cc_dst;
+    cc_src2_32 = cpu_cc_src;
+#endif
+
+    carry_32 = tcg_temp_new_i32();
+    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+    tcg_temp_free_i32(cc_src1_32);
+    tcg_temp_free_i32(cc_src2_32);
+#endif
+
+    return carry_32;
+}
+
+static TCGv_i32 gen_sub32_carry32(void)
+{
+    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+    /* Carry is computed from a previous borrow: (src1 < src2)  */
+#if TARGET_LONG_BITS == 64
+    cc_src1_32 = tcg_temp_new_i32();
+    cc_src2_32 = tcg_temp_new_i32();
+    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
+#else
+    cc_src1_32 = cpu_cc_src;
+    cc_src2_32 = cpu_cc_src2;
+#endif
+
+    carry_32 = tcg_temp_new_i32();
+    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+    tcg_temp_free_i32(cc_src1_32);
+    tcg_temp_free_i32(cc_src2_32);
+#endif
+
+    return carry_32;
+}
+
+static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
+                            TCGv src2, int update_cc)
+{
+    TCGv_i32 carry_32;
+    TCGv carry;
+
+    switch (dc->cc_op) {
+    case CC_OP_DIV:
+    case CC_OP_LOGIC:
+        /* Carry is known to be zero.  Fall back to plain ADD.  */
+        if (update_cc) {
+            gen_op_add_cc(dst, src1, src2);
+        } else {
+            tcg_gen_add_tl(dst, src1, src2);
+        }
+        return;
+
+    case CC_OP_ADD:
+    case CC_OP_TADD:
+    case CC_OP_TADDTV:
+        if (TARGET_LONG_BITS == 32) {
+            /* We can re-use the host's hardware carry generation by using
+               an ADD2 opcode.  We discard the low part of the output.
+               Ideally we'd combine this operation with the add that
+               generated the carry in the first place.  */
+            carry = tcg_temp_new();
+            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+            tcg_temp_free(carry);
+            goto add_done;
+        }
+        carry_32 = gen_add32_carry32();
+        break;
+
+    case CC_OP_SUB:
+    case CC_OP_TSUB:
+    case CC_OP_TSUBTV:
+        carry_32 = gen_sub32_carry32();
+        break;
+
+    default:
+        /* We need external help to produce the carry.  */
+        carry_32 = tcg_temp_new_i32();
+        gen_helper_compute_C_icc(carry_32, cpu_env);
+        break;
+    }
+
+#if TARGET_LONG_BITS == 64
+    carry = tcg_temp_new();
+    tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+    carry = carry_32;
+#endif
+
+    tcg_gen_add_tl(dst, src1, src2);
+    tcg_gen_add_tl(dst, dst, carry);
+
+    tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+    tcg_temp_free(carry);
+#endif
+
+ add_done:
+    if (update_cc) {
+        tcg_gen_mov_tl(cpu_cc_src, src1);
+        tcg_gen_mov_tl(cpu_cc_src2, src2);
+        tcg_gen_mov_tl(cpu_cc_dst, dst);
+        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
+        dc->cc_op = CC_OP_ADDX;
+    }
+}
+
+static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+    tcg_gen_mov_tl(cpu_cc_src, src1);
+    tcg_gen_mov_tl(cpu_cc_src2, src2);
+    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+    tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
+                            TCGv src2, int update_cc)
+{
+    TCGv_i32 carry_32;
+    TCGv carry;
+
+    switch (dc->cc_op) {
+    case CC_OP_DIV:
+    case CC_OP_LOGIC:
+        /* Carry is known to be zero.  Fall back to plain SUB.  */
+        if (update_cc) {
+            gen_op_sub_cc(dst, src1, src2);
+        } else {
+            tcg_gen_sub_tl(dst, src1, src2);
+        }
+        return;
+
+    case CC_OP_ADD:
+    case CC_OP_TADD:
+    case CC_OP_TADDTV:
+        carry_32 = gen_add32_carry32();
+        break;
+
+    case CC_OP_SUB:
+    case CC_OP_TSUB:
+    case CC_OP_TSUBTV:
+        if (TARGET_LONG_BITS == 32) {
+            /* We can re-use the host's hardware carry generation by using
+               a SUB2 opcode.  We discard the low part of the output.
+               Ideally we'd combine this operation with the sub that
+               generated the carry in the first place.  */
+            carry = tcg_temp_new();
+            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+            tcg_temp_free(carry);
+            goto sub_done;
+        }
+        carry_32 = gen_sub32_carry32();
+        break;
+
+    default:
+        /* We need external help to produce the carry.  */
+        carry_32 = tcg_temp_new_i32();
+        gen_helper_compute_C_icc(carry_32, cpu_env);
+        break;
+    }
+
+#if TARGET_LONG_BITS == 64
+    carry = tcg_temp_new();
+    tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+    carry = carry_32;
+#endif
+
+    tcg_gen_sub_tl(dst, src1, src2);
+    tcg_gen_sub_tl(dst, dst, carry);
+
+    tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+    tcg_temp_free(carry);
+#endif
+
+ sub_done:
+    if (update_cc) {
+        tcg_gen_mov_tl(cpu_cc_src, src1);
+        tcg_gen_mov_tl(cpu_cc_src2, src2);
+        tcg_gen_mov_tl(cpu_cc_dst, dst);
+        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
+        dc->cc_op = CC_OP_SUBX;
+    }
+}
+
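+/* MULScc: one step of the iterative multiply.  The addend is gated by
+   bit 0 of %y, %y is shifted right taking bit 0 of rs1, and the partial
+   product is shifted right taking N ^ V from the PSR.  */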
+static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
+{
+    TCGv r_temp, zero, t0;
+
+    r_temp = tcg_temp_new();
+    t0 = tcg_temp_new();
+
+    /* old op:
+    if (!(env->y & 1))
+        T1 = 0;
+    */
+    zero = tcg_const_tl(0);
+    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
+    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
+    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
+    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
+                       zero, cpu_cc_src2);
+    tcg_temp_free(zero);
+
+    // b2 = T0 & 1;
+    // env->y = (b2 << 31) | (env->y >> 1);
+    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
+    tcg_gen_shli_tl(r_temp, r_temp, 31);
+    tcg_gen_shri_tl(t0, cpu_y, 1);
+    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
+    tcg_gen_or_tl(t0, t0, r_temp);
+    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
+
+    // b1 = N ^ V;
+    gen_mov_reg_N(t0, cpu_psr);
+    gen_mov_reg_V(r_temp, cpu_psr);
+    tcg_gen_xor_tl(t0, t0, r_temp);
+    tcg_temp_free(r_temp);
+
+    // T0 = (b1 << 31) | (T0 >> 1);
+    // src1 = T0;
+    tcg_gen_shli_tl(t0, t0, 31);
+    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
+    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
+    tcg_temp_free(t0);
+
+    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+
+    tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
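+/* Full 32x32 -> 64-bit multiply; as with UMUL/SMUL, the upper 32 bits
+   of the product end up in %y.  */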
+static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
+{
+#if TARGET_LONG_BITS == 32
+    if (sign_ext) {
+        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
+    } else {
+        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
+    }
+#else
+    TCGv t0 = tcg_temp_new_i64();
+    TCGv t1 = tcg_temp_new_i64();
+
+    if (sign_ext) {
+        tcg_gen_ext32s_i64(t0, src1);
+        tcg_gen_ext32s_i64(t1, src2);
+    } else {
+        tcg_gen_ext32u_i64(t0, src1);
+        tcg_gen_ext32u_i64(t1, src2);
+    }
+
+    tcg_gen_mul_i64(dst, t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+
+    tcg_gen_shri_i64(cpu_y, dst, 32);
+#endif
+}
+
+static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
+{
+    /* zero-extend truncated operands before multiplication */
+    gen_op_multiply(dst, src1, src2, 0);
+}
+
+static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
+{
+    /* sign-extend truncated operands before multiplication */
+    gen_op_multiply(dst, src1, src2, 1);
+}
+
+// 1
+static inline void gen_op_eval_ba(TCGv dst)
+{
+    tcg_gen_movi_tl(dst, 1);
+}
+
+// Z
+static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_Z(dst, src);
+}
+
+// Z | (N ^ V)
+static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_N(t0, src);
+    gen_mov_reg_V(dst, src);
+    tcg_gen_xor_tl(dst, dst, t0);
+    gen_mov_reg_Z(t0, src);
+    tcg_gen_or_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// N ^ V
+static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_V(t0, src);
+    gen_mov_reg_N(dst, src);
+    tcg_gen_xor_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// C | Z
+static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_Z(t0, src);
+    gen_mov_reg_C(dst, src);
+    tcg_gen_or_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// C
+static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_C(dst, src);
+}
+
+// V
+static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_V(dst, src);
+}
+
+// 0
+static inline void gen_op_eval_bn(TCGv dst)
+{
+    tcg_gen_movi_tl(dst, 0);
+}
+
+// N
+static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_N(dst, src);
+}
+
+// !Z
+static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_Z(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(Z | (N ^ V))
+static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+{
+    gen_op_eval_ble(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(N ^ V)
+static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+{
+    gen_op_eval_bl(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(C | Z)
+static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+{
+    gen_op_eval_bleu(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !C
+static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_C(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !N
+static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_N(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !V
+static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+{
+    gen_mov_reg_V(dst, src);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+/*
+  FSR condition field FCC1:FCC0 encodes the result of an FP compare:
+   0  equal
+   1  less than
+   2  greater than
+   3  unordered
+*/
+static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
+    tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+// !0: FCC0 | FCC1
+static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_or_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// 1 or 2: FCC0 ^ FCC1
+static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_xor_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// 1 or 3: FCC0
+static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+}
+
+// 1: FCC0 & !FCC1
+static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_andc_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// 2 or 3: FCC1
+static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    gen_mov_reg_FCC1(dst, src, fcc_offset);
+}
+
+// 2: !FCC0 & FCC1
+static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_andc_tl(dst, t0, dst);
+    tcg_temp_free(t0);
+}
+
+// 3: FCC0 & FCC1
+static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_and_tl(dst, dst, t0);
+    tcg_temp_free(t0);
+}
+
+// 0: !(FCC0 | FCC1)
+static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_or_tl(dst, dst, t0);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+    tcg_temp_free(t0);
+}
+
+// 0 or 3: !(FCC0 ^ FCC1)
+static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_xor_tl(dst, dst, t0);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+    tcg_temp_free(t0);
+}
+
+// 0 or 2: !FCC0
+static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !1: !(FCC0 & !FCC1)
+static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_andc_tl(dst, dst, t0);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+    tcg_temp_free(t0);
+}
+
+// 0 or 1: !FCC1
+static inline void gen_op_eval_fble(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    gen_mov_reg_FCC1(dst, src, fcc_offset);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !2: !(!FCC0 & FCC1)
+static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_andc_tl(dst, t0, dst);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+    tcg_temp_free(t0);
+}
+
+// !3: !(FCC0 & FCC1)
+static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
+                                    unsigned int fcc_offset)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_mov_reg_FCC0(dst, src, fcc_offset);
+    gen_mov_reg_FCC1(t0, src, fcc_offset);
+    tcg_gen_and_tl(dst, dst, t0);
+    tcg_gen_xori_tl(dst, dst, 0x1);
+    tcg_temp_free(t0);
+}
+
+static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
+                               target_ulong pc2, TCGv r_cond)
+{
+    TCGLabel *l1 = gen_new_label();
+
+    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+    gen_goto_tb(dc, 0, pc1, pc1 + 4);
+
+    gen_set_label(l1);
+    gen_goto_tb(dc, 1, pc2, pc2 + 4);
+}
+
+static void gen_branch_a(DisasContext *dc, target_ulong pc1)
+{
+    TCGLabel *l1 = gen_new_label();
+    target_ulong npc = dc->npc;
+
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
+
+    gen_goto_tb(dc, 0, npc, pc1);
+
+    gen_set_label(l1);
+    gen_goto_tb(dc, 1, npc + 4, npc + 8);
+
+    dc->is_br = 1;
+}
+
+static void gen_branch_n(DisasContext *dc, target_ulong pc1)
+{
+    target_ulong npc = dc->npc;
+
+    if (likely(npc != DYNAMIC_PC)) {
+        dc->pc = npc;
+        dc->jump_pc[0] = pc1;
+        dc->jump_pc[1] = npc + 4;
+        dc->npc = JUMP_PC;
+    } else {
+        TCGv t, z;
+
+        tcg_gen_mov_tl(cpu_pc, cpu_npc);
+
+        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+        t = tcg_const_tl(pc1);
+        z = tcg_const_tl(0);
+        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
+        tcg_temp_free(t);
+        tcg_temp_free(z);
+
+        dc->pc = DYNAMIC_PC;
+    }
+}
+
+static inline void gen_generic_branch(DisasContext *dc)
+{
+    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
+    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
+    TCGv zero = tcg_const_tl(0);
+
+    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
+
+    tcg_temp_free(npc0);
+    tcg_temp_free(npc1);
+    tcg_temp_free(zero);
+}
+
+/* call this function before using the condition register as it may
+   have been set for a jump */
+static inline void flush_cond(DisasContext *dc)
+{
+    if (dc->npc == JUMP_PC) {
+        gen_generic_branch(dc);
+        dc->npc = DYNAMIC_PC;
+    }
+}
+
+static inline void save_npc(DisasContext *dc)
+{
+    if (dc->npc == JUMP_PC) {
+        gen_generic_branch(dc);
+        dc->npc = DYNAMIC_PC;
+    } else if (dc->npc != DYNAMIC_PC) {
+        tcg_gen_movi_tl(cpu_npc, dc->npc);
+    }
+}
+
+static inline void update_psr(DisasContext *dc)
+{
+    if (dc->cc_op != CC_OP_FLAGS) {
+        dc->cc_op = CC_OP_FLAGS;
+        gen_helper_compute_psr(cpu_env);
+    }
+}
+
+static inline void save_state(DisasContext *dc)
+{
+    tcg_gen_movi_tl(cpu_pc, dc->pc);
+    save_npc(dc);
+}
+
+static void gen_exception(DisasContext *dc, int which)
+{
+    TCGv_i32 t;
+
+    save_state(dc);
+    t = tcg_const_i32(which);
+    gen_helper_raise_exception(cpu_env, t);
+    tcg_temp_free_i32(t);
+    dc->is_br = 1;
+}
+
+static void gen_check_align(TCGv addr, int mask)
+{
+    TCGv_i32 r_mask = tcg_const_i32(mask);
+    gen_helper_check_align(cpu_env, addr, r_mask);
+    tcg_temp_free_i32(r_mask);
+}
+
+static inline void gen_mov_pc_npc(DisasContext *dc)
+{
+    if (dc->npc == JUMP_PC) {
+        gen_generic_branch(dc);
+        tcg_gen_mov_tl(cpu_pc, cpu_npc);
+        dc->pc = DYNAMIC_PC;
+    } else if (dc->npc == DYNAMIC_PC) {
+        tcg_gen_mov_tl(cpu_pc, cpu_npc);
+        dc->pc = DYNAMIC_PC;
+    } else {
+        dc->pc = dc->npc;
+    }
+}
+
+static inline void gen_op_next_insn(void)
+{
+    tcg_gen_mov_tl(cpu_pc, cpu_npc);
+    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+}
+
+static void free_compare(DisasCompare *cmp)
+{
+    if (!cmp->g1) {
+        tcg_temp_free(cmp->c1);
+    }
+    if (!cmp->g2) {
+        tcg_temp_free(cmp->c2);
+    }
+}
+
+static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
+                        DisasContext *dc)
+{
+    static const int subcc_cond[16] = {
+        TCG_COND_NEVER,
+        TCG_COND_EQ,
+        TCG_COND_LE,
+        TCG_COND_LT,
+        TCG_COND_LEU,
+        TCG_COND_LTU,
+        -1, /* neg */
+        -1, /* overflow */
+        TCG_COND_ALWAYS,
+        TCG_COND_NE,
+        TCG_COND_GT,
+        TCG_COND_GE,
+        TCG_COND_GTU,
+        TCG_COND_GEU,
+        -1, /* pos */
+        -1, /* no overflow */
+    };
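+    /* The -1 entries (negative/overflow and their inverses) cannot be
+       expressed as a comparison of the saved operands and are handled
+       specially below.  */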
+
+    static const int logic_cond[16] = {
+        TCG_COND_NEVER,
+        TCG_COND_EQ,     /* eq:  Z */
+        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
+        TCG_COND_LT,     /* lt:  N ^ V -> N */
+        TCG_COND_EQ,     /* leu: C | Z -> Z */
+        TCG_COND_NEVER,  /* ltu: C -> 0 */
+        TCG_COND_LT,     /* neg: N */
+        TCG_COND_NEVER,  /* vs:  V -> 0 */
+        TCG_COND_ALWAYS,
+        TCG_COND_NE,     /* ne:  !Z */
+        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
+        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
+        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
+        TCG_COND_ALWAYS, /* geu: !C -> 1 */
+        TCG_COND_GE,     /* pos: !N */
+        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
+    };
+
+    TCGv_i32 r_src;
+    TCGv r_dst;
+
+#ifdef TARGET_SPARC64
+    if (xcc) {
+        r_src = cpu_xcc;
+    } else {
+        r_src = cpu_psr;
+    }
+#else
+    r_src = cpu_psr;
+#endif
+
+    switch (dc->cc_op) {
+    case CC_OP_LOGIC:
+        cmp->cond = logic_cond[cond];
+    do_compare_dst_0:
+        cmp->is_bool = false;
+        cmp->g2 = false;
+        cmp->c2 = tcg_const_tl(0);
+#ifdef TARGET_SPARC64
+        if (!xcc) {
+            cmp->g1 = false;
+            cmp->c1 = tcg_temp_new();
+            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
+            break;
+        }
+#endif
+        cmp->g1 = true;
+        cmp->c1 = cpu_cc_dst;
+        break;
+
+    case CC_OP_SUB:
+        switch (cond) {
+        case 6:  /* neg */
+        case 14: /* pos */
+            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
+            goto do_compare_dst_0;
+
+        case 7: /* overflow */
+        case 15: /* !overflow */
+            goto do_dynamic;
+
+        default:
+            cmp->cond = subcc_cond[cond];
+            cmp->is_bool = false;
+#ifdef TARGET_SPARC64
+            if (!xcc) {
+                /* Note that sign-extension works for unsigned compares as
+                   long as both operands are sign-extended.  */
+                cmp->g1 = cmp->g2 = false;
+                cmp->c1 = tcg_temp_new();
+                cmp->c2 = tcg_temp_new();
+                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
+                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
+                break;
+            }
+#endif
+            cmp->g1 = cmp->g2 = true;
+            cmp->c1 = cpu_cc_src;
+            cmp->c2 = cpu_cc_src2;
+            break;
+        }
+        break;
+
+    default:
+    do_dynamic:
+        gen_helper_compute_psr(cpu_env);
+        dc->cc_op = CC_OP_FLAGS;
+        /* FALLTHRU */
+
+    case CC_OP_FLAGS:
+        /* We're going to generate a boolean result.  */
+        cmp->cond = TCG_COND_NE;
+        cmp->is_bool = true;
+        cmp->g1 = cmp->g2 = false;
+        cmp->c1 = r_dst = tcg_temp_new();
+        cmp->c2 = tcg_const_tl(0);
+
+        switch (cond) {
+        case 0x0:
+            gen_op_eval_bn(r_dst);
+            break;
+        case 0x1:
+            gen_op_eval_be(r_dst, r_src);
+            break;
+        case 0x2:
+            gen_op_eval_ble(r_dst, r_src);
+            break;
+        case 0x3:
+            gen_op_eval_bl(r_dst, r_src);
+            break;
+        case 0x4:
+            gen_op_eval_bleu(r_dst, r_src);
+            break;
+        case 0x5:
+            gen_op_eval_bcs(r_dst, r_src);
+            break;
+        case 0x6:
+            gen_op_eval_bneg(r_dst, r_src);
+            break;
+        case 0x7:
+            gen_op_eval_bvs(r_dst, r_src);
+            break;
+        case 0x8:
+            gen_op_eval_ba(r_dst);
+            break;
+        case 0x9:
+            gen_op_eval_bne(r_dst, r_src);
+            break;
+        case 0xa:
+            gen_op_eval_bg(r_dst, r_src);
+            break;
+        case 0xb:
+            gen_op_eval_bge(r_dst, r_src);
+            break;
+        case 0xc:
+            gen_op_eval_bgu(r_dst, r_src);
+            break;
+        case 0xd:
+            gen_op_eval_bcc(r_dst, r_src);
+            break;
+        case 0xe:
+            gen_op_eval_bpos(r_dst, r_src);
+            break;
+        case 0xf:
+            gen_op_eval_bvc(r_dst, r_src);
+            break;
+        }
+        break;
+    }
+}
+
+static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
+{
+    unsigned int offset;
+    TCGv r_dst;
+
+    /* For now we still generate a straight boolean result.  */
+    cmp->cond = TCG_COND_NE;
+    cmp->is_bool = true;
+    cmp->g1 = cmp->g2 = false;
+    cmp->c1 = r_dst = tcg_temp_new();
+    cmp->c2 = tcg_const_tl(0);
+
+    switch (cc) {
+    default:
+    case 0x0:
+        offset = 0;
+        break;
+    case 0x1:
+        offset = 32 - 10;
+        break;
+    case 0x2:
+        offset = 34 - 10;
+        break;
+    case 0x3:
+        offset = 36 - 10;
+        break;
+    }
+
+    switch (cond) {
+    case 0x0:
+        gen_op_eval_bn(r_dst);
+        break;
+    case 0x1:
+        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
+        break;
+    case 0x2:
+        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
+        break;
+    case 0x3:
+        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
+        break;
+    case 0x4:
+        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
+        break;
+    case 0x5:
+        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
+        break;
+    case 0x6:
+        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
+        break;
+    case 0x7:
+        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
+        break;
+    case 0x8:
+        gen_op_eval_ba(r_dst);
+        break;
+    case 0x9:
+        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
+        break;
+    case 0xa:
+        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
+        break;
+    case 0xb:
+        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
+        break;
+    case 0xc:
+        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
+        break;
+    case 0xd:
+        gen_op_eval_fble(r_dst, cpu_fsr, offset);
+        break;
+    case 0xe:
+        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
+        break;
+    case 0xf:
+        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
+        break;
+    }
+}
+
+static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
+                     DisasContext *dc)
+{
+    DisasCompare cmp;
+    gen_compare(&cmp, cc, cond, dc);
+
+    /* The interface is to return a boolean in r_dst.  */
+    if (cmp.is_bool) {
+        tcg_gen_mov_tl(r_dst, cmp.c1);
+    } else {
+        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+    }
+
+    free_compare(&cmp);
+}
+
+static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
+{
+    DisasCompare cmp;
+    gen_fcompare(&cmp, cc, cond);
+
+    /* The interface is to return a boolean in r_dst.  */
+    if (cmp.is_bool) {
+        tcg_gen_mov_tl(r_dst, cmp.c1);
+    } else {
+        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+    }
+
+    free_compare(&cmp);
+}
+
+#ifdef TARGET_SPARC64
+// Inverted logic: the table stores the negation of each register-branch
+// condition; gen_compare_reg() inverts it back with tcg_invert_cond().
+static const int gen_tcg_cond_reg[8] = {
+    -1,
+    TCG_COND_NE,
+    TCG_COND_GT,
+    TCG_COND_GE,
+    -1,
+    TCG_COND_EQ,
+    TCG_COND_LE,
+    TCG_COND_LT,
+};
+
+static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
+{
+    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
+    cmp->is_bool = false;
+    cmp->g1 = true;
+    cmp->g2 = false;
+    cmp->c1 = r_src;
+    cmp->c2 = tcg_const_tl(0);
+}
+
+static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+{
+    DisasCompare cmp;
+    gen_compare_reg(&cmp, cond, r_src);
+
+    /* The interface is to return a boolean in r_dst.  */
+    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+
+    free_compare(&cmp);
+}
+#endif
+
+static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+    target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+    if (unlikely(AM_CHECK(dc))) {
+        target &= 0xffffffffULL;
+    }
+#endif
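+    /* With the annul bit set, an untaken branch (and "branch always")
+       skips the delay slot.  */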
+    if (cond == 0x0) {
+        /* unconditional not taken */
+        if (a) {
+            dc->pc = dc->npc + 4;
+            dc->npc = dc->pc + 4;
+        } else {
+            dc->pc = dc->npc;
+            dc->npc = dc->pc + 4;
+        }
+    } else if (cond == 0x8) {
+        /* unconditional taken */
+        if (a) {
+            dc->pc = target;
+            dc->npc = dc->pc + 4;
+        } else {
+            dc->pc = dc->npc;
+            dc->npc = target;
+            tcg_gen_mov_tl(cpu_pc, cpu_npc);
+        }
+    } else {
+        flush_cond(dc);
+        gen_cond(cpu_cond, cc, cond, dc);
+        if (a) {
+            gen_branch_a(dc, target);
+        } else {
+            gen_branch_n(dc, target);
+        }
+    }
+}
+
+static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+    target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+    if (unlikely(AM_CHECK(dc))) {
+        target &= 0xffffffffULL;
+    }
+#endif
+    if (cond == 0x0) {
+        /* unconditional not taken */
+        if (a) {
+            dc->pc = dc->npc + 4;
+            dc->npc = dc->pc + 4;
+        } else {
+            dc->pc = dc->npc;
+            dc->npc = dc->pc + 4;
+        }
+    } else if (cond == 0x8) {
+        /* unconditional taken */
+        if (a) {
+            dc->pc = target;
+            dc->npc = dc->pc + 4;
+        } else {
+            dc->pc = dc->npc;
+            dc->npc = target;
+            tcg_gen_mov_tl(cpu_pc, cpu_npc);
+        }
+    } else {
+        flush_cond(dc);
+        gen_fcond(cpu_cond, cc, cond);
+        if (a) {
+            gen_branch_a(dc, target);
+        } else {
+            gen_branch_n(dc, target);
+        }
+    }
+}
+
+#ifdef TARGET_SPARC64
+static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
+                          TCGv r_reg)
+{
+    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
+    target_ulong target = dc->pc + offset;
+
+    if (unlikely(AM_CHECK(dc))) {
+        target &= 0xffffffffULL;
+    }
+    flush_cond(dc);
+    gen_cond_reg(cpu_cond, cond, r_reg);
+    if (a) {
+        gen_branch_a(dc, target);
+    } else {
+        gen_branch_n(dc, target);
+    }
+}
+
+static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 1:
+        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 2:
+        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 3:
+        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    }
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 1:
+        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 2:
+        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 3:
+        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    }
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmpq(cpu_fsr, cpu_env);
+        break;
+    case 1:
+        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
+        break;
+    case 2:
+        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
+        break;
+    case 3:
+        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
+        break;
+    }
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 1:
+        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 2:
+        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 3:
+        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    }
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 1:
+        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 2:
+        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    case 3:
+        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+        break;
+    }
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+    switch (fccno) {
+    case 0:
+        gen_helper_fcmpeq(cpu_fsr, cpu_env);
+        break;
+    case 1:
+        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
+        break;
+    case 2:
+        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
+        break;
+    case 3:
+        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
+        break;
+    }
+}
+
+#else
+
+static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+    gen_helper_fcmpq(cpu_fsr, cpu_env);
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+    gen_helper_fcmpeq(cpu_fsr, cpu_env);
+}
+#endif
+
+static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
+{
+    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
+    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
+    gen_exception(dc, TT_FP_EXCP);
+}
+
+static int gen_trap_ifnofpu(DisasContext *dc)
+{
+#if !defined(CONFIG_USER_ONLY)
+    if (!dc->fpu_enabled) {
+        gen_exception(dc, TT_NFPU_INSN);
+        return 1;
+    }
+#endif
+    return 0;
+}
+
+static inline void gen_op_clear_ieee_excp_and_FTT(void)
+{
+    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
+}
+
+static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
+{
+    TCGv_i32 dst, src;
+
+    src = gen_load_fpr_F(dc, rs);
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, cpu_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 dst, src;
+
+    src = gen_load_fpr_F(dc, rs);
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, src);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 dst, src1, src2;
+
+    src1 = gen_load_fpr_F(dc, rs1);
+    src2 = gen_load_fpr_F(dc, rs2);
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 dst, src1, src2;
+
+    src1 = gen_load_fpr_F(dc, rs1);
+    src2 = gen_load_fpr_F(dc, rs2);
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, src1, src2);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
+{
+    TCGv_i64 dst, src;
+
+    src = gen_load_fpr_D(dc, rs);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src;
+
+    src = gen_load_fpr_D(dc, rs);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, src);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src1, src2;
+
+    src1 = gen_load_fpr_D(dc, rs1);
+    src2 = gen_load_fpr_D(dc, rs2);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src1, src2;
+
+    src1 = gen_load_fpr_D(dc, rs1);
+    src2 = gen_load_fpr_D(dc, rs2);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, src1, src2);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src1, src2;
+
+    src1 = gen_load_fpr_D(dc, rs1);
+    src2 = gen_load_fpr_D(dc, rs2);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_gsr, src1, src2);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
+                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 dst, src0, src1, src2;
+
+    src1 = gen_load_fpr_D(dc, rs1);
+    src2 = gen_load_fpr_D(dc, rs2);
+    src0 = gen_load_fpr_D(dc, rd);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, src0, src1, src2);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_ptr))
+{
+    gen_op_load_fpr_QT1(QFPREG(rs));
+
+    gen(cpu_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_ptr))
+{
+    gen_op_load_fpr_QT1(QFPREG(rs));
+
+    gen(cpu_env);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+#endif
+
+static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
+                               void (*gen)(TCGv_ptr))
+{
+    gen_op_load_fpr_QT0(QFPREG(rs1));
+    gen_op_load_fpr_QT1(QFPREG(rs2));
+
+    gen(cpu_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
+                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+    TCGv_i64 dst;
+    TCGv_i32 src1, src2;
+
+    src1 = gen_load_fpr_F(dc, rs1);
+    src2 = gen_load_fpr_F(dc, rs2);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
+                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 src1, src2;
+
+    src1 = gen_load_fpr_D(dc, rs1);
+    src2 = gen_load_fpr_D(dc, rs2);
+
+    gen(cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+    TCGv_i64 dst;
+    TCGv_i32 src;
+
+    src = gen_load_fpr_F(dc, rs);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+    TCGv_i64 dst;
+    TCGv_i32 src;
+
+    src = gen_load_fpr_F(dc, rs);
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env, src);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
+{
+    TCGv_i32 dst;
+    TCGv_i64 src;
+
+    src = gen_load_fpr_D(dc, rs);
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, cpu_env, src);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i32, TCGv_ptr))
+{
+    TCGv_i32 dst;
+
+    gen_op_load_fpr_QT1(QFPREG(rs));
+    dst = gen_dest_fpr_F(dc);
+
+    gen(dst, cpu_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
+                              void (*gen)(TCGv_i64, TCGv_ptr))
+{
+    TCGv_i64 dst;
+
+    gen_op_load_fpr_QT1(QFPREG(rs));
+    dst = gen_dest_fpr_D(dc, rd);
+
+    gen(dst, cpu_env);
+    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_ptr, TCGv_i32))
+{
+    TCGv_i32 src;
+
+    src = gen_load_fpr_F(dc, rs);
+
+    gen(cpu_env, src);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
+                                 void (*gen)(TCGv_ptr, TCGv_i64))
+{
+    TCGv_i64 src;
+
+    src = gen_load_fpr_D(dc, rs);
+
+    gen(cpu_env, src);
+
+    gen_op_store_QT0_fpr(QFPREG(rd));
+    gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
+                     TCGv addr, int mmu_idx, TCGMemOp memop)
+{
+    gen_address_mask(dc, addr);
+    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+}
+
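+/* LDSTUB loads a byte and atomically sets the memory location to
+   all-ones; an atomic exchange with the constant 0xff implements that
+   read-and-set in a single operation.  */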
+static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
+{
+    TCGv m1 = tcg_const_tl(0xff);
+    gen_address_mask(dc, addr);
+    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
+    tcg_temp_free(m1);
+}
+
+/* asi moves */
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+typedef enum {
+    GET_ASI_HELPER,  /* fall back to the out-of-line ld/st_asi helpers */
+    GET_ASI_EXCP,    /* an exception was raised while decoding the asi */
+    GET_ASI_DIRECT,  /* direct access via mem_idx/memop */
+    GET_ASI_DTWINX,  /* twin 64-bit (128-bit total) accesses */
+    GET_ASI_BLOCK,   /* 64-byte block transfer */
+    GET_ASI_SHORT,   /* 8/16-bit floating-point short accesses */
+    GET_ASI_BCOPY,   /* v8 block copy */
+    GET_ASI_BFILL,   /* v8 block fill */
+} ASIType;
+
+typedef struct {
+    ASIType type;
+    int asi;
+    int mem_idx;
+    TCGMemOp memop;
+} DisasASI;
+
+static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
+{
+    int asi = GET_FIELD(insn, 19, 26);
+    ASIType type = GET_ASI_HELPER;
+    int mem_idx = dc->mem_idx;
+
+#ifndef TARGET_SPARC64
+    /* Before v9, the ASI is always an immediate field of the instruction
+       (there is no %asi register), and alternate-space accesses are
+       privileged.  */
+    if (IS_IMM) {
+        gen_exception(dc, TT_ILL_INSN);
+        type = GET_ASI_EXCP;
+    } else if (supervisor(dc)
+               /* Note that LEON accepts ASI_USERDATA in user mode, for
+                  use with CASA.  Also note that previous versions of
+                  QEMU allowed (and old versions of gcc emitted) ASI_P
+                  for LEON, which is incorrect.  */
+               || (asi == ASI_USERDATA
+                   && (dc->def->features & CPU_FEATURE_CASA))) {
+        switch (asi) {
+        case ASI_USERDATA:   /* User data access */
+            mem_idx = MMU_USER_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_KERNELDATA: /* Supervisor data access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_M_BYPASS:    /* MMU passthrough */
+        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+            mem_idx = MMU_PHYS_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_M_BCOPY: /* Block copy, sta access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_BCOPY;
+            break;
+        case ASI_M_BFILL: /* Block fill, stda access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_BFILL;
+            break;
+        }
+    } else {
+        gen_exception(dc, TT_PRIV_INSN);
+        type = GET_ASI_EXCP;
+    }
+#else
+    if (IS_IMM) {
+        asi = dc->asi;
+    }
+    /* With v9, all asis below 0x80 are privileged.  */
+    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
+       down that bit into DisasContext.  For the moment that's ok,
+       since the direct implementations below don't have any ASIs
+       in the restricted [0x30, 0x7f] range, and the check will be
+       done properly in the helper.  */
+    if (!supervisor(dc) && asi < 0x80) {
+        gen_exception(dc, TT_PRIV_ACT);
+        type = GET_ASI_EXCP;
+    } else {
+        switch (asi) {
+        case ASI_REAL:      /* Bypass */
+        case ASI_REAL_IO:   /* Bypass, non-cacheable */
+        case ASI_REAL_L:    /* Bypass LE */
+        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+        case ASI_TWINX_REAL:   /* Real address, twinx */
+        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+        case ASI_QUAD_LDD_PHYS:
+        case ASI_QUAD_LDD_PHYS_L:
+            mem_idx = MMU_PHYS_IDX;
+            break;
+        case ASI_N:  /* Nucleus */
+        case ASI_NL: /* Nucleus LE */
+        case ASI_TWINX_N:
+        case ASI_TWINX_NL:
+        case ASI_NUCLEUS_QUAD_LDD:
+        case ASI_NUCLEUS_QUAD_LDD_L:
+            mem_idx = MMU_NUCLEUS_IDX;
+            break;
+        case ASI_AIUP:  /* As if user primary */
+        case ASI_AIUPL: /* As if user primary LE */
+        case ASI_TWINX_AIUP:
+        case ASI_TWINX_AIUP_L:
+        case ASI_BLK_AIUP_4V:
+        case ASI_BLK_AIUP_L_4V:
+        case ASI_BLK_AIUP:
+        case ASI_BLK_AIUPL:
+            mem_idx = MMU_USER_IDX;
+            break;
+        case ASI_AIUS:  /* As if user secondary */
+        case ASI_AIUSL: /* As if user secondary LE */
+        case ASI_TWINX_AIUS:
+        case ASI_TWINX_AIUS_L:
+        case ASI_BLK_AIUS_4V:
+        case ASI_BLK_AIUS_L_4V:
+        case ASI_BLK_AIUS:
+        case ASI_BLK_AIUSL:
+            mem_idx = MMU_USER_SECONDARY_IDX;
+            break;
+        case ASI_S:  /* Secondary */
+        case ASI_SL: /* Secondary LE */
+        case ASI_TWINX_S:
+        case ASI_TWINX_SL:
+        case ASI_BLK_COMMIT_S:
+        case ASI_BLK_S:
+        case ASI_BLK_SL:
+        case ASI_FL8_S:
+        case ASI_FL8_SL:
+        case ASI_FL16_S:
+        case ASI_FL16_SL:
+            if (mem_idx == MMU_USER_IDX) {
+                mem_idx = MMU_USER_SECONDARY_IDX;
+            } else if (mem_idx == MMU_KERNEL_IDX) {
+                mem_idx = MMU_KERNEL_SECONDARY_IDX;
+            }
+            break;
+        case ASI_P:  /* Primary */
+        case ASI_PL: /* Primary LE */
+        case ASI_TWINX_P:
+        case ASI_TWINX_PL:
+        case ASI_BLK_COMMIT_P:
+        case ASI_BLK_P:
+        case ASI_BLK_PL:
+        case ASI_FL8_P:
+        case ASI_FL8_PL:
+        case ASI_FL16_P:
+        case ASI_FL16_PL:
+            break;
+        }
+        switch (asi) {
+        case ASI_REAL:
+        case ASI_REAL_IO:
+        case ASI_REAL_L:
+        case ASI_REAL_IO_L:
+        case ASI_N:
+        case ASI_NL:
+        case ASI_AIUP:
+        case ASI_AIUPL:
+        case ASI_AIUS:
+        case ASI_AIUSL:
+        case ASI_S:
+        case ASI_SL:
+        case ASI_P:
+        case ASI_PL:
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_TWINX_REAL:
+        case ASI_TWINX_REAL_L:
+        case ASI_TWINX_N:
+        case ASI_TWINX_NL:
+        case ASI_TWINX_AIUP:
+        case ASI_TWINX_AIUP_L:
+        case ASI_TWINX_AIUS:
+        case ASI_TWINX_AIUS_L:
+        case ASI_TWINX_P:
+        case ASI_TWINX_PL:
+        case ASI_TWINX_S:
+        case ASI_TWINX_SL:
+        case ASI_QUAD_LDD_PHYS:
+        case ASI_QUAD_LDD_PHYS_L:
+        case ASI_NUCLEUS_QUAD_LDD:
+        case ASI_NUCLEUS_QUAD_LDD_L:
+            type = GET_ASI_DTWINX;
+            break;
+        case ASI_BLK_COMMIT_P:
+        case ASI_BLK_COMMIT_S:
+        case ASI_BLK_AIUP_4V:
+        case ASI_BLK_AIUP_L_4V:
+        case ASI_BLK_AIUP:
+        case ASI_BLK_AIUPL:
+        case ASI_BLK_AIUS_4V:
+        case ASI_BLK_AIUS_L_4V:
+        case ASI_BLK_AIUS:
+        case ASI_BLK_AIUSL:
+        case ASI_BLK_S:
+        case ASI_BLK_SL:
+        case ASI_BLK_P:
+        case ASI_BLK_PL:
+            type = GET_ASI_BLOCK;
+            break;
+        case ASI_FL8_S:
+        case ASI_FL8_SL:
+        case ASI_FL8_P:
+        case ASI_FL8_PL:
+            memop = MO_UB;
+            type = GET_ASI_SHORT;
+            break;
+        case ASI_FL16_S:
+        case ASI_FL16_SL:
+        case ASI_FL16_P:
+        case ASI_FL16_PL:
+            memop = MO_TEUW;
+            type = GET_ASI_SHORT;
+            break;
+        }
+        /* The little-endian asis all have bit 3 set.  */
+        if (asi & 8) {
+            memop ^= MO_BSWAP;
+        }
+    }
+#endif
+
+    return (DisasASI){ type, asi, mem_idx, memop };
+}
+
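+/* As an illustrative example, "lda [%o0] 0x88, %o1" names ASI_PL:
+   primary address space, little-endian.  Bit 3 of the asi is set, so
+   get_asi() flips MO_BSWAP in the memop, and gen_ld_asi() below emits
+   a direct little-endian load on the current mem_idx.  */
+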
+static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
+                       int insn, TCGMemOp memop)
+{
+    DisasASI da = get_asi(dc, insn, memop);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DTWINX: /* Reserved for ldda.  */
+        gen_exception(dc, TT_ILL_INSN);
+        break;
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+        break;
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(memop);
+
+            save_state(dc);
+#ifdef TARGET_SPARC64
+            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
+#else
+            {
+                TCGv_i64 t64 = tcg_temp_new_i64();
+                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+                tcg_gen_trunc_i64_tl(dst, t64);
+                tcg_temp_free_i64(t64);
+            }
+#endif
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+        }
+        break;
+    }
+}
+
+static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
+                       int insn, TCGMemOp memop)
+{
+    DisasASI da = get_asi(dc, insn, memop);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DTWINX: /* Reserved for stda.  */
+        gen_exception(dc, TT_ILL_INSN);
+        break;
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+        break;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+    case GET_ASI_BCOPY:
+        /* Copy 32 bytes from the address in SRC to ADDR.  */
+        /* ??? The original QEMU code suggests 4-byte alignment, dropping
+           the low bits, but the only place I can see this used is in the
+           Linux kernel with 32-byte alignment, which would make more sense
+           as a cacheline-style operation.  */
+        {
+            TCGv saddr = tcg_temp_new();
+            TCGv daddr = tcg_temp_new();
+            TCGv four = tcg_const_tl(4);
+            TCGv_i32 tmp = tcg_temp_new_i32();
+            int i;
+
+            tcg_gen_andi_tl(saddr, src, -4);
+            tcg_gen_andi_tl(daddr, addr, -4);
+            for (i = 0; i < 32; i += 4) {
+                /* Since the loads and stores are paired, allow the
+                   copy to happen in the host endianness.  */
+                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
+                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+                tcg_gen_add_tl(saddr, saddr, four);
+                tcg_gen_add_tl(daddr, daddr, four);
+            }
+
+            tcg_temp_free(saddr);
+            tcg_temp_free(daddr);
+            tcg_temp_free(four);
+            tcg_temp_free_i32(tmp);
+        }
+        break;
+#endif
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
+
+            save_state(dc);
+#ifdef TARGET_SPARC64
+            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
+#else
+            {
+                TCGv_i64 t64 = tcg_temp_new_i64();
+                tcg_gen_extu_tl_i64(t64, src);
+                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+                tcg_temp_free_i64(t64);
+            }
+#endif
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+
+            /* A write to a TLB register may alter page maps.  End the TB. */
+            dc->npc = DYNAMIC_PC;
+        }
+        break;
+    }
+}
+
+static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
+                         TCGv addr, int insn)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEUL);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DIRECT:
+        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
+}
+
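+/* CASA compares the word at [addr] with r[rs2]; if they are equal, the
+   word is replaced with r[rd], and in either case r[rd] receives the
+   old memory value.  A single atomic cmpxchg provides exactly these
+   semantics.  */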
+static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+                        int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEUL);
+    TCGv oldv;
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        return;
+    case GET_ASI_DIRECT:
+        oldv = tcg_temp_new();
+        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+                                  da.mem_idx, da.memop);
+        gen_store_gpr(dc, rd, oldv);
+        tcg_temp_free(oldv);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
+}
+
+static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+{
+    DisasASI da = get_asi(dc, insn, MO_UB);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DIRECT:
+        gen_ldstub(dc, dst, addr, da.mem_idx);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
+}
+#endif
+
+#ifdef TARGET_SPARC64
+static void gen_ldf_asi(DisasContext *dc, TCGv addr,
+                        int insn, int size, int rd)
+{
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+    TCGv_i32 d32;
+    TCGv_i64 d64;
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        switch (size) {
+        case 4:
+            d32 = gen_dest_fpr_F(dc);
+            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+            gen_store_fpr_F(dc, rd, d32);
+            break;
+        case 8:
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            break;
+        case 16:
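+            /* Load via a temporary, presumably so that the first half
+               of the destination register pair is left unmodified if
+               the second access faults.  */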
+            d64 = tcg_temp_new_i64();
+            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+            tcg_gen_addi_tl(addr, addr, 8);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+            tcg_temp_free_i64(d64);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case GET_ASI_BLOCK:
+        /* Valid for lddfa on aligned registers only.  */
+        if (size == 8 && (rd & 7) == 0) {
+            TCGMemOp memop;
+            TCGv eight;
+            int i;
+
+            gen_address_mask(dc, addr);
+
+            /* The first operation checks required alignment.  */
+            memop = da.memop | MO_ALIGN_64;
+            eight = tcg_const_tl(8);
+            for (i = 0; ; ++i) {
+                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
+                                    da.mem_idx, memop);
+                if (i == 7) {
+                    break;
+                }
+                tcg_gen_add_tl(addr, addr, eight);
+                memop = da.memop;
+            }
+            tcg_temp_free(eight);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    case GET_ASI_SHORT:
+        /* Valid for lddfa only.  */
+        if (size == 8) {
+            gen_address_mask(dc, addr);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(da.memop);
+
+            save_state(dc);
+            /* According to the table in the UA2011 manual, the only
+               other asis that are valid for ldfa/lddfa/ldqfa are
+               the NO_FAULT asis.  We still need a helper for these,
+               but we can just use the integer asi helper for them.  */
+            switch (size) {
+            case 4:
+                d64 = tcg_temp_new_i64();
+                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+                d32 = gen_dest_fpr_F(dc);
+                tcg_gen_extrl_i64_i32(d32, d64);
+                tcg_temp_free_i64(d64);
+                gen_store_fpr_F(dc, rd, d32);
+                break;
+            case 8:
+                gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
+                break;
+            case 16:
+                d64 = tcg_temp_new_i64();
+                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+                tcg_gen_addi_tl(addr, addr, 8);
+                gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
+                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+                tcg_temp_free_i64(d64);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+        }
+        break;
+    }
+}
+
+static void gen_stf_asi(DisasContext *dc, TCGv addr,
+                        int insn, int size, int rd)
+{
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+    TCGv_i32 d32;
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        switch (size) {
+        case 4:
+            d32 = gen_load_fpr_F(dc, rd);
+            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+            break;
+        case 8:
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            break;
+        case 16:
+            /* Only 4-byte alignment required.  However, it is legal for the
+               cpu to signal the alignment fault, and the OS trap handler is
+               required to fix it up.  Requiring 16-byte alignment here avoids
+               having to probe the second page before performing the first
+               write.  */
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_16);
+            tcg_gen_addi_tl(addr, addr, 8);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case GET_ASI_BLOCK:
+        /* Valid for stdfa on aligned registers only.  */
+        if (size == 8 && (rd & 7) == 0) {
+            TCGMemOp memop;
+            TCGv eight;
+            int i;
+
+            gen_address_mask(dc, addr);
+
+            /* The first operation checks required alignment.  */
+            memop = da.memop | MO_ALIGN_64;
+            eight = tcg_const_tl(8);
+            for (i = 0; ; ++i) {
+                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
+                                    da.mem_idx, memop);
+                if (i == 7) {
+                    break;
+                }
+                tcg_gen_add_tl(addr, addr, eight);
+                memop = da.memop;
+            }
+            tcg_temp_free(eight);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    case GET_ASI_SHORT:
+        /* Valid for stdfa only.  */
+        if (size == 8) {
+            gen_address_mask(dc, addr);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    default:
+        /* According to the table in the UA2011 manual, the only
+           other asis that are valid for stfa/stdfa/stqfa are
+           the PST* asis, which aren't currently handled.  */
+        gen_exception(dc, TT_ILL_INSN);
+        break;
+    }
+}
+
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEQ);
+    TCGv_i64 hi = gen_dest_gpr(dc, rd);
+    TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        return;
+
+    case GET_ASI_DTWINX:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+        tcg_gen_addi_tl(addr, addr, 8);
+        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+        break;
+
+    case GET_ASI_DIRECT:
+        {
+            TCGv_i64 tmp = tcg_temp_new_i64();
+
+            gen_address_mask(dc, addr);
+            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+
+            /* Note that LE ldda acts as if each 32-bit register
+               result is byte swapped.  Having just performed one
+               64-bit bswap, we now need to swap the writebacks.  */
+            if ((da.memop & MO_BSWAP) == MO_TE) {
+                tcg_gen_extr32_i64(lo, hi, tmp);
+            } else {
+                tcg_gen_extr32_i64(hi, lo, tmp);
+            }
+            tcg_temp_free_i64(tmp);
+        }
+        break;
+
+    default:
+        /* ??? In theory we've handled all of the ASIs that are valid
+           for ldda, and this should raise DAE_invalid_asi.  However,
+           real hardware allows others.  This can be seen with e.g.
+           FreeBSD 10.3 wrt ASI_IC_TAG.  */
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(da.memop);
+            TCGv_i64 tmp = tcg_temp_new_i64();
+
+            save_state(dc);
+            gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
+            tcg_temp_free_i32(r_asi);
+            tcg_temp_free_i32(r_mop);
+
+            /* See above.  */
+            if ((da.memop & MO_BSWAP) == MO_TE) {
+                tcg_gen_extr32_i64(lo, hi, tmp);
+            } else {
+                tcg_gen_extr32_i64(hi, lo, tmp);
+            }
+            tcg_temp_free_i64(tmp);
+        }
+        break;
+    }
+
+    gen_store_gpr(dc, rd, hi);
+    gen_store_gpr(dc, rd + 1, lo);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+                         int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEQ);
+    TCGv lo = gen_load_gpr(dc, rd + 1);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+
+    case GET_ASI_DTWINX:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+        tcg_gen_addi_tl(addr, addr, 8);
+        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
+        break;
+
+    case GET_ASI_DIRECT:
+        {
+            TCGv_i64 t64 = tcg_temp_new_i64();
+
+            /* Note that LE stda acts as if each 32-bit register result is
+               byte swapped.  We will perform one 64-bit LE store, so now
+               we must swap the order of the construction.  */
+            if ((da.memop & MO_BSWAP) == MO_TE) {
+                tcg_gen_concat32_i64(t64, lo, hi);
+            } else {
+                tcg_gen_concat32_i64(t64, hi, lo);
+            }
+            gen_address_mask(dc, addr);
+            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+            tcg_temp_free_i64(t64);
+        }
+        break;
+
+    default:
+        /* ??? In theory we've handled all of the ASIs that are valid
+           for stda, and this should raise DAE_invalid_asi.  */
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(da.memop);
+            TCGv_i64 t64 = tcg_temp_new_i64();
+
+            /* See above.  */
+            if ((da.memop & MO_BSWAP) == MO_TE) {
+                tcg_gen_concat32_i64(t64, lo, hi);
+            } else {
+                tcg_gen_concat32_i64(t64, hi, lo);
+            }
+
+            save_state(dc);
+            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+            tcg_temp_free_i64(t64);
+        }
+        break;
+    }
+}
+
+static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+                         int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEQ);
+    TCGv oldv;
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        return;
+    case GET_ASI_DIRECT:
+        oldv = tcg_temp_new();
+        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+                                  da.mem_idx, da.memop);
+        gen_store_gpr(dc, rd, oldv);
+        tcg_temp_free(oldv);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
+}
+
+#elif !defined(CONFIG_USER_ONLY)
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
+       whereby "rd + 1" elicits "error: array subscript is above array".
+       Since we have already asserted that rd is even, the semantics
+       are unchanged.  */
+    TCGv lo = gen_dest_gpr(dc, rd | 1);
+    TCGv hi = gen_dest_gpr(dc, rd);
+    TCGv_i64 t64 = tcg_temp_new_i64();
+    DisasASI da = get_asi(dc, insn, MO_TEQ);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        tcg_temp_free_i64(t64);
+        return;
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+        break;
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+            save_state(dc);
+            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+        }
+        break;
+    }
+
+    tcg_gen_extr_i64_i32(lo, hi, t64);
+    tcg_temp_free_i64(t64);
+    gen_store_gpr(dc, rd | 1, lo);
+    gen_store_gpr(dc, rd, hi);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+                         int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEQ);
+    TCGv lo = gen_load_gpr(dc, rd + 1);
+    TCGv_i64 t64 = tcg_temp_new_i64();
+
+    tcg_gen_concat_tl_i64(t64, lo, hi);
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+        break;
+    case GET_ASI_BFILL:
+        /* Store 32 bytes of T64 to ADDR.  */
+        /* ??? The original QEMU code suggests 8-byte alignment, dropping
+           the low bits, but the only place I can see this used is in the
+           Linux kernel with 32-byte alignment, which would make more sense
+           as a cacheline-style operation.  */
+        {
+            TCGv d_addr = tcg_temp_new();
+            TCGv eight = tcg_const_tl(8);
+            int i;
+
+            tcg_gen_andi_tl(d_addr, addr, -8);
+            for (i = 0; i < 32; i += 8) {
+                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+                tcg_gen_add_tl(d_addr, d_addr, eight);
+            }
+
+            tcg_temp_free(d_addr);
+            tcg_temp_free(eight);
+        }
+        break;
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+            save_state(dc);
+            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+            tcg_temp_free_i32(r_mop);
+            tcg_temp_free_i32(r_asi);
+        }
+        break;
+    }
+
+    tcg_temp_free_i64(t64);
+}
+#endif
+
+static TCGv get_src1(DisasContext *dc, unsigned int insn)
+{
+    unsigned int rs1 = GET_FIELD(insn, 13, 17);
+    return gen_load_gpr(dc, rs1);
+}
+
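+/* The second operand is either a sign-extended 13-bit immediate (i == 1),
+   e.g. the -5 in "add %g1, -5, %g2", or the register named by rs2.  */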
+static TCGv get_src2(DisasContext *dc, unsigned int insn)
+{
+    if (IS_IMM) { /* immediate */
+        target_long simm = GET_FIELDs(insn, 19, 31);
+        TCGv t = get_temp_tl(dc);
+        tcg_gen_movi_tl(t, simm);
+        return t;
+    } else {      /* register */
+        unsigned int rs2 = GET_FIELD(insn, 27, 31);
+        return gen_load_gpr(dc, rs2);
+    }
+}
+
+#ifdef TARGET_SPARC64
+static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+    TCGv_i32 c32, zero, dst, s1, s2;
+
+    /* We have two choices here: extend the 32 bit data and use movcond_i64,
+       or fold the comparison down to 32 bits and use movcond_i32.  Choose
+       the latter.  */
+    c32 = tcg_temp_new_i32();
+    if (cmp->is_bool) {
+        tcg_gen_extrl_i64_i32(c32, cmp->c1);
+    } else {
+        TCGv_i64 c64 = tcg_temp_new_i64();
+        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
+        tcg_gen_extrl_i64_i32(c32, c64);
+        tcg_temp_free_i64(c64);
+    }
+
+    s1 = gen_load_fpr_F(dc, rs);
+    s2 = gen_load_fpr_F(dc, rd);
+    dst = gen_dest_fpr_F(dc);
+    zero = tcg_const_i32(0);
+
+    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
+
+    tcg_temp_free_i32(c32);
+    tcg_temp_free_i32(zero);
+    gen_store_fpr_F(dc, rd, dst);
+}
+
+static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
+    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
+                        gen_load_fpr_D(dc, rs),
+                        gen_load_fpr_D(dc, rd));
+    gen_store_fpr_D(dc, rd, dst);
+}
+
+static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+    int qd = QFPREG(rd);
+    int qs = QFPREG(rs);
+
+    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
+                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
+    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
+                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
+
+    gen_update_fprs_dirty(dc, qd);
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
+{
+    TCGv_i32 r_tl = tcg_temp_new_i32();
+
+    /* load env->tl into r_tl */
+    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
+
+    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
+    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
+
+    /* calculate offset to current trap state from env->ts, reuse r_tl */
+    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
+    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
+
+    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
+    {
+        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
+        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
+        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
+        tcg_temp_free_ptr(r_tl_tmp);
+    }
+
+    tcg_temp_free_i32(r_tl);
+}
+#endif
+
+static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
+                     int width, bool cc, bool left)
+{
+    TCGv lo1, lo2, t1, t2;
+    uint64_t amask, tabl, tabr;
+    int shift, imask, omask;
+
+    if (cc) {
+        tcg_gen_mov_tl(cpu_cc_src, s1);
+        tcg_gen_mov_tl(cpu_cc_src2, s2);
+        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
+        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+        dc->cc_op = CC_OP_SUB;
+    }
+
+    /* Theory of operation: there are two tables, left and right (not to
+       be confused with the left and right versions of the opcode).  These
+       are indexed by the low 3 bits of the inputs.  To make things "easy",
+       these tables are loaded into two constants, TABL and TABR below.
+       The operation index = (input & imask) << shift calculates the index
+       into the constant, while val = (table >> index) & omask calculates
+       the value we're looking for.  */
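+    /* For example, with width 8 an input whose low three bits are zero
+       indexes the low byte of its table, 0xff in either direction (all
+       eight bytes selected), while low bits of 7 index the top byte,
+       leaving a single-byte mask.  */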
+    switch (width) {
+    case 8:
+        imask = 0x7;
+        shift = 3;
+        omask = 0xff;
+        if (left) {
+            tabl = 0x80c0e0f0f8fcfeffULL;
+            tabr = 0xff7f3f1f0f070301ULL;
+        } else {
+            tabl = 0x0103070f1f3f7fffULL;
+            tabr = 0xfffefcf8f0e0c080ULL;
+        }
+        break;
+    case 16:
+        imask = 0x6;
+        shift = 1;
+        omask = 0xf;
+        if (left) {
+            tabl = 0x8cef;
+            tabr = 0xf731;
+        } else {
+            tabl = 0x137f;
+            tabr = 0xfec8;
+        }
+        break;
+    case 32:
+        imask = 0x4;
+        shift = 0;
+        omask = 0x3;
+        if (left) {
+            tabl = (2 << 2) | 3;
+            tabr = (3 << 2) | 1;
+        } else {
+            tabl = (1 << 2) | 3;
+            tabr = (3 << 2) | 2;
+        }
+        break;
+    default:
+        abort();
+    }
+
+    lo1 = tcg_temp_new();
+    lo2 = tcg_temp_new();
+    tcg_gen_andi_tl(lo1, s1, imask);
+    tcg_gen_andi_tl(lo2, s2, imask);
+    tcg_gen_shli_tl(lo1, lo1, shift);
+    tcg_gen_shli_tl(lo2, lo2, shift);
+
+    t1 = tcg_const_tl(tabl);
+    t2 = tcg_const_tl(tabr);
+    tcg_gen_shr_tl(lo1, t1, lo1);
+    tcg_gen_shr_tl(lo2, t2, lo2);
+    tcg_gen_andi_tl(dst, lo1, omask);
+    tcg_gen_andi_tl(lo2, lo2, omask);
+
+    amask = -8;
+    if (AM_CHECK(dc)) {
+        amask &= 0xffffffffULL;
+    }
+    tcg_gen_andi_tl(s1, s1, amask);
+    tcg_gen_andi_tl(s2, s2, amask);
+
+    /* We want to compute
+        dst = (s1 == s2 ? lo1 : lo1 & lo2).
+       We've already done dst = lo1, so this reduces to
+        dst &= (s1 == s2 ? -1 : lo2)
+       Which we perform by
+        lo2 |= -(s1 == s2)
+        dst &= lo2
+    */
+    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
+    tcg_gen_neg_tl(t1, t1);
+    tcg_gen_or_tl(lo2, lo2, t1);
+    tcg_gen_and_tl(dst, dst, lo2);
+
+    tcg_temp_free(lo1);
+    tcg_temp_free(lo2);
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+}
+
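+/* ALIGNADDRESS returns (s1 + s2) & ~7 and deposits the low three bits
+   of the sum into GSR.align for use by a following FALIGNDATA; with
+   LEFT set (presumably the alignaddrl form) the offset is negated
+   first.  */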
+static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
+{
+    TCGv tmp = tcg_temp_new();
+
+    tcg_gen_add_tl(tmp, s1, s2);
+    tcg_gen_andi_tl(dst, tmp, -8);
+    if (left) {
+        tcg_gen_neg_tl(tmp, tmp);
+    }
+    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+
+    tcg_temp_free(tmp);
+}
+
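+/* FALIGNDATA concatenates s1:s2 and extracts eight bytes starting at
+   byte offset GSR.align: an offset of 3, for instance, yields
+   (s1 << 24) | (s2 >> 40), while an offset of 0 yields s1 unchanged.  */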
+static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
+{
+    TCGv t1, t2, shift;
+
+    t1 = tcg_temp_new();
+    t2 = tcg_temp_new();
+    shift = tcg_temp_new();
+
+    tcg_gen_andi_tl(shift, gsr, 7);
+    tcg_gen_shli_tl(shift, shift, 3);
+    tcg_gen_shl_tl(t1, s1, shift);
+
+    /* A shift of 64 is not defined in TCG (and need not produce zero).
+       Split this into a shift of up to 63 followed by a constant shift
+       of 1.  */
+    tcg_gen_xori_tl(shift, shift, 63);
+    tcg_gen_shr_tl(t2, s2, shift);
+    tcg_gen_shri_tl(t2, t2, 1);
+
+    tcg_gen_or_tl(dst, t1, t2);
+
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+    tcg_temp_free(shift);
+}
+#endif
+
+#define CHECK_IU_FEATURE(dc, FEATURE)                      \
+    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
+        goto illegal_insn;
+#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
+    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
+        goto nfpu_insn;
+
+/* before an instruction, dc->pc must be static */
+static void disas_sparc_insn(DisasContext *dc, unsigned int insn)
+{
+    unsigned int opc, rs1, rs2, rd;
+    TCGv cpu_src1, cpu_src2;
+    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
+    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
+    target_long simm;
+
+    opc = GET_FIELD(insn, 0, 1);
+    rd = GET_FIELD(insn, 2, 6);
+
+    switch (opc) {
+    case 0:                     /* branches/sethi */
+        {
+            unsigned int xop = GET_FIELD(insn, 7, 9);
+            int32_t target;
+            switch (xop) {
+#ifdef TARGET_SPARC64
+            case 0x1:           /* V9 BPcc */
+                {
+                    int cc;
+
+                    target = GET_FIELD_SP(insn, 0, 18);
+                    target = sign_extend(target, 19);
+                    target <<= 2;
+                    cc = GET_FIELD_SP(insn, 20, 21);
+                    if (cc == 0)
+                        do_branch(dc, target, insn, 0);
+                    else if (cc == 2)
+                        do_branch(dc, target, insn, 1);
+                    else
+                        goto illegal_insn;
+                    goto jmp_insn;
+                }
+            case 0x3:           /* V9 BPr */
+                {
+                    target = GET_FIELD_SP(insn, 0, 13) |
+                        (GET_FIELD_SP(insn, 20, 21) << 14);
+                    target = sign_extend(target, 16);
+                    target <<= 2;
+                    cpu_src1 = get_src1(dc, insn);
+                    do_branch_reg(dc, target, insn, cpu_src1);
+                    goto jmp_insn;
+                }
+            case 0x5:           /* V9 FBPcc */
+                {
+                    int cc = GET_FIELD_SP(insn, 20, 21);
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    target = GET_FIELD_SP(insn, 0, 18);
+                    target = sign_extend(target, 19);
+                    target <<= 2;
+                    do_fbranch(dc, target, insn, cc);
+                    goto jmp_insn;
+                }
+#else
+            case 0x7:           /* CBN+x */
+                {
+                    goto ncp_insn;
+                }
+#endif
+            case 0x2:           /* BN+x */
+                {
+                    target = GET_FIELD(insn, 10, 31);
+                    target = sign_extend(target, 22);
+                    target <<= 2;
+                    do_branch(dc, target, insn, 0);
+                    goto jmp_insn;
+                }
+            case 0x6:           /* FBN+x */
+                {
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    target = GET_FIELD(insn, 10, 31);
+                    target = sign_extend(target, 22);
+                    target <<= 2;
+                    do_fbranch(dc, target, insn, 0);
+                    goto jmp_insn;
+                }
+            case 0x4:           /* SETHI */
+                /* Special-case %g0 because that's the canonical nop.  */
+                if (rd) {
+                    uint32_t value = GET_FIELD(insn, 10, 31);
+                    TCGv t = gen_dest_gpr(dc, rd);
+                    tcg_gen_movi_tl(t, value << 10);
+                    gen_store_gpr(dc, rd, t);
+                }
+                break;
+            case 0x0:           /* UNIMPL */
+            default:
+                goto illegal_insn;
+            }
+            break;
+        }
+        break;
+    case 1:                     /* CALL */
+        {
+            target_long target = GET_FIELDs(insn, 2, 31) << 2;
+            TCGv o7 = gen_dest_gpr(dc, 15);
+
+            tcg_gen_movi_tl(o7, dc->pc);
+            gen_store_gpr(dc, 15, o7);
+            target += dc->pc;
+            gen_mov_pc_npc(dc);
+#ifdef TARGET_SPARC64
+            if (unlikely(AM_CHECK(dc))) {
+                target &= 0xffffffffULL;
+            }
+#endif
+            dc->npc = target;
+        }
+        goto jmp_insn;
+    case 2:                     /* FPU & Logical Operations */
+        {
+            unsigned int xop = GET_FIELD(insn, 7, 12);
+            TCGv cpu_dst = get_temp_tl(dc);
+            TCGv cpu_tmp0;
+
+            if (xop == 0x3a) {  /* generate trap */
+                int cond = GET_FIELD(insn, 3, 6);
+                TCGv_i32 trap;
+                TCGLabel *l1 = NULL;
+                int mask;
+
+                if (cond == 0) {
+                    /* Trap never.  */
+                    break;
+                }
+
+                save_state(dc);
+
+                if (cond != 8) {
+                    /* Conditional trap.  */
+                    DisasCompare cmp;
+#ifdef TARGET_SPARC64
+                    /* V9 icc/xcc */
+                    int cc = GET_FIELD_SP(insn, 11, 12);
+                    if (cc == 0) {
+                        gen_compare(&cmp, 0, cond, dc);
+                    } else if (cc == 2) {
+                        gen_compare(&cmp, 1, cond, dc);
+                    } else {
+                        goto illegal_insn;
+                    }
+#else
+                    gen_compare(&cmp, 0, cond, dc);
+#endif
+                    l1 = gen_new_label();
+                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
+                                      cmp.c1, cmp.c2, l1);
+                    free_compare(&cmp);
+                }
+
+                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
+                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
+
+                /* Don't use the normal temporaries, as they may well have
+                   gone out of scope with the branch above.  While we're
+                   doing that we might as well pre-truncate to 32-bit.  */
+                trap = tcg_temp_new_i32();
+
+                rs1 = GET_FIELD_SP(insn, 14, 18);
+                if (IS_IMM) {
+                    rs2 = GET_FIELD_SP(insn, 0, 6);
+                    if (rs1 == 0) {
+                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
+                        /* Signal that the trap value is fully constant.  */
+                        mask = 0;
+                    } else {
+                        TCGv t1 = gen_load_gpr(dc, rs1);
+                        tcg_gen_trunc_tl_i32(trap, t1);
+                        tcg_gen_addi_i32(trap, trap, rs2);
+                    }
+                } else {
+                    TCGv t1, t2;
+                    rs2 = GET_FIELD_SP(insn, 0, 4);
+                    t1 = gen_load_gpr(dc, rs1);
+                    t2 = gen_load_gpr(dc, rs2);
+                    tcg_gen_add_tl(t1, t1, t2);
+                    tcg_gen_trunc_tl_i32(trap, t1);
+                }
+                if (mask != 0) {
+                    tcg_gen_andi_i32(trap, trap, mask);
+                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
+                }
+
+                gen_helper_raise_exception(cpu_env, trap);
+                tcg_temp_free_i32(trap);
+
+                if (cond == 8) {
+                    /* An unconditional trap ends the TB.  */
+                    dc->is_br = 1;
+                    goto jmp_insn;
+                } else {
+                    /* A conditional trap falls through to the next insn.  */
+                    gen_set_label(l1);
+                    break;
+                }
+            } else if (xop == 0x28) {
+                rs1 = GET_FIELD(insn, 13, 17);
+                switch (rs1) {
+                case 0: /* rdy */
+#ifndef TARGET_SPARC64
+                case 0x01 ... 0x0e: /* undefined in the SPARCv8
+                                       manual, rdy on the microSPARC
+                                       II */
+                case 0x0f:          /* stbar in the SPARCv8 manual,
+                                       rdy on the microSPARC II */
+                case 0x10 ... 0x1f: /* implementation-dependent in the
+                                       SPARCv8 manual, rdy on the
+                                       microSPARC II */
+                    /* Read Asr17 */
+                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
+                        TCGv t = gen_dest_gpr(dc, rd);
+                        /* Read Asr17 for a Leon3 monoprocessor */
+                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
+                        gen_store_gpr(dc, rd, t);
+                        break;
+                    }
+#endif
+                    gen_store_gpr(dc, rd, cpu_y);
+                    break;
+#ifdef TARGET_SPARC64
+                case 0x2: /* V9 rdccr */
+                    update_psr(dc);
+                    gen_helper_rdccr(cpu_dst, cpu_env);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x3: /* V9 rdasi */
+                    tcg_gen_movi_tl(cpu_dst, dc->asi);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x4: /* V9 rdtick */
+                    {
+                        TCGv_ptr r_tickptr;
+                        TCGv_i32 r_const;
+
+                        r_tickptr = tcg_temp_new_ptr();
+                        r_const = tcg_const_i32(dc->mem_idx);
+                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                       offsetof(CPUSPARCState, tick));
+                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+                                                  r_const);
+                        tcg_temp_free_ptr(r_tickptr);
+                        tcg_temp_free_i32(r_const);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                    }
+                    break;
+                case 0x5: /* V9 rdpc */
+                    {
+                        TCGv t = gen_dest_gpr(dc, rd);
+                        if (unlikely(AM_CHECK(dc))) {
+                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
+                        } else {
+                            tcg_gen_movi_tl(t, dc->pc);
+                        }
+                        gen_store_gpr(dc, rd, t);
+                    }
+                    break;
+                case 0x6: /* V9 rdfprs */
+                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0xf: /* V9 membar */
+                    break; /* no effect */
+                case 0x13: /* Graphics Status */
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_store_gpr(dc, rd, cpu_gsr);
+                    break;
+                case 0x16: /* Softint */
+                    tcg_gen_ld32s_tl(cpu_dst, cpu_env,
+                                     offsetof(CPUSPARCState, softint));
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x17: /* Tick compare */
+                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
+                    break;
+                case 0x18: /* System tick */
+                    {
+                        TCGv_ptr r_tickptr;
+                        TCGv_i32 r_const;
+
+                        r_tickptr = tcg_temp_new_ptr();
+                        r_const = tcg_const_i32(dc->mem_idx);
+                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                       offsetof(CPUSPARCState, stick));
+                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+                                                  r_const);
+                        tcg_temp_free_ptr(r_tickptr);
+                        tcg_temp_free_i32(r_const);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                    }
+                    break;
+                case 0x19: /* System tick compare */
+                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
+                    break;
+                case 0x10: /* Performance Control */
+                case 0x11: /* Performance Instrumentation Counter */
+                case 0x12: /* Dispatch Control */
+                case 0x14: /* Softint set, WO */
+                case 0x15: /* Softint clear, WO */
+#endif
+                default:
+                    goto illegal_insn;
+                }
+#if !defined(CONFIG_USER_ONLY)
+            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
+#ifndef TARGET_SPARC64
+                if (!supervisor(dc)) {
+                    goto priv_insn;
+                }
+                update_psr(dc);
+                gen_helper_rdpsr(cpu_dst, cpu_env);
+#else
+                CHECK_IU_FEATURE(dc, HYPV);
+                if (!hypervisor(dc))
+                    goto priv_insn;
+                rs1 = GET_FIELD(insn, 13, 17);
+                switch (rs1) {
+                case 0: // hpstate
+                    // gen_op_rdhpstate();
+                    break;
+                case 1: // htstate
+                    // gen_op_rdhtstate();
+                    break;
+                case 3: // hintp
+                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
+                    break;
+                case 5: // htba
+                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
+                    break;
+                case 6: // hver
+                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
+                    break;
+                case 31: // hstick_cmpr
+                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
+                    break;
+                default:
+                    goto illegal_insn;
+                }
+#endif
+                gen_store_gpr(dc, rd, cpu_dst);
+                break;
+            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
+                if (!supervisor(dc)) {
+                    goto priv_insn;
+                }
+                cpu_tmp0 = get_temp_tl(dc);
+#ifdef TARGET_SPARC64
+                rs1 = GET_FIELD(insn, 13, 17);
+                switch (rs1) {
+                case 0: // tpc
+                    {
+                        TCGv_ptr r_tsptr;
+
+                        r_tsptr = tcg_temp_new_ptr();
+                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+                                      offsetof(trap_state, tpc));
+                        tcg_temp_free_ptr(r_tsptr);
+                    }
+                    break;
+                case 1: // tnpc
+                    {
+                        TCGv_ptr r_tsptr;
+
+                        r_tsptr = tcg_temp_new_ptr();
+                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+                                      offsetof(trap_state, tnpc));
+                        tcg_temp_free_ptr(r_tsptr);
+                    }
+                    break;
+                case 2: /* tstate */
+                    {
+                        TCGv_ptr r_tsptr;
+
+                        r_tsptr = tcg_temp_new_ptr();
+                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+                                      offsetof(trap_state, tstate));
+                        tcg_temp_free_ptr(r_tsptr);
+                    }
+                    break;
+                case 3: /* tt */
+                    {
+                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
+                                         offsetof(trap_state, tt));
+                        tcg_temp_free_ptr(r_tsptr);
+                    }
+                    break;
+                case 4: /* tick */
+                    {
+                        TCGv_ptr r_tickptr;
+                        TCGv_i32 r_const;
+
+                        r_tickptr = tcg_temp_new_ptr();
+                        r_const = tcg_const_i32(dc->mem_idx);
+                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                       offsetof(CPUSPARCState, tick));
+                        gen_helper_tick_get_count(cpu_tmp0, cpu_env,
+                                                  r_tickptr, r_const);
+                        tcg_temp_free_ptr(r_tickptr);
+                        tcg_temp_free_i32(r_const);
+                    }
+                    break;
+                case 5: /* tba */
+                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
+                    break;
+                case 6: /* pstate */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, pstate));
+                    break;
+                case 7: /* tl */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, tl));
+                    break;
+                case 8: /* pil */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, psrpil));
+                    break;
+                case 9: /* cwp */
+                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
+                    break;
+                case 10: /* cansave */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, cansave));
+                    break;
+                case 11: /* canrestore */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, canrestore));
+                    break;
+                case 12: /* cleanwin */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, cleanwin));
+                    break;
+                case 13: /* otherwin */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, otherwin));
+                    break;
+                case 14: /* wstate */
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, wstate));
+                    break;
+                case 16: /* UA2005 gl */
+                    CHECK_IU_FEATURE(dc, GL);
+                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+                                     offsetof(CPUSPARCState, gl));
+                    break;
+                case 26: /* UA2005 strand status */
+                    CHECK_IU_FEATURE(dc, HYPV);
+                    if (!hypervisor(dc)) {
+                        goto priv_insn;
+                    }
+                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
+                    break;
+                case 31: /* ver */
+                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
+                    break;
+                case 15: /* fq */
+                default:
+                    goto illegal_insn;
+                }
+#else
+                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
+#endif
+                gen_store_gpr(dc, rd, cpu_tmp0);
+                break;
+            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
+#ifdef TARGET_SPARC64
+                gen_helper_flushw(cpu_env);
+#else
+                if (!supervisor(dc)) {
+                    goto priv_insn;
+                }
+                gen_store_gpr(dc, rd, cpu_tbr);
+#endif
+                break;
+#endif
+            } else if (xop == 0x34) {   /* FPop1: FPU arithmetic and conversions */
+                if (gen_trap_ifnofpu(dc)) {
+                    goto jmp_insn;
+                }
+                gen_op_clear_ieee_excp_and_FTT();
+                rs1 = GET_FIELD(insn, 13, 17);
+                rs2 = GET_FIELD(insn, 27, 31);
+                xop = GET_FIELD(insn, 18, 26);
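+                /* xop now holds the 9-bit opf field naming the FP op. */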
+
+                switch (xop) {
+                case 0x1: /* fmovs */
+                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+                    gen_store_fpr_F(dc, rd, cpu_src1_32);
+                    break;
+                case 0x5: /* fnegs */
+                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
+                    break;
+                case 0x9: /* fabss */
+                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
+                    break;
+                case 0x29: /* fsqrts */
+                    CHECK_FPU_FEATURE(dc, FSQRT);
+                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
+                    break;
+                case 0x2a: /* fsqrtd */
+                    CHECK_FPU_FEATURE(dc, FSQRT);
+                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
+                    break;
+                case 0x2b: /* fsqrtq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
+                    break;
+                case 0x41: /* fadds */
+                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
+                    break;
+                case 0x42: /* faddd */
+                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
+                    break;
+                case 0x43: /* faddq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
+                    break;
+                case 0x45: /* fsubs */
+                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
+                    break;
+                case 0x46: /* fsubd */
+                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
+                    break;
+                case 0x47: /* fsubq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
+                    break;
+                case 0x49: /* fmuls */
+                    CHECK_FPU_FEATURE(dc, FMUL);
+                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
+                    break;
+                case 0x4a: /* fmuld */
+                    CHECK_FPU_FEATURE(dc, FMUL);
+                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
+                    break;
+                case 0x4b: /* fmulq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    CHECK_FPU_FEATURE(dc, FMUL);
+                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
+                    break;
+                case 0x4d: /* fdivs */
+                    gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
+                    break;
+                case 0x4e: /* fdivd */
+                    gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
+                    break;
+                case 0x4f: /* fdivq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
+                    break;
+                case 0x69: /* fsmuld */
+                    CHECK_FPU_FEATURE(dc, FSMULD);
+                    gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
+                    break;
+                case 0x6e: /* fdmulq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
+                    break;
+                case 0xc4: /* fitos */
+                    gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
+                    break;
+                case 0xc6: /* fdtos */
+                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
+                    break;
+                case 0xc7: /* fqtos */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
+                    break;
+                case 0xc8: /* fitod */
+                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
+                    break;
+                case 0xc9: /* fstod */
+                    gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
+                    break;
+                case 0xcb: /* fqtod */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
+                    break;
+                case 0xcc: /* fitoq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
+                    break;
+                case 0xcd: /* fstoq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
+                    break;
+                case 0xce: /* fdtoq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
+                    break;
+                case 0xd1: /* fstoi */
+                    gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
+                    break;
+                case 0xd2: /* fdtoi */
+                    gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
+                    break;
+                case 0xd3: /* fqtoi */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
+                    break;
+#ifdef TARGET_SPARC64
+                case 0x2: /* V9 fmovd */
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+                    gen_store_fpr_D(dc, rd, cpu_src1_64);
+                    break;
+                case 0x3: /* V9 fmovq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_move_Q(dc, rd, rs2);
+                    break;
+                case 0x6: /* V9 fnegd */
+                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
+                    break;
+                case 0x7: /* V9 fnegq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
+                    break;
+                case 0xa: /* V9 fabsd */
+                    gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
+                    break;
+                case 0xb: /* V9 fabsq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
+                    break;
+                case 0x81: /* V9 fstox */
+                    gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
+                    break;
+                case 0x82: /* V9 fdtox */
+                    gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
+                    break;
+                case 0x83: /* V9 fqtox */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
+                    break;
+                case 0x84: /* V9 fxtos */
+                    gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
+                    break;
+                case 0x88: /* V9 fxtod */
+                    gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
+                    break;
+                case 0x8c: /* V9 fxtoq */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
+                    break;
+#endif
+                default:
+                    goto illegal_insn;
+                }
+            } else if (xop == 0x35) {   /* FPop2: FPU compares and conditional moves */
+#ifdef TARGET_SPARC64
+                int cond;
+#endif
+                if (gen_trap_ifnofpu(dc)) {
+                    goto jmp_insn;
+                }
+                gen_op_clear_ieee_excp_and_FTT();
+                rs1 = GET_FIELD(insn, 13, 17);
+                rs2 = GET_FIELD(insn, 27, 31);
+                xop = GET_FIELD(insn, 18, 26);
+
+#ifdef TARGET_SPARC64
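+                /* FMOVR: move an FP register if the integer register
+                   rs1 satisfies the rcond test encoded in bits 10-12. */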
+#define FMOVR(sz)                                                  \
+                do {                                               \
+                    DisasCompare cmp;                              \
+                    cond = GET_FIELD_SP(insn, 10, 12);             \
+                    cpu_src1 = get_src1(dc, insn);                 \
+                    gen_compare_reg(&cmp, cond, cpu_src1);         \
+                    gen_fmov##sz(dc, &cmp, rd, rs2);               \
+                    free_compare(&cmp);                            \
+                } while (0)
+
+                if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
+                    FMOVR(s);
+                    break;
+                } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */
+                    FMOVR(d);
+                    break;
+                } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    FMOVR(q);
+                    break;
+                }
+#undef FMOVR
+#endif
+                switch (xop) {
+#ifdef TARGET_SPARC64
+#define FMOVCC(fcc, sz)                                                 \
+                    do {                                                \
+                        DisasCompare cmp;                               \
+                        cond = GET_FIELD_SP(insn, 14, 17);              \
+                        gen_fcompare(&cmp, fcc, cond);                  \
+                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
+                        free_compare(&cmp);                             \
+                    } while (0)
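+                    /* FMOVcc on a floating-point %fccN: move the FP
+                       register if condition cond holds in that fcc. */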
+
+                    case 0x001: /* V9 fmovscc %fcc0 */
+                        FMOVCC(0, s);
+                        break;
+                    case 0x002: /* V9 fmovdcc %fcc0 */
+                        FMOVCC(0, d);
+                        break;
+                    case 0x003: /* V9 fmovqcc %fcc0 */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(0, q);
+                        break;
+                    case 0x041: /* V9 fmovscc %fcc1 */
+                        FMOVCC(1, s);
+                        break;
+                    case 0x042: /* V9 fmovdcc %fcc1 */
+                        FMOVCC(1, d);
+                        break;
+                    case 0x043: /* V9 fmovqcc %fcc1 */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(1, q);
+                        break;
+                    case 0x081: /* V9 fmovscc %fcc2 */
+                        FMOVCC(2, s);
+                        break;
+                    case 0x082: /* V9 fmovdcc %fcc2 */
+                        FMOVCC(2, d);
+                        break;
+                    case 0x083: /* V9 fmovqcc %fcc2 */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(2, q);
+                        break;
+                    case 0x0c1: /* V9 fmovscc %fcc3 */
+                        FMOVCC(3, s);
+                        break;
+                    case 0x0c2: /* V9 fmovdcc %fcc3 */
+                        FMOVCC(3, d);
+                        break;
+                    case 0x0c3: /* V9 fmovqcc %fcc3 */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(3, q);
+                        break;
+#undef FMOVCC
+#define FMOVCC(xcc, sz)                                                 \
+                    do {                                                \
+                        DisasCompare cmp;                               \
+                        cond = GET_FIELD_SP(insn, 14, 17);              \
+                        gen_compare(&cmp, xcc, cond, dc);               \
+                        gen_fmov##sz(dc, &cmp, rd, rs2);                \
+                        free_compare(&cmp);                             \
+                    } while (0)
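+                    /* Same pattern keyed on the integer condition
+                       codes: xcc=0 tests %icc, xcc=1 tests %xcc. */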
+
+                    case 0x101: /* V9 fmovscc %icc */
+                        FMOVCC(0, s);
+                        break;
+                    case 0x102: /* V9 fmovdcc %icc */
+                        FMOVCC(0, d);
+                        break;
+                    case 0x103: /* V9 fmovqcc %icc */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(0, q);
+                        break;
+                    case 0x181: /* V9 fmovscc %xcc */
+                        FMOVCC(1, s);
+                        break;
+                    case 0x182: /* V9 fmovdcc %xcc */
+                        FMOVCC(1, d);
+                        break;
+                    case 0x183: /* V9 fmovqcc %xcc */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        FMOVCC(1, q);
+                        break;
+#undef FMOVCC
+#endif
+                    case 0x51: /* fcmps, V9 %fcc */
+                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+                        gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
+                        break;
+                    case 0x52: /* fcmpd, V9 %fcc */
+                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                        gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
+                        break;
+                    case 0x53: /* fcmpq, V9 %fcc */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        gen_op_load_fpr_QT0(QFPREG(rs1));
+                        gen_op_load_fpr_QT1(QFPREG(rs2));
+                        gen_op_fcmpq(rd & 3);
+                        break;
+                    case 0x55: /* fcmpes, V9 %fcc */
+                        cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+                        cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+                        gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
+                        break;
+                    case 0x56: /* fcmped, V9 %fcc */
+                        cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                        cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                        gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
+                        break;
+                    case 0x57: /* fcmpeq, V9 %fcc */
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        gen_op_load_fpr_QT0(QFPREG(rs1));
+                        gen_op_load_fpr_QT1(QFPREG(rs2));
+                        gen_op_fcmpeq(rd & 3);
+                        break;
+                    default:
+                        goto illegal_insn;
+                }
+            } else if (xop == 0x2) { /* or, with clr/mov shortcuts */
+                TCGv dst = gen_dest_gpr(dc, rd);
+                rs1 = GET_FIELD(insn, 13, 17);
+                if (rs1 == 0) {
+                    /* clr/mov shortcut: or %g0, x, y -> mov x, y */
+                    if (IS_IMM) {       /* immediate */
+                        simm = GET_FIELDs(insn, 19, 31);
+                        tcg_gen_movi_tl(dst, simm);
+                        gen_store_gpr(dc, rd, dst);
+                    } else {            /* register */
+                        rs2 = GET_FIELD(insn, 27, 31);
+                        if (rs2 == 0) {
+                            tcg_gen_movi_tl(dst, 0);
+                            gen_store_gpr(dc, rd, dst);
+                        } else {
+                            cpu_src2 = gen_load_gpr(dc, rs2);
+                            gen_store_gpr(dc, rd, cpu_src2);
+                        }
+                    }
+                } else {
+                    cpu_src1 = get_src1(dc, insn);
+                    if (IS_IMM) {       /* immediate */
+                        simm = GET_FIELDs(insn, 19, 31);
+                        tcg_gen_ori_tl(dst, cpu_src1, simm);
+                        gen_store_gpr(dc, rd, dst);
+                    } else {            /* register */
+                        rs2 = GET_FIELD(insn, 27, 31);
+                        if (rs2 == 0) {
+                            /* mov shortcut:  or x, %g0, y -> mov x, y */
+                            gen_store_gpr(dc, rd, cpu_src1);
+                        } else {
+                            cpu_src2 = gen_load_gpr(dc, rs2);
+                            tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
+                            gen_store_gpr(dc, rd, dst);
+                        }
+                    }
+                }
+#ifdef TARGET_SPARC64
+            } else if (xop == 0x25) { /* sll, V9 sllx */
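+                /* Here and in the srl/sra cases below, bit 12 selects
+                   the 64-bit x form: the shift count is masked to
+                   6 bits rather than 5. */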
+                cpu_src1 = get_src1(dc, insn);
+                if (IS_IMM) {   /* immediate */
+                    simm = GET_FIELDs(insn, 20, 31);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
+                    } else {
+                        tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
+                    }
+                } else {                /* register */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    cpu_tmp0 = get_temp_tl(dc);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+                    } else {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+                    }
+                    tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
+                }
+                gen_store_gpr(dc, rd, cpu_dst);
+            } else if (xop == 0x26) { /* srl, V9 srlx */
+                cpu_src1 = get_src1(dc, insn);
+                if (IS_IMM) {   /* immediate */
+                    simm = GET_FIELDs(insn, 20, 31);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
+                    } else {
+                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+                        tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
+                    }
+                } else {                /* register */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    cpu_tmp0 = get_temp_tl(dc);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+                        tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
+                    } else {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+                        tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+                        tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
+                    }
+                }
+                gen_store_gpr(dc, rd, cpu_dst);
+            } else if (xop == 0x27) { /* sra, V9 srax */
+                cpu_src1 = get_src1(dc, insn);
+                if (IS_IMM) {   /* immediate */
+                    simm = GET_FIELDs(insn, 20, 31);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
+                    } else {
+                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+                        tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
+                    }
+                } else {                /* register */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    cpu_tmp0 = get_temp_tl(dc);
+                    if (insn & (1 << 12)) {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+                        tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
+                    } else {
+                        tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+                        tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+                        tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
+                    }
+                }
+                gen_store_gpr(dc, rd, cpu_dst);
+#endif
+            } else if (xop < 0x36) {
+                if (xop < 0x20) {
+                    cpu_src1 = get_src1(dc, insn);
+                    cpu_src2 = get_src2(dc, insn);
+                    switch (xop & ~0x10) {
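+                    /* Bit 4 of xop selects the cc-setting variant
+                       (addcc, andcc, ...), which also updates the
+                       integer condition codes. */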
+                    case 0x0: /* add */
+                        if (xop & 0x10) {
+                            gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+                            dc->cc_op = CC_OP_ADD;
+                        } else {
+                            tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+                        }
+                        break;
+                    case 0x1: /* and */
+                        tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x2: /* or */
+                        tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x3: /* xor */
+                        tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x4: /* sub */
+                        if (xop & 0x10) {
+                            gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+                            dc->cc_op = CC_OP_SUB;
+                        } else {
+                            tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
+                        }
+                        break;
+                    case 0x5: /* andn */
+                        tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x6: /* orn */
+                        tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x7: /* xorn */
+                        tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0x8: /* addx, V9 addc */
+                        gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+                                        (xop & 0x10));
+                        break;
+#ifdef TARGET_SPARC64
+                    case 0x9: /* V9 mulx */
+                        tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
+                        break;
+#endif
+                    case 0xa: /* umul */
+                        CHECK_IU_FEATURE(dc, MUL);
+                        gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0xb: /* smul */
+                        CHECK_IU_FEATURE(dc, MUL);
+                        gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
+                        if (xop & 0x10) {
+                            tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+                            dc->cc_op = CC_OP_LOGIC;
+                        }
+                        break;
+                    case 0xc: /* subx, V9 subc */
+                        gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+                                        (xop & 0x10));
+                        break;
+#ifdef TARGET_SPARC64
+                    case 0xd: /* V9 udivx */
+                        gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+                        break;
+#endif
+                    case 0xe: /* udiv */
+                        CHECK_IU_FEATURE(dc, DIV);
+                        if (xop & 0x10) {
+                            gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
+                                               cpu_src2);
+                            dc->cc_op = CC_OP_DIV;
+                        } else {
+                            gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
+                                            cpu_src2);
+                        }
+                        break;
+                    case 0xf: /* sdiv */
+                        CHECK_IU_FEATURE(dc, DIV);
+                        if (xop & 0x10) {
+                            gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
+                                               cpu_src2);
+                            dc->cc_op = CC_OP_DIV;
+                        } else {
+                            gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
+                                            cpu_src2);
+                        }
+                        break;
+                    default:
+                        goto illegal_insn;
+                    }
+                    gen_store_gpr(dc, rd, cpu_dst);
+                } else {
+                    cpu_src1 = get_src1(dc, insn);
+                    cpu_src2 = get_src2(dc, insn);
+                    switch (xop) {
+                    case 0x20: /* taddcc */
+                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
+                        dc->cc_op = CC_OP_TADD;
+                        break;
+                    case 0x21: /* tsubcc */
+                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
+                        dc->cc_op = CC_OP_TSUB;
+                        break;
+                    case 0x22: /* taddcctv */
+                        gen_helper_taddcctv(cpu_dst, cpu_env,
+                                            cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        dc->cc_op = CC_OP_TADDTV;
+                        break;
+                    case 0x23: /* tsubcctv */
+                        gen_helper_tsubcctv(cpu_dst, cpu_env,
+                                            cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        dc->cc_op = CC_OP_TSUBTV;
+                        break;
+                    case 0x24: /* mulscc */
+                        update_psr(dc);
+                        gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+                        dc->cc_op = CC_OP_ADD;
+                        break;
+#ifndef TARGET_SPARC64
+                    case 0x25:  /* sll */
+                        if (IS_IMM) { /* immediate */
+                            simm = GET_FIELDs(insn, 20, 31);
+                            tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
+                        } else { /* register */
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+                            tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
+                        }
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        break;
+                    case 0x26:  /* srl */
+                        if (IS_IMM) { /* immediate */
+                            simm = GET_FIELDs(insn, 20, 31);
+                            tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
+                        } else { /* register */
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+                            tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
+                        }
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        break;
+                    case 0x27:  /* sra */
+                        if (IS_IMM) { /* immediate */
+                            simm = GET_FIELDs(insn, 20, 31);
+                            tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
+                        } else { /* register */
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+                            tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
+                        }
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        break;
+#endif
+                    case 0x30:
+                        {
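+                            /* wry / V9 wrasr: rd picks the ancillary
+                               state register; the value written is
+                               rs1 XOR rs2 (or rs1 XOR simm13). */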
+                            cpu_tmp0 = get_temp_tl(dc);
+                            switch (rd) {
+                            case 0: /* wry */
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+                                break;
+#ifndef TARGET_SPARC64
+                            case 0x01 ... 0x0f: /* undefined in the SPARCv8
+                                                   manual, nop on the
+                                                   microSPARC II */
+                            case 0x10 ... 0x1f: /* implementation-dependent in
+                                                   the SPARCv8 manual, nop on
+                                                   the microSPARC II */
+                                if ((rd == 0x13) && (dc->def->features &
+                                                     CPU_FEATURE_POWERDOWN)) {
+                                    /* LEON3 power-down */
+                                    save_state(dc);
+                                    gen_helper_power_down(cpu_env);
+                                }
+                                break;
+#else
+                            case 0x2: /* V9 wrccr */
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                gen_helper_wrccr(cpu_env, cpu_tmp0);
+                                tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+                                dc->cc_op = CC_OP_FLAGS;
+                                break;
+                            case 0x3: /* V9 wrasi */
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState, asi));
+                                /* End TB to notice changed ASI.  */
+                                save_state(dc);
+                                gen_op_next_insn();
+                                tcg_gen_exit_tb(0);
+                                dc->is_br = 1;
+                                break;
+                            case 0x6: /* V9 wrfprs */
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
+                                dc->fprs_dirty = 0;
+                                save_state(dc);
+                                gen_op_next_insn();
+                                tcg_gen_exit_tb(0);
+                                dc->is_br = 1;
+                                break;
+                            case 0xf: /* V9 sir, nop if user */
+#if !defined(CONFIG_USER_ONLY)
+                                if (supervisor(dc)) {
+                                    ; /* XXX: software-initiated reset
+                                         not implemented */
+                                }
+#endif
+                                break;
+                            case 0x13: /* Graphics Status */
+                                if (gen_trap_ifnofpu(dc)) {
+                                    goto jmp_insn;
+                                }
+                                tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
+                                break;
+                            case 0x14: /* Softint set */
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                gen_helper_set_softint(cpu_env, cpu_tmp0);
+                                break;
+                            case 0x15: /* Softint clear */
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                gen_helper_clear_softint(cpu_env, cpu_tmp0);
+                                break;
+                            case 0x16: /* Softint write */
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+                                tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                                gen_helper_write_softint(cpu_env, cpu_tmp0);
+                                break;
+                            case 0x17: /* Tick compare */
+#if !defined(CONFIG_USER_ONLY)
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+#endif
+                                {
+                                    TCGv_ptr r_tickptr;
+
+                                    tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
+                                                   cpu_src2);
+                                    r_tickptr = tcg_temp_new_ptr();
+                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                                   offsetof(CPUSPARCState, tick));
+                                    gen_helper_tick_set_limit(r_tickptr,
+                                                              cpu_tick_cmpr);
+                                    tcg_temp_free_ptr(r_tickptr);
+                                }
+                                break;
+                            case 0x18: /* System tick */
+#if !defined(CONFIG_USER_ONLY)
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+#endif
+                                {
+                                    TCGv_ptr r_tickptr;
+
+                                    tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
+                                                   cpu_src2);
+                                    r_tickptr = tcg_temp_new_ptr();
+                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                                   offsetof(CPUSPARCState, stick));
+                                    gen_helper_tick_set_count(r_tickptr,
+                                                              cpu_tmp0);
+                                    tcg_temp_free_ptr(r_tickptr);
+                                }
+                                break;
+                            case 0x19: /* System tick compare */
+#if !defined(CONFIG_USER_ONLY)
+                                if (!supervisor(dc)) {
+                                    goto illegal_insn;
+                                }
+#endif
+                                {
+                                    TCGv_ptr r_tickptr;
+
+                                    tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
+                                                   cpu_src2);
+                                    r_tickptr = tcg_temp_new_ptr();
+                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                                   offsetof(CPUSPARCState, stick));
+                                    gen_helper_tick_set_limit(r_tickptr,
+                                                              cpu_stick_cmpr);
+                                    tcg_temp_free_ptr(r_tickptr);
+                                }
+                                break;
+                            case 0x10: /* Performance Control */
+                            case 0x11: /* Performance Instrumentation Counter */
+                            case 0x12: /* Dispatch Control */
+#endif
+                            default:
+                                goto illegal_insn;
+                            }
+                        }
+                        break;
+#if !defined(CONFIG_USER_ONLY)
+                    case 0x31: /* wrpsr, V9 saved, restored */
+                        {
+                            if (!supervisor(dc)) {
+                                goto priv_insn;
+                            }
+#ifdef TARGET_SPARC64
+                            switch (rd) {
+                            case 0:
+                                gen_helper_saved(cpu_env);
+                                break;
+                            case 1:
+                                gen_helper_restored(cpu_env);
+                                break;
+                            case 2: /* UA2005 allclean */
+                            case 3: /* UA2005 otherw */
+                            case 4: /* UA2005 normalw */
+                            case 5: /* UA2005 invalw */
+                                /* XXX: not implemented; fall through */
+                            default:
+                                goto illegal_insn;
+                            }
+#else
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                            gen_helper_wrpsr(cpu_env, cpu_tmp0);
+                            tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+                            dc->cc_op = CC_OP_FLAGS;
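+                            /* A PSR write can change privilege and
+                               window state, so end the TB and restart
+                               at the next instruction. */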
+                            save_state(dc);
+                            gen_op_next_insn();
+                            tcg_gen_exit_tb(0);
+                            dc->is_br = 1;
+#endif
+                        }
+                        break;
+                    case 0x32: /* wrwim, V9 wrpr */
+                        {
+                            if (!supervisor(dc)) {
+                                goto priv_insn;
+                            }
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+#ifdef TARGET_SPARC64
+                            switch (rd) {
+                            case 0: /* tpc */
+                                {
+                                    TCGv_ptr r_tsptr;
+
+                                    r_tsptr = tcg_temp_new_ptr();
+                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+                                                  offsetof(trap_state, tpc));
+                                    tcg_temp_free_ptr(r_tsptr);
+                                }
+                                break;
+                            case 1: /* tnpc */
+                                {
+                                    TCGv_ptr r_tsptr;
+
+                                    r_tsptr = tcg_temp_new_ptr();
+                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+                                                  offsetof(trap_state, tnpc));
+                                    tcg_temp_free_ptr(r_tsptr);
+                                }
+                                break;
+                            case 2: /* tstate */
+                                {
+                                    TCGv_ptr r_tsptr;
+
+                                    r_tsptr = tcg_temp_new_ptr();
+                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                                    tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+                                                  offsetof(trap_state,
+                                                           tstate));
+                                    tcg_temp_free_ptr(r_tsptr);
+                                }
+                                break;
+                            case 3: /* tt */
+                                {
+                                    TCGv_ptr r_tsptr;
+
+                                    r_tsptr = tcg_temp_new_ptr();
+                                    gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+                                    tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
+                                                    offsetof(trap_state, tt));
+                                    tcg_temp_free_ptr(r_tsptr);
+                                }
+                                break;
+                            case 4: /* tick */
+                                {
+                                    TCGv_ptr r_tickptr;
+
+                                    r_tickptr = tcg_temp_new_ptr();
+                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                                   offsetof(CPUSPARCState, tick));
+                                    gen_helper_tick_set_count(r_tickptr,
+                                                              cpu_tmp0);
+                                    tcg_temp_free_ptr(r_tickptr);
+                                }
+                                break;
+                            case 5: /* tba */
+                                tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
+                                break;
+                            case 6: /* pstate */
+                                save_state(dc);
+                                gen_helper_wrpstate(cpu_env, cpu_tmp0);
+                                dc->npc = DYNAMIC_PC;
+                                break;
+                            case 7: /* tl */
+                                save_state(dc);
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState, tl));
+                                dc->npc = DYNAMIC_PC;
+                                break;
+                            case 8: /* pil */
+                                gen_helper_wrpil(cpu_env, cpu_tmp0);
+                                break;
+                            case 9: /* cwp */
+                                gen_helper_wrcwp(cpu_env, cpu_tmp0);
+                                break;
+                            case 10: /* cansave */
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState,
+                                                         cansave));
+                                break;
+                            case 11: /* canrestore */
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState,
+                                                         canrestore));
+                                break;
+                            case 12: /* cleanwin */
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState,
+                                                         cleanwin));
+                                break;
+                            case 13: /* otherwin */
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState,
+                                                         otherwin));
+                                break;
+                            case 14: /* wstate */
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState,
+                                                         wstate));
+                                break;
+                            case 16: /* UA2005 gl */
+                                CHECK_IU_FEATURE(dc, GL);
+                                tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+                                                offsetof(CPUSPARCState, gl));
+                                break;
+                            case 26: /* UA2005 strand status */
+                                CHECK_IU_FEATURE(dc, HYPV);
+                                if (!hypervisor(dc)) {
+                                    goto priv_insn;
+                                }
+                                tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
+                                break;
+                            default:
+                                goto illegal_insn;
+                            }
+#else
+                            tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
+                            if (dc->def->nwindows != 32) {
+                                tcg_gen_andi_tl(cpu_wim, cpu_wim,
+                                                (1 << dc->def->nwindows) - 1);
+                            }
+#endif
+                        }
+                        break;
+                    case 0x33: /* wrtbr, UA2005 wrhpr */
+                        {
+#ifndef TARGET_SPARC64
+                            if (!supervisor(dc)) {
+                                goto priv_insn;
+                            }
+                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
+#else
+                            CHECK_IU_FEATURE(dc, HYPV);
+                            if (!hypervisor(dc)) {
+                                goto priv_insn;
+                            }
+                            cpu_tmp0 = get_temp_tl(dc);
+                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                            switch (rd) {
+                            case 0: /* hpstate */
+                                /* XXX gen_op_wrhpstate(); */
+                                save_state(dc);
+                                gen_op_next_insn();
+                                tcg_gen_exit_tb(0);
+                                dc->is_br = 1;
+                                break;
+                            case 1: /* htstate */
+                                /* XXX gen_op_wrhtstate(); */
+                                break;
+                            case 3: /* hintp */
+                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
+                                break;
+                            case 5: /* htba */
+                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
+                                break;
+                            case 31: /* hstick_cmpr */
+                                {
+                                    TCGv_ptr r_tickptr;
+
+                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
+                                    r_tickptr = tcg_temp_new_ptr();
+                                    tcg_gen_ld_ptr(r_tickptr, cpu_env,
+                                                   offsetof(CPUSPARCState, hstick));
+                                    gen_helper_tick_set_limit(r_tickptr,
+                                                              cpu_hstick_cmpr);
+                                    tcg_temp_free_ptr(r_tickptr);
+                                }
+                                break;
+                            case 6: /* hver is read-only */
+                            default:
+                                goto illegal_insn;
+                            }
+#endif
+                        }
+                        break;
+#endif
+#ifdef TARGET_SPARC64
+                    case 0x2c: /* V9 movcc */
+                        {
+                            int cc = GET_FIELD_SP(insn, 11, 12);
+                            int cond = GET_FIELD_SP(insn, 14, 17);
+                            DisasCompare cmp;
+                            TCGv dst;
+
+                            if (insn & (1 << 18)) {
+                                if (cc == 0) {
+                                    gen_compare(&cmp, 0, cond, dc);
+                                } else if (cc == 2) {
+                                    gen_compare(&cmp, 1, cond, dc);
+                                } else {
+                                    goto illegal_insn;
+                                }
+                            } else {
+                                gen_fcompare(&cmp, cc, cond);
+                            }
+
+                            /* The get_src2 above loaded the normal 13-bit
+                               immediate field, not the 11-bit field we have
+                               in movcc.  But it did handle the reg case.  */
+                            if (IS_IMM) {
+                                simm = GET_FIELD_SPs(insn, 0, 10);
+                                tcg_gen_movi_tl(cpu_src2, simm);
+                            }
+
+                            dst = gen_load_gpr(dc, rd);
+                            tcg_gen_movcond_tl(cmp.cond, dst,
+                                               cmp.c1, cmp.c2,
+                                               cpu_src2, dst);
+                            free_compare(&cmp);
+                            gen_store_gpr(dc, rd, dst);
+                            break;
+                        }
+                    case 0x2d: /* V9 sdivx */
+                        gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        break;
+                    case 0x2e: /* V9 popc */
+                        gen_helper_popc(cpu_dst, cpu_src2);
+                        gen_store_gpr(dc, rd, cpu_dst);
+                        break;
+                    case 0x2f: /* V9 movr */
+                        {
+                            int cond = GET_FIELD_SP(insn, 10, 12);
+                            DisasCompare cmp;
+                            TCGv dst;
+
+                            gen_compare_reg(&cmp, cond, cpu_src1);
+
+                            /* The get_src2 above loaded the normal 13-bit
+                               immediate field, not the 10-bit field we have
+                               in movr.  But it did handle the reg case.  */
+                            if (IS_IMM) {
+                                simm = GET_FIELD_SPs(insn, 0, 9);
+                                tcg_gen_movi_tl(cpu_src2, simm);
+                            }
+
+                            dst = gen_load_gpr(dc, rd);
+                            tcg_gen_movcond_tl(cmp.cond, dst,
+                                               cmp.c1, cmp.c2,
+                                               cpu_src2, dst);
+                            free_compare(&cmp);
+                            gen_store_gpr(dc, rd, dst);
+                            break;
+                        }
+#endif
+                    default:
+                        goto illegal_insn;
+                    }
+                }
+            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
+#ifdef TARGET_SPARC64
+                int opf = GET_FIELD_SP(insn, 5, 13);
+                rs1 = GET_FIELD(insn, 13, 17);
+                rs2 = GET_FIELD(insn, 27, 31);
+                if (gen_trap_ifnofpu(dc)) {
+                    goto jmp_insn;
+                }
+
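+                /* The edge insns below all funnel into gen_edge(); the
+                   trailing arguments give the element width in bits, whether
+                   the condition codes are set (the "cc" forms), and whether
+                   the little-endian ("l") mask is produced.  */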
+                switch (opf) {
+                case 0x000: /* VIS I edge8cc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x001: /* VIS II edge8n */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x002: /* VIS I edge8lcc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x003: /* VIS II edge8ln */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x004: /* VIS I edge16cc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x005: /* VIS II edge16n */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x006: /* VIS I edge16lcc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x007: /* VIS II edge16ln */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x008: /* VIS I edge32cc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x009: /* VIS II edge32n */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x00a: /* VIS I edge32lcc */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x00b: /* VIS II edge32ln */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
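+                /* The array insns share one helper: array16 and array32 are
+                   array8 with the resulting index scaled by the element
+                   size.  */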
+                case 0x010: /* VIS I array8 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x012: /* VIS I array16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x014: /* VIS I array32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
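+                /* alignaddr(l) stores the 8-byte-aligned sum in rd and
+                   latches the low address bits in GSR.align for a later
+                   faligndata.  */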
+                case 0x018: /* VIS I alignaddr */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x01a: /* VIS I alignaddrl */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
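+                /* bmask returns the sum in rd and deposits it into
+                   GSR.mask (bits 63:32) for a later bshuffle.  */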
+                case 0x019: /* VIS II bmask */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    cpu_src1 = gen_load_gpr(dc, rs1);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
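+                /* The partitioned compares deliver one result bit per
+                   element in an integer register instead of setting
+                   condition codes.  */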
+                case 0x020: /* VIS I fcmple16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x022: /* VIS I fcmpne16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x024: /* VIS I fcmple32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x026: /* VIS I fcmpne32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x028: /* VIS I fcmpgt16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x02a: /* VIS I fcmpeq16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x02c: /* VIS I fcmpgt32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x02e: /* VIS I fcmpeq32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
+                    gen_store_gpr(dc, rd, cpu_dst);
+                    break;
+                case 0x031: /* VIS I fmul8x16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
+                    break;
+                case 0x033: /* VIS I fmul8x16au */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
+                    break;
+                case 0x035: /* VIS I fmul8x16al */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
+                    break;
+                case 0x036: /* VIS I fmul8sux16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
+                    break;
+                case 0x037: /* VIS I fmul8ulx16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
+                    break;
+                case 0x038: /* VIS I fmuld8sux16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
+                    break;
+                case 0x039: /* VIS I fmuld8ulx16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
+                    break;
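+                /* The pack and align ops take the GSR as an extra operand:
+                   fpack* read GSR.scale, while faligndata and bshuffle read
+                   the fields latched by alignaddr and bmask above.  */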
+                case 0x03a: /* VIS I fpack32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
+                    break;
+                case 0x03b: /* VIS I fpack16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+                    cpu_dst_32 = gen_dest_fpr_F(dc);
+                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
+                    gen_store_fpr_F(dc, rd, cpu_dst_32);
+                    break;
+                case 0x03d: /* VIS I fpackfix */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+                    cpu_dst_32 = gen_dest_fpr_F(dc);
+                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
+                    gen_store_fpr_F(dc, rd, cpu_dst_32);
+                    break;
+                case 0x03e: /* VIS I pdist */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
+                    break;
+                case 0x048: /* VIS I faligndata */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
+                    break;
+                case 0x04b: /* VIS I fpmerge */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
+                    break;
+                case 0x04c: /* VIS II bshuffle */
+                    CHECK_FPU_FEATURE(dc, VIS2);
+                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
+                    break;
+                case 0x04d: /* VIS I fexpand */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
+                    break;
+                case 0x050: /* VIS I fpadd16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
+                    break;
+                case 0x051: /* VIS I fpadd16s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
+                    break;
+                case 0x052: /* VIS I fpadd32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
+                    break;
+                case 0x053: /* VIS I fpadd32s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
+                    break;
+                case 0x054: /* VIS I fpsub16 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
+                    break;
+                case 0x055: /* VIS I fpsub16s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
+                    break;
+                case 0x056: /* VIS I fpsub32 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
+                    break;
+                case 0x057: /* VIS I fpsub32s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
+                    break;
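+                /* From here on the VIS logical ops map directly onto TCG
+                   ops; the "s" forms are the 32-bit single variants.  */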
+                case 0x060: /* VIS I fzero */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+                    tcg_gen_movi_i64(cpu_dst_64, 0);
+                    gen_store_fpr_D(dc, rd, cpu_dst_64);
+                    break;
+                case 0x061: /* VIS I fzeros */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_dst_32 = gen_dest_fpr_F(dc);
+                    tcg_gen_movi_i32(cpu_dst_32, 0);
+                    gen_store_fpr_F(dc, rd, cpu_dst_32);
+                    break;
+                case 0x062: /* VIS I fnor */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
+                    break;
+                case 0x063: /* VIS I fnors */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
+                    break;
+                case 0x064: /* VIS I fandnot2 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
+                    break;
+                case 0x065: /* VIS I fandnot2s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
+                    break;
+                case 0x066: /* VIS I fnot2 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
+                    break;
+                case 0x067: /* VIS I fnot2s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
+                    break;
+                case 0x068: /* VIS I fandnot1 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
+                    break;
+                case 0x069: /* VIS I fandnot1s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
+                    break;
+                case 0x06a: /* VIS I fnot1 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
+                    break;
+                case 0x06b: /* VIS I fnot1s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
+                    break;
+                case 0x06c: /* VIS I fxor */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
+                    break;
+                case 0x06d: /* VIS I fxors */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
+                    break;
+                case 0x06e: /* VIS I fnand */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
+                    break;
+                case 0x06f: /* VIS I fnands */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
+                    break;
+                case 0x070: /* VIS I fand */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
+                    break;
+                case 0x071: /* VIS I fands */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
+                    break;
+                case 0x072: /* VIS I fxnor */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
+                    break;
+                case 0x073: /* VIS I fxnors */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
+                    break;
+                case 0x074: /* VIS I fsrc1 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+                    gen_store_fpr_D(dc, rd, cpu_src1_64);
+                    break;
+                case 0x075: /* VIS I fsrc1s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+                    gen_store_fpr_F(dc, rd, cpu_src1_32);
+                    break;
+                case 0x076: /* VIS I fornot2 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
+                    break;
+                case 0x077: /* VIS I fornot2s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
+                    break;
+                case 0x078: /* VIS I fsrc2 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+                    gen_store_fpr_D(dc, rd, cpu_src1_64);
+                    break;
+                case 0x079: /* VIS I fsrc2s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+                    gen_store_fpr_F(dc, rd, cpu_src1_32);
+                    break;
+                case 0x07a: /* VIS I fornot1 */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
+                    break;
+                case 0x07b: /* VIS I fornot1s */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
+                    break;
+                case 0x07c: /* VIS I for */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
+                    break;
+                case 0x07d: /* VIS I fors */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
+                    break;
+                case 0x07e: /* VIS I fone */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+                    tcg_gen_movi_i64(cpu_dst_64, -1);
+                    gen_store_fpr_D(dc, rd, cpu_dst_64);
+                    break;
+                case 0x07f: /* VIS I fones */
+                    CHECK_FPU_FEATURE(dc, VIS1);
+                    cpu_dst_32 = gen_dest_fpr_F(dc);
+                    tcg_gen_movi_i32(cpu_dst_32, -1);
+                    gen_store_fpr_F(dc, rd, cpu_dst_32);
+                    break;
+                case 0x080: /* VIS I shutdown */
+                case 0x081: /* VIS II siam */
+                    /* XXX: not implemented */
+                    goto illegal_insn;
+                default:
+                    goto illegal_insn;
+                }
+#else
+                goto ncp_insn;
+#endif
+            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
+#ifdef TARGET_SPARC64
+                goto illegal_insn;
+#else
+                goto ncp_insn;
+#endif
+#ifdef TARGET_SPARC64
+            } else if (xop == 0x39) { /* V9 return */
+                save_state(dc);
+                cpu_src1 = get_src1(dc, insn);
+                cpu_tmp0 = get_temp_tl(dc);
+                if (IS_IMM) {   /* immediate */
+                    simm = GET_FIELDs(insn, 19, 31);
+                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+                } else {                /* register */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    if (rs2) {
+                        cpu_src2 = gen_load_gpr(dc, rs2);
+                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                    } else {
+                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+                    }
+                }
+                gen_helper_restore(cpu_env);
+                gen_mov_pc_npc(dc);
+                gen_check_align(cpu_tmp0, 3);
+                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+                dc->npc = DYNAMIC_PC;
+                goto jmp_insn;
+#endif
+            } else {
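+                /* The remaining xop values share the rs1 + rs2/simm
+                   effective-address computation below.  */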
+                cpu_src1 = get_src1(dc, insn);
+                cpu_tmp0 = get_temp_tl(dc);
+                if (IS_IMM) {   /* immediate */
+                    simm = GET_FIELDs(insn, 19, 31);
+                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+                } else {                /* register */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    if (rs2) {
+                        cpu_src2 = gen_load_gpr(dc, rs2);
+                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+                    } else {
+                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+                    }
+                }
+                switch (xop) {
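+                /* jmpl writes the address of the jmpl itself into rd before
+                   the transfer; rd == %o7 is the register form of call.  */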
+                case 0x38:      /* jmpl */
+                    {
+                        TCGv t = gen_dest_gpr(dc, rd);
+                        tcg_gen_movi_tl(t, dc->pc);
+                        gen_store_gpr(dc, rd, t);
+
+                        gen_mov_pc_npc(dc);
+                        gen_check_align(cpu_tmp0, 3);
+                        gen_address_mask(dc, cpu_tmp0);
+                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+                        dc->npc = DYNAMIC_PC;
+                    }
+                    goto jmp_insn;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+                case 0x39:      /* rett, V9 return */
+                    {
+                        if (!supervisor(dc)) {
+                            goto priv_insn;
+                        }
+                        gen_mov_pc_npc(dc);
+                        gen_check_align(cpu_tmp0, 3);
+                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+                        dc->npc = DYNAMIC_PC;
+                        gen_helper_rett(cpu_env);
+                    }
+                    goto jmp_insn;
+#endif
+                case 0x3b: /* flush */
+                    if (!(dc->def->features & CPU_FEATURE_FLUSH)) {
+                        goto unimp_flush;
+                    }
+                    /* nop */
+                    break;
+                case 0x3c:      /* save */
+                    gen_helper_save(cpu_env);
+                    gen_store_gpr(dc, rd, cpu_tmp0);
+                    break;
+                case 0x3d:      /* restore */
+                    gen_helper_restore(cpu_env);
+                    gen_store_gpr(dc, rd, cpu_tmp0);
+                    break;
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+                case 0x3e:      /* V9 done/retry */
+                    {
+                        switch (rd) {
+                        case 0:
+                            if (!supervisor(dc)) {
+                                goto priv_insn;
+                            }
+                            dc->npc = DYNAMIC_PC;
+                            dc->pc = DYNAMIC_PC;
+                            gen_helper_done(cpu_env);
+                            goto jmp_insn;
+                        case 1:
+                            if (!supervisor(dc)) {
+                                goto priv_insn;
+                            }
+                            dc->npc = DYNAMIC_PC;
+                            dc->pc = DYNAMIC_PC;
+                            gen_helper_retry(cpu_env);
+                            goto jmp_insn;
+                        default:
+                            goto illegal_insn;
+                        }
+                    }
+                    break;
+#endif
+                default:
+                    goto illegal_insn;
+                }
+            }
+            break;
+        }
+        break;
+    case 3:                     /* load/store instructions */
+        {
+            unsigned int xop = GET_FIELD(insn, 7, 12);
+            /* ??? gen_address_mask prevents us from using a source
+               register directly.  Always generate a temporary.  */
+            TCGv cpu_addr = get_temp_tl(dc);
+
+            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
+            if (xop == 0x3c || xop == 0x3e) {
+                /* V9 casa/casxa : no offset */
+            } else if (IS_IMM) {     /* immediate */
+                simm = GET_FIELDs(insn, 19, 31);
+                if (simm != 0) {
+                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
+                }
+            } else {            /* register */
+                rs2 = GET_FIELD(insn, 27, 31);
+                if (rs2 != 0) {
+                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
+                }
+            }
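+            /* This opaque test selects the load-flavoured xop values:
+               integer loads, ldstub/swap and their alternate-space forms;
+               the FP-ASI loads and prefetches in the group exit through
+               skip_move instead of writing an integer rd.  */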
+            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
+                (xop > 0x17 && xop <= 0x1d ) ||
+                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
+                TCGv cpu_val = gen_dest_gpr(dc, rd);
+
+                switch (xop) {
+                case 0x0:       /* ld, V9 lduw, load unsigned word */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x1:       /* ldub, load unsigned byte */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x2:       /* lduh, load unsigned halfword */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x3:       /* ldd, load double word */
+                    if (rd & 1) {
+                        goto illegal_insn;
+                    } else {
+                        TCGv_i64 t64;
+
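+                        /* One 64-bit load, then split: the low word goes
+                           to the odd register rd + 1, the high word to the
+                           even register rd.  */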
+                        gen_address_mask(dc, cpu_addr);
+                        t64 = tcg_temp_new_i64();
+                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+                        tcg_gen_trunc_i64_tl(cpu_val, t64);
+                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
+                        gen_store_gpr(dc, rd + 1, cpu_val);
+                        tcg_gen_shri_i64(t64, t64, 32);
+                        tcg_gen_trunc_i64_tl(cpu_val, t64);
+                        tcg_temp_free_i64(t64);
+                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
+                    }
+                    break;
+                case 0x9:       /* ldsb, load signed byte */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0xa:       /* ldsh, load signed halfword */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0xd:       /* ldstub */
+                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x0f:
+                    /* swap: atomically exchange a register with memory */
+                    CHECK_IU_FEATURE(dc, SWAP);
+                    cpu_src1 = gen_load_gpr(dc, rd);
+                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
+                             dc->mem_idx, MO_TEUL);
+                    break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+                case 0x10:      /* lda, V9 lduwa, load word alternate */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+                    break;
+                case 0x11:      /* lduba, load unsigned byte alternate */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+                    break;
+                case 0x12:      /* lduha, load unsigned halfword alternate */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+                    break;
+                case 0x13:      /* ldda, load double word alternate */
+                    if (rd & 1) {
+                        goto illegal_insn;
+                    }
+                    gen_ldda_asi(dc, cpu_addr, insn, rd);
+                    goto skip_move;
+                case 0x19:      /* ldsba, load signed byte alternate */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
+                    break;
+                case 0x1a:      /* ldsha, load signed halfword alternate */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
+                    break;
+                case 0x1d:      /* ldstuba -- XXX: should be atomic */
+                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
+                    break;
+                case 0x1f:      /* swapa: atomically swap a register with
+                                   alternate-space memory */
+                    CHECK_IU_FEATURE(dc, SWAP);
+                    cpu_src1 = gen_load_gpr(dc, rd);
+                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
+                    break;
+
+#ifndef TARGET_SPARC64
+                case 0x30: /* ldc */
+                case 0x31: /* ldcsr */
+                case 0x33: /* lddc */
+                    goto ncp_insn;
+#endif
+#endif
+#ifdef TARGET_SPARC64
+                case 0x08: /* V9 ldsw */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x0b: /* V9 ldx */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x18: /* V9 ldswa */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
+                    break;
+                case 0x1b: /* V9 ldxa */
+                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+                    break;
+                case 0x2d: /* V9 prefetch, no effect */
+                    goto skip_move;
+                case 0x30: /* V9 ldfa */
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
+                    gen_update_fprs_dirty(dc, rd);
+                    goto skip_move;
+                case 0x33: /* V9 lddfa */
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+                    gen_update_fprs_dirty(dc, DFPREG(rd));
+                    goto skip_move;
+                case 0x3d: /* V9 prefetcha, no effect */
+                    goto skip_move;
+                case 0x32: /* V9 ldqfa */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+                    gen_update_fprs_dirty(dc, QFPREG(rd));
+                    goto skip_move;
+#endif
+                default:
+                    goto illegal_insn;
+                }
+                gen_store_gpr(dc, rd, cpu_val);
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+            skip_move: ;
+#endif
+            } else if (xop >= 0x20 && xop < 0x24) {
+                if (gen_trap_ifnofpu(dc)) {
+                    goto jmp_insn;
+                }
+                switch (xop) {
+                case 0x20:      /* ldf, load fpreg */
+                    gen_address_mask(dc, cpu_addr);
+                    cpu_dst_32 = gen_dest_fpr_F(dc);
+                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+                                        dc->mem_idx, MO_TEUL);
+                    gen_store_fpr_F(dc, rd, cpu_dst_32);
+                    break;
+                case 0x21:      /* ldfsr, V9 ldxfsr */
+#ifdef TARGET_SPARC64
+                    gen_address_mask(dc, cpu_addr);
+                    if (rd == 1) {
+                        TCGv_i64 t64 = tcg_temp_new_i64();
+                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
+                                            dc->mem_idx, MO_TEQ);
+                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
+                        tcg_temp_free_i64(t64);
+                        break;
+                    }
+#endif
+                    cpu_dst_32 = get_temp_i32(dc);
+                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+                                        dc->mem_idx, MO_TEUL);
+                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
+                    break;
+                case 0x22:      /* ldqf, load quad fpreg */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_address_mask(dc, cpu_addr);
+                    cpu_src1_64 = tcg_temp_new_i64();
+                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+                                        MO_TEQ | MO_ALIGN_4);
+                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+                    cpu_src2_64 = tcg_temp_new_i64();
+                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
+                                        MO_TEQ | MO_ALIGN_4);
+                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
+                    tcg_temp_free_i64(cpu_src1_64);
+                    tcg_temp_free_i64(cpu_src2_64);
+                    break;
+                case 0x23:      /* lddf, load double fpreg */
+                    gen_address_mask(dc, cpu_addr);
+                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
+                                        MO_TEQ | MO_ALIGN_4);
+                    gen_store_fpr_D(dc, rd, cpu_dst_64);
+                    break;
+                default:
+                    goto illegal_insn;
+                }
+            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
+                       xop == 0xe || xop == 0x1e) {
+                TCGv cpu_val = gen_load_gpr(dc, rd);
+
+                switch (xop) {
+                case 0x4: /* st, store word */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x5: /* stb, store byte */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x6: /* sth, store halfword */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x7: /* std, store double word */
+                    if (rd & 1) {
+                        goto illegal_insn;
+                    } else {
+                        TCGv_i64 t64;
+                        TCGv lo;
+
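+                        /* Reassemble the even/odd pair into one 64-bit
+                           store: rd supplies the high word, rd + 1 the
+                           low word.  */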
+                        gen_address_mask(dc, cpu_addr);
+                        lo = gen_load_gpr(dc, rd + 1);
+                        t64 = tcg_temp_new_i64();
+                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
+                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+                        tcg_temp_free_i64(t64);
+                    }
+                    break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+                case 0x14: /* sta, V9 stwa, store word alternate */
+                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+                    break;
+                case 0x15: /* stba, store byte alternate */
+                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+                    break;
+                case 0x16: /* stha, store halfword alternate */
+                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+                    break;
+                case 0x17: /* stda, store double word alternate */
+                    if (rd & 1) {
+                        goto illegal_insn;
+                    }
+                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
+                    break;
+#endif
+#ifdef TARGET_SPARC64
+                case 0x0e: /* V9 stx */
+                    gen_address_mask(dc, cpu_addr);
+                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+                    break;
+                case 0x1e: /* V9 stxa */
+                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+                    break;
+#endif
+                default:
+                    goto illegal_insn;
+                }
+            } else if (xop > 0x23 && xop < 0x28) {
+                if (gen_trap_ifnofpu(dc)) {
+                    goto jmp_insn;
+                }
+                switch (xop) {
+                case 0x24: /* stf, store fpreg */
+                    gen_address_mask(dc, cpu_addr);
+                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
+                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
+                                        dc->mem_idx, MO_TEUL);
+                    break;
+                case 0x25: /* stfsr, V9 stxfsr */
+                    {
+#ifdef TARGET_SPARC64
+                        gen_address_mask(dc, cpu_addr);
+                        if (rd == 1) {
+                            tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+                            break;
+                        }
+#endif
+                        tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+                    }
+                    break;
+                case 0x26:
+#ifdef TARGET_SPARC64
+                    /* V9 stqf, store quad fpreg */
+                    CHECK_FPU_FEATURE(dc, FLOAT128);
+                    gen_address_mask(dc, cpu_addr);
+                    /* ??? While stqf only requires 4-byte alignment, it is
+                       legal for the cpu to signal the unaligned exception.
+                       The OS trap handler is then required to fix it up.
+                       For qemu, this avoids having to probe the second page
+                       before performing the first write.  */
+                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
+                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
+                                        dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
+                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
+                                        dc->mem_idx, MO_TEQ);
+                    break;
+#else /* !TARGET_SPARC64 */
+                    /* stdfq, store floating point queue */
+#if defined(CONFIG_USER_ONLY)
+                    goto illegal_insn;
+#else
+                    if (!supervisor(dc)) {
+                        goto priv_insn;
+                    }
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    goto nfq_insn;
+#endif
+#endif
+                case 0x27: /* stdf, store double fpreg */
+                    gen_address_mask(dc, cpu_addr);
+                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
+                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+                                        MO_TEQ | MO_ALIGN_4);
+                    break;
+                default:
+                    goto illegal_insn;
+                }
+            } else if (xop > 0x33 && xop < 0x3f) {
+                switch (xop) {
+#ifdef TARGET_SPARC64
+                case 0x34: /* V9 stfa */
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
+                    break;
+                case 0x36: /* V9 stqfa */
+                    {
+                        CHECK_FPU_FEATURE(dc, FLOAT128);
+                        if (gen_trap_ifnofpu(dc)) {
+                            goto jmp_insn;
+                        }
+                        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+                    }
+                    break;
+                case 0x37: /* V9 stdfa */
+                    if (gen_trap_ifnofpu(dc)) {
+                        goto jmp_insn;
+                    }
+                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+                    break;
+                case 0x3e: /* V9 casxa */
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
+                    break;
+#else
+                case 0x34: /* stc */
+                case 0x35: /* stcsr */
+                case 0x36: /* stdcq */
+                case 0x37: /* stdc */
+                    goto ncp_insn;
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+                case 0x3c: /* V9 or LEON3 casa */
+#ifndef TARGET_SPARC64
+                    CHECK_IU_FEATURE(dc, CASA);
+#endif
+                    rs2 = GET_FIELD(insn, 27, 31);
+                    cpu_src2 = gen_load_gpr(dc, rs2);
+                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
+                    break;
+#endif
+                default:
+                    goto illegal_insn;
+                }
+            } else {
+                goto illegal_insn;
+            }
+        }
+        break;
+    }
+    /* default case for non-jump instructions */
+    if (dc->npc == DYNAMIC_PC) {
+        dc->pc = DYNAMIC_PC;
+        gen_op_next_insn();
+    } else if (dc->npc == JUMP_PC) {
+        /* we can do a static jump */
+        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+        dc->is_br = 1;
+    } else {
+        dc->pc = dc->npc;
+        dc->npc = dc->npc + 4;
+    }
+ jmp_insn:
+    goto egress;
+ illegal_insn:
+    gen_exception(dc, TT_ILL_INSN);
+    goto egress;
+ unimp_flush:
+    gen_exception(dc, TT_UNIMP_FLUSH);
+    goto egress;
+#if !defined(CONFIG_USER_ONLY)
+ priv_insn:
+    gen_exception(dc, TT_PRIV_INSN);
+    goto egress;
+#endif
+ nfpu_insn:
+    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
+    goto egress;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ nfq_insn:
+    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+    goto egress;
+#endif
+#ifndef TARGET_SPARC64
+ ncp_insn:
+    gen_exception(dc, TT_NCP_INSN);
+    goto egress;
+#endif
+ egress:
+    if (dc->n_t32 != 0) {
+        int i;
+        for (i = dc->n_t32 - 1; i >= 0; --i) {
+            tcg_temp_free_i32(dc->t32[i]);
+        }
+        dc->n_t32 = 0;
+    }
+    if (dc->n_ttl != 0) {
+        int i;
+        for (i = dc->n_ttl - 1; i >= 0; --i) {
+            tcg_temp_free(dc->ttl[i]);
+        }
+        dc->n_ttl = 0;
+    }
+}
+
+void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
+{
+    SPARCCPU *cpu = sparc_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    target_ulong pc_start, last_pc;
+    DisasContext dc1, *dc = &dc1;
+    int num_insns;
+    int max_insns;
+    unsigned int insn;
+
+    memset(dc, 0, sizeof(DisasContext));
+    dc->tb = tb;
+    pc_start = tb->pc;
+    dc->pc = pc_start;
+    last_pc = dc->pc;
+    dc->npc = (target_ulong) tb->cs_base;
+    dc->cc_op = CC_OP_DYNAMIC;
+    dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
+    dc->def = env->def;
+    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
+    dc->address_mask_32bit = tb_am_enabled(tb->flags);
+    dc->singlestep = (cs->singlestep_enabled || singlestep);
+#ifdef TARGET_SPARC64
+    dc->fprs_dirty = 0;
+    dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
+#endif
+
+    num_insns = 0;
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0) {
+        max_insns = CF_COUNT_MASK;
+    }
+    if (max_insns > TCG_MAX_INSNS) {
+        max_insns = TCG_MAX_INSNS;
+    }
+
+    gen_tb_start(tb);
+    do {
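+        /* A conditional branch with both targets known statically leaves
+           npc == JUMP_PC; record the taken target tagged with JUMP_PC so
+           that restore_state_to_opc() can reconstruct npc from the runtime
+           condition.  */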
+        if (dc->npc & JUMP_PC) {
+            assert(dc->jump_pc[1] == dc->pc + 4);
+            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
+        } else {
+            tcg_gen_insn_start(dc->pc, dc->npc);
+        }
+        num_insns++;
+        last_pc = dc->pc;
+
+        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+            if (dc->pc != pc_start) {
+                save_state(dc);
+            }
+            gen_helper_debug(cpu_env);
+            tcg_gen_exit_tb(0);
+            dc->is_br = 1;
+            goto exit_gen_loop;
+        }
+
+        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+            gen_io_start();
+        }
+
+        insn = cpu_ldl_code(env, dc->pc);
+
+        disas_sparc_insn(dc, insn);
+
+        if (dc->is_br) {
+            break;
+        }
+        /* if the next PC is not sequential, stop translation now */
+        if (dc->pc != (last_pc + 4)) {
+            break;
+        }
+        /* if we reach a page boundary, we stop generation so that the
+           PC of a TT_TFAULT exception is always in the right page */
+        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) {
+            break;
+        }
+        /* in single-step mode, translate only one instruction so that
+           the execution loop can raise the debug exception */
+        if (dc->singlestep) {
+            break;
+        }
+    } while (!tcg_op_buf_full() &&
+             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
+             num_insns < max_insns);
+
+ exit_gen_loop:
+    if (tb->cflags & CF_LAST_IO) {
+        gen_io_end();
+    }
+    if (!dc->is_br) {
+        if (dc->pc != DYNAMIC_PC &&
+            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
+            /* static PC and NPC: we can use direct chaining */
+            gen_goto_tb(dc, 0, dc->pc, dc->npc);
+        } else {
+            if (dc->pc != DYNAMIC_PC) {
+                tcg_gen_movi_tl(cpu_pc, dc->pc);
+            }
+            save_npc(dc);
+            tcg_gen_exit_tb(0);
+        }
+    }
+    gen_tb_end(tb, num_insns);
+
+    tb->size = last_pc + 4 - pc_start;
+    tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+        && qemu_log_in_addr_range(pc_start)) {
+        qemu_log_lock();
+        qemu_log("--------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
+        qemu_log("\n");
+        qemu_log_unlock();
+    }
+#endif
+}
+
+void gen_intermediate_code_init(CPUSPARCState *env)
+{
+    static int inited;
+    static const char gregnames[32][4] = {
+        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
+        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
+        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
+        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
+    };
+    static const char fregnames[32][4] = {
+        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
+        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
+        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
+        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
+    };
+
+    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
+#ifdef TARGET_SPARC64
+        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
+        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
+#else
+        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
+#endif
+        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
+        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
+    };
+
+    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
+#ifdef TARGET_SPARC64
+        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
+        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
+        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
+        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
+          "hstick_cmpr" },
+        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
+        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
+        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
+        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
+        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
+#endif
+        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
+        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
+        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
+        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
+        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
+        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
+        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
+        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
+#ifndef CONFIG_USER_ONLY
+        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
+#endif
+    };
+
+    unsigned int i;
+
+    /* init various static tables */
+    if (inited) {
+        return;
+    }
+    inited = 1;
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    tcg_ctx.tcg_env = cpu_env;
+
+    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
+                                         offsetof(CPUSPARCState, regwptr),
+                                         "regwptr");
+
+    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
+        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
+        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
+    }
+
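+    /* %g0 always reads as zero, so it gets no TCG global; the windowed
+       registers are reached through regwptr so that a window shift only
+       has to adjust the pointer.  */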
+    TCGV_UNUSED(cpu_regs[0]);
+    for (i = 1; i < 8; ++i) {
+        cpu_regs[i] = tcg_global_mem_new(cpu_env,
+                                         offsetof(CPUSPARCState, gregs[i]),
+                                         gregnames[i]);
+    }
+
+    for (i = 8; i < 32; ++i) {
+        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
+                                         (i - 8) * sizeof(target_ulong),
+                                         gregnames[i]);
+    }
+
+    for (i = 0; i < TARGET_DPREGS; i++) {
+        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+                                            offsetof(CPUSPARCState, fpr[i]),
+                                            fregnames[i]);
+    }
+}
+
+void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
+                          target_ulong *data)
+{
+    target_ulong pc = data[0];
+    target_ulong npc = data[1];
+
+    env->pc = pc;
+    if (npc == DYNAMIC_PC) {
+        /* dynamic NPC: already stored */
+    } else if (npc & JUMP_PC) {
+        /* jump PC: use 'cond' and the jump targets of the translation */
+        if (env->cond) {
+            env->npc = npc & ~3;
+        } else {
+            env->npc = pc + 4;
+        }
+    } else {
+        env->npc = npc;
+    }
+}
diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c
new file mode 100644
index 0000000000..8a9b763d0b
--- /dev/null
+++ b/target/sparc/vis_helper.c
@@ -0,0 +1,490 @@
+/*
+ * VIS op helpers
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* This macro uses non-native bit order, i.e. bit 0 is the MSB */
+#define GET_FIELD(X, FROM, TO)                                  \
+    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
+
+/* This macro uses the bit order of the manuals, i.e. bit 0 is 2^0 */
+#define GET_FIELD_SP(X, FROM, TO)               \
+    GET_FIELD(X, 63 - (TO), 63 - (FROM))
+
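+/* array8 interleaves the integer parts of a packed 3-D fixed-point
+   address into a blocked memory offset; the cubesize operand widens the
+   variable middle fields.  */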
+target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
+{
+    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
+        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
+        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
+        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
+        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
+        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
+        (((pixel_addr >> 55) & 1) << 4) |
+        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
+        GET_FIELD_SP(pixel_addr, 11, 12);
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define VIS_B64(n) b[7 - (n)]
+#define VIS_W64(n) w[3 - (n)]
+#define VIS_SW64(n) sw[3 - (n)]
+#define VIS_L64(n) l[1 - (n)]
+#define VIS_B32(n) b[3 - (n)]
+#define VIS_W32(n) w[1 - (n)]
+#else
+#define VIS_B64(n) b[n]
+#define VIS_W64(n) w[n]
+#define VIS_SW64(n) sw[n]
+#define VIS_L64(n) l[n]
+#define VIS_B32(n) b[n]
+#define VIS_W32(n) w[n]
+#endif
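+
+/*
+ * With these accessors, index 0 always names the least-significant
+ * element of the value, regardless of host byte order.
+ */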
+
+typedef union {
+    uint8_t b[8];
+    uint16_t w[4];
+    int16_t sw[4];
+    uint32_t l[2];
+    uint64_t ll;
+    float64 d;
+} VIS64;
+
+typedef union {
+    uint8_t b[4];
+    uint16_t w[2];
+    uint32_t l;
+    float32 f;
+} VIS32;
+
+uint64_t helper_fpmerge(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+
+    s.ll = src1;
+    d.ll = src2;
+
+    /* Reverse calculation order to handle overlap */
+    d.VIS_B64(7) = s.VIS_B64(3);
+    d.VIS_B64(6) = d.VIS_B64(3);
+    d.VIS_B64(5) = s.VIS_B64(2);
+    d.VIS_B64(4) = d.VIS_B64(2);
+    d.VIS_B64(3) = s.VIS_B64(1);
+    d.VIS_B64(2) = d.VIS_B64(1);
+    d.VIS_B64(1) = s.VIS_B64(0);
+    /* d.VIS_B64(0) = d.VIS_B64(0); */
+
+    return d.ll;
+}
+
+uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                 \
+    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
+    if ((tmp & 0xff) > 0x7f) {                                  \
+        tmp += 0x100;                                           \
+    }                                                           \
+    d.VIS_W64(r) = tmp >> 8;
+
+    PMUL(0);
+    PMUL(1);
+    PMUL(2);
+    PMUL(3);
+#undef PMUL
+
+    return d.ll;
+}
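+
+/*
+ * Worked example of the rounding above: with d.VIS_SW64(r) = 3 and
+ * s.VIS_B64(r) = 214, tmp = 642 = 0x282; the discarded low byte 0x82
+ * is > 0x7f, so 0x100 is added and the product rounds up to 3 instead
+ * of truncating to 2.
+ */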
+
+uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                 \
+    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
+    if ((tmp & 0xff) > 0x7f) {                                  \
+        tmp += 0x100;                                           \
+    }                                                           \
+    d.VIS_W64(r) = tmp >> 8;
+
+    PMUL(0);
+    PMUL(1);
+    PMUL(2);
+    PMUL(3);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                 \
+    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
+    if ((tmp & 0xff) > 0x7f) {                                  \
+        tmp += 0x100;                                           \
+    }                                                           \
+    d.VIS_W64(r) = tmp >> 8;
+
+    PMUL(0);
+    PMUL(1);
+    PMUL(2);
+    PMUL(3);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                         \
+    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
+    if ((tmp & 0xff) > 0x7f) {                                          \
+        tmp += 0x100;                                                   \
+    }                                                                   \
+    d.VIS_W64(r) = tmp >> 8;
+
+    PMUL(0);
+    PMUL(1);
+    PMUL(2);
+    PMUL(3);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                         \
+    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
+    if ((tmp & 0xff) > 0x7f) {                                          \
+        tmp += 0x100;                                                   \
+    }                                                                   \
+    d.VIS_W64(r) = tmp >> 8;
+
+    PMUL(0);
+    PMUL(1);
+    PMUL(2);
+    PMUL(3);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                         \
+    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
+    if ((tmp & 0xff) > 0x7f) {                                          \
+        tmp += 0x100;                                                   \
+    }                                                                   \
+    d.VIS_L64(r) = tmp;
+
+    /* Reverse calculation order to handle overlap */
+    PMUL(1);
+    PMUL(0);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2)
+{
+    VIS64 s, d;
+    uint32_t tmp;
+
+    s.ll = src1;
+    d.ll = src2;
+
+#define PMUL(r)                                                         \
+    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
+    if ((tmp & 0xff) > 0x7f) {                                          \
+        tmp += 0x100;                                                   \
+    }                                                                   \
+    d.VIS_L64(r) = tmp;
+
+    /* Reverse calculation order to handle overlap */
+    PMUL(1);
+    PMUL(0);
+#undef PMUL
+
+    return d.ll;
+}
+
+uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
+{
+    VIS32 s;
+    VIS64 d;
+
+    s.l = (uint32_t)src1;
+    d.ll = src2;
+    d.VIS_W64(0) = s.VIS_B32(0) << 4;
+    d.VIS_W64(1) = s.VIS_B32(1) << 4;
+    d.VIS_W64(2) = s.VIS_B32(2) << 4;
+    d.VIS_W64(3) = s.VIS_B32(3) << 4;
+
+    return d.ll;
+}
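+
+/*
+ * fexpand converts each 8-bit component to 16-bit fixed point with four
+ * fractional bits; e.g. a source byte of 0xff expands to 0x0ff0.
+ */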
+
+#define VIS_HELPER(name, F)                             \
+    uint64_t name##16(uint64_t src1, uint64_t src2)     \
+    {                                                   \
+        VIS64 s, d;                                     \
+                                                        \
+        s.ll = src1;                                    \
+        d.ll = src2;                                    \
+                                                        \
+        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
+        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
+        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
+        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
+                                                        \
+        return d.ll;                                    \
+    }                                                   \
+                                                        \
+    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
+    {                                                   \
+        VIS32 s, d;                                     \
+                                                        \
+        s.l = src1;                                     \
+        d.l = src2;                                     \
+                                                        \
+        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
+        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
+                                                        \
+        return d.l;                                     \
+    }                                                   \
+                                                        \
+    uint64_t name##32(uint64_t src1, uint64_t src2)     \
+    {                                                   \
+        VIS64 s, d;                                     \
+                                                        \
+        s.ll = src1;                                    \
+        d.ll = src2;                                    \
+                                                        \
+        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
+        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
+                                                        \
+        return d.ll;                                    \
+    }                                                   \
+                                                        \
+    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
+    {                                                   \
+        VIS32 s, d;                                     \
+                                                        \
+        s.l = src1;                                     \
+        d.l = src2;                                     \
+                                                        \
+        d.l = F(d.l, s.l);                              \
+                                                        \
+        return d.l;                                     \
+    }
+
+#define FADD(a, b) ((a) + (b))
+#define FSUB(a, b) ((a) - (b))
+VIS_HELPER(helper_fpadd, FADD)
+VIS_HELPER(helper_fpsub, FSUB)
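+
+/*
+ * Note: these partitioned adds/subtracts are modular, not saturating;
+ * e.g. an fpadd16 lane computing 0x7fff + 1 wraps to 0x8000.
+ */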
+
+#define VIS_CMPHELPER(name, F)                                    \
+    uint64_t name##16(uint64_t src1, uint64_t src2)               \
+    {                                                             \
+        VIS64 s, d;                                               \
+                                                                  \
+        s.ll = src1;                                              \
+        d.ll = src2;                                              \
+                                                                  \
+        d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0;     \
+        d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0;    \
+        d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0;    \
+        d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0;    \
+        d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0;           \
+                                                                  \
+        return d.ll;                                              \
+    }                                                             \
+                                                                  \
+    uint64_t name##32(uint64_t src1, uint64_t src2)               \
+    {                                                             \
+        VIS64 s, d;                                               \
+                                                                  \
+        s.ll = src1;                                              \
+        d.ll = src2;                                              \
+                                                                  \
+        d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0;     \
+        d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0;    \
+        d.VIS_L64(1) = 0;                                         \
+                                                                  \
+        return d.ll;                                              \
+    }
+
+#define FCMPGT(a, b) ((a) > (b))
+#define FCMPEQ(a, b) ((a) == (b))
+#define FCMPLE(a, b) ((a) <= (b))
+#define FCMPNE(a, b) ((a) != (b))
+
+VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
+VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
+VIS_CMPHELPER(helper_fcmple, FCMPLE)
+VIS_CMPHELPER(helper_fcmpne, FCMPNE)
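+
+/*
+ * The comparison helpers collapse the per-lane results into a bit mask
+ * in the least-significant element of the destination: e.g. fcmpgt16
+ * sets bit N (N = 0..3) when lane N of src1 is greater than lane N of
+ * src2, and clears the remaining lanes.
+ */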
+
+uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2)
+{
+    int i;
+    for (i = 0; i < 8; i++) {
+        int s1, s2;
+
+        s1 = (src1 >> (56 - (i * 8))) & 0xff;
+        s2 = (src2 >> (56 - (i * 8))) & 0xff;
+
+        /* Absolute value of difference. */
+        s1 -= s2;
+        if (s1 < 0) {
+            s1 = -s1;
+        }
+
+        sum += s1;
+    }
+
+    return sum;
+}
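+
+/*
+ * Worked example: for src1 = 0x0a02 and src2 = 0x0309 (upper bytes zero),
+ * pdist adds |0x0a - 0x03| + |0x02 - 0x09| = 14 to the running sum.
+ */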
+
+uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2)
+{
+    int scale = (gsr >> 3) & 0xf;
+    uint32_t ret = 0;
+    int byte;
+
+    for (byte = 0; byte < 4; byte++) {
+        uint32_t val;
+        int16_t src = rs2 >> (byte * 16);
+        int32_t scaled = src << scale;
+        int32_t from_fixed = scaled >> 7;
+
+        val = (from_fixed < 0 ? 0 :
+               from_fixed > 255 ? 255 : from_fixed);
+
+        ret |= val << (8 * byte);
+    }
+
+    return ret;
+}
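+
+/*
+ * Worked example: with GSR.scale_factor = 7 the two shifts cancel and
+ * fpack16 reduces to clamping each signed 16-bit lane to 0..255, so
+ * 0x1234 packs to 0xff and 0xfedc (-292) packs to 0x00.
+ */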
+
+uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2)
+{
+    int scale = (gsr >> 3) & 0x1f;
+    uint64_t ret = 0;
+    int word;
+
+    ret = (rs1 << 8) & ~(0x000000ff000000ffULL);
+    for (word = 0; word < 2; word++) {
+        uint64_t val;
+        int32_t src = rs2 >> (word * 32);
+        int64_t scaled = (int64_t)src << scale;
+        int64_t from_fixed = scaled >> 23;
+
+        val = (from_fixed < 0 ? 0 :
+               from_fixed > 255 ? 255 : from_fixed);
+
+        ret |= val << (32 * word);
+    }
+
+    return ret;
+}
+
+uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2)
+{
+    int scale = (gsr >> 3) & 0x1f;
+    uint32_t ret = 0;
+    int word;
+
+    for (word = 0; word < 2; word++) {
+        uint32_t val;
+        int32_t src = rs2 >> (word * 32);
+        int64_t scaled = (int64_t)src << scale;
+        int64_t from_fixed = scaled >> 16;
+
+        val = (from_fixed < -32768 ? -32768 :
+               from_fixed > 32767 ? 32767 : from_fixed);
+
+        ret |= (val & 0xffff) << (word * 16);
+    }
+
+    return ret;
+}
+
+uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
+{
+    union {
+        uint64_t ll[2];
+        uint8_t b[16];
+    } s;
+    VIS64 r;
+    uint32_t i, mask, host;
+
+    /* Set up S such that we can index across all of the bytes.  */
+#ifdef HOST_WORDS_BIGENDIAN
+    s.ll[0] = src1;
+    s.ll[1] = src2;
+    host = 0;
+#else
+    s.ll[1] = src1;
+    s.ll[0] = src2;
+    host = 15;
+#endif
+    mask = gsr >> 32;
+
+    for (i = 0; i < 8; ++i) {
+        unsigned e = (mask >> (28 - i * 4)) & 0xf;
+        r.VIS_B64(i) = s.b[e ^ host];
+    }
+
+    return r.ll;
+}
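+
+/*
+ * Worked example: the eight 4-bit fields of GSR.mask index the 16-byte
+ * vector src1:src2, so mask = 0x76543210 copies src1 to the result
+ * unchanged and mask = 0xfedcba98 copies src2.
+ */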
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
new file mode 100644
index 0000000000..2d5b5469a9
--- /dev/null
+++ b/target/sparc/win_helper.c
@@ -0,0 +1,400 @@
+/*
+ * Helpers for CWP and PSTATE handling
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "trace.h"
+
+static inline void memcpy32(target_ulong *dst, const target_ulong *src)
+{
+    dst[0] = src[0];
+    dst[1] = src[1];
+    dst[2] = src[2];
+    dst[3] = src[3];
+    dst[4] = src[4];
+    dst[5] = src[5];
+    dst[6] = src[6];
+    dst[7] = src[7];
+}
+
+void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
+{
+    /* put the modified wrap registers at their proper location */
+    if (env->cwp == env->nwindows - 1) {
+        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+    }
+    env->cwp = new_cwp;
+
+    /* put the wrap registers at their temporary location */
+    if (new_cwp == env->nwindows - 1) {
+        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+    }
+    env->regwptr = env->regbase + (new_cwp * 16);
+}
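+
+/*
+ * The register windows overlap, and the last window wraps around onto the
+ * first; the two copies above maintain a shadow of the shared eight
+ * registers at regbase + nwindows * 16 while the wrapping window is
+ * current, so that regwptr-relative accesses never need to wrap.
+ */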
+
+target_ulong cpu_get_psr(CPUSPARCState *env)
+{
+    helper_compute_psr(env);
+
+#if !defined(TARGET_SPARC64)
+    return env->version | (env->psr & PSR_ICC) |
+        (env->psref ? PSR_EF : 0) |
+        (env->psrpil << 8) |
+        (env->psrs ? PSR_S : 0) |
+        (env->psrps ? PSR_PS : 0) |
+        (env->psret ? PSR_ET : 0) | env->cwp;
+#else
+    return env->psr & PSR_ICC;
+#endif
+}
+
+void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val)
+{
+    env->psr = val & PSR_ICC;
+#if !defined(TARGET_SPARC64)
+    env->psref = (val & PSR_EF) ? 1 : 0;
+    env->psrpil = (val & PSR_PIL) >> 8;
+    env->psrs = (val & PSR_S) ? 1 : 0;
+    env->psrps = (val & PSR_PS) ? 1 : 0;
+    env->psret = (val & PSR_ET) ? 1 : 0;
+#endif
+    env->cc_op = CC_OP_FLAGS;
+#if !defined(TARGET_SPARC64)
+    cpu_set_cwp(env, val & PSR_CWP);
+#endif
+}
+
+void cpu_put_psr(CPUSPARCState *env, target_ulong val)
+{
+    cpu_put_psr_raw(env, val);
+#if ((!defined(TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
+    cpu_check_irqs(env);
+#endif
+}
+
+int cpu_cwp_inc(CPUSPARCState *env, int cwp)
+{
+    if (unlikely(cwp >= env->nwindows)) {
+        cwp -= env->nwindows;
+    }
+    return cwp;
+}
+
+int cpu_cwp_dec(CPUSPARCState *env, int cwp)
+{
+    if (unlikely(cwp < 0)) {
+        cwp += env->nwindows;
+    }
+    return cwp;
+}
+
+#ifndef TARGET_SPARC64
+void helper_rett(CPUSPARCState *env)
+{
+    unsigned int cwp;
+
+    if (env->psret == 1) {
+        cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+    }
+
+    env->psret = 1;
+    cwp = cpu_cwp_inc(env, env->cwp + 1);
+    if (env->wim & (1 << cwp)) {
+        cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+    }
+    cpu_set_cwp(env, cwp);
+    env->psrs = env->psrps;
+}
+
+/* XXX: use another pointer for the %iN registers to avoid slow
+   window-wrapping handling? */
+void helper_save(CPUSPARCState *env)
+{
+    uint32_t cwp;
+
+    cwp = cpu_cwp_dec(env, env->cwp - 1);
+    if (env->wim & (1 << cwp)) {
+        cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC());
+    }
+    cpu_set_cwp(env, cwp);
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+    uint32_t cwp;
+
+    cwp = cpu_cwp_inc(env, env->cwp + 1);
+    if (env->wim & (1 << cwp)) {
+        cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+    }
+    cpu_set_cwp(env, cwp);
+}
+
+void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
+{
+    if ((new_psr & PSR_CWP) >= env->nwindows) {
+        cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+    } else {
+        cpu_put_psr(env, new_psr);
+    }
+}
+
+target_ulong helper_rdpsr(CPUSPARCState *env)
+{
+    return cpu_get_psr(env);
+}
+
+#else
+/* XXX: use another pointer for the %iN registers to avoid slow
+   window-wrapping handling? */
+void helper_save(CPUSPARCState *env)
+{
+    uint32_t cwp;
+
+    cwp = cpu_cwp_dec(env, env->cwp - 1);
+    if (env->cansave == 0) {
+        int tt = TT_SPILL | (env->otherwin != 0
+                             ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+                             : ((env->wstate & 0x7) << 2));
+        cpu_raise_exception_ra(env, tt, GETPC());
+    } else {
+        if (env->cleanwin - env->canrestore == 0) {
+            /* XXX: clean the windows without taking a trap */
+            cpu_raise_exception_ra(env, TT_CLRWIN, GETPC());
+        } else {
+            env->cansave--;
+            env->canrestore++;
+            cpu_set_cwp(env, cwp);
+        }
+    }
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+    uint32_t cwp;
+
+    cwp = cpu_cwp_inc(env, env->cwp + 1);
+    if (env->canrestore == 0) {
+        int tt = TT_FILL | (env->otherwin != 0
+                            ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+                            : ((env->wstate & 0x7) << 2));
+        cpu_raise_exception_ra(env, tt, GETPC());
+    } else {
+        env->cansave++;
+        env->canrestore--;
+        cpu_set_cwp(env, cwp);
+    }
+}
+
+void helper_flushw(CPUSPARCState *env)
+{
+    if (env->cansave != env->nwindows - 2) {
+        int tt = TT_SPILL | (env->otherwin != 0
+                             ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+                             : ((env->wstate & 0x7) << 2));
+        cpu_raise_exception_ra(env, tt, GETPC());
+    }
+}
+
+void helper_saved(CPUSPARCState *env)
+{
+    env->cansave++;
+    if (env->otherwin == 0) {
+        env->canrestore--;
+    } else {
+        env->otherwin--;
+    }
+}
+
+void helper_restored(CPUSPARCState *env)
+{
+    env->canrestore++;
+    if (env->cleanwin < env->nwindows - 1) {
+        env->cleanwin++;
+    }
+    if (env->otherwin == 0) {
+        env->cansave--;
+    } else {
+        env->otherwin--;
+    }
+}
+
+target_ulong cpu_get_ccr(CPUSPARCState *env)
+{
+    target_ulong psr;
+
+    psr = cpu_get_psr(env);
+
+    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
+}
+
+void cpu_put_ccr(CPUSPARCState *env, target_ulong val)
+{
+    env->xcc = (val >> 4) << 20;
+    env->psr = (val & 0xf) << 20;
+    CC_OP = CC_OP_FLAGS;
+}
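+
+/*
+ * CCR packs xcc into bits 7:4 and icc into bits 3:0; the shifts by 20
+ * above move each nibble to/from the PSR_ICC position (bits 23:20) used
+ * by cpu_get_psr() and cpu_put_psr_raw().
+ */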
+
+target_ulong cpu_get_cwp64(CPUSPARCState *env)
+{
+    return env->nwindows - 1 - env->cwp;
+}
+
+void cpu_put_cwp64(CPUSPARCState *env, int cwp)
+{
+    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
+        cwp %= env->nwindows;
+    }
+    cpu_set_cwp(env, env->nwindows - 1 - cwp);
+}
+
+target_ulong helper_rdccr(CPUSPARCState *env)
+{
+    return cpu_get_ccr(env);
+}
+
+void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr)
+{
+    cpu_put_ccr(env, new_ccr);
+}
+
+/* CWP handling is reversed in V9, but we still use the V8 register
+   order. */
+target_ulong helper_rdcwp(CPUSPARCState *env)
+{
+    return cpu_get_cwp64(env);
+}
+
+void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
+{
+    cpu_put_cwp64(env, new_cwp);
+}
+
+static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
+{
+    switch (pstate) {
+    default:
+        trace_win_helper_gregset_error(pstate);
+        /* fall through to the normal set of global registers */
+    case 0:
+        return env->bgregs;
+    case PS_AG:
+        return env->agregs;
+    case PS_MG:
+        return env->mgregs;
+    case PS_IG:
+        return env->igregs;
+    }
+}
+
+void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
+{
+    uint32_t pstate_regs, new_pstate_regs;
+    uint64_t *src, *dst;
+
+    if (env->def->features & CPU_FEATURE_GL) {
+        /* PS_AG is not implemented in this case */
+        new_pstate &= ~PS_AG;
+    }
+
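+    /* 0xc01 == PS_IG | PS_MG | PS_AG: the bits selecting a global set */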
+    pstate_regs = env->pstate & 0xc01;
+    new_pstate_regs = new_pstate & 0xc01;
+
+    if (new_pstate_regs != pstate_regs) {
+        trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs);
+
+        /* Switch global register bank */
+        src = get_gregset(env, new_pstate_regs);
+        dst = get_gregset(env, pstate_regs);
+        memcpy32(dst, env->gregs);
+        memcpy32(env->gregs, src);
+    } else {
+        trace_win_helper_no_switch_pstate(new_pstate_regs);
+    }
+    env->pstate = new_pstate;
+}
+
+void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)
+{
+    cpu_change_pstate(env, new_state & 0xf3f);
+
+#if !defined(CONFIG_USER_ONLY)
+    if (cpu_interrupts_enabled(env)) {
+        cpu_check_irqs(env);
+    }
+#endif
+}
+
+void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
+{
+#if !defined(CONFIG_USER_ONLY)
+    trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil);
+
+    env->psrpil = new_pil;
+
+    if (cpu_interrupts_enabled(env)) {
+        cpu_check_irqs(env);
+    }
+#endif
+}
+
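+/*
+ * DONE and RETRY restore state from TSTATE[TL], which packs CCR in bits
+ * 39:32, ASI in bits 31:24, PSTATE in bits 19:8 and CWP in the low byte;
+ * the shifts and masks below unpack those fields.
+ */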
+void helper_done(CPUSPARCState *env)
+{
+    trap_state *tsptr = cpu_tsptr(env);
+
+    env->pc = tsptr->tnpc;
+    env->npc = tsptr->tnpc + 4;
+    cpu_put_ccr(env, tsptr->tstate >> 32);
+    env->asi = (tsptr->tstate >> 24) & 0xff;
+    cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+    cpu_put_cwp64(env, tsptr->tstate & 0xff);
+    env->tl--;
+
+    trace_win_helper_done(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+    if (cpu_interrupts_enabled(env)) {
+        cpu_check_irqs(env);
+    }
+#endif
+}
+
+void helper_retry(CPUSPARCState *env)
+{
+    trap_state *tsptr = cpu_tsptr(env);
+
+    env->pc = tsptr->tpc;
+    env->npc = tsptr->tnpc;
+    cpu_put_ccr(env, tsptr->tstate >> 32);
+    env->asi = (tsptr->tstate >> 24) & 0xff;
+    cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+    cpu_put_cwp64(env, tsptr->tstate & 0xff);
+    env->tl--;
+
+    trace_win_helper_retry(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+    if (cpu_interrupts_enabled(env)) {
+        cpu_check_irqs(env);
+    }
+#endif
+}
+#endif