Diffstat (limited to 'target/mips/cpu.h')
-rw-r--r--  target/mips/cpu.h | 1069
1 file changed, 1069 insertions, 0 deletions
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
new file mode 100644
index 0000000000..5182dc74ff
--- /dev/null
+++ b/target/mips/cpu.h
@@ -0,0 +1,1069 @@
+#ifndef MIPS_CPU_H
+#define MIPS_CPU_H
+
+//#define DEBUG_OP
+
+#define ALIGNED_ONLY
+
+#define CPUArchState struct CPUMIPSState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "mips-defs.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+struct CPUMIPSState;
+
+typedef struct r4k_tlb_t r4k_tlb_t;
+struct r4k_tlb_t {
+    target_ulong VPN;
+    uint32_t PageMask;
+    uint16_t ASID;
+    unsigned int G:1;
+    unsigned int C0:3;
+    unsigned int C1:3;
+    unsigned int V0:1;
+    unsigned int V1:1;
+    unsigned int D0:1;
+    unsigned int D1:1;
+    unsigned int XI0:1;
+    unsigned int XI1:1;
+    unsigned int RI0:1;
+    unsigned int RI1:1;
+    unsigned int EHINV:1;
+    uint64_t PFN[2];
+};
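+
+/*
+ * Hedged sketch (not part of the original header): each r4k_tlb_t entry
+ * describes an even/odd pair of virtual pages, as in an R4000-style TLB
+ * where EntryLo0/EntryLo1 supply the per-page attributes.  For a 4 KiB
+ * page pair the fields might be filled roughly as below; the placeholder
+ * names and exact encodings used by the TLB helpers are an assumption:
+ *
+ *     r4k_tlb_t e = {
+ *         .VPN      = va & ~(target_ulong)0x1fff, // VPN2, covers both pages
+ *         .PageMask = 0,                          // 4 KiB page size
+ *         .ASID     = asid,
+ *         .G        = global,
+ *         .V0 = 1, .D0 = 1,                       // even page valid/dirty
+ *         .V1 = 1, .D1 = 1,                       // odd page valid/dirty
+ *         .PFN      = { pfn_even, pfn_odd },      // per-page frame info
+ *     };
+ *
+ * C0/C1 hold the per-page cacheability attributes, RI0/RI1 and XI0/XI1
+ * the read- and execute-inhibit bits, and EHINV marks the entry invalid.
+ */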
+
+#if !defined(CONFIG_USER_ONLY)
+typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
+struct CPUMIPSTLBContext {
+    uint32_t nb_tlb;
+    uint32_t tlb_in_use;
+    int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot,
+                        target_ulong address, int rw, int access_type);
+    void (*helper_tlbwi)(struct CPUMIPSState *env);
+    void (*helper_tlbwr)(struct CPUMIPSState *env);
+    void (*helper_tlbp)(struct CPUMIPSState *env);
+    void (*helper_tlbr)(struct CPUMIPSState *env);
+    void (*helper_tlbinv)(struct CPUMIPSState *env);
+    void (*helper_tlbinvf)(struct CPUMIPSState *env);
+    union {
+        struct {
+            r4k_tlb_t tlb[MIPS_TLB_MAX];
+        } r4k;
+    } mmu;
+};
+#endif
+
+/* MSA Context */
+#define MSA_WRLEN (128)
+
+enum CPUMIPSMSADataFormat {
+    DF_BYTE = 0,
+    DF_HALF,
+    DF_WORD,
+    DF_DOUBLE
+};
+
+typedef union wr_t wr_t;
+union wr_t {
+    int8_t  b[MSA_WRLEN/8];
+    int16_t h[MSA_WRLEN/16];
+    int32_t w[MSA_WRLEN/32];
+    int64_t d[MSA_WRLEN/64];
+};
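+
+/*
+ * Illustrative note (sketch, not from the original header): with
+ * MSA_WRLEN == 128 a vector register holds 16 byte, 8 halfword, 4 word
+ * or 2 doubleword elements, so the element count for a data format df
+ * taken from CPUMIPSMSADataFormat is simply:
+ *
+ *     int elements = MSA_WRLEN / (8 << df);   // e.g. DF_WORD -> 4
+ */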
+
+typedef union fpr_t fpr_t;
+union fpr_t {
+    float64  fd;   /* ieee double precision */
+    float32  fs[2];/* ieee single precision */
+    uint64_t d;    /* binary double fixed-point */
+    uint32_t w[2]; /* binary single fixed-point */
+/* FPU/MSA register mapping is not tested on big-endian hosts. */
+    wr_t     wr;   /* vector data */
+};
+/* define FP_ENDIAN_IDX to access the same location
+ * in the fpr_t union regardless of the host endianness
+ */
+#if defined(HOST_WORDS_BIGENDIAN)
+#  define FP_ENDIAN_IDX 1
+#else
+#  define FP_ENDIAN_IDX 0
+#endif
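+
+/*
+ * Minimal usage sketch (not part of the original header): indexing the
+ * 32-bit halves of an fpr_t with FP_ENDIAN_IDX yields the low word of
+ * the underlying 64-bit value on both little- and big-endian hosts:
+ *
+ *     fpr_t f;
+ *     f.d = 0x0011223344556677ULL;
+ *     uint32_t lo = f.w[FP_ENDIAN_IDX];     // 0x44556677 on any host
+ *     uint32_t hi = f.w[1 - FP_ENDIAN_IDX]; // 0x00112233
+ */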
+
+typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
+struct CPUMIPSFPUContext {
+    /* Floating point registers */
+    fpr_t fpr[32];
+    float_status fp_status;
+    /* fpu implementation/revision register (fir) */
+    uint32_t fcr0;
+#define FCR0_FREP 29
+#define FCR0_UFRP 28
+#define FCR0_HAS2008 23
+#define FCR0_F64 22
+#define FCR0_L 21
+#define FCR0_W 20
+#define FCR0_3D 19
+#define FCR0_PS 18
+#define FCR0_D 17
+#define FCR0_S 16
+#define FCR0_PRID 8
+#define FCR0_REV 0
+    /* fcsr */
+    uint32_t fcr31_rw_bitmask;
+    uint32_t fcr31;
+#define FCR31_FS 24
+#define FCR31_ABS2008 19
+#define FCR31_NAN2008 18
+#define SET_FP_COND(num,env)     do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
+#define CLEAR_FP_COND(num,env)   do { ((env).fcr31) &= ~((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
+#define GET_FP_COND(env)         ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1))
+#define GET_FP_CAUSE(reg)        (((reg) >> 12) & 0x3f)
+#define GET_FP_ENABLE(reg)       (((reg) >>  7) & 0x1f)
+#define GET_FP_FLAGS(reg)        (((reg) >>  2) & 0x1f)
+#define SET_FP_CAUSE(reg,v)      do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0)
+#define SET_FP_ENABLE(reg,v)     do { (reg) = ((reg) & ~(0x1f <<  7)) | ((v & 0x1f) << 7); } while(0)
+#define SET_FP_FLAGS(reg,v)      do { (reg) = ((reg) & ~(0x1f <<  2)) | ((v & 0x1f) << 2); } while(0)
+#define UPDATE_FP_FLAGS(reg,v)   do { (reg) |= ((v & 0x1f) << 2); } while(0)
+#define FP_INEXACT        1
+#define FP_UNDERFLOW      2
+#define FP_OVERFLOW       4
+#define FP_DIV0           8
+#define FP_INVALID        16
+#define FP_UNIMPLEMENTED  32
+};
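+
+/*
+ * Hedged usage sketch for the fcr31 accessors above (illustrative, not
+ * part of the original header): recording an IEEE inexact result and
+ * testing whether the matching enable bit would make it trap.
+ *
+ *     CPUMIPSFPUContext *fpu = &env->active_fpu;
+ *     UPDATE_FP_FLAGS(fpu->fcr31, FP_INEXACT);
+ *     if (GET_FP_ENABLE(fpu->fcr31) & FP_INEXACT) {
+ *         // the caller would raise an FP exception here
+ *     }
+ *
+ * SET_FP_COND/CLEAR_FP_COND special-case condition code 0 because FCC0
+ * lives at bit 23 of fcr31 while FCC1..FCC7 occupy bits 25..31; this is
+ * also why GET_FP_COND stitches the two ranges back together.
+ */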
+
+#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
+struct CPUMIPSMVPContext {
+    int32_t CP0_MVPControl;
+#define CP0MVPCo_CPA	3
+#define CP0MVPCo_STLB	2
+#define CP0MVPCo_VPC	1
+#define CP0MVPCo_EVP	0
+    int32_t CP0_MVPConf0;
+#define CP0MVPC0_M	31
+#define CP0MVPC0_TLBS	29
+#define CP0MVPC0_GS	28
+#define CP0MVPC0_PCP	27
+#define CP0MVPC0_PTLBE	16
+#define CP0MVPC0_TCA	15
+#define CP0MVPC0_PVPE	10
+#define CP0MVPC0_PTC	0
+    int32_t CP0_MVPConf1;
+#define CP0MVPC1_CIM	31
+#define CP0MVPC1_CIF	30
+#define CP0MVPC1_PCX	20
+#define CP0MVPC1_PCP2	10
+#define CP0MVPC1_PCP1	0
+};
+
+typedef struct mips_def_t mips_def_t;
+
+#define MIPS_SHADOW_SET_MAX 16
+#define MIPS_TC_MAX 5
+#define MIPS_FPU_MAX 1
+#define MIPS_DSP_ACC 4
+#define MIPS_KSCRATCH_NUM 6
+#define MIPS_MAAR_MAX 16 /* Must be an even number. */
+
+typedef struct TCState TCState;
+struct TCState {
+    target_ulong gpr[32];
+    target_ulong PC;
+    target_ulong HI[MIPS_DSP_ACC];
+    target_ulong LO[MIPS_DSP_ACC];
+    target_ulong ACX[MIPS_DSP_ACC];
+    target_ulong DSPControl;
+    int32_t CP0_TCStatus;
+#define CP0TCSt_TCU3	31
+#define CP0TCSt_TCU2	30
+#define CP0TCSt_TCU1	29
+#define CP0TCSt_TCU0	28
+#define CP0TCSt_TMX	27
+#define CP0TCSt_RNST	23
+#define CP0TCSt_TDS	21
+#define CP0TCSt_DT	20
+#define CP0TCSt_DA	15
+#define CP0TCSt_A	13
+#define CP0TCSt_TKSU	11
+#define CP0TCSt_IXMT	10
+#define CP0TCSt_TASID	0
+    int32_t CP0_TCBind;
+#define CP0TCBd_CurTC	21
+#define CP0TCBd_TBE	17
+#define CP0TCBd_CurVPE	0
+    target_ulong CP0_TCHalt;
+    target_ulong CP0_TCContext;
+    target_ulong CP0_TCSchedule;
+    target_ulong CP0_TCScheFBack;
+    int32_t CP0_Debug_tcstatus;
+    target_ulong CP0_UserLocal;
+
+    int32_t msacsr;
+
+#define MSACSR_FS       24
+#define MSACSR_FS_MASK  (1 << MSACSR_FS)
+#define MSACSR_NX       18
+#define MSACSR_NX_MASK  (1 << MSACSR_NX)
+#define MSACSR_CEF      2
+#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
+#define MSACSR_RM       0
+#define MSACSR_RM_MASK  (0x3 << MSACSR_RM)
+#define MSACSR_MASK     (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
+        MSACSR_FS_MASK)
+
+    float_status msa_fp_status;
+};
+
+typedef struct CPUMIPSState CPUMIPSState;
+struct CPUMIPSState {
+    TCState active_tc;
+    CPUMIPSFPUContext active_fpu;
+
+    uint32_t current_tc;
+    uint32_t current_fpu;
+
+    uint32_t SEGBITS;
+    uint32_t PABITS;
+#if defined(TARGET_MIPS64)
+# define PABITS_BASE 36
+#else
+# define PABITS_BASE 32
+#endif
+    target_ulong SEGMask;
+    uint64_t PAMask;
+#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
+
+    int32_t msair;
+#define MSAIR_ProcID    8
+#define MSAIR_Rev       0
+
+    int32_t CP0_Index;
+    /* CP0_MVP* are per MVP registers. */
+    int32_t CP0_VPControl;
+#define CP0VPCtl_DIS    0
+    int32_t CP0_Random;
+    int32_t CP0_VPEControl;
+#define CP0VPECo_YSI	21
+#define CP0VPECo_GSI	20
+#define CP0VPECo_EXCPT	16
+#define CP0VPECo_TE	15
+#define CP0VPECo_TargTC	0
+    int32_t CP0_VPEConf0;
+#define CP0VPEC0_M	31
+#define CP0VPEC0_XTC	21
+#define CP0VPEC0_TCS	19
+#define CP0VPEC0_SCS	18
+#define CP0VPEC0_DSC	17
+#define CP0VPEC0_ICS	16
+#define CP0VPEC0_MVP	1
+#define CP0VPEC0_VPA	0
+    int32_t CP0_VPEConf1;
+#define CP0VPEC1_NCX	20
+#define CP0VPEC1_NCP2	10
+#define CP0VPEC1_NCP1	0
+    target_ulong CP0_YQMask;
+    target_ulong CP0_VPESchedule;
+    target_ulong CP0_VPEScheFBack;
+    int32_t CP0_VPEOpt;
+#define CP0VPEOpt_IWX7	15
+#define CP0VPEOpt_IWX6	14
+#define CP0VPEOpt_IWX5	13
+#define CP0VPEOpt_IWX4	12
+#define CP0VPEOpt_IWX3	11
+#define CP0VPEOpt_IWX2	10
+#define CP0VPEOpt_IWX1	9
+#define CP0VPEOpt_IWX0	8
+#define CP0VPEOpt_DWX7	7
+#define CP0VPEOpt_DWX6	6
+#define CP0VPEOpt_DWX5	5
+#define CP0VPEOpt_DWX4	4
+#define CP0VPEOpt_DWX3	3
+#define CP0VPEOpt_DWX2	2
+#define CP0VPEOpt_DWX1	1
+#define CP0VPEOpt_DWX0	0
+    uint64_t CP0_EntryLo0;
+    uint64_t CP0_EntryLo1;
+#if defined(TARGET_MIPS64)
+# define CP0EnLo_RI 63
+# define CP0EnLo_XI 62
+#else
+# define CP0EnLo_RI 31
+# define CP0EnLo_XI 30
+#endif
+    int32_t CP0_GlobalNumber;
+#define CP0GN_VPId 0
+    target_ulong CP0_Context;
+    target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
+    int32_t CP0_PageMask;
+    int32_t CP0_PageGrain_rw_bitmask;
+    int32_t CP0_PageGrain;
+#define CP0PG_RIE 31
+#define CP0PG_XIE 30
+#define CP0PG_ELPA 29
+#define CP0PG_IEC 27
+    int32_t CP0_Wired;
+    int32_t CP0_SRSConf0_rw_bitmask;
+    int32_t CP0_SRSConf0;
+#define CP0SRSC0_M	31
+#define CP0SRSC0_SRS3	20
+#define CP0SRSC0_SRS2	10
+#define CP0SRSC0_SRS1	0
+    int32_t CP0_SRSConf1_rw_bitmask;
+    int32_t CP0_SRSConf1;
+#define CP0SRSC1_M	31
+#define CP0SRSC1_SRS6	20
+#define CP0SRSC1_SRS5	10
+#define CP0SRSC1_SRS4	0
+    int32_t CP0_SRSConf2_rw_bitmask;
+    int32_t CP0_SRSConf2;
+#define CP0SRSC2_M	31
+#define CP0SRSC2_SRS9	20
+#define CP0SRSC2_SRS8	10
+#define CP0SRSC2_SRS7	0
+    int32_t CP0_SRSConf3_rw_bitmask;
+    int32_t CP0_SRSConf3;
+#define CP0SRSC3_M	31
+#define CP0SRSC3_SRS12	20
+#define CP0SRSC3_SRS11	10
+#define CP0SRSC3_SRS10	0
+    int32_t CP0_SRSConf4_rw_bitmask;
+    int32_t CP0_SRSConf4;
+#define CP0SRSC4_SRS15	20
+#define CP0SRSC4_SRS14	10
+#define CP0SRSC4_SRS13	0
+    int32_t CP0_HWREna;
+    target_ulong CP0_BadVAddr;
+    uint32_t CP0_BadInstr;
+    uint32_t CP0_BadInstrP;
+    int32_t CP0_Count;
+    target_ulong CP0_EntryHi;
+#define CP0EnHi_EHINV 10
+    target_ulong CP0_EntryHi_ASID_mask;
+    int32_t CP0_Compare;
+    int32_t CP0_Status;
+#define CP0St_CU3   31
+#define CP0St_CU2   30
+#define CP0St_CU1   29
+#define CP0St_CU0   28
+#define CP0St_RP    27
+#define CP0St_FR    26
+#define CP0St_RE    25
+#define CP0St_MX    24
+#define CP0St_PX    23
+#define CP0St_BEV   22
+#define CP0St_TS    21
+#define CP0St_SR    20
+#define CP0St_NMI   19
+#define CP0St_IM    8
+#define CP0St_KX    7
+#define CP0St_SX    6
+#define CP0St_UX    5
+#define CP0St_KSU   3
+#define CP0St_ERL   2
+#define CP0St_EXL   1
+#define CP0St_IE    0
+    int32_t CP0_IntCtl;
+#define CP0IntCtl_IPTI 29
+#define CP0IntCtl_IPPCI 26
+#define CP0IntCtl_VS 5
+    int32_t CP0_SRSCtl;
+#define CP0SRSCtl_HSS 26
+#define CP0SRSCtl_EICSS 18
+#define CP0SRSCtl_ESS 12
+#define CP0SRSCtl_PSS 6
+#define CP0SRSCtl_CSS 0
+    int32_t CP0_SRSMap;
+#define CP0SRSMap_SSV7 28
+#define CP0SRSMap_SSV6 24
+#define CP0SRSMap_SSV5 20
+#define CP0SRSMap_SSV4 16
+#define CP0SRSMap_SSV3 12
+#define CP0SRSMap_SSV2 8
+#define CP0SRSMap_SSV1 4
+#define CP0SRSMap_SSV0 0
+    int32_t CP0_Cause;
+#define CP0Ca_BD   31
+#define CP0Ca_TI   30
+#define CP0Ca_CE   28
+#define CP0Ca_DC   27
+#define CP0Ca_PCI  26
+#define CP0Ca_IV   23
+#define CP0Ca_WP   22
+#define CP0Ca_IP    8
+#define CP0Ca_IP_mask 0x0000FF00
+#define CP0Ca_EC    2
+    target_ulong CP0_EPC;
+    int32_t CP0_PRid;
+    int32_t CP0_EBase;
+    target_ulong CP0_CMGCRBase;
+    int32_t CP0_Config0;
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   18
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+    int32_t CP0_Config1;
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+    int32_t CP0_Config2;
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+    int32_t CP0_Config3;
+#define CP0C3_M    31
+#define CP0C3_BPG  30
+#define CP0C3_CMGCR 29
+#define CP0C3_MSAP  28
+#define CP0C3_BP 27
+#define CP0C3_BI 26
+#define CP0C3_IPLW 21
+#define CP0C3_MMAR 18
+#define CP0C3_MCU  17
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ISA  14
+#define CP0C3_ULRI 13
+#define CP0C3_RXI  12
+#define CP0C3_DSP2P 11
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_CDMM 3
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+    int32_t CP0_Config4;
+    int32_t CP0_Config4_rw_bitmask;
+#define CP0C4_M    31
+#define CP0C4_IE   29
+#define CP0C4_AE   28
+#define CP0C4_KScrExist 16
+#define CP0C4_MMUExtDef 14
+#define CP0C4_FTLBPageSize 8
+#define CP0C4_FTLBWays 4
+#define CP0C4_FTLBSets 0
+#define CP0C4_MMUSizeExt 0
+    int32_t CP0_Config5;
+    int32_t CP0_Config5_rw_bitmask;
+#define CP0C5_M          31
+#define CP0C5_K          30
+#define CP0C5_CV         29
+#define CP0C5_EVA        28
+#define CP0C5_MSAEn      27
+#define CP0C5_XNP        13
+#define CP0C5_UFE        9
+#define CP0C5_FRE        8
+#define CP0C5_VP         7
+#define CP0C5_SBRI       6
+#define CP0C5_MVH        5
+#define CP0C5_LLB        4
+#define CP0C5_MRP        3
+#define CP0C5_UFR        2
+#define CP0C5_NFExists   0
+    int32_t CP0_Config6;
+    int32_t CP0_Config7;
+    uint64_t CP0_MAAR[MIPS_MAAR_MAX];
+    int32_t CP0_MAARI;
+    /* XXX: Maybe make LLAddr per-TC? */
+    uint64_t lladdr;
+    target_ulong llval;
+    target_ulong llnewval;
+    target_ulong llreg;
+    uint64_t CP0_LLAddr_rw_bitmask;
+    int CP0_LLAddr_shift;
+    target_ulong CP0_WatchLo[8];
+    int32_t CP0_WatchHi[8];
+#define CP0WH_ASID 16
+    target_ulong CP0_XContext;
+    int32_t CP0_Framemask;
+    int32_t CP0_Debug;
+#define CP0DB_DBD  31
+#define CP0DB_DM   30
+#define CP0DB_LSNM 28
+#define CP0DB_Doze 27
+#define CP0DB_Halt 26
+#define CP0DB_CNT  25
+#define CP0DB_IBEP 24
+#define CP0DB_DBEP 21
+#define CP0DB_IEXI 20
+#define CP0DB_VER  15
+#define CP0DB_DEC  10
+#define CP0DB_SSt  8
+#define CP0DB_DINT 5
+#define CP0DB_DIB  4
+#define CP0DB_DDBS 3
+#define CP0DB_DDBL 2
+#define CP0DB_DBp  1
+#define CP0DB_DSS  0
+    target_ulong CP0_DEPC;
+    int32_t CP0_Performance0;
+    int32_t CP0_ErrCtl;
+#define CP0EC_WST 29
+#define CP0EC_SPR 28
+#define CP0EC_ITC 26
+    uint64_t CP0_TagLo;
+    int32_t CP0_DataLo;
+    int32_t CP0_TagHi;
+    int32_t CP0_DataHi;
+    target_ulong CP0_ErrorEPC;
+    int32_t CP0_DESAVE;
+    /* We waste some space so we can handle shadow registers like TCs. */
+    TCState tcs[MIPS_SHADOW_SET_MAX];
+    CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
+    /* QEMU */
+    int error_code;
+#define EXCP_TLB_NOMATCH   0x1
+#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
+    uint32_t hflags;    /* CPU State */
+    /* TMASK defines different execution modes */
+#define MIPS_HFLAG_TMASK  0xF5807FF
+#define MIPS_HFLAG_MODE   0x00007 /* execution modes                    */
+    /* The KSU flags must be the lowest bits in hflags. The flag order
+       must be the same as defined for CP0 Status. This allows us to
+       use the bits directly as the value of mmu_idx. */
+#define MIPS_HFLAG_KSU    0x00003 /* kernel/supervisor/user mode mask   */
+#define MIPS_HFLAG_UM     0x00002 /* user mode flag                     */
+#define MIPS_HFLAG_SM     0x00001 /* supervisor mode flag               */
+#define MIPS_HFLAG_KM     0x00000 /* kernel mode flag                   */
+#define MIPS_HFLAG_DM     0x00004 /* Debug mode                         */
+#define MIPS_HFLAG_64     0x00008 /* 64-bit instructions enabled        */
+#define MIPS_HFLAG_CP0    0x00010 /* CP0 enabled                        */
+#define MIPS_HFLAG_FPU    0x00020 /* FPU enabled                        */
+#define MIPS_HFLAG_F64    0x00040 /* 64-bit FPU enabled                 */
+    /* True if the MIPS IV COP1X instructions can be used.  This also
+       controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
+       and RSQRT.D.  */
+#define MIPS_HFLAG_COP1X  0x00080 /* COP1X instructions enabled         */
+#define MIPS_HFLAG_RE     0x00100 /* Reversed endianness                */
+#define MIPS_HFLAG_AWRAP  0x00200 /* 32-bit compatibility address wrapping */
+#define MIPS_HFLAG_M16    0x00400 /* MIPS16 mode flag                   */
+#define MIPS_HFLAG_M16_SHIFT 10
+    /* If translation is interrupted between the branch instruction and
+     * the delay slot, record what type of branch it is so that we can
+     * resume translation properly.  It might be possible to reduce
+     * this from three bits to two.  */
+#define MIPS_HFLAG_BMASK_BASE  0x803800
+#define MIPS_HFLAG_B      0x00800 /* Unconditional branch               */
+#define MIPS_HFLAG_BC     0x01000 /* Conditional branch                 */
+#define MIPS_HFLAG_BL     0x01800 /* Likely branch                      */
+#define MIPS_HFLAG_BR     0x02000 /* branch to register (can't link TB) */
+    /* Extra flags about the current pending branch.  */
+#define MIPS_HFLAG_BMASK_EXT 0x7C000
+#define MIPS_HFLAG_B16    0x04000 /* branch instruction was 16 bits     */
+#define MIPS_HFLAG_BDS16  0x08000 /* branch requires 16-bit delay slot  */
+#define MIPS_HFLAG_BDS32  0x10000 /* branch requires 32-bit delay slot  */
+#define MIPS_HFLAG_BDS_STRICT  0x20000 /* Strict delay slot size */
+#define MIPS_HFLAG_BX     0x40000 /* branch exchanges execution mode    */
+#define MIPS_HFLAG_BMASK  (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
+    /* MIPS DSP resources access. */
+#define MIPS_HFLAG_DSP   0x080000  /* Enable access to MIPS DSP resources. */
+#define MIPS_HFLAG_DSPR2 0x100000  /* Enable access to MIPS DSPR2 resources. */
+    /* Extra flag about HWREna register. */
+#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
+#define MIPS_HFLAG_SBRI  0x400000 /* R6 SDBBP causes RI excpt. in user mode */
+#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot                   */
+#define MIPS_HFLAG_MSA   0x1000000
+#define MIPS_HFLAG_FRE   0x2000000 /* FRE enabled */
+#define MIPS_HFLAG_ELPA  0x4000000
+#define MIPS_HFLAG_ITC_CACHE  0x8000000 /* CACHE instr. operates on ITC tag */
+    target_ulong btarget;        /* Jump / branch target               */
+    target_ulong bcond;          /* Branch condition (if needed)       */
+
+    int SYNCI_Step; /* Address step size for SYNCI */
+    int CCRes; /* Cycle count resolution/divisor */
+    uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
+    uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
+    int insn_flags; /* Supported instruction set */
+
+    CPU_COMMON
+
+    /* Fields from here on are preserved across CPU reset. */
+    CPUMIPSMVPContext *mvp;
+#if !defined(CONFIG_USER_ONLY)
+    CPUMIPSTLBContext *tlb;
+#endif
+
+    const mips_def_t *cpu_model;
+    void *irq[8];
+    QEMUTimer *timer; /* Internal timer */
+    MemoryRegion *itc_tag; /* ITC Configuration Tags */
+    target_ulong exception_base; /* ExceptionBase input to the core */
+};
+
+/**
+ * MIPSCPU:
+ * @env: #CPUMIPSState
+ *
+ * A MIPS CPU.
+ */
+struct MIPSCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUMIPSState env;
+};
+
+static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env)
+{
+    return container_of(env, MIPSCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(MIPSCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_mips_cpu;
+#endif
+
+void mips_cpu_do_interrupt(CPUState *cpu);
+bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                  MMUAccessType access_type,
+                                  int mmu_idx, uintptr_t retaddr);
+
+#if !defined(CONFIG_USER_ONLY)
+int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+                        target_ulong address, int rw, int access_type);
+int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+                           target_ulong address, int rw, int access_type);
+int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+                     target_ulong address, int rw, int access_type);
+void r4k_helper_tlbwi(CPUMIPSState *env);
+void r4k_helper_tlbwr(CPUMIPSState *env);
+void r4k_helper_tlbp(CPUMIPSState *env);
+void r4k_helper_tlbr(CPUMIPSState *env);
+void r4k_helper_tlbinv(CPUMIPSState *env);
+void r4k_helper_tlbinvf(CPUMIPSState *env);
+
+void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+                                bool is_write, bool is_exec, int unused,
+                                unsigned size);
+#endif
+
+void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
+
+#define cpu_signal_handler cpu_mips_signal_handler
+#define cpu_list mips_cpu_list
+
+extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
+extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
+
+/* MMU mode definitions. We carefully match the indices with our
+   hflags layout. */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _super
+#define MMU_MODE2_SUFFIX _user
+#define MMU_USER_IDX 2
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
+{
+    return env->hflags & MIPS_HFLAG_KSU;
+}
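+
+/*
+ * Illustrative note (not from the original header): because the KSU
+ * field occupies the two lowest hflags bits, the value returned above
+ * is already the MMU mode index declared just before it:
+ *
+ *     MIPS_HFLAG_KM == 0  ->  MMU_MODE0_SUFFIX (_kernel)
+ *     MIPS_HFLAG_SM == 1  ->  MMU_MODE1_SUFFIX (_super)
+ *     MIPS_HFLAG_UM == 2  ->  MMU_MODE2_SUFFIX (_user) == MMU_USER_IDX
+ */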
+
+static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
+{
+    return (env->CP0_Status & (1 << CP0St_IE)) &&
+        !(env->CP0_Status & (1 << CP0St_EXL)) &&
+        !(env->CP0_Status & (1 << CP0St_ERL)) &&
+        !(env->hflags & MIPS_HFLAG_DM) &&
+        /* Note that the TCStatus IXMT field is initialized to zero,
+           and only MT capable cores can set it to one. So we don't
+           need to check for MT capabilities here.  */
+        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
+}
+
+/* Check whether there is a pending interrupt that is not masked out.  */
+static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
+{
+    int32_t pending;
+    int32_t status;
+    bool r;
+
+    pending = env->CP0_Cause & CP0Ca_IP_mask;
+    status = env->CP0_Status & CP0Ca_IP_mask;
+
+    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+        /* A MIPS configured with a vectorizing external interrupt controller
+           will feed a vector into the Cause pending lines. The core treats
+           the status lines as a vector level, not as individual masks.  */
+        r = pending > status;
+    } else {
+        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
+           treats the pending lines as individual interrupt lines and the
+           status lines as individual masks.  */
+        r = (pending & status) != 0;
+    }
+    return r;
+}
+
+#include "exec/cpu-all.h"
+
+/* Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+    /* 1 bit to define user level / supervisor access */
+    ACCESS_USER  = 0x00,
+    ACCESS_SUPER = 0x01,
+    /* 1 bit to indicate direction */
+    ACCESS_STORE = 0x02,
+    /* Type of instruction that generated the access */
+    ACCESS_CODE  = 0x10, /* Code fetch access                */
+    ACCESS_INT   = 0x20, /* Integer load/store access        */
+    ACCESS_FLOAT = 0x30, /* floating point load/store access */
+};
+
+/* Exceptions */
+enum {
+    EXCP_NONE          = -1,
+    EXCP_RESET         = 0,
+    EXCP_SRESET,
+    EXCP_DSS,
+    EXCP_DINT,
+    EXCP_DDBL,
+    EXCP_DDBS,
+    EXCP_NMI,
+    EXCP_MCHECK,
+    EXCP_EXT_INTERRUPT, /* 8 */
+    EXCP_DFWATCH,
+    EXCP_DIB,
+    EXCP_IWATCH,
+    EXCP_AdEL,
+    EXCP_AdES,
+    EXCP_TLBF,
+    EXCP_IBE,
+    EXCP_DBp, /* 16 */
+    EXCP_SYSCALL,
+    EXCP_BREAK,
+    EXCP_CpU,
+    EXCP_RI,
+    EXCP_OVERFLOW,
+    EXCP_TRAP,
+    EXCP_FPE,
+    EXCP_DWATCH, /* 24 */
+    EXCP_LTLBL,
+    EXCP_TLBL,
+    EXCP_TLBS,
+    EXCP_DBE,
+    EXCP_THREAD,
+    EXCP_MDMX,
+    EXCP_C2E,
+    EXCP_CACHE, /* 32 */
+    EXCP_DSPDIS,
+    EXCP_MSADIS,
+    EXCP_MSAFPE,
+    EXCP_TLBXI,
+    EXCP_TLBRI,
+
+    EXCP_LAST = EXCP_TLBRI,
+};
+/* Dummy exception for conditional stores.  */
+#define EXCP_SC 0x100
+
+/*
+ * This is an internally generated WAKE request line.
+ * It is driven by the CPU itself. It is raised when the MT
+ * block wants to wake a VPE from an inactive state and
+ * cleared when the VPE goes from active to inactive.
+ */
+#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
+
+void mips_tcg_init(void);
+MIPSCPU *cpu_mips_init(const char *cpu_model);
+int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_init(cpu_model) CPU(cpu_mips_init(cpu_model))
+bool cpu_supports_cps_smp(const char *cpu_model);
+void cpu_set_exception_base(int vp_index, target_ulong address);
+
+/* TODO QOM'ify CPU reset and remove */
+void cpu_state_reset(CPUMIPSState *s);
+
+/* mips_timer.c */
+uint32_t cpu_mips_get_random (CPUMIPSState *env);
+uint32_t cpu_mips_get_count (CPUMIPSState *env);
+void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
+void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
+void cpu_mips_start_count(CPUMIPSState *env);
+void cpu_mips_stop_count(CPUMIPSState *env);
+
+/* mips_int.c */
+void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
+
+/* helper.c */
+int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                              int mmu_idx);
+
+/* op_helper.c */
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
+#if !defined(CONFIG_USER_ONLY)
+void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
+hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
+                                   int rw);
+#endif
+target_ulong exception_resume_pc (CPUMIPSState *env);
+
+/* op_helper.c */
+extern unsigned int ieee_rm[];
+int ieee_ex_to_mips(int xcpt);
+
+static inline void restore_rounding_mode(CPUMIPSState *env)
+{
+    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
+                            &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPUMIPSState *env)
+{
+    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
+                      &env->active_fpu.fp_status);
+}
+
+static inline void restore_snan_bit_mode(CPUMIPSState *env)
+{
+    set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
+                        &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPUMIPSState *env)
+{
+    restore_rounding_mode(env);
+    restore_flush_mode(env);
+    restore_snan_bit_mode(env);
+}
+
+static inline void restore_msa_fp_status(CPUMIPSState *env)
+{
+    float_status *status = &env->active_tc.msa_fp_status;
+    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
+    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;
+
+    set_float_rounding_mode(ieee_rm[rounding_mode], status);
+    set_flush_to_zero(flush_to_zero, status);
+    set_flush_inputs_to_zero(flush_to_zero, status);
+}
+
+static inline void restore_pamask(CPUMIPSState *env)
+{
+    if (env->hflags & MIPS_HFLAG_ELPA) {
+        env->PAMask = (1ULL << env->PABITS) - 1;
+    } else {
+        env->PAMask = PAMASK_BASE;
+    }
+}
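+
+/*
+ * Worked example (sketch, not from the original header): on a
+ * TARGET_MIPS64 build PABITS_BASE is 36, so without ELPA
+ *
+ *     env->PAMask == PAMASK_BASE == (1ULL << 36) - 1 == 0xFFFFFFFFFULL
+ *
+ * i.e. a 36-bit physical address space; with MIPS_HFLAG_ELPA set
+ * (Config3.LPA plus PageGrain.ELPA, see compute_hflags below) the mask
+ * widens to (1ULL << env->PABITS) - 1 for the configured PABITS.
+ */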
+
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *flags)
+{
+    *pc = env->active_tc.PC;
+    *cs_base = 0;
+    *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
+                            MIPS_HFLAG_HWRENA_ULR);
+}
+
+static inline int mips_vpe_active(CPUMIPSState *env)
+{
+    int active = 1;
+
+    /* Check that the VPE is enabled.  */
+    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
+        active = 0;
+    }
+    /* Check that the VPE is activated.  */
+    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
+        active = 0;
+    }
+
+    /* Now verify that there are active thread contexts in the VPE.
+
+       This assumes the CPU model will internally reschedule threads
+       if the active one goes to sleep. If there are no threads available
+       the active one will be in a sleeping state, and we can turn off
+       the entire VPE.  */
+    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
+        /* TC is not activated.  */
+        active = 0;
+    }
+    if (env->active_tc.CP0_TCHalt & 1) {
+        /* TC is in halt state.  */
+        active = 0;
+    }
+
+    return active;
+}
+
+static inline int mips_vp_active(CPUMIPSState *env)
+{
+    CPUState *other_cs = first_cpu;
+
+    /* Check if the VP disabled other VPs (which means the VP is enabled) */
+    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+        return 1;
+    }
+
+    /* Check if the virtual processor is disabled due to a DVP */
+    CPU_FOREACH(other_cs) {
+        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+        if ((&other_cpu->env != env) &&
+            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+            return 0;
+        }
+    }
+    return 1;
+}
+
+static inline void compute_hflags(CPUMIPSState *env)
+{
+    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
+                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
+                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
+                     MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
+                     MIPS_HFLAG_ELPA);
+    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
+        !(env->CP0_Status & (1 << CP0St_ERL)) &&
+        !(env->hflags & MIPS_HFLAG_DM)) {
+        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
+    }
+#if defined(TARGET_MIPS64)
+    if ((env->insn_flags & ISA_MIPS3) &&
+        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
+         (env->CP0_Status & (1 << CP0St_PX)) ||
+         (env->CP0_Status & (1 << CP0St_UX)))) {
+        env->hflags |= MIPS_HFLAG_64;
+    }
+
+    if (!(env->insn_flags & ISA_MIPS3)) {
+        env->hflags |= MIPS_HFLAG_AWRAP;
+    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
+               !(env->CP0_Status & (1 << CP0St_UX))) {
+        env->hflags |= MIPS_HFLAG_AWRAP;
+    } else if (env->insn_flags & ISA_MIPS64R6) {
+        /* Address wrapping for Supervisor and Kernel is specified in R6 */
+        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
+             !(env->CP0_Status & (1 << CP0St_SX))) ||
+            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
+             !(env->CP0_Status & (1 << CP0St_KX)))) {
+            env->hflags |= MIPS_HFLAG_AWRAP;
+        }
+    }
+#endif
+    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
+         !(env->insn_flags & ISA_MIPS32R6)) ||
+        !(env->hflags & MIPS_HFLAG_KSU)) {
+        env->hflags |= MIPS_HFLAG_CP0;
+    }
+    if (env->CP0_Status & (1 << CP0St_CU1)) {
+        env->hflags |= MIPS_HFLAG_FPU;
+    }
+    if (env->CP0_Status & (1 << CP0St_FR)) {
+        env->hflags |= MIPS_HFLAG_F64;
+    }
+    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
+        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
+        env->hflags |= MIPS_HFLAG_SBRI;
+    }
+    if (env->insn_flags & ASE_DSPR2) {
+        /* The CPU implements the DSP R2 ASE, so enable access to the
+           DSP and DSP R2 resources when Status.MX is set. */
+        if (env->CP0_Status & (1 << CP0St_MX)) {
+            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2;
+        }
+
+    } else if (env->insn_flags & ASE_DSP) {
+        /* The CPU implements the DSP ASE, so enable access to the
+           DSP resources when Status.MX is set. */
+        if (env->CP0_Status & (1 << CP0St_MX)) {
+            env->hflags |= MIPS_HFLAG_DSP;
+        }
+
+    }
+    if (env->insn_flags & ISA_MIPS32R2) {
+        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    } else if (env->insn_flags & ISA_MIPS32) {
+        if (env->hflags & MIPS_HFLAG_64) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    } else if (env->insn_flags & ISA_MIPS4) {
+        /* All supported MIPS IV CPUs use the XX (CU3) to enable
+           and disable the MIPS IV extensions to the MIPS III ISA.
+           Some other MIPS IV CPUs ignore the bit, so the check here
+           would be too restrictive for them.  */
+        if (env->CP0_Status & (1U << CP0St_CU3)) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    }
+    if (env->insn_flags & ASE_MSA) {
+        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
+            env->hflags |= MIPS_HFLAG_MSA;
+        }
+    }
+    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+            env->hflags |= MIPS_HFLAG_FRE;
+        }
+    }
+    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
+        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
+            env->hflags |= MIPS_HFLAG_ELPA;
+        }
+    }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global);
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
+
+void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+                                          int error_code, uintptr_t pc);
+
+static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
+                                                    uint32_t exception,
+                                                    uintptr_t pc)
+{
+    do_raise_exception_err(env, exception, 0, pc);
+}
+
+#endif /* MIPS_CPU_H */