Diffstat (limited to 'tcg')
-rw-r--r--  tcg/README                |   18
-rw-r--r--  tcg/aarch64/tcg-target.c  | 1169
-rw-r--r--  tcg/aarch64/tcg-target.h  |   35
-rw-r--r--  tcg/arm/tcg-target.c      |    8
-rw-r--r--  tcg/arm/tcg-target.h      |    2
-rw-r--r--  tcg/i386/tcg-target.c     |    8
-rw-r--r--  tcg/i386/tcg-target.h     |    2
-rw-r--r--  tcg/ia64/tcg-target.c     |  500
-rw-r--r--  tcg/ia64/tcg-target.h     |    2
-rw-r--r--  tcg/mips/tcg-target.c     |   14
-rw-r--r--  tcg/mips/tcg-target.h     |    5
-rw-r--r--  tcg/optimize.c            |   45
-rw-r--r--  tcg/ppc/tcg-target.c      |    8
-rw-r--r--  tcg/ppc/tcg-target.h      |    2
-rw-r--r--  tcg/ppc64/tcg-target.c    |   12
-rw-r--r--  tcg/ppc64/tcg-target.h    |    1
-rw-r--r--  tcg/s390/tcg-target.c     |   45
-rw-r--r--  tcg/s390/tcg-target.h     |    2
-rw-r--r--  tcg/sparc/tcg-target.c    |   10
-rw-r--r--  tcg/sparc/tcg-target.h    |    2
-rw-r--r--  tcg/tcg-be-ldst.h         |    2
-rw-r--r--  tcg/tcg-op.h              |   48
-rw-r--r--  tcg/tcg.c                 |   14
-rw-r--r--  tcg/tcg.h                 |    8
-rw-r--r--  tcg/tci/tcg-target.c      |    2
-rw-r--r--  tcg/tci/tcg-target.h      |    8
26 files changed, 1031 insertions(+), 941 deletions(-)
diff --git a/tcg/README b/tcg/README index f1782123b7..776e9259e3 100644 --- a/tcg/README +++ b/tcg/README @@ -36,6 +36,12 @@ or a memory location which is stored in a register outside QEMU TBs A TCG "basic block" corresponds to a list of instructions terminated by a branch instruction. +An operation with "undefined behavior" may result in a crash. + +An operation with "unspecified behavior" shall not crash. However, +the result may be one of several possibilities so may be considered +an "undefined result". + 3) Intermediate representation 3.1) Introduction @@ -239,23 +245,25 @@ t0=t1|~t2 * shl_i32/i64 t0, t1, t2 -t0=t1 << t2. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64) +t0=t1 << t2. Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * shr_i32/i64 t0, t1, t2 -t0=t1 >> t2 (unsigned). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64) +t0=t1 >> t2 (unsigned). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * sar_i32/i64 t0, t1, t2 -t0=t1 >> t2 (signed). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64) +t0=t1 >> t2 (signed). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * rotl_i32/i64 t0, t1, t2 -Rotation of t2 bits to the left. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64) +Rotation of t2 bits to the left. +Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) * rotr_i32/i64 t0, t1, t2 -Rotation of t2 bits to the right. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64) +Rotation of t2 bits to the right. +Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) ********* Misc diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c index 661a5af810..0a580b65ea 100644 --- a/tcg/aarch64/tcg-target.c +++ b/tcg/aarch64/tcg-target.c @@ -23,34 +23,26 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7", "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15", "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23", - "%x24", "%x25", "%x26", "%x27", "%x28", - "%fp", /* frame pointer */ - "%lr", /* link register */ - "%sp", /* stack pointer */ + "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp", }; #endif /* NDEBUG */ -#ifdef TARGET_WORDS_BIGENDIAN - #define TCG_LDST_BSWAP 1 -#else - #define TCG_LDST_BSWAP 0 -#endif - static const int tcg_target_reg_alloc_order[] = { TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */ - TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, - TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, - TCG_REG_X18, TCG_REG_X19, /* will not use these, see tcg_target_init */ - TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, - TCG_REG_X8, /* will not use, see tcg_target_init */ + /* X18 reserved by system */ + /* X19 reserved for AREG0 */ + /* X29 reserved as fp */ + /* X30 reserved as temporary */ }; static const int tcg_target_call_iarg_regs[8] = { @@ -61,13 +53,13 @@ static const int tcg_target_call_oarg_regs[1] = { TCG_REG_X0 }; -#define TCG_REG_TMP TCG_REG_X8 +#define TCG_REG_TMP TCG_REG_X30 #ifndef CONFIG_SOFTMMU -# if defined(CONFIG_USE_GUEST_BASE) -# define TCG_REG_GUEST_BASE TCG_REG_X28 +# ifdef CONFIG_USE_GUEST_BASE +# define TCG_REG_GUEST_BASE TCG_REG_X28 # else -# define TCG_REG_GUEST_BASE TCG_REG_XZR +# define TCG_REG_GUEST_BASE TCG_REG_XZR # endif #endif 
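
The README clarification above is not academic: a frontend may rely on "unspecified" shifts not crashing, but it must still mask the count when the guest architecture defines masking. A minimal sketch, not from this patch, assuming the usual tcg-op.h helpers and a hypothetical gen_guest_shl32:

/* Guest-visible 32-bit shift where the guest (like 32-bit x86)
   masks the count to 5 bits.  Masking first keeps the TCG shl_i32
   inside its defined range, so the "unspecified" case never occurs. */
static void gen_guest_shl32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 count)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, count, 31);   /* clamp count into [0, 31] */
    tcg_gen_shl_i32(dst, src, t);
    tcg_temp_free_i32(t);
}
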
@@ -110,11 +102,10 @@ static inline void patch_reloc(uint8_t *code_ptr, int type, } } -#define TCG_CT_CONST_IS32 0x100 -#define TCG_CT_CONST_AIMM 0x200 -#define TCG_CT_CONST_LIMM 0x400 -#define TCG_CT_CONST_ZERO 0x800 -#define TCG_CT_CONST_MONE 0x1000 +#define TCG_CT_CONST_AIMM 0x100 +#define TCG_CT_CONST_LIMM 0x200 +#define TCG_CT_CONST_ZERO 0x400 +#define TCG_CT_CONST_MONE 0x800 /* parse target specific constraints */ static int target_parse_constraint(TCGArgConstraint *ct, @@ -139,9 +130,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); #endif break; - case 'w': /* The operand should be considered 32-bit. */ - ct->ct |= TCG_CT_CONST_IS32; - break; case 'A': /* Valid for arithmetic immediate (positive or negative). */ ct->ct |= TCG_CT_CONST_AIMM; break; @@ -188,7 +176,7 @@ static inline bool is_limm(uint64_t val) return (val & (val - 1)) == 0; } -static int tcg_target_const_match(tcg_target_long val, +static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; @@ -196,7 +184,7 @@ static int tcg_target_const_match(tcg_target_long val, if (ct & TCG_CT_CONST) { return 1; } - if (ct & TCG_CT_CONST_IS32) { + if (type == TCG_TYPE_I32) { val = (int32_t)val; } if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { @@ -250,19 +238,12 @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { [TCG_COND_LEU] = COND_LS, }; -/* opcodes for LDR / STR instructions with base + simm9 addressing */ -enum aarch64_ldst_op_data { /* size of the data moved */ - LDST_8 = 0x38, - LDST_16 = 0x78, - LDST_32 = 0xb8, - LDST_64 = 0xf8, -}; -enum aarch64_ldst_op_type { /* type of operation */ - LDST_ST = 0x0, /* store */ - LDST_LD = 0x4, /* load */ - LDST_LD_S_X = 0x8, /* load and sign-extend into Xt */ - LDST_LD_S_W = 0xc, /* load and sign-extend into Wt */ -}; +typedef enum { + LDST_ST = 0, /* store */ + LDST_LD = 1, /* load */ + LDST_LD_S_X = 2, /* load and sign-extend into Xt */ + LDST_LD_S_W = 3, /* load and sign-extend into Wt */ +} AArch64LdstType; /* We encode the format of the insn into the beginning of the name, so that we can have the preprocessor help "typecheck" the insn vs the output @@ -270,6 +251,48 @@ enum aarch64_ldst_op_type { /* type of operation */ use the section number of the architecture reference manual in which the instruction group is described. */ typedef enum { + /* Compare and branch (immediate). */ + I3201_CBZ = 0x34000000, + I3201_CBNZ = 0x35000000, + + /* Conditional branch (immediate). */ + I3202_B_C = 0x54000000, + + /* Unconditional branch (immediate). */ + I3206_B = 0x14000000, + I3206_BL = 0x94000000, + + /* Unconditional branch (register). */ + I3207_BR = 0xd61f0000, + I3207_BLR = 0xd63f0000, + I3207_RET = 0xd65f0000, + + /* Load/store register. Described here as 3.3.12, but the helper + that emits them can transform to 3.3.10 or 3.3.13. 
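
The I3312_* constants defined just below pack three fields into one value: the C3.3.12 base opcode, the operation type (LDST_*) in bits 23:22, and the MO_* size in bits 31:30, which is why later code can recover the access size as "insn >> 30". A standalone sketch, not QEMU code, checking one of them:

#include <assert.h>
#include <stdint.h>

enum { MO_8, MO_16, MO_32, MO_64 };                 /* log2 of the width */
enum { LDST_ST, LDST_LD, LDST_LD_S_X, LDST_LD_S_W };

#define I3312(op, sz) (0x38000000u | (uint32_t)(op) << 22 | (uint32_t)(sz) << 30)

int main(void)
{
    uint32_t ldrx = I3312(LDST_LD, MO_64);   /* the patch's I3312_LDRX */

    assert(ldrx == 0xf8400000u);             /* size=11 opc=01: 64-bit load */
    assert(ldrx >> 30 == MO_64);             /* size recoverable by a shift */
    return 0;
}
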
*/ + I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, + I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, + I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, + I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, + + I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, + I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, + I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, + I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, + + I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, + I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, + + I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, + I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, + I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, + + I3312_TO_I3310 = 0x00206800, + I3312_TO_I3313 = 0x01000000, + + /* Load/store register pair instructions. */ + I3314_LDP = 0x28400000, + I3314_STP = 0x28000000, + /* Add/subtract immediate instructions. */ I3401_ADDI = 0x11000000, I3401_ADDSI = 0x31000000, @@ -294,6 +317,10 @@ typedef enum { I3405_MOVZ = 0x52800000, I3405_MOVK = 0x72800000, + /* PC relative addressing instructions. */ + I3406_ADR = 0x10000000, + I3406_ADRP = 0x90000000, + /* Add/subtract shifted register instructions (without a shift). */ I3502_ADD = 0x0b000000, I3502_ADDS = 0x2b000000, @@ -311,6 +338,11 @@ typedef enum { I3506_CSEL = 0x1a800000, I3506_CSINC = 0x1a800400, + /* Data-processing (1 source) instructions. */ + I3507_REV16 = 0x5ac00400, + I3507_REV32 = 0x5ac00800, + I3507_REV64 = 0x5ac00c00, + /* Data-processing (2 source) instructions. */ I3508_LSLV = 0x1ac02000, I3508_LSRV = 0x1ac02400, @@ -335,87 +367,51 @@ typedef enum { I3510_ANDS = 0x6a000000, } AArch64Insn; -static inline enum aarch64_ldst_op_data -aarch64_ldst_get_data(TCGOpcode tcg_op) +static inline uint32_t tcg_in32(TCGContext *s) { - switch (tcg_op) { - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - return LDST_8; - - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - return LDST_16; - - case INDEX_op_ld_i32: - case INDEX_op_st_i32: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_st32_i64: - return LDST_32; + uint32_t v = *(uint32_t *)s->code_ptr; + return v; +} - case INDEX_op_ld_i64: - case INDEX_op_st_i64: - return LDST_64; +/* Emit an opcode with "type-checking" of the format. */ +#define tcg_out_insn(S, FMT, OP, ...) 
\ + glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) - default: - tcg_abort(); - } +static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rt, int imm19) +{ + tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); } -static inline enum aarch64_ldst_op_type -aarch64_ldst_get_type(TCGOpcode tcg_op) +static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, + TCGCond c, int imm19) { - switch (tcg_op) { - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return LDST_ST; - - case INDEX_op_ld8u_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - case INDEX_op_ld_i64: - return LDST_LD; - - case INDEX_op_ld8s_i32: - case INDEX_op_ld16s_i32: - return LDST_LD_S_W; - - case INDEX_op_ld8s_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32s_i64: - return LDST_LD_S_X; + tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); +} - default: - tcg_abort(); - } +static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) +{ + tcg_out32(s, insn | (imm26 & 0x03ffffff)); } -static inline uint32_t tcg_in32(TCGContext *s) +static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) { - uint32_t v = *(uint32_t *)s->code_ptr; - return v; + tcg_out32(s, insn | rn << 5); } -/* Emit an opcode with "type-checking" of the format. */ -#define tcg_out_insn(S, FMT, OP, ...) \ - glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) +static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, + TCGReg r1, TCGReg r2, TCGReg rn, + tcg_target_long ofs, bool pre, bool w) +{ + insn |= 1u << 31; /* ext */ + insn |= pre << 24; + insn |= w << 23; + + assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); + insn |= (ofs & (0x7f << 3)) << (15 - 3); + + tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); +} static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, uint64_t aimm) @@ -457,6 +453,12 @@ static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); } +static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, + TCGReg rd, int64_t disp) +{ + tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); +} + /* This function is for both 3.5.2 (Add/Subtract shifted register), for the rare occasion when we actually want to supply a shift amount. */ static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, @@ -487,32 +489,37 @@ static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, | tcg_cond_to_aarch64[c] << 12); } +static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn) +{ + tcg_out32(s, insn | ext << 31 | rn << 5 | rd); +} + static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) { tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); } +static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg base, TCGReg regoff) +{ + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. 
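
tcg_out_insn_3314 above scales the byte offset of a load/store pair into the 7-bit imm7 field at bits 21:15. A standalone check of that arithmetic, reusing the patch's masks on an assumed offset:

#include <assert.h>
#include <stdint.h>

/* Mirror of the offset packing in tcg_out_insn_3314: the offset must
   be 8-byte aligned and within [-0x200, 0x200); offset/8 lands in
   bits 21:15 of the instruction. */
static uint32_t ldst_pair_imm7(int64_t ofs)
{
    assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
    return (uint32_t)(((uint64_t)ofs & (0x7f << 3)) << (15 - 3));
}

int main(void)
{
    /* The prologue's "STP fp, lr, [sp, #-96]!" uses ofs == -96. */
    assert(ldst_pair_imm7(-96) == (((uint32_t)(-96 / 8) & 0x7f) << 15));
    return 0;
}
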
*/ + tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd); +} -static inline void tcg_out_ldst_9(TCGContext *s, - enum aarch64_ldst_op_data op_data, - enum aarch64_ldst_op_type op_type, - TCGReg rd, TCGReg rn, tcg_target_long offset) + +static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, intptr_t offset) { - /* use LDUR with BASE register with 9bit signed unscaled offset */ - tcg_out32(s, op_data << 24 | op_type << 20 - | (offset & 0x1ff) << 12 | rn << 5 | rd); + tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd); } -/* tcg_out_ldst_12 expects a scaled unsigned immediate offset */ -static inline void tcg_out_ldst_12(TCGContext *s, - enum aarch64_ldst_op_data op_data, - enum aarch64_ldst_op_type op_type, - TCGReg rd, TCGReg rn, - tcg_target_ulong scaled_uimm) +static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) { - tcg_out32(s, (op_data | 1) << 24 - | op_type << 20 | scaled_uimm << 10 | rn << 5 | rd); + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ + tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd); } /* Register to register move using ORR (shifted register with no shift). */ @@ -527,89 +534,177 @@ static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); } +/* This function is used for the Logical (immediate) instruction group. + The value of LIMM must satisfy IS_LIMM. See the comment above about + only supporting simplified logical immediates. */ +static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, uint64_t limm) +{ + unsigned h, l, r, c; + + assert(is_limm(limm)); + + h = clz64(limm); + l = ctz64(limm); + if (l == 0) { + r = 0; /* form 0....01....1 */ + c = ctz64(~limm) - 1; + if (h == 0) { + r = clz64(~limm); /* form 1..10..01..1 */ + c += r; + } + } else { + r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ + c = r - h - 1; + } + if (ext == TCG_TYPE_I32) { + r &= 31; + c &= 31; + } + + tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); +} + static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long value) { AArch64Insn insn; - - if (type == TCG_TYPE_I32) { + int i, wantinv, shift; + tcg_target_long svalue = value; + tcg_target_long ivalue = ~value; + tcg_target_long imask; + + /* For 32-bit values, discard potential garbage in value. For 64-bit + values within [2**31, 2**32-1], we can create smaller sequences by + interpreting this as a negative 32-bit number, while ensuring that + the high 32 bits are cleared by setting SF=0. */ + if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { + svalue = (int32_t)value; value = (uint32_t)value; + ivalue = (uint32_t)ivalue; + type = TCG_TYPE_I32; + } + + /* Speed things up by handling the common case of small positive + and negative values specially. */ + if ((value & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); + return; + } else if ((ivalue & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); + return; + } + + /* Check for bitfield immediates. For the benefit of 32-bit quantities, + use the sign-extended value. That lets us match rotated values such + as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ + if (is_limm(svalue)) { + tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); + return; } - /* count trailing zeros in 16 bit steps, mapping 64 to 0. 
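
The relocated tcg_out_logicali is the subtle part of the logical-immediate support: it converts a "simplified" immediate into the rotation/length pair that the AND/ORR immediate format encodes. A standalone trace of that derivation, substituting GCC builtins for QEMU's clz64/ctz64 (the input must be a valid non-zero, non-all-ones mask):

#include <assert.h>
#include <stdint.h>

static void limm_fields(uint64_t limm, unsigned *r, unsigned *c)
{
    unsigned h = __builtin_clzll(limm), l = __builtin_ctzll(limm);

    if (l == 0) {
        *r = 0;                           /* form 0....01....1 */
        *c = __builtin_ctzll(~limm) - 1;
        if (h == 0) {
            *r = __builtin_clzll(~limm);  /* form 1..10..01..1 */
            *c += *r;
        }
    } else {
        *r = 64 - l;                      /* form 1....10....0 or 0..01..10..0 */
        *c = *r - h - 1;
    }
}

int main(void)
{
    unsigned r, c;

    limm_fields(0xff0, &r, &c);   /* eight 1s rotated left by 4 */
    assert(r == 60 && c == 7);    /* i.e. immr=60, imms=7 */
    return 0;
}
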
Emit the - first MOVZ with the half-word immediate skipping the zeros, with a shift - (LSL) equal to this number. Then all next instructions use MOVKs. - Zero the processed half-word in the value, continue until empty. - We build the final result 16bits at a time with up to 4 instructions, - but do not emit instructions for 16bit zero holes. */ + /* Look for host pointer values within 4G of the PC. This happens + often when loading pointers to QEMU's own data structures. */ + if (type == TCG_TYPE_I64) { + tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); + if (disp == sextract64(disp, 0, 21)) { + tcg_out_insn(s, 3406, ADRP, rd, disp); + if (value & 0xfff) { + tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); + } + return; + } + } + + /* Would it take fewer insns to begin with MOVN? For the value and its + inverse, count the number of 16-bit lanes that are 0. */ + for (i = wantinv = imask = 0; i < 64; i += 16) { + tcg_target_long mask = 0xffffull << i; + if ((value & mask) == 0) { + wantinv -= 1; + } + if ((ivalue & mask) == 0) { + wantinv += 1; + imask |= mask; + } + } + + /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */ insn = I3405_MOVZ; - do { - unsigned shift = ctz64(value) & (63 & -16); - tcg_out_insn_3405(s, insn, shift >= 32, rd, value >> shift, shift); + if (wantinv > 0) { + value = ivalue; + insn = I3405_MOVN; + } + + /* Find the lowest lane that is not 0x0000. */ + shift = ctz64(value) & (63 & -16); + tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift); + + if (wantinv > 0) { + /* Re-invert the value, so MOVK sees non-inverted bits. */ + value = ~value; + /* Clear out all the 0xffff lanes. */ + value ^= imask; + } + /* Clear out the lane that we just set. */ + value &= ~(0xffffUL << shift); + + /* Iterate until all lanes have been set, and thus cleared from VALUE. */ + while (value) { + shift = ctz64(value) & (63 & -16); + tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift); value &= ~(0xffffUL << shift); - insn = I3405_MOVK; - } while (value); + } } -static inline void tcg_out_ldst_r(TCGContext *s, - enum aarch64_ldst_op_data op_data, - enum aarch64_ldst_op_type op_type, - TCGReg rd, TCGReg base, TCGReg regoff) -{ - /* load from memory to register using base + 64bit register offset */ - /* using f.e. STR Wt, [Xn, Xm] 0xb8600800|(regoff << 16)|(base << 5)|rd */ - /* the 0x6000 is for the "no extend field" */ - tcg_out32(s, 0x00206800 - | op_data << 24 | op_type << 20 | regoff << 16 | base << 5 | rd); -} +/* Define something more legible for general use. 
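
The heart of the new tcg_out_movi is the MOVZ-versus-MOVN decision quoted above: count which 16-bit lanes come for free with each starting instruction. A minimal standalone sketch of just that heuristic:

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero when MOVN (start from all-ones) needs fewer MOVKs
   than MOVZ (start from all-zeros), as in tcg_out_movi's wantinv. */
static int prefer_movn(uint64_t value)
{
    int i, wantinv = 0;

    for (i = 0; i < 64; i += 16) {
        uint64_t mask = 0xffffull << i;
        if ((value & mask) == 0) {
            wantinv -= 1;        /* zero lane: free with MOVZ */
        }
        if ((~value & mask) == 0) {
            wantinv += 1;        /* all-ones lane: free with MOVN */
        }
    }
    return wantinv > 0;
}

int main(void)
{
    printf("%d\n", prefer_movn(0xffffffffffff1234ull));  /* 1: one MOVN suffices */
    printf("%d\n", prefer_movn(0x1234ull));              /* 0: one MOVZ suffices */
    return 0;
}
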
*/ +#define tcg_out_ldst_r tcg_out_insn_3310 -/* solve the whole ldst problem */ -static inline void tcg_out_ldst(TCGContext *s, enum aarch64_ldst_op_data data, - enum aarch64_ldst_op_type type, - TCGReg rd, TCGReg rn, tcg_target_long offset) +static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, intptr_t offset) { - if (offset >= -256 && offset < 256) { - tcg_out_ldst_9(s, data, type, rd, rn, offset); - return; - } + TCGMemOp size = (uint32_t)insn >> 30; - if (offset >= 256) { - /* if the offset is naturally aligned and in range, - then we can use the scaled uimm12 encoding */ - unsigned int s_bits = data >> 6; - if (!(offset & ((1 << s_bits) - 1))) { - tcg_target_ulong scaled_uimm = offset >> s_bits; - if (scaled_uimm <= 0xfff) { - tcg_out_ldst_12(s, data, type, rd, rn, scaled_uimm); - return; - } + /* If the offset is naturally aligned and in range, then we can + use the scaled uimm12 encoding */ + if (offset >= 0 && !(offset & ((1 << size) - 1))) { + uintptr_t scaled_uimm = offset >> size; + if (scaled_uimm <= 0xfff) { + tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); + return; } } - /* worst-case scenario, move offset to temp register, use reg offset */ + /* Small signed offsets can use the unscaled encoding. */ + if (offset >= -256 && offset < 256) { + tcg_out_insn_3312(s, insn, rd, rn, offset); + return; + } + + /* Worst-case scenario, move offset to temp register, use reg offset. */ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); - tcg_out_ldst_r(s, data, type, rd, rn, TCG_REG_TMP); + tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP); } static inline void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret != arg) { - tcg_out_movr(s, type == TCG_TYPE_I64, ret, arg); + tcg_out_movr(s, type, ret, arg); } } static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_LD, + tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX, arg, arg1, arg2); } static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_ST, + tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_STRW : I3312_STRX, arg, arg1, arg2); } @@ -708,51 +803,29 @@ static inline void tcg_out_goto(TCGContext *s, intptr_t target) tcg_abort(); } - tcg_out32(s, 0x14000000 | (offset & 0x03ffffff)); + tcg_out_insn(s, 3206, B, offset); } static inline void tcg_out_goto_noaddr(TCGContext *s) { - /* We pay attention here to not modify the branch target by - reading from the buffer. This ensure that caches and memory are - kept coherent during retranslation. - Mask away possible garbage in the high bits for the first translation, - while keeping the offset bits for retranslation. */ - uint32_t insn; - insn = (tcg_in32(s) & 0x03ffffff) | 0x14000000; - tcg_out32(s, insn); + /* We pay attention here to not modify the branch target by reading from + the buffer. This ensure that caches and memory are kept coherent during + retranslation. Mask away possible garbage in the high bits for the + first translation, while keeping the offset bits for retranslation. 
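
tcg_out_ldst above tries the addressing forms in decreasing order of reach: the scaled unsigned 12-bit form, then the unscaled signed 9-bit form, then a register offset via TCG_REG_TMP. A sketch of the same classification, with 'size' being the MO_* log2 width as in the patch:

#include <stdint.h>

enum ldst_form { FORM_UIMM12, FORM_SIMM9, FORM_REGOFF };

static enum ldst_form classify_offset(intptr_t offset, unsigned size)
{
    /* Naturally aligned, non-negative and small: scaled uimm12. */
    if (offset >= 0 && !(offset & ((1 << size) - 1))
        && (uintptr_t)offset >> size <= 0xfff) {
        return FORM_UIMM12;
    }
    /* Small signed offsets: the unscaled simm9 form (LDUR/STUR). */
    if (offset >= -256 && offset < 256) {
        return FORM_SIMM9;
    }
    /* Otherwise materialize the offset and use base + register. */
    return FORM_REGOFF;
}
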
*/ + uint32_t old = tcg_in32(s); + tcg_out_insn(s, 3206, B, old); } static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c) { - /* see comments in tcg_out_goto_noaddr */ - uint32_t insn; - insn = tcg_in32(s) & (0x07ffff << 5); - insn |= 0x54000000 | tcg_cond_to_aarch64[c]; - tcg_out32(s, insn); -} - -static inline void tcg_out_goto_cond(TCGContext *s, TCGCond c, intptr_t target) -{ - intptr_t offset = (target - (intptr_t)s->code_ptr) / 4; - - if (offset < -0x40000 || offset >= 0x40000) { - /* out of 19bit range */ - tcg_abort(); - } - - offset &= 0x7ffff; - tcg_out32(s, 0x54000000 | tcg_cond_to_aarch64[c] | offset << 5); + /* See comments in tcg_out_goto_noaddr. */ + uint32_t old = tcg_in32(s) >> 5; + tcg_out_insn(s, 3202, B_C, c, old); } static inline void tcg_out_callr(TCGContext *s, TCGReg reg) { - tcg_out32(s, 0xd63f0000 | reg << 5); -} - -static inline void tcg_out_gotor(TCGContext *s, TCGReg reg) -{ - tcg_out32(s, 0xd61f0000 | reg << 5); + tcg_out_insn(s, 3207, BLR, reg); } static inline void tcg_out_call(TCGContext *s, intptr_t target) @@ -763,16 +836,10 @@ static inline void tcg_out_call(TCGContext *s, intptr_t target) tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, target); tcg_out_callr(s, TCG_REG_TMP); } else { - tcg_out32(s, 0x94000000 | (offset & 0x03ffffff)); + tcg_out_insn(s, 3206, BL, offset); } } -static inline void tcg_out_ret(TCGContext *s) -{ - /* emit RET { LR } */ - tcg_out32(s, 0xd65f03c0); -} - void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) { intptr_t target = addr; @@ -799,48 +866,66 @@ static inline void tcg_out_goto_label(TCGContext *s, int label_index) } } -static inline void tcg_out_goto_label_cond(TCGContext *s, - TCGCond c, int label_index) +static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a, + TCGArg b, bool b_const, int label) { - TCGLabel *l = &s->labels[label_index]; + TCGLabel *l = &s->labels[label]; + intptr_t offset; + bool need_cmp; + + if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { + need_cmp = false; + } else { + need_cmp = true; + tcg_out_cmp(s, ext, a, b, b_const); + } if (!l->has_value) { - tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label_index, 0); - tcg_out_goto_cond_noaddr(s, c); + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0); + offset = tcg_in32(s) >> 5; + } else { + offset = l->u.value - (uintptr_t)s->code_ptr; + offset >>= 2; + assert(offset >= -0x40000 && offset < 0x40000); + } + + if (need_cmp) { + tcg_out_insn(s, 3202, B_C, c, offset); + } else if (c == TCG_COND_EQ) { + tcg_out_insn(s, 3201, CBZ, ext, a, offset); } else { - tcg_out_goto_cond(s, c, l->u.value); + tcg_out_insn(s, 3201, CBNZ, ext, a, offset); } } -static inline void tcg_out_rev(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rm) +static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) { - /* using REV 0x5ac00800 */ - unsigned int base = ext ? 0xdac00c00 : 0x5ac00800; - tcg_out32(s, base | rm << 5 | rd); + tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); } -static inline void tcg_out_rev16(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rm) +static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) { - /* using REV16 0x5ac00400 */ - unsigned int base = ext ? 
0xdac00400 : 0x5ac00400; - tcg_out32(s, base | rm << 5 | rd); + tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); } -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, int s_bits, +static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); +} + +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ - int bits = 8 * (1 << s_bits) - 1; + int bits = (8 << s_bits) - 1; tcg_out_sbfm(s, ext, rd, rn, 0, bits); } -static inline void tcg_out_uxt(TCGContext *s, int s_bits, +static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ - int bits = 8 * (1 << s_bits) - 1; + int bits = (8 << s_bits) - 1; tcg_out_ubfm(s, 0, rd, rn, 0, bits); } @@ -854,37 +939,6 @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, } } -/* This function is used for the Logical (immediate) instruction group. - The value of LIMM must satisfy IS_LIMM. See the comment above about - only supporting simplified logical immediates. */ -static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, uint64_t limm) -{ - unsigned h, l, r, c; - - assert(is_limm(limm)); - - h = clz64(limm); - l = ctz64(limm); - if (l == 0) { - r = 0; /* form 0....01....1 */ - c = ctz64(~limm) - 1; - if (h == 0) { - r = clz64(~limm); /* form 1..10..01..1 */ - c += r; - } - } else { - r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ - c = r - h - 1; - } - if (ext == TCG_TYPE_I32) { - r &= 31; - c &= 31; - } - - tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); -} - static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl, TCGReg rh, TCGReg al, TCGReg ah, tcg_target_long bl, tcg_target_long bh, @@ -921,47 +975,59 @@ static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl, } tcg_out_insn_3503(s, insn, ext, rh, ah, bh); - if (rl != orig_rl) { - tcg_out_movr(s, ext, orig_rl, rl); - } + tcg_out_mov(s, ext, orig_rl, rl); } #ifdef CONFIG_SOFTMMU /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static const void * const qemu_ld_helpers[4] = { - helper_ret_ldub_mmu, - helper_ret_lduw_mmu, - helper_ret_ldul_mmu, - helper_ret_ldq_mmu, +static const void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static const void * const qemu_st_helpers[4] = { - helper_ret_stb_mmu, - helper_ret_stw_mmu, - helper_ret_stl_mmu, - helper_ret_stq_mmu, +static const void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, }; +static inline void tcg_out_adr(TCGContext *s, TCGReg rd, uintptr_t addr) +{ + addr -= (uintptr_t)s->code_ptr; + assert(addr == sextract64(addr, 0, 21)); + tcg_out_insn(s, 3406, ADR, rd, addr); +} + static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { + TCGMemOp opc = lb->opc; + TCGMemOp size = opc & 
MO_SIZE; + reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr); - tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0); - tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr); - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, - (tcg_target_long)qemu_ld_helpers[lb->opc & 3]); - tcg_out_callr(s, TCG_REG_TMP); - if (lb->opc & 0x04) { - tcg_out_sxt(s, 1, lb->opc & 3, lb->datalo_reg, TCG_REG_X0); + tcg_out_adr(s, TCG_REG_X3, (intptr_t)lb->raddr); + tcg_out_call(s, (intptr_t)qemu_ld_helpers[opc & ~MO_SIGN]); + if (opc & MO_SIGN) { + tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0); } else { - tcg_out_movr(s, 1, lb->datalo_reg, TCG_REG_X0); + tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); } tcg_out_goto(s, (intptr_t)lb->raddr); @@ -969,20 +1035,21 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { + TCGMemOp opc = lb->opc; + TCGMemOp size = opc & MO_SIZE; + reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr); - tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0); - tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg); - tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg); + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); + tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (intptr_t)lb->raddr); - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, - (intptr_t)qemu_st_helpers[lb->opc & 3]); - tcg_out_callr(s, TCG_REG_TMP); - tcg_out_goto(s, (tcg_target_long)lb->raddr); + tcg_out_adr(s, TCG_REG_X4, (intptr_t)lb->raddr); + tcg_out_call(s, (intptr_t)qemu_st_helpers[opc]); + tcg_out_goto(s, (intptr_t)lb->raddr); } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, TCGReg data_reg, TCGReg addr_reg, int mem_index, uint8_t *raddr, uint8_t *label_ptr) @@ -1002,96 +1069,108 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc, slow path for the failure case, which will be patched later when finalizing the slow path. Generated code returns the host addend in X1, clobbers X0,X2,X3,TMP. */ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, - int s_bits, uint8_t **label_ptr, int mem_index, int is_read) +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits, + uint8_t **label_ptr, int mem_index, bool is_read) { TCGReg base = TCG_AREG0; int tlb_offset = is_read ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); + /* Extract the TLB index from the address into X0. X0<CPU_TLB_BITS:0> = addr_reg<TARGET_PAGE_BITS+CPU_TLB_BITS:TARGET_PAGE_BITS> */ - tcg_out_ubfm(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, addr_reg, + tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg, TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); + /* Store the page mask part of the address and the low s_bits into X3. Later this allows checking for equality and alignment at the same time. 
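
The equality-plus-alignment trick mentioned here deserves a concrete example: folding the low alignment bits into the page mask makes one comparison reject both a TLB miss and a misaligned access. A standalone illustration, assuming a 12-bit page size and a clean (flag-free) TLB comparator:

#include <assert.h>
#include <stdint.h>

#define PAGE_BITS 12
#define PAGE_MASK (~((1ull << PAGE_BITS) - 1))

int main(void)
{
    unsigned s_bits = 3;                    /* an 8-byte access */
    uint64_t comparator = 0x40000000ull;    /* tlb addr_read for this page */
    uint64_t mask = PAGE_MASK | ((1 << s_bits) - 1);

    assert((0x40000008ull & mask) == comparator);   /* hit: aligned */
    assert((0x40000009ull & mask) != comparator);   /* slow path: misaligned */
    assert((0x50000008ull & mask) != comparator);   /* slow path: wrong page */
    return 0;
}
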
X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */ tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); + /* Add any "high bits" from the tlb offset to the env address into X2, to take advantage of the LSL12 form of the ADDI instruction. X2 = env + (tlb_offset & 0xfff000) */ - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, - tlb_offset & 0xfff000); + if (tlb_offset & 0xfff000) { + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, + tlb_offset & 0xfff000); + base = TCG_REG_X2; + } + /* Merge the tlb index contribution into X2. X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */ - tcg_out_insn(s, 3502S, ADD_LSL, 1, TCG_REG_X2, TCG_REG_X2, + tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base, TCG_REG_X0, CPU_TLB_ENTRY_BITS); + /* Merge "low bits" from tlb offset, load the tlb comparator into X0. X0 = load [X2 + (tlb_offset & 0x000fff)] */ - tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32, - LDST_LD, TCG_REG_X0, TCG_REG_X2, - (tlb_offset & 0xfff)); + tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX, + TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff); + /* Load the tlb addend. Do that early to avoid stalling. X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */ - tcg_out_ldst(s, LDST_64, LDST_LD, TCG_REG_X1, TCG_REG_X2, + tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2, (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) - (is_read ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write))); + /* Perform the address comparison. */ tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0); - *label_ptr = s->code_ptr; + /* If not equal, we jump to the slow path. */ + *label_ptr = s->code_ptr; tcg_out_goto_cond_noaddr(s, TCG_COND_NE); } #endif /* CONFIG_SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r, - TCGReg addr_r, TCGReg off_r) +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, + TCGReg data_r, TCGReg addr_r, TCGReg off_r) { - switch (opc) { - case 0: - tcg_out_ldst_r(s, LDST_8, LDST_LD, data_r, addr_r, off_r); + const TCGMemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r); break; - case 0 | 4: - tcg_out_ldst_r(s, LDST_8, LDST_LD_S_X, data_r, addr_r, off_r); + case MO_SB: + tcg_out_ldst_r(s, I3312_LDRSBX, data_r, addr_r, off_r); break; - case 1: - tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r); - if (TCG_LDST_BSWAP) { - tcg_out_rev16(s, 0, data_r, data_r); + case MO_UW: + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev16(s, data_r, data_r); } break; - case 1 | 4: - if (TCG_LDST_BSWAP) { - tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r); - tcg_out_rev16(s, 0, data_r, data_r); - tcg_out_sxt(s, 1, 1, data_r, data_r); + case MO_SW: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); + tcg_out_rev16(s, data_r, data_r); + tcg_out_sxt(s, TCG_TYPE_I64, MO_16, data_r, data_r); } else { - tcg_out_ldst_r(s, LDST_16, LDST_LD_S_X, data_r, addr_r, off_r); + tcg_out_ldst_r(s, I3312_LDRSHX, data_r, addr_r, off_r); } break; - case 2: - tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r); - if (TCG_LDST_BSWAP) { - tcg_out_rev(s, 0, data_r, data_r); + case MO_UL: + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev32(s, data_r, data_r); } break; - case 2 | 4: - if (TCG_LDST_BSWAP) { - tcg_out_ldst_r(s, LDST_32, LDST_LD, 
data_r, addr_r, off_r); - tcg_out_rev(s, 0, data_r, data_r); - tcg_out_sxt(s, 1, 2, data_r, data_r); + case MO_SL: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); + tcg_out_rev32(s, data_r, data_r); + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); } else { - tcg_out_ldst_r(s, LDST_32, LDST_LD_S_X, data_r, addr_r, off_r); + tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r); } break; - case 3: - tcg_out_ldst_r(s, LDST_64, LDST_LD, data_r, addr_r, off_r); - if (TCG_LDST_BSWAP) { - tcg_out_rev(s, 1, data_r, data_r); + case MO_Q: + tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev64(s, data_r, data_r); } break; default: @@ -1099,141 +1178,77 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r, } } -static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data_r, - TCGReg addr_r, TCGReg off_r) +static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, + TCGReg data_r, TCGReg addr_r, TCGReg off_r) { - switch (opc) { - case 0: - tcg_out_ldst_r(s, LDST_8, LDST_ST, data_r, addr_r, off_r); + const TCGMemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SIZE) { + case MO_8: + tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r); break; - case 1: - if (TCG_LDST_BSWAP) { - tcg_out_rev16(s, 0, TCG_REG_TMP, data_r); - tcg_out_ldst_r(s, LDST_16, LDST_ST, TCG_REG_TMP, addr_r, off_r); - } else { - tcg_out_ldst_r(s, LDST_16, LDST_ST, data_r, addr_r, off_r); + case MO_16: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev16(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; } + tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r); break; - case 2: - if (TCG_LDST_BSWAP) { - tcg_out_rev(s, 0, TCG_REG_TMP, data_r); - tcg_out_ldst_r(s, LDST_32, LDST_ST, TCG_REG_TMP, addr_r, off_r); - } else { - tcg_out_ldst_r(s, LDST_32, LDST_ST, data_r, addr_r, off_r); + case MO_32: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev32(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; } + tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r); break; - case 3: - if (TCG_LDST_BSWAP) { - tcg_out_rev(s, 1, TCG_REG_TMP, data_r); - tcg_out_ldst_r(s, LDST_64, LDST_ST, TCG_REG_TMP, addr_r, off_r); - } else { - tcg_out_ldst_r(s, LDST_64, LDST_ST, data_r, addr_r, off_r); + case MO_64: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev64(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; } + tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r); break; default: tcg_abort(); } } -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp memop, int mem_index) { - TCGReg addr_reg, data_reg; #ifdef CONFIG_SOFTMMU - int mem_index, s_bits; + TCGMemOp s_bits = memop & MO_SIZE; uint8_t *label_ptr; -#endif - data_reg = args[0]; - addr_reg = args[1]; -#ifdef CONFIG_SOFTMMU - mem_index = args[2]; - s_bits = opc & 3; tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1); - tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, + tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); + add_qemu_ldst_label(s, true, memop, data_reg, addr_reg, mem_index, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ - tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, + tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, GUEST_BASE ? 
TCG_REG_GUEST_BASE : TCG_REG_XZR); #endif /* CONFIG_SOFTMMU */ } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp memop, int mem_index) { - TCGReg addr_reg, data_reg; #ifdef CONFIG_SOFTMMU - int mem_index, s_bits; + TCGMemOp s_bits = memop & MO_SIZE; uint8_t *label_ptr; -#endif - data_reg = args[0]; - addr_reg = args[1]; - -#ifdef CONFIG_SOFTMMU - mem_index = args[2]; - s_bits = opc & 3; tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0); - tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, + tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); + add_qemu_ldst_label(s, false, memop, data_reg, addr_reg, mem_index, s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ - tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, + tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); #endif /* CONFIG_SOFTMMU */ } static uint8_t *tb_ret_addr; -/* callee stack use example: - stp x29, x30, [sp,#-32]! - mov x29, sp - stp x1, x2, [sp,#16] - ... - ldp x1, x2, [sp,#16] - ldp x29, x30, [sp],#32 - ret -*/ - -/* push r1 and r2, and alloc stack space for a total of - alloc_n elements (1 element=16 bytes, must be between 1 and 31. */ -static inline void tcg_out_push_pair(TCGContext *s, TCGReg addr, - TCGReg r1, TCGReg r2, int alloc_n) -{ - /* using indexed scaled simm7 STP 0x28800000 | (ext) | 0x01000000 (pre-idx) - | alloc_n * (-1) << 16 | r2 << 10 | addr << 5 | r1 */ - assert(alloc_n > 0 && alloc_n < 0x20); - alloc_n = (-alloc_n) & 0x3f; - tcg_out32(s, 0xa9800000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1); -} - -/* dealloc stack space for a total of alloc_n elements and pop r1, r2. 
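
Both tcg_out_qemu_ld and tcg_out_qemu_st now take a TCGMemOp instead of the old pre-decoded opc bits; size, signedness and byte order are recovered by masking. A sketch of that decomposition, with the MO_* values as I understand them from tcg.h of this period:

#include <assert.h>

enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_SIZE  = 3,                 /* mask for the log2 of the width */
    MO_SIGN  = 4,                 /* sign-extend the loaded value */
    MO_BSWAP = 8,                 /* host/guest endianness differs */
    MO_SSIZE = MO_SIZE | MO_SIGN,
};

int main(void)
{
    int memop = MO_16 | MO_SIGN | MO_BSWAP;   /* e.g. a BE guest's signed ldw */

    assert((memop & MO_SIZE) == MO_16);       /* 2-byte access */
    assert((memop & MO_SSIZE) == (MO_16 | MO_SIGN));
    assert(memop & MO_BSWAP);                 /* needs tcg_out_rev16 */
    return 0;
}
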
*/ -static inline void tcg_out_pop_pair(TCGContext *s, TCGReg addr, - TCGReg r1, TCGReg r2, int alloc_n) -{ - /* using indexed scaled simm7 LDP 0x28c00000 | (ext) | nothing (post-idx) - | alloc_n << 16 | r2 << 10 | addr << 5 | r1 */ - assert(alloc_n > 0 && alloc_n < 0x20); - tcg_out32(s, 0xa8c00000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1); -} - -static inline void tcg_out_store_pair(TCGContext *s, TCGReg addr, - TCGReg r1, TCGReg r2, int idx) -{ - /* using register pair offset simm7 STP 0x29000000 | (ext) - | idx << 16 | r2 << 10 | addr << 5 | r1 */ - assert(idx > 0 && idx < 0x20); - tcg_out32(s, 0xa9000000 | idx << 16 | r2 << 10 | addr << 5 | r1); -} - -static inline void tcg_out_load_pair(TCGContext *s, TCGReg addr, - TCGReg r1, TCGReg r2, int idx) -{ - /* using register pair offset simm7 LDP 0x29400000 | (ext) - | idx << 16 | r2 << 10 | addr << 5 | r1 */ - assert(idx > 0 && idx < 0x20); - tcg_out32(s, 0xa9400000 | idx << 16 | r2 << 10 | addr << 5 | r1); -} - static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], const int const_args[TCG_MAX_OP_ARGS]) @@ -1282,27 +1297,51 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_goto_label(s, a0); break; - case INDEX_op_ld_i32: - case INDEX_op_ld_i64: - case INDEX_op_st_i32: - case INDEX_op_st_i64: case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: case INDEX_op_ld8u_i64: + tcg_out_ldst(s, I3312_LDRB, a0, a1, a2); + break; + case INDEX_op_ld8s_i32: + tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2); + break; case INDEX_op_ld8s_i64: + tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2); + break; + case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: + tcg_out_ldst(s, I3312_LDRH, a0, a1, a2); + break; + case INDEX_op_ld16s_i32: + tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2); + break; case INDEX_op_ld16s_i64: + tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2); + break; + case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: + tcg_out_ldst(s, I3312_LDRW, a0, a1, a2); + break; case INDEX_op_ld32s_i64: + tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, I3312_LDRX, a0, a1, a2); + break; + case INDEX_op_st8_i32: case INDEX_op_st8_i64: + tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2); + break; case INDEX_op_st16_i32: case INDEX_op_st16_i64: + tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2); + break; + case INDEX_op_st_i32: case INDEX_op_st32_i64: - tcg_out_ldst(s, aarch64_ldst_get_data(opc), aarch64_ldst_get_type(opc), - a0, a1, a2); + tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2); break; case INDEX_op_add_i32: @@ -1478,8 +1517,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, a1 = (int32_t)a1; /* FALLTHRU */ case INDEX_op_brcond_i64: - tcg_out_cmp(s, ext, a0, a1, const_args[1]); - tcg_out_goto_label_cond(s, a2, args[3]); + tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], args[3]); break; case INDEX_op_setcond_i32: @@ -1500,77 +1538,48 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); break; - case INDEX_op_qemu_ld8u: - tcg_out_qemu_ld(s, args, 0 | 0); - break; - case INDEX_op_qemu_ld8s: - tcg_out_qemu_ld(s, args, 4 | 0); - break; - case INDEX_op_qemu_ld16u: - tcg_out_qemu_ld(s, args, 0 | 1); - break; - case INDEX_op_qemu_ld16s: - tcg_out_qemu_ld(s, args, 4 | 1); - break; - case INDEX_op_qemu_ld32u: - tcg_out_qemu_ld(s, args, 0 | 2); - break; - case INDEX_op_qemu_ld32s: - tcg_out_qemu_ld(s, 
args, 4 | 2); - break; - case INDEX_op_qemu_ld32: - tcg_out_qemu_ld(s, args, 0 | 2); - break; - case INDEX_op_qemu_ld64: - tcg_out_qemu_ld(s, args, 0 | 3); - break; - case INDEX_op_qemu_st8: - tcg_out_qemu_st(s, args, 0); + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, a0, a1, a2, args[3]); break; - case INDEX_op_qemu_st16: - tcg_out_qemu_st(s, args, 1); - break; - case INDEX_op_qemu_st32: - tcg_out_qemu_st(s, args, 2); - break; - case INDEX_op_qemu_st64: - tcg_out_qemu_st(s, args, 3); + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]); break; - case INDEX_op_bswap32_i64: - /* Despite the _i64, this is a 32-bit bswap. */ - ext = 0; - /* FALLTHRU */ case INDEX_op_bswap64_i64: + tcg_out_rev64(s, a0, a1); + break; + case INDEX_op_bswap32_i64: case INDEX_op_bswap32_i32: - tcg_out_rev(s, ext, a0, a1); + tcg_out_rev32(s, a0, a1); break; case INDEX_op_bswap16_i64: case INDEX_op_bswap16_i32: - tcg_out_rev16(s, 0, a0, a1); + tcg_out_rev16(s, a0, a1); break; case INDEX_op_ext8s_i64: case INDEX_op_ext8s_i32: - tcg_out_sxt(s, ext, 0, a0, a1); + tcg_out_sxt(s, ext, MO_8, a0, a1); break; case INDEX_op_ext16s_i64: case INDEX_op_ext16s_i32: - tcg_out_sxt(s, ext, 1, a0, a1); + tcg_out_sxt(s, ext, MO_16, a0, a1); break; case INDEX_op_ext32s_i64: - tcg_out_sxt(s, 1, 2, a0, a1); + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); break; case INDEX_op_ext8u_i64: case INDEX_op_ext8u_i32: - tcg_out_uxt(s, 0, a0, a1); + tcg_out_uxt(s, MO_8, a0, a1); break; case INDEX_op_ext16u_i64: case INDEX_op_ext16u_i32: - tcg_out_uxt(s, 1, a0, a1); + tcg_out_uxt(s, MO_16, a0, a1); break; case INDEX_op_ext32u_i64: - tcg_out_movr(s, 0, a0, a1); + tcg_out_movr(s, TCG_TYPE_I32, a0, a1); break; case INDEX_op_deposit_i64: @@ -1642,17 +1651,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = { { INDEX_op_ld32s_i64, { "r", "r" } }, { INDEX_op_ld_i64, { "r", "r" } }, - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, - { INDEX_op_st8_i64, { "r", "r" } }, - { INDEX_op_st16_i64, { "r", "r" } }, - { INDEX_op_st32_i64, { "r", "r" } }, - { INDEX_op_st_i64, { "r", "r" } }, + { INDEX_op_st8_i32, { "rZ", "r" } }, + { INDEX_op_st16_i32, { "rZ", "r" } }, + { INDEX_op_st_i32, { "rZ", "r" } }, + { INDEX_op_st8_i64, { "rZ", "r" } }, + { INDEX_op_st16_i64, { "rZ", "r" } }, + { INDEX_op_st32_i64, { "rZ", "r" } }, + { INDEX_op_st_i64, { "rZ", "r" } }, - { INDEX_op_add_i32, { "r", "r", "rwA" } }, + { INDEX_op_add_i32, { "r", "r", "rA" } }, { INDEX_op_add_i64, { "r", "r", "rA" } }, - { INDEX_op_sub_i32, { "r", "r", "rwA" } }, + { INDEX_op_sub_i32, { "r", "r", "rA" } }, { INDEX_op_sub_i64, { "r", "r", "rA" } }, { INDEX_op_mul_i32, { "r", "r", "r" } }, { INDEX_op_mul_i64, { "r", "r", "r" } }, @@ -1664,17 +1673,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = { { INDEX_op_rem_i64, { "r", "r", "r" } }, { INDEX_op_remu_i32, { "r", "r", "r" } }, { INDEX_op_remu_i64, { "r", "r", "r" } }, - { INDEX_op_and_i32, { "r", "r", "rwL" } }, + { INDEX_op_and_i32, { "r", "r", "rL" } }, { INDEX_op_and_i64, { "r", "r", "rL" } }, - { INDEX_op_or_i32, { "r", "r", "rwL" } }, + { INDEX_op_or_i32, { "r", "r", "rL" } }, { INDEX_op_or_i64, { "r", "r", "rL" } }, - { INDEX_op_xor_i32, { "r", "r", "rwL" } }, + { INDEX_op_xor_i32, { "r", "r", "rL" } }, { INDEX_op_xor_i64, { "r", "r", "rL" } }, - { INDEX_op_andc_i32, { "r", "r", "rwL" } }, + { INDEX_op_andc_i32, { "r", "r", "rL" } }, { INDEX_op_andc_i64, { "r", "r", "rL" } }, - { 
INDEX_op_orc_i32, { "r", "r", "rwL" } }, + { INDEX_op_orc_i32, { "r", "r", "rL" } }, { INDEX_op_orc_i64, { "r", "r", "rL" } }, - { INDEX_op_eqv_i32, { "r", "r", "rwL" } }, + { INDEX_op_eqv_i32, { "r", "r", "rL" } }, { INDEX_op_eqv_i64, { "r", "r", "rL" } }, { INDEX_op_neg_i32, { "r", "r" } }, @@ -1693,27 +1702,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = { { INDEX_op_rotl_i64, { "r", "r", "ri" } }, { INDEX_op_rotr_i64, { "r", "r", "ri" } }, - { INDEX_op_brcond_i32, { "r", "rwA" } }, + { INDEX_op_brcond_i32, { "r", "rA" } }, { INDEX_op_brcond_i64, { "r", "rA" } }, - { INDEX_op_setcond_i32, { "r", "r", "rwA" } }, + { INDEX_op_setcond_i32, { "r", "r", "rA" } }, { INDEX_op_setcond_i64, { "r", "r", "rA" } }, - { INDEX_op_movcond_i32, { "r", "r", "rwA", "rZ", "rZ" } }, + { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } }, { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } }, - { INDEX_op_qemu_ld8u, { "r", "l" } }, - { INDEX_op_qemu_ld8s, { "r", "l" } }, - { INDEX_op_qemu_ld16u, { "r", "l" } }, - { INDEX_op_qemu_ld16s, { "r", "l" } }, - { INDEX_op_qemu_ld32u, { "r", "l" } }, - { INDEX_op_qemu_ld32s, { "r", "l" } }, - - { INDEX_op_qemu_ld32, { "r", "l" } }, - { INDEX_op_qemu_ld64, { "r", "l" } }, - - { INDEX_op_qemu_st8, { "l", "l" } }, - { INDEX_op_qemu_st16, { "l", "l" } }, - { INDEX_op_qemu_st32, { "l", "l" } }, - { INDEX_op_qemu_st64, { "l", "l" } }, + { INDEX_op_qemu_ld_i32, { "r", "l" } }, + { INDEX_op_qemu_ld_i64, { "r", "l" } }, + { INDEX_op_qemu_st_i32, { "lZ", "l" } }, + { INDEX_op_qemu_st_i64, { "lZ", "l" } }, { INDEX_op_bswap16_i32, { "r", "r" } }, { INDEX_op_bswap32_i32, { "r", "r" } }, @@ -1736,9 +1735,9 @@ static const TCGTargetOpDef aarch64_op_defs[] = { { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, - { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rwA", "rwMZ" } }, + { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rwA", "rwMZ" } }, + { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, { INDEX_op_muluh_i64, { "r", "r", "r" } }, @@ -1762,7 +1761,7 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_X12) | (1 << TCG_REG_X13) | (1 << TCG_REG_X14) | (1 << TCG_REG_X15) | (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | - (1 << TCG_REG_X18)); + (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); tcg_regset_clear(s->reserved_regs); tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); @@ -1773,40 +1772,44 @@ static void tcg_target_init(TCGContext *s) tcg_add_target_add_op_defs(aarch64_op_defs); } +/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */ +#define PUSH_SIZE ((30 - 19 + 1) * 8) + +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +/* We're expecting to use a single ADDI insn. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); + static void tcg_target_qemu_prologue(TCGContext *s) { - /* NB: frame sizes are in 16 byte stack units! */ - int frame_size_callee_saved, frame_size_tcg_locals; TCGReg r; - /* save pairs (FP, LR) and (X19, X20) .. 
(X27, X28) */ - frame_size_callee_saved = (1) + (TCG_REG_X28 - TCG_REG_X19) / 2 + 1; - - /* frame size requirement for TCG local variables */ - frame_size_tcg_locals = TCG_STATIC_CALL_ARGS_SIZE - + CPU_TEMP_BUF_NLONGS * sizeof(long) - + (TCG_TARGET_STACK_ALIGN - 1); - frame_size_tcg_locals &= ~(TCG_TARGET_STACK_ALIGN - 1); - frame_size_tcg_locals /= TCG_TARGET_STACK_ALIGN; + /* Push (FP, LR) and allocate space for all saved registers. */ + tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, -PUSH_SIZE, 1, 1); - /* push (FP, LR) and update sp */ - tcg_out_push_pair(s, TCG_REG_SP, - TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved); + /* Set up frame pointer for canonical unwinding. */ + tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); - /* FP -> callee_saved */ - tcg_out_movr_sp(s, 1, TCG_REG_FP, TCG_REG_SP); - - /* store callee-preserved regs x19..x28 using FP -> callee_saved */ + /* Store callee-preserved regs x19..x28. */ for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { - int idx = (r - TCG_REG_X19) / 2 + 1; - tcg_out_store_pair(s, TCG_REG_FP, r, r + 1, idx); + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); } /* Make stack space for TCG locals. */ tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, - frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN); + FRAME_SIZE - PUSH_SIZE); - /* inform TCG about how to find TCG locals with register, offset, size */ + /* Inform TCG about how to find TCG locals with register, offset, size. */ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS * sizeof(long)); @@ -1818,23 +1821,71 @@ static void tcg_target_qemu_prologue(TCGContext *s) #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tcg_out_gotor(s, tcg_target_call_iarg_regs[1]); + tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); tb_ret_addr = s->code_ptr; /* Remove TCG locals stack space. */ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, - frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN); + FRAME_SIZE - PUSH_SIZE); - /* restore registers x19..x28. - FP must be preserved, so it still points to callee_saved area */ + /* Restore registers x19..x28. */ for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { - int idx = (r - TCG_REG_X19) / 2 + 1; - tcg_out_load_pair(s, TCG_REG_FP, r, r + 1, idx); + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); } - /* pop (FP, LR), restore SP to previous frame, return */ - tcg_out_pop_pair(s, TCG_REG_SP, - TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved); - tcg_out_ret(s); + /* Pop (FP, LR), restore SP to previous frame. */ + tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, PUSH_SIZE, 0, 1); + tcg_out_insn(s, 3207, RET, TCG_REG_LR); +} + +typedef struct { + DebugFrameCIE cie; + DebugFrameFDEHeader fde; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[24]; +} DebugFrame; + +#define ELF_HOST_MACHINE EM_AARCH64 + +static DebugFrame debug_frame = { + .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .cie.id = -1, + .cie.version = 1, + .cie.code_align = 1, + .cie.data_align = 0x78, /* sleb128 -8 */ + .cie.return_column = TCG_REG_LR, + + /* Total FDE size does not include the "len" member. */ + .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */ + 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */ + 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */ + 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */ + 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */ + 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */ + 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */ + 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */ + 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */ + 0x80 + 19, 10, /* DW_CFA_offset, x1p, -80 */ + 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */ + 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */ + } +}; + +void tcg_register_jit(void *buf, size_t buf_size) +{ + debug_frame.fde.func_start = (intptr_t)buf; + debug_frame.fde.func_len = buf_size; + + tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); } diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 988983ed59..eff1d68fc6 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -13,21 +13,26 @@ #ifndef TCG_TARGET_AARCH64 #define TCG_TARGET_AARCH64 1 -#undef TCG_TARGET_WORDS_BIGENDIAN #undef TCG_TARGET_STACK_GROWSUP typedef enum { - TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, - TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, TCG_REG_X9, - TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, - TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, - TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, - TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28, - TCG_REG_FP, /* frame pointer */ - TCG_REG_LR, /* link register */ - TCG_REG_SP, /* stack pointer or zero register */ - TCG_REG_XZR = TCG_REG_SP /* same register number */ - /* program counter is not directly accessible! */ + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, + + /* X31 is either the stack pointer or zero, depending on context. */ + TCG_REG_SP = 31, + TCG_REG_XZR = 31, + + /* Aliases. */ + TCG_REG_FP = TCG_REG_X29, + TCG_REG_LR = TCG_REG_X30, + TCG_AREG0 = TCG_REG_X19, } TCGReg; #define TCG_TARGET_NB_REGS 32 @@ -92,11 +97,7 @@ typedef enum { #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 -enum { - TCG_AREG0 = TCG_REG_X19, -}; - -#define TCG_TARGET_HAS_new_ldst 0 +#define TCG_TARGET_HAS_new_ldst 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c index a65fc658e1..7535175f9c 100644 --- a/tcg/arm/tcg-target.c +++ b/tcg/arm/tcg-target.c @@ -261,7 +261,7 @@ static inline int check_fit_imm(uint32_t imm) * mov operand2: values represented with x << (2 * y), x < 0x100 * add, sub, eor...: ditto */ -static inline int tcg_target_const_match(tcg_target_long val, +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct; @@ -1253,7 +1253,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. 
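
Two LEB128 encodings are baked into debug_frame above: data_align 0x78 is the one-byte sleb128 for -8, and FRAME_SIZE is always emitted as a two-byte uleb128, which is exactly what the QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)) guarantees is possible. A standalone check, using a hypothetical frame size of 0x2a0:

#include <assert.h>
#include <stdint.h>

static int64_t sleb128_decode_1(uint8_t b)
{
    int64_t v = b & 0x7f;
    if (v & 0x40) {
        v |= -0x40;               /* sign-extend from bit 6 */
    }
    return v;
}

int main(void)
{
    unsigned frame_size = 0x2a0;  /* hypothetical; must be < (1 << 14) */
    uint8_t lo = (frame_size & 0x7f) | 0x80;   /* continuation bit set */
    uint8_t hi = frame_size >> 7;

    assert(sleb128_decode_1(0x78) == -8);      /* .cie.data_align */
    assert((unsigned)((hi & 0x7f) << 7 | (lo & 0x7f)) == frame_size);
    return 0;
}
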
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 988983ed59..eff1d68fc6 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -13,21 +13,26 @@
 #ifndef TCG_TARGET_AARCH64
 #define TCG_TARGET_AARCH64 1
 
-#undef TCG_TARGET_WORDS_BIGENDIAN
 #undef TCG_TARGET_STACK_GROWSUP
 
 typedef enum {
-    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4,
-    TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, TCG_REG_X9,
-    TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14,
-    TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
-    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24,
-    TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28,
-    TCG_REG_FP, /* frame pointer */
-    TCG_REG_LR, /* link register */
-    TCG_REG_SP, /* stack pointer or zero register */
-    TCG_REG_XZR = TCG_REG_SP /* same register number */
-    /* program counter is not directly accessible! */
+    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
+    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,
+    TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
+    TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
+    TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
+    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
+    TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
+    TCG_REG_X28, TCG_REG_X29, TCG_REG_X30,
+
+    /* X31 is either the stack pointer or zero, depending on context.  */
+    TCG_REG_SP = 31,
+    TCG_REG_XZR = 31,
+
+    /* Aliases.  */
+    TCG_REG_FP = TCG_REG_X29,
+    TCG_REG_LR = TCG_REG_X30,
+    TCG_AREG0 = TCG_REG_X19,
 } TCGReg;
 
 #define TCG_TARGET_NB_REGS 32
@@ -92,11 +97,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
 
-enum {
-    TCG_AREG0 = TCG_REG_X19,
-};
-
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
 
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index a65fc658e1..7535175f9c 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -261,7 +261,7 @@ static inline int check_fit_imm(uint32_t imm)
  * mov operand2: values represented with x << (2 * y), x < 0x100
  * add, sub, eor...: ditto
  */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct;
@@ -1253,7 +1253,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                 TCGReg addrhi, int mem_index,
                                 uint8_t *raddr, uint8_t *label_ptr)
@@ -1519,7 +1519,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 
     tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
 
-    add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                         mem_index, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
@@ -1647,7 +1647,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     label_ptr = s->code_ptr;
     tcg_out_bl_noaddr(s, COND_NE);
 
-    add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                         mem_index, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 3746b6e298..1bc5dacec0 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -25,7 +25,6 @@
 #ifndef TCG_TARGET_ARM
 #define TCG_TARGET_ARM 1
 
-#undef TCG_TARGET_WORDS_BIGENDIAN
 #undef TCG_TARGET_STACK_GROWSUP
 
 typedef enum {
@@ -79,6 +78,7 @@ extern bool use_idiv_instructions;
 #define TCG_TARGET_HAS_nor_i32 0
 #define TCG_TARGET_HAS_deposit_i32 1
 #define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 1
 #define TCG_TARGET_HAS_muls2_i32 1
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index f832282d1a..34ece1f80b 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -257,7 +257,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
@@ -1244,7 +1244,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
  * Record the context of a call to the out of line helper code for the slow path
  * for a load or store, so that we can later generate the correct helper code
  */
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg datalo, TCGReg datahi,
                                 TCGReg addrlo, TCGReg addrhi,
                                 int mem_index, uint8_t *raddr,
@@ -1554,7 +1554,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
 
     /* Record the current context of a load into ldst label */
-    add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                         mem_index, s->code_ptr, label_ptr);
 #else
     {
@@ -1685,7 +1685,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
 
     /* Record the current context of a store into ldst label */
-    add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                         mem_index, s->code_ptr, label_ptr);
 #else
     {
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index bdf2222452..ababca0569 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -24,8 +24,6 @@
 #ifndef TCG_TARGET_I386
 #define TCG_TARGET_I386 1
 
-#undef TCG_TARGET_WORDS_BIGENDIAN
-
 #ifdef __x86_64__
 # define TCG_TARGET_REG_BITS 64
 # define TCG_TARGET_NB_REGS 16
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 2d8e00cd94..1f523d6466 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -23,8 +23,6 @@
  * THE SOFTWARE.
  */
 
-#include "tcg-be-null.h"
-
 /*
  * Register definitions
  */
@@ -221,10 +219,12 @@ enum {
     OPC_ALLOC_M34 = 0x02c00000000ull,
     OPC_BR_DPTK_FEW_B1 = 0x08400000000ull,
     OPC_BR_SPTK_MANY_B1 = 0x08000001000ull,
+    OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull,
     OPC_BR_SPTK_MANY_B4 = 0x00100001000ull,
     OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
     OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
     OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
+    OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull,
     OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
     OPC_CMP_LT_A6 = 0x18000000000ull,
     OPC_CMP_LTU_A6 = 0x1a000000000ull,
@@ -356,6 +356,15 @@ static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
            | (qp & 0x3f);
 }
 
+static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x100000) << 16) /* s */
+           | ((imm & 0x0fffff) << 13) /* imm20b */
+           | ((b1 & 0x7) << 6)
+           | (qp & 0x3f);
+}
+
 static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
 {
     return opc
@@ -815,6 +824,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 #if defined(CONFIG_SOFTMMU)
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
+        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58);
 #endif
         break;
     case 'Z':
@@ -832,7 +842,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct;
@@ -950,15 +960,21 @@ static inline void tcg_out_callr(TCGContext *s, TCGReg addr)
 static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
 {
     int64_t disp;
-    uint64_t imm;
+    uint64_t imm, opc1;
 
-    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+    /* At least arg == 0 is a common operation.  */
+    if (arg == sextract64(arg, 0, 22)) {
+        opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+        opc1 = INSN_NOP_M;
+    }
 
     disp = tb_ret_addr - s->code_ptr;
     imm = (uint64_t)disp >> 4;
 
     tcg_out_bundle(s, mLX,
-                   INSN_NOP_M,
+                   opc1,
                    tcg_opc_l3 (imm),
                    tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
 }
@@ -1558,238 +1574,284 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
 }
 
 #if defined(CONFIG_SOFTMMU)
+/* We're expecting to use a signed 22-bit immediate add.  */
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
+                  > 0x1fffff)
+
 /* Load and compare a TLB entry, and return the result in (p6, p7).
-   R2 is loaded with the address of the addend TLB entry.
-   R57 is loaded with the address, zero extended on 32-bit targets.  */
-static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
-                                    TCGMemOp s_bits, uint64_t offset_rw,
-                                    uint64_t offset_addend)
-{
-    tcg_out_bundle(s, mII,
-                   INSN_NOP_M,
-                   tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
+   R2 is loaded with the addend TLB entry.
+   R57 is loaded with the address, zero extended on 32-bit targets.
+   R1, R3 are clobbered, leaving R56 free for...
+   BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store.  */
+static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
+                                    TCGMemOp s_bits, int off_rw, int off_add,
+                                    uint64_t bswap1, uint64_t bswap2)
+{
+    /*
+       .mii
+       mov    r2 = off_rw
+       extr.u r3 = addr_reg, ...          # extract tlb page
+       zxt4   r57 = addr_reg              # or mov for 64-bit guest
+       ;;
+       .mii
+       addl   r2 = r2, areg0
+       shl    r3 = r3, cteb               # via dep.z
+       dep    r1 = 0, r57, ...            # zero page ofs, keep align
+       ;;
+       .mmi
+       add    r2 = r2, r3
+       ;;
+       ld4    r3 = [r2], off_add-off_rw   # or ld8 for 64-bit guest
+       nop
+       ;;
+       .mmi
+       nop
+       cmp.eq p6, p7 = r3, r58
+       nop
+       ;;
+     */
+    tcg_out_bundle(s, miI,
+                   tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
+                   tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
                                addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
-                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
-                               TCG_REG_R2, 63 - CPU_TLB_ENTRY_BITS,
-                               63 - CPU_TLB_ENTRY_BITS));
-    tcg_out_bundle(s, mII,
-                   tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
-                               offset_rw, TCG_REG_R2),
                    tcg_opc_ext_i(TCG_REG_P0, TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
-                                 TCG_REG_R57, addr_reg),
+                                 TCG_REG_R57, addr_reg));
+    tcg_out_bundle(s, miI,
                    tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
-                               TCG_REG_R2, TCG_AREG0));
-    tcg_out_bundle(s, mII,
+                               TCG_REG_R2, TCG_AREG0),
+                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3,
+                               TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
+                               63 - CPU_TLB_ENTRY_BITS),
+                   tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
+                               TCG_REG_R57, 63 - s_bits,
+                               TARGET_PAGE_BITS - s_bits - 1));
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
+                               TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
                    tcg_opc_m3 (TCG_REG_P0,
                                (TARGET_LONG_BITS == 32
-                                ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R56,
-                               TCG_REG_R2, offset_addend - offset_rw),
-                   tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R3, 0,
-                               TCG_REG_R57, 63 - s_bits,
-                               TARGET_PAGE_BITS - s_bits - 1),
+                                ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3,
+                               TCG_REG_R2, off_add - off_rw),
+                   bswap1);
+    tcg_out_bundle(s, mmI,
+                   tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2),
                    tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
-                               TCG_REG_P7, TCG_REG_R3, TCG_REG_R56));
+                               TCG_REG_P7, TCG_REG_R1, TCG_REG_R3),
+                   bswap2);
 }
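The bundles above implement a standard softmmu TLB probe. In plain C the address arithmetic looks roughly like this (a sketch of the logic only; QEMU's real types and macros such as CPUArchState, CPU_TLB_SIZE and TARGET_PAGE_MASK are assumed):

    /* Index the per-mmu-idx table by the page bits of the address, then
       compare the stored tag against the access address.  */
    static uintptr_t tlb_hit_addr(CPUArchState *env, target_ulong addr,
                                  int s_bits, int mem_index)
    {
        unsigned idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        /* Keep the low s_bits alignment bits in the tag, so that an
           unaligned access fails the compare and takes the slow path.  */
        target_ulong tag = addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1));

        if (env->tlb_table[mem_index][idx].addr_read == tag) {
            return addr + env->tlb_table[mem_index][idx].addend;
        }
        return 0;   /* miss */
    }

The dep instruction in the comment is what builds that tag: it zeroes the page-offset bits of the address while keeping the alignment bits.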
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
-   int mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
-    helper_ldb_mmu,
-    helper_ldw_mmu,
-    helper_ldl_mmu,
-    helper_ldq_mmu,
-};
+#define TCG_MAX_QEMU_LDST 640
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
-                                   TCGMemOp opc)
+typedef struct TCGLabelQemuLdst {
+    bool is_ld;
+    TCGMemOp size;
+    uint8_t *label_ptr;     /* label pointers to be updated */
+} TCGLabelQemuLdst;
+
+typedef struct TCGBackendData {
+    int nb_ldst_labels;
+    TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST];
+} TCGBackendData;
+
+static inline void tcg_out_tb_init(TCGContext *s)
+{
+    s->be->nb_ldst_labels = 0;
+}
+
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+                                uint8_t *label_ptr)
+{
+    TCGBackendData *be = s->be;
+    TCGLabelQemuLdst *l = &be->ldst_labels[be->nb_ldst_labels++];
+
+    assert(be->nb_ldst_labels <= TCG_MAX_QEMU_LDST);
+    l->is_ld = is_ld;
+    l->size = opc & MO_SIZE;
+    l->label_ptr = label_ptr;
+}
+
+static void tcg_out_tb_finalize(TCGContext *s)
+{
+    static const void * const helpers[8] = {
+        helper_ret_stb_mmu,
+        helper_le_stw_mmu,
+        helper_le_stl_mmu,
+        helper_le_stq_mmu,
+        helper_ret_ldub_mmu,
+        helper_le_lduw_mmu,
+        helper_le_ldul_mmu,
+        helper_le_ldq_mmu,
+    };
+    uintptr_t thunks[8] = { };
+    TCGBackendData *be = s->be;
+    size_t i, n = be->nb_ldst_labels;
+
+    for (i = 0; i < n; i++) {
+        TCGLabelQemuLdst *l = &be->ldst_labels[i];
+        long x = l->is_ld * 4 + l->size;
+        uintptr_t dest = thunks[x];
+
+        /* The out-of-line thunks are all the same; load the return address
+           from B0, load the GP, and branch to the code.  Note that we are
+           always post-call, so the register window has rolled, so we're
+           using incoming parameter register numbers, not outgoing.  */
+        if (dest == 0) {
+            uintptr_t disp, *desc = (uintptr_t *)helpers[x];
+
+            thunks[x] = dest = (uintptr_t)s->code_ptr;
+
+            tcg_out_bundle(s, mlx,
+                           INSN_NOP_M,
+                           tcg_opc_l2 (desc[1]),
+                           tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
+                                       TCG_REG_R1, desc[1]));
+            tcg_out_bundle(s, mii,
+                           INSN_NOP_M,
+                           INSN_NOP_I,
+                           tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
+                                       l->is_ld ? TCG_REG_R35 : TCG_REG_R36,
+                                       TCG_REG_B0));
+            disp = (desc[0] - (uintptr_t)s->code_ptr) >> 4;
+            tcg_out_bundle(s, mLX,
+                           INSN_NOP_M,
+                           tcg_opc_l3 (disp),
+                           tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp));
+        }
+
+        reloc_pcrel21b(l->label_ptr, dest);
+    }
+}
+
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
 {
     static const uint64_t opc_ld_m1[4] = {
         OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
     };
     int addr_reg, data_reg, mem_index;
-    TCGMemOp s_bits, bswap;
-
-    data_reg = *args++;
-    addr_reg = *args++;
-    mem_index = *args;
+    TCGMemOp opc, s_bits;
+    uint64_t fin1, fin2;
+    uint8_t *label_ptr;
+
+    data_reg = args[0];
+    addr_reg = args[1];
+    opc = args[2];
+    mem_index = args[3];
     s_bits = opc & MO_SIZE;
-    bswap = opc & MO_BSWAP;
 
     /* Read the TLB entry */
     tcg_out_qemu_tlb(s, addr_reg, s_bits,
                      offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
-                     offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+                     INSN_NOP_I, INSN_NOP_I);
 
     /* P6 is the fast path, and P7 the slow path */
-    tcg_out_bundle(s, mLX,
-                   tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
-                   tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
-                   tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
-                               (tcg_target_long) qemu_ld_helpers[s_bits]));
-    tcg_out_bundle(s, MmI,
-                   tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
-                               TCG_REG_R2, 8),
-                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
-                               TCG_REG_R3, TCG_REG_R57),
-                   tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
-                               TCG_REG_R3, 0));
-    if (bswap && s_bits == MO_16) {
-        tcg_out_bundle(s, MmI,
-                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
-                                   TCG_REG_R8, TCG_REG_R3),
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R8, TCG_REG_R8, 15, 15));
-    } else if (bswap && s_bits == MO_32) {
-        tcg_out_bundle(s, MmI,
-                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
-                                   TCG_REG_R8, TCG_REG_R3),
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R8, TCG_REG_R8, 31, 31));
-    } else {
-        tcg_out_bundle(s, mmI,
-                       tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
-                                   TCG_REG_R8, TCG_REG_R3),
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       INSN_NOP_I);
-    }
-    if (!bswap) {
-        tcg_out_bundle(s, miB,
-                       tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
-                       INSN_NOP_I,
-                       tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
-                                   TCG_REG_B0, TCG_REG_B6));
+
+    fin2 = 0;
+    if (opc & MO_BSWAP) {
+        fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8);
+        if (s_bits < MO_64) {
+            int shift = 64 - (8 << s_bits);
+            fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11);
+            fin2 = tcg_opc_i11(TCG_REG_P0, fin2,
+                               data_reg, data_reg, shift, 63 - shift);
+        }
     } else {
-        tcg_out_bundle(s, miB,
-                       tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
-                       tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R8, TCG_REG_R8),
-                       tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
-                                   TCG_REG_B0, TCG_REG_B6));
+        fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8);
     }
-    tcg_out_bundle(s, miI,
-                   INSN_NOP_M,
+    tcg_out_bundle(s, mmI,
+                   tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
+                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, TCG_REG_R57),
+                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index));
+    label_ptr = s->code_ptr + 2;
+    tcg_out_bundle(s, miB,
+                   tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+                               TCG_REG_R8, TCG_REG_R2),
                    INSN_NOP_I,
-                   tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8));
-}
+                   tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+                               get_reloc_pcrel21b(label_ptr)));
 
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
-   uintxx_t val, int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
-    helper_stb_mmu,
-    helper_stw_mmu,
-    helper_stl_mmu,
-    helper_stq_mmu,
-};
+    add_qemu_ldst_label(s, 1, opc, label_ptr);
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
-                                   TCGMemOp opc)
+    /* Note that we always use LE helper functions, so the bswap insns
+       here for the fast path also apply to the slow path.  */
+    tcg_out_bundle(s, (fin2 ? mII : miI),
+                   INSN_NOP_M,
+                   fin1,
+                   fin2 ? fin2 : INSN_NOP_I);
+}
+
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
 {
     static const uint64_t opc_st_m4[4] = {
         OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
     };
-    int addr_reg, data_reg, mem_index;
-    TCGMemOp s_bits;
-
-    data_reg = *args++;
-    addr_reg = *args++;
-    mem_index = *args;
+    TCGReg addr_reg, data_reg;
+    int mem_index;
+    uint64_t pre1, pre2;
+    TCGMemOp opc, s_bits;
+    uint8_t *label_ptr;
+
+    data_reg = args[0];
+    addr_reg = args[1];
+    opc = args[2];
+    mem_index = args[3];
     s_bits = opc & MO_SIZE;
 
+    /* Note that we always use LE helper functions, so the bswap insns
+       that are here for the fast path also apply to the slow path,
+       and move the data into the argument register.  */
+    pre2 = INSN_NOP_I;
+    if (opc & MO_BSWAP) {
+        pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg);
+        if (s_bits < MO_64) {
+            int shift = 64 - (8 << s_bits);
+            pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11,
+                               TCG_REG_R58, TCG_REG_R58, shift, 63 - shift);
+        }
+    } else {
+        /* Just move the data into place for the slow path.  */
+        pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
+    }
+
     tcg_out_qemu_tlb(s, addr_reg, s_bits,
                      offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
-                     offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+                     pre1, pre2);
 
     /* P6 is the fast path, and P7 the slow path */
-    tcg_out_bundle(s, mLX,
+    tcg_out_bundle(s, mmI,
                    tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
-                   tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[s_bits]),
-                   tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
-                               (tcg_target_long) qemu_st_helpers[s_bits]));
-    tcg_out_bundle(s, MmI,
-                   tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
-                               TCG_REG_R2, 8),
-                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
-                               TCG_REG_R3, TCG_REG_R57),
-                   tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
-                               TCG_REG_R3, 0));
-
-    switch (opc) {
-    case MO_8:
-    case MO_16:
-    case MO_32:
-    case MO_64:
-        tcg_out_bundle(s, mii,
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
-                                   TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
-                       INSN_NOP_I);
-        break;
-
-    case MO_16 | MO_BSWAP:
-        tcg_out_bundle(s, miI,
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
-                                   TCG_REG_R1, TCG_REG_R2),
-                       INSN_NOP_I,
-                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R2, data_reg, 15, 15));
-        tcg_out_bundle(s, miI,
-                       tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
-                       INSN_NOP_I,
-                       tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
-        data_reg = TCG_REG_R2;
-        break;
-
-    case MO_32 | MO_BSWAP:
-        tcg_out_bundle(s, miI,
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
-                                   TCG_REG_R1, TCG_REG_R2),
-                       INSN_NOP_I,
-                       tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R2, data_reg, 31, 31));
-        tcg_out_bundle(s, miI,
-                       tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
-                       INSN_NOP_I,
-                       tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
-        data_reg = TCG_REG_R2;
-        break;
-
-    case MO_64 | MO_BSWAP:
-        tcg_out_bundle(s, miI,
-                       tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
-                                   TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
-                       tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, data_reg));
-        data_reg = TCG_REG_R2;
-        break;
-
-    default:
-        tcg_abort();
-    }
-
+                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+                               TCG_REG_R2, TCG_REG_R57),
+                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index));
+    label_ptr = s->code_ptr + 2;
     tcg_out_bundle(s, miB,
                    tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
-                               data_reg, TCG_REG_R3),
-                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index),
-                   tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
-                               TCG_REG_B0, TCG_REG_B6));
+                               TCG_REG_R58, TCG_REG_R2),
+                   INSN_NOP_I,
+                   tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+                               get_reloc_pcrel21b(label_ptr)));
+
+    add_qemu_ldst_label(s, 0, opc, label_ptr);
 }
 
 #else /* !CONFIG_SOFTMMU */
+# include "tcg-be-null.h"
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
-                                   TCGMemOp opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
 {
     static uint64_t const opc_ld_m1[4] = {
         OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
     };
     int addr_reg, data_reg;
-    TCGMemOp s_bits, bswap;
+    TCGMemOp opc, s_bits, bswap;
 
-    data_reg = *args++;
-    addr_reg = *args++;
+    data_reg = args[0];
+    addr_reg = args[1];
+    opc = args[2];
     s_bits = opc & MO_SIZE;
     bswap = opc & MO_BSWAP;
 
@@ -1900,8 +1962,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
 #endif
 }
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
-                                   TCGMemOp opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
 {
     static uint64_t const opc_st_m4[4] = {
         OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
@@ -1910,10 +1971,11 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
 #if TARGET_LONG_BITS == 64
     uint64_t add_guest_base;
 #endif
-    TCGMemOp s_bits, bswap;
+    TCGMemOp opc, s_bits, bswap;
 
-    data_reg = *args++;
-    addr_reg = *args++;
+    data_reg = args[0];
+    addr_reg = args[1];
+    opc = args[2];
     s_bits = opc & MO_SIZE;
     bswap = opc & MO_BSWAP;
 
@@ -2237,40 +2299,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                         args[3], const_args[3], args[4], const_args[4], 0);
         break;
 
-    case INDEX_op_qemu_ld8u:
-        tcg_out_qemu_ld(s, args, MO_UB);
-        break;
-    case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, MO_SB);
-        break;
-    case INDEX_op_qemu_ld16u:
-        tcg_out_qemu_ld(s, args, MO_TEUW);
-        break;
-    case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, MO_TESW);
-        break;
-    case INDEX_op_qemu_ld32:
-    case INDEX_op_qemu_ld32u:
-        tcg_out_qemu_ld(s, args, MO_TEUL);
-        break;
-    case INDEX_op_qemu_ld32s:
-        tcg_out_qemu_ld(s, args, MO_TESL);
-        break;
-    case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, MO_TEQ);
-        break;
-
-    case INDEX_op_qemu_st8:
-        tcg_out_qemu_st(s, args, MO_UB);
+    case INDEX_op_qemu_ld_i32:
+        tcg_out_qemu_ld(s, args);
         break;
-    case INDEX_op_qemu_st16:
-        tcg_out_qemu_st(s, args, MO_TEUW);
+    case INDEX_op_qemu_ld_i64:
+        tcg_out_qemu_ld(s, args);
        break;
-    case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, MO_TEUL);
+    case INDEX_op_qemu_st_i32:
+        tcg_out_qemu_st(s, args);
        break;
-    case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, MO_TEQ);
+    case INDEX_op_qemu_st_i64:
+        tcg_out_qemu_st(s, args);
        break;
 
     default:
@@ -2381,19 +2420,10 @@ static const TCGTargetOpDef ia64_op_defs[] = {
     { INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
     { INDEX_op_deposit_i64, { "r", "rZ", "ri" } },
 
-    { INDEX_op_qemu_ld8u, { "r", "r" } },
-    { INDEX_op_qemu_ld8s, { "r", "r" } },
-    { INDEX_op_qemu_ld16u, { "r", "r" } },
-    { INDEX_op_qemu_ld16s, { "r", "r" } },
-    { INDEX_op_qemu_ld32, { "r", "r" } },
-    { INDEX_op_qemu_ld32u, { "r", "r" } },
-    { INDEX_op_qemu_ld32s, { "r", "r" } },
-    { INDEX_op_qemu_ld64, { "r", "r" } },
-
-    { INDEX_op_qemu_st8, { "SZ", "r" } },
-    { INDEX_op_qemu_st16, { "SZ", "r" } },
-    { INDEX_op_qemu_st32, { "SZ", "r" } },
-    { INDEX_op_qemu_st64, { "SZ", "r" } },
+    { INDEX_op_qemu_ld_i32, { "r", "r" } },
+    { INDEX_op_qemu_ld_i64, { "r", "r" } },
+    { INDEX_op_qemu_st_i32, { "SZ", "r" } },
+    { INDEX_op_qemu_st_i64, { "SZ", "r" } },
 
     { -1 },
 };
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 52a939c946..09c3ba8fe3 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -153,7 +153,7 @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_mulsh_i64 0
 
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
 
 #define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
 #define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index 40551cdcb5..37241b224b 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -26,7 +26,7 @@
 
 #include "tcg-be-null.h"
 
-#if defined(TCG_TARGET_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
 # define TCG_NEED_BSWAP 0
 #else
 # define TCG_NEED_BSWAP 1
@@ -253,7 +253,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct;
@@ -589,7 +589,7 @@ static inline void tcg_out_call_iarg_reg64(TCGContext *s, int *arg_num,
 {
     (*arg_num) = (*arg_num + 1) & ~1;
 
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
     tcg_out_call_iarg_reg32(s, arg_num, arg_high);
     tcg_out_call_iarg_reg32(s, arg_num, arg_low);
 #else
@@ -964,7 +964,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
 #if defined(CONFIG_SOFTMMU)
 # if TARGET_LONG_BITS == 64
     addr_regh = *args++;
-#  if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#  if defined(HOST_WORDS_BIGENDIAN)
     addr_memh = 0;
     addr_meml = 4;
 #  else
@@ -979,7 +979,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
 #endif
 
     if (opc == 3) {
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
         data_reg1 = data_regh;
         data_reg2 = data_regl;
 #else
@@ -1152,7 +1152,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
 #if defined(CONFIG_SOFTMMU)
 # if TARGET_LONG_BITS == 64
     addr_regh = *args++;
-#  if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#  if defined(HOST_WORDS_BIGENDIAN)
     addr_memh = 0;
     addr_meml = 4;
 #  else
@@ -1167,7 +1167,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
 #endif
 
     if (opc == 3) {
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
         data_reg1 = data_regh;
         data_reg2 = data_regl;
 #else
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index 683c6af8b9..9576db514d 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -26,10 +26,6 @@
 #ifndef TCG_TARGET_MIPS
 #define TCG_TARGET_MIPS 1
 
-#ifdef __MIPSEB__
-# define TCG_TARGET_WORDS_BIGENDIAN
-#endif
-
 #define TCG_TARGET_NB_REGS 32
 
 typedef enum {
@@ -109,6 +105,7 @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_orc_i32 0
 #define TCG_TARGET_HAS_eqv_i32 0
 #define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 1
 #define TCG_TARGET_HAS_muls2_i32 1
 #define TCG_TARGET_HAS_muluh_i32 1
 #define TCG_TARGET_HAS_mulsh_i32 1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7777743e88..c447062ab1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -220,34 +220,34 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
         return x ^ y;
 
     case INDEX_op_shl_i32:
-        return (uint32_t)x << (uint32_t)y;
+        return (uint32_t)x << (y & 31);
 
     case INDEX_op_shl_i64:
-        return (uint64_t)x << (uint64_t)y;
+        return (uint64_t)x << (y & 63);
 
     case INDEX_op_shr_i32:
-        return (uint32_t)x >> (uint32_t)y;
+        return (uint32_t)x >> (y & 31);
 
     case INDEX_op_shr_i64:
-        return (uint64_t)x >> (uint64_t)y;
+        return (uint64_t)x >> (y & 63);
 
     case INDEX_op_sar_i32:
-        return (int32_t)x >> (int32_t)y;
+        return (int32_t)x >> (y & 31);
 
     case INDEX_op_sar_i64:
-        return (int64_t)x >> (int64_t)y;
+        return (int64_t)x >> (y & 63);
 
     case INDEX_op_rotr_i32:
-        return ror32(x, y);
+        return ror32(x, y & 31);
 
     case INDEX_op_rotr_i64:
-        return ror64(x, y);
+        return ror64(x, y & 63);
 
     case INDEX_op_rotl_i32:
-        return rol32(x, y);
+        return rol32(x, y & 31);
 
     case INDEX_op_rotl_i64:
-        return rol64(x, y);
+        return rol64(x, y & 63);
 
     CASE_OP_32_64(not):
         return ~x;
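A note on why the folder masks the count: in C, shifting by a value greater than or equal to the operand width is undefined behavior, so the old code could crash or misbehave inside the optimizer itself whenever a guest shift count was out of range. Masking keeps the host computation well-defined, e.g.:

    #include <stdint.h>

    /* Without the mask, x << 32 on a 32-bit operand is undefined behavior
       in C; with it, the folder always produces some value.  */
    static uint32_t fold_shl_i32(uint32_t x, uint64_t y)
    {
        return x << (y & 31);
    }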
@@ -806,29 +806,34 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
 
         case INDEX_op_sar_i32:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (int32_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 31;
+                mask = (int32_t)temps[args[1]].mask >> tmp;
             }
             break;
         case INDEX_op_sar_i64:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (int64_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 63;
+                mask = (int64_t)temps[args[1]].mask >> tmp;
             }
             break;
 
         case INDEX_op_shr_i32:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (uint32_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 31;
+                mask = (uint32_t)temps[args[1]].mask >> tmp;
             }
             break;
         case INDEX_op_shr_i64:
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = (uint64_t)temps[args[1]].mask >> temps[args[2]].val;
+                tmp = temps[args[2]].val & 63;
+                mask = (uint64_t)temps[args[1]].mask >> tmp;
             }
             break;
 
         CASE_OP_32_64(shl):
             if (temps[args[2]].state == TCG_TEMP_CONST) {
-                mask = temps[args[1]].mask << temps[args[2]].val;
+                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
+                mask = temps[args[1]].mask << tmp;
             }
             break;
 
@@ -838,9 +843,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             break;
 
         CASE_OP_32_64(deposit):
-            tmp = ((1ull << args[4]) - 1);
-            mask = ((temps[args[1]].mask & ~(tmp << args[3]))
-                    | ((temps[args[2]].mask & tmp) << args[3]));
+            mask = deposit64(temps[args[1]].mask, args[3], args[4],
+                             temps[args[2]].mask);
             break;
 
         CASE_OP_32_64(or):
@@ -1055,9 +1059,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             if (temps[args[1]].state == TCG_TEMP_CONST
                 && temps[args[2]].state == TCG_TEMP_CONST) {
                 s->gen_opc_buf[op_index] = op_to_movi(op);
-                tmp = ((1ull << args[4]) - 1);
-                tmp = (temps[args[1]].val & ~(tmp << args[3]))
-                      | ((temps[args[2]].val & tmp) << args[3]);
+                tmp = deposit64(temps[args[1]].val, args[3], args[4],
+                                temps[args[2]].val);
                 tcg_opt_gen_movi(gen_args, args[0], tmp);
                 gen_args += 2;
                 args += 5;
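For reference, deposit64 is QEMU's generic bit-insertion helper from include/qemu/bitops.h; the open-coded mask arithmetic it replaces above is exactly this computation:

    #include <stdint.h>

    /* Insert the low LEN bits of FIELDVAL into VALUE at bit position START.
       The (~0ULL >> (64 - len)) form also avoids the undefined
       1ull << 64 when len == 64.  */
    static uint64_t deposit64(uint64_t value, int start, int len,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }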
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index dc2c2df890..83d9340fca 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -298,7 +298,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
     int ct;
@@ -524,7 +524,7 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg,
 #if defined(CONFIG_SOFTMMU)
 
 static void add_qemu_ldst_label (TCGContext *s,
-                                 int is_ld,
+                                 bool is_ld,
                                  TCGMemOp opc,
                                  int data_reg,
                                  int data_reg2,
@@ -720,7 +720,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
         break;
     }
 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo,
+    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo,
                         addrhi, mem_index, s->code_ptr, label_ptr);
 #endif
 }
@@ -779,7 +779,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     }
 
 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                         mem_index, s->code_ptr, label_ptr);
 #endif
 }
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index e3395e301c..0d4f5959a7 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -24,7 +24,6 @@
 #ifndef TCG_TARGET_PPC
 #define TCG_TARGET_PPC 1
 
-#define TCG_TARGET_WORDS_BIGENDIAN
 #define TCG_TARGET_NB_REGS 32
 
 typedef enum {
@@ -95,6 +94,7 @@ typedef enum {
 #define TCG_TARGET_HAS_nor_i32 1
 #define TCG_TARGET_HAS_deposit_i32 1
 #define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 1
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 06e440f9bc..45b1c06910 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -290,13 +290,21 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
     if (ct & TCG_CT_CONST) {
         return 1;
-    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
+    }
+
+    /* The only 32-bit constraint we use aside from
+       TCG_CT_CONST is TCG_CT_CONST_S16.  */
+    if (type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
+
+    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
         return 1;
     } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
         return 1;
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
index 7ee50b6c6c..78bbf7a34a 100644
--- a/tcg/ppc64/tcg-target.h
+++ b/tcg/ppc64/tcg-target.h
@@ -24,7 +24,6 @@
 #ifndef TCG_TARGET_PPC64
 #define TCG_TARGET_PPC64 1
 
-#define TCG_TARGET_WORDS_BIGENDIAN
 #define TCG_TARGET_NB_REGS 32
 
 typedef enum {
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 907d9d1744..1d912a7937 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -38,11 +38,10 @@
    a 32-bit displacement here Just In Case.  */
 #define USE_LONG_BRANCHES 0
 
-#define TCG_CT_CONST_32    0x0100
-#define TCG_CT_CONST_MULI  0x0800
-#define TCG_CT_CONST_ORI   0x2000
-#define TCG_CT_CONST_XORI  0x4000
-#define TCG_CT_CONST_CMPI  0x8000
+#define TCG_CT_CONST_MULI  0x100
+#define TCG_CT_CONST_ORI   0x200
+#define TCG_CT_CONST_XORI  0x400
+#define TCG_CT_CONST_CMPI  0x800
 
 /* Several places within the instruction set 0 means "no register"
    rather than TCG_REG_R0.  */
@@ -407,9 +406,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         tcg_regset_clear(ct->u.regs);
         tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
         break;
-    case 'W': /* force 32-bit ("word") immediate */
-        ct->ct |= TCG_CT_CONST_32;
-        break;
     case 'K':
         ct->ct |= TCG_CT_CONST_MULI;
         break;
@@ -437,10 +433,10 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
    can load efficiently, and the immediate load plus the reg-reg OR is
    smaller than the sequential OI's.  */
 
-static int tcg_match_ori(int ct, tcg_target_long val)
+static int tcg_match_ori(TCGType type, tcg_target_long val)
 {
     if (facilities & FACILITY_EXT_IMM) {
-        if (ct & TCG_CT_CONST_32) {
+        if (type == TCG_TYPE_I32) {
             /* All 32-bit ORs can be performed with 1 48-bit insn.  */
             return 1;
         }
@@ -466,13 +462,13 @@ static int tcg_match_ori(int ct, tcg_target_long val)
    extended-immediate facility.  That said, there are a few patterns
    for which it is better to load the value into a register first.  */
 
-static int tcg_match_xori(int ct, tcg_target_long val)
+static int tcg_match_xori(TCGType type, tcg_target_long val)
 {
     if ((facilities & FACILITY_EXT_IMM) == 0) {
         return 0;
     }
 
-    if (ct & TCG_CT_CONST_32) {
+    if (type == TCG_TYPE_I32) {
         /* All 32-bit XORs can be performed with 1 48-bit insn.  */
         return 1;
     }
@@ -487,11 +483,11 @@ static int tcg_match_xori(int ct, tcg_target_long val)
 
 /* Immediates to be used with comparisons.  */
 
-static int tcg_match_cmpi(int ct, tcg_target_long val)
+static int tcg_match_cmpi(TCGType type, tcg_target_long val)
 {
     if (facilities & FACILITY_EXT_IMM) {
         /* The COMPARE IMMEDIATE instruction is available.  */
-        if (ct & TCG_CT_CONST_32) {
+        if (type == TCG_TYPE_I32) {
             /* We have a 32-bit immediate and can compare against anything.  */
             return 1;
         } else {
@@ -515,7 +511,7 @@ static int tcg_match_cmpi(int ct, tcg_target_long val)
 }
 
 /* Test if a constant matches the constraint.  */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
@@ -524,8 +520,7 @@ static int tcg_target_const_match(tcg_target_long val,
         return 1;
     }
 
-    /* Handle the modifiers.  */
-    if (ct & TCG_CT_CONST_32) {
+    if (type == TCG_TYPE_I32) {
         val = (int32_t)val;
     }
 
@@ -541,11 +536,11 @@ static int tcg_target_const_match(tcg_target_long val,
             return val == (int16_t)val;
         }
     } else if (ct & TCG_CT_CONST_ORI) {
-        return tcg_match_ori(ct, val);
+        return tcg_match_ori(type, val);
     } else if (ct & TCG_CT_CONST_XORI) {
-        return tcg_match_xori(ct, val);
+        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
-        return tcg_match_cmpi(ct, val);
+        return tcg_match_cmpi(type, val);
     }
 
     return 0;
@@ -2112,8 +2107,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
 
     { INDEX_op_and_i32, { "r", "0", "ri" } },
-    { INDEX_op_or_i32, { "r", "0", "rWO" } },
-    { INDEX_op_xor_i32, { "r", "0", "rWX" } },
+    { INDEX_op_or_i32, { "r", "0", "rO" } },
+    { INDEX_op_xor_i32, { "r", "0", "rX" } },
 
     { INDEX_op_neg_i32, { "r", "r" } },
 
@@ -2135,9 +2130,9 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
     { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
 
-    { INDEX_op_brcond_i32, { "r", "rWC" } },
-    { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
-    { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
+    { INDEX_op_brcond_i32, { "r", "rC" } },
+    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
+    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
     { INDEX_op_deposit_i32, { "r", "0", "r" } },
 
     { INDEX_op_qemu_ld8u, { "r", "L" } },
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 10adb778c7..b3bfdcc22c 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -24,8 +24,6 @@
 #ifndef TCG_TARGET_S390
 #define TCG_TARGET_S390 1
 
-#define TCG_TARGET_WORDS_BIGENDIAN
-
 typedef enum TCGReg {
     TCG_REG_R0 = 0,
     TCG_REG_R1,
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 152335cfe1..35089b82c9 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -327,14 +327,20 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
 
     if (ct & TCG_CT_CONST) {
         return 1;
-    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+    }
+
+    if (type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
+
+    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
         return 1;
     } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
         return 1;
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 3abf1b41b0..4519c64ae9 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -32,8 +32,6 @@
 # error Unknown pointer size for tcg target
 #endif
 
-#define TCG_TARGET_WORDS_BIGENDIAN
-
 #define TCG_TARGET_NB_REGS 32
 
 typedef enum {
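The common thread in these backend changes is that constant matching now receives the operand's TCGType and truncates before testing, instead of relying on a per-backend "32-bit" constraint flag. The point is that the same bit pattern matches differently at different widths; a minimal stand-in (names hypothetical, not a function from the patch):

    #include <stdint.h>
    #include <stdbool.h>

    /* 0xffffffff is not a signed 16-bit immediate as a 64-bit constant,
       but once truncated to a 32-bit operand it is -1 and matches.  */
    static bool match_s16(int64_t val, bool is_i32)
    {
        if (is_i32) {
            val = (int32_t)val;
        }
        return val == (int16_t)val;
    }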
diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
index 284db0c70d..ad94c0ca51 100644
--- a/tcg/tcg-be-ldst.h
+++ b/tcg/tcg-be-ldst.h
@@ -24,7 +24,7 @@
 #define TCG_MAX_QEMU_LDST 640
 
 typedef struct TCGLabelQemuLdst {
-    int is_ld:1;            /* qemu_ld: 1, qemu_st: 0 */
+    bool is_ld:1;           /* qemu_ld: true, qemu_st: false */
     TCGMemOp opc:4;
     TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
     TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 7eabf22f01..8d4ff7da9b 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -858,7 +858,7 @@ static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
 {
     /* since arg2 and ret have different types, they cannot be the
        same temporary */
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
     tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
     tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
 #else
@@ -888,7 +888,7 @@ static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
 static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
                                   tcg_target_long offset)
 {
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
     tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
     tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
 #else
@@ -2437,14 +2437,12 @@ static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
         tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
         tcg_gen_mov_i32(rl, t);
         tcg_temp_free_i32(t);
-    } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_mulu2_i32) {
+    } else if (TCG_TARGET_REG_BITS == 32) {
         TCGv_i32 t0 = tcg_temp_new_i32();
         TCGv_i32 t1 = tcg_temp_new_i32();
         TCGv_i32 t2 = tcg_temp_new_i32();
         TCGv_i32 t3 = tcg_temp_new_i32();
-        tcg_gen_op4_i32(INDEX_op_mulu2_i32, t0, t1, arg1, arg2);
-        /* Allow the optimizer room to replace mulu2 with two moves.  */
-        tcg_gen_op0(INDEX_op_nop);
+        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
         /* Adjust for negative inputs.  */
         tcg_gen_sari_i32(t2, arg1, 31);
         tcg_gen_sari_i32(t3, arg2, 31);
@@ -2522,26 +2520,6 @@ static inline void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh,
         tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
-    } else if (TCG_TARGET_HAS_mulu2_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
-        TCGv_i64 t3 = tcg_temp_new_i64();
-        tcg_gen_op4_i64(INDEX_op_mulu2_i64, t0, t1, arg1, arg2);
-        /* Allow the optimizer room to replace mulu2 with two moves.  */
-        tcg_gen_op0(INDEX_op_nop);
-        /* Adjust for negative inputs.  */
-        tcg_gen_sari_i64(t2, arg1, 63);
-        tcg_gen_sari_i64(t3, arg2, 63);
-        tcg_gen_and_i64(t2, t2, arg2);
-        tcg_gen_and_i64(t3, t3, arg1);
-        tcg_gen_sub_i64(rh, t1, t2);
-        tcg_gen_sub_i64(rh, rh, t3);
-        tcg_gen_mov_i64(rl, t0);
-        tcg_temp_free_i64(t0);
-        tcg_temp_free_i64(t1);
-        tcg_temp_free_i64(t2);
-        tcg_temp_free_i64(t3);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         int sizemask = 0;
@@ -2569,6 +2547,24 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
         tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
+    } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
+        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t2 = tcg_temp_new_i64();
+        TCGv_i64 t3 = tcg_temp_new_i64();
+        tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
+        /* Adjust for negative inputs.  */
+        tcg_gen_sari_i64(t2, arg1, 63);
+        tcg_gen_sari_i64(t3, arg2, 63);
+        tcg_gen_and_i64(t2, t2, arg2);
+        tcg_gen_and_i64(t3, t3, arg1);
+        tcg_gen_sub_i64(rh, t1, t2);
+        tcg_gen_sub_i64(rh, rh, t3);
+        tcg_gen_mov_i64(rl, t0);
+        tcg_temp_free_i64(t0);
+        tcg_temp_free_i64(t1);
+        tcg_temp_free_i64(t2);
+        tcg_temp_free_i64(t3);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         int sizemask = 0;
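The "adjust for negative inputs" steps rely on the standard identity relating signed and unsigned double-width products: the low halves are identical, and the signed high half equals the unsigned high half minus each operand that was negative times the other. A self-contained check of the same arithmetic at 32x32->64 (illustration only, not code from the patch):

    #include <stdint.h>

    /* Mirror of the tcg_gen_muls2_* fallback: take the unsigned product,
       then subtract b from the high half when a < 0, and a when b < 0.  */
    static void muls2_from_mulu2(int32_t a, int32_t b,
                                 uint32_t *lo, int32_t *hi)
    {
        uint64_t u = (uint64_t)(uint32_t)a * (uint32_t)b;
        uint32_t uh = (uint32_t)(u >> 32);

        uh -= (a < 0 ? (uint32_t)b : 0);
        uh -= (b < 0 ? (uint32_t)a : 0);

        *lo = (uint32_t)u;
        *hi = (int32_t)uh;
    }

For example a = b = -1 gives an unsigned product of 0xfffffffe00000001; the two subtractions bring the high word down to 0, yielding the signed result 1.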
diff --git a/tcg/tcg.c b/tcg/tcg.c
index f1e0763432..21ce9fb505 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -101,7 +101,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                        const int *const_args);
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                        intptr_t arg2);
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 static void tcg_out_tb_init(TCGContext *s);
 static void tcg_out_tb_finalize(TCGContext *s);
@@ -444,7 +444,7 @@ static inline int tcg_global_mem_new_internal(TCGType type, int reg,
         ts->fixed_reg = 0;
         ts->mem_allocated = 1;
         ts->mem_reg = reg;
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
         ts->mem_offset = offset + 4;
 #else
         ts->mem_offset = offset;
@@ -459,7 +459,7 @@ static inline int tcg_global_mem_new_internal(TCGType type, int reg,
         ts->fixed_reg = 0;
         ts->mem_allocated = 1;
         ts->mem_reg = reg;
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
         ts->mem_offset = offset;
 #else
         ts->mem_offset = offset + 4;
@@ -686,7 +686,7 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
     if (ret != TCG_CALL_DUMMY_ARG) {
 #if TCG_TARGET_REG_BITS < 64
         if (sizemask & 1) {
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
             *s->gen_opparam_ptr++ = ret + 1;
             *s->gen_opparam_ptr++ = ret;
 #else
@@ -725,7 +725,7 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
                order.  If another such target is added, this logic may
                have to get more complicated to differentiate between
                stack arguments and register arguments.  */
-#if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
             *s->gen_opparam_ptr++ = args[i] + 1;
             *s->gen_opparam_ptr++ = args[i];
 #else
@@ -2121,7 +2121,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
             ts->mem_coherent = 1;
             s->reg_to_temp[reg] = arg;
         } else if (ts->val_type == TEMP_VAL_CONST) {
-            if (tcg_target_const_match(ts->val, arg_ct)) {
+            if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
                 /* constant is OK for instruction */
                 const_args[i] = 1;
                 new_args[i] = ts->val;
@@ -2365,7 +2365,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
             func_arg = reg;
             tcg_regset_set_reg(allocated_regs, reg);
         } else if (ts->val_type == TEMP_VAL_CONST) {
-            if (tcg_target_const_match(func_addr, arg_ct)) {
+            if (tcg_target_const_match(func_addr, ts->type, arg_ct)) {
                 const_func_arg = 1;
                 func_arg = func_addr;
             } else {
diff --git a/tcg/tcg.h b/tcg/tcg.h
index f7efcb4202..0bb66775a5 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -97,7 +97,6 @@ typedef uint64_t TCGRegSet;
 /* Turn some undef macros into true macros.  */
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
 #endif
 
 #ifndef TCG_TARGET_deposit_i32_valid
@@ -121,6 +120,13 @@ typedef uint64_t TCGRegSet;
 #define TCG_TARGET_HAS_rem_i64 0
 #endif
 
+/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
+#if TCG_TARGET_REG_BITS == 32 \
+    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
+         || defined(TCG_TARGET_HAS_muluh_i32))
+# error "Missing unsigned widening multiply"
+#endif
+
 typedef enum TCGOpcode {
 #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
 #include "tcg-opc.h"
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index fc80704de8..47c0b85d95 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -859,7 +859,7 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
 }
 
 /* Test if a constant matches the constraint. */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
     /* No need to return 0 or 1, 0 or != 0 is good enough. */
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 6e1da8c007..f43492cc67 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -57,12 +57,6 @@
 #define CONFIG_DEBUG_TCG_INTERPRETER
 #endif
 
-#if 0 /* TCI tries to emulate a little endian host. */
-#if defined(HOST_WORDS_BIGENDIAN)
-# define TCG_TARGET_WORDS_BIGENDIAN
-#endif
-#endif
-
 /* Optional instructions. */
 
 #define TCG_TARGET_HAS_bswap16_i32 1
@@ -118,6 +112,8 @@
 #define TCG_TARGET_HAS_mulu2_i64 0
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
+#else
+#define TCG_TARGET_HAS_mulu2_i32 1
 #endif /* TCG_TARGET_REG_BITS == 64 */
 
 #define TCG_TARGET_HAS_new_ldst 0
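The new compile-time check in tcg.h obliges every 32-bit backend to supply some unsigned widening multiply, either mulu2_i32 or muluh_i32, which is why TCI now defines mulu2_i32 above. For reference, the operation those macros guarantee is simply this (plain C illustration, not part of the patch):

    #include <stdint.h>

    /* Unsigned 32x32->64 multiply, returned as two 32-bit halves.
       muluh_i32 is the *hi half alone.  */
    static void mulu2_i32(uint32_t a, uint32_t b,
                          uint32_t *lo, uint32_t *hi)
    {
        uint64_t p = (uint64_t)a * b;
        *lo = (uint32_t)p;
        *hi = (uint32_t)(p >> 32);
    }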