Diffstat (limited to 'target/arm')
 target/arm/cpu.c               |   2
 target/arm/helper.c            |   9
 target/arm/internals.h         |  12
 target/arm/ptw.c               | 141
 target/arm/tcg/cpu-v7m.c       |   2
 target/arm/tcg/m_helper.c      |   8
 target/arm/tcg/tlb_helper.c    |  47
 target/arm/tcg/translate-a64.c |   4
 8 files changed, 117 insertions(+), 108 deletions(-)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 19191c2391..1320fd8c8f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2663,7 +2663,7 @@ static const TCGCPUOps arm_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_cpu_do_interrupt,
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3f77b40734..0a731a38e8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3599,11 +3599,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     GetPhysAddrResult res = {};
 
     /*
-     * I_MXTJT: Granule protection checks are not performed on the final address
-     * of a successful translation.
+     * I_MXTJT: Granule protection checks are not performed on the final
+     * address of a successful translation. This is a translation not a
+     * memory reference, so "memop = none = 0".
      */
-    ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
-                                         &res, &fi);
+    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+                                         mmu_idx, ss, &res, &fi);
 
     /*
      * ATS operations only do S1 or S1+S2 translations, so we never
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 1e5da81ce9..299a96a81a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -816,9 +816,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type, uintptr_t ra);
 #else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra);
 #endif
 
 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
@@ -1432,6 +1432,7 @@ typedef struct GetPhysAddrResult {
  * @env: CPUARMState
  * @address: virtual address to get physical address for
  * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
  * @mmu_idx: MMU index indicating required translation regime
  * @result: set on translation success.
  * @fi: set to fault info if the translation fails
@@ -1450,7 +1451,7 @@ typedef struct GetPhysAddrResult {
  * value.
  */
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
@@ -1460,6 +1461,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
  * @env: CPUARMState
  * @address: virtual address to get physical address for
  * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
  * @mmu_idx: MMU index indicating required translation regime
  * @space: security space for the access
  * @result: set on translation success.
@@ -1469,7 +1471,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
  * a Granule Protection Check on the resulting address.
  */
 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
-                                    MMUAccessType access_type,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
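Every get_phys_addr* entry point now takes the access's MemOp, with 0 meaning "no memory reference" (as in do_ats_write() above, and in the M-profile helpers further down). The reason 0 is safe: it decodes as a one-byte operation with no alignment requirement, so no address can ever fail its alignment mask. A standalone model of that decoding follows; the MO_* values mirror QEMU's include/exec/memop.h in simplified form and are redefined locally so the example compiles on its own.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's MemOp encoding (illustrative values). */
enum {
    MO_8  = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_SIZE = 3,
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
    MO_UNALN = 0,                 /* no alignment required */
    MO_ALIGN = MO_AMASK,          /* natural alignment for the size */
};

/* Alignment exponent implied by a memop: 0 means any address is fine. */
static unsigned alignment_bits(unsigned memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        return 0;                  /* no alignment required */
    } else if (a == MO_ALIGN) {
        return memop & MO_SIZE;    /* natural alignment for the size */
    }
    return a >> MO_ASHIFT;         /* explicit alignment */
}

int main(void)
{
    /* memop = 0: translation only, e.g. an AT instruction.
     * Zero alignment bits means the mask below is always 0. */
    assert(alignment_bits(0) == 0);

    /* A naturally aligned 4-byte access needs the low 2 bits clear. */
    unsigned bits = alignment_bits(MO_32 | MO_ALIGN);
    unsigned long addr = 0x1002;
    printf("misaligned: %d\n", (addr & ((1ul << bits) - 1)) != 0);
    return 0;
}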
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 659855133c..dd40268397 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -75,13 +75,13 @@ typedef struct S1Translate {
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                 vaddr address,
-                                MMUAccessType access_type,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi);
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                               vaddr address,
-                              MMUAccessType access_type,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi);
 
@@ -579,7 +579,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         };
         GetPhysAddrResult s2 = { };
 
-        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
             goto fail;
         }
 
@@ -1684,12 +1684,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
  * @ptw: Current and next stage parameters for the walk.
  * @address: virtual address to get physical address for
  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @memop: memory operation feeding this access, or 0 for none
  * @result: set on translation success,
  * @fi: set to fault info if the translation fails
  */
 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                uint64_t address,
-                               MMUAccessType access_type,
+                               MMUAccessType access_type, MemOp memop,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -2028,8 +2029,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
             xn = extract64(attrs, 53, 2);
             result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
         }
+
+        result->cacheattrs.is_s2_format = true;
+        result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         int nse, ns = extract32(attrs, 5, 1);
+        uint8_t attrindx;
+        uint64_t mair;
+
         switch (out_space) {
         case ARMSS_Root:
             /*
@@ -2101,6 +2114,49 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
          */
         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
                                     result->f.attrs.space, out_space);
+
+        /* Index into MAIR registers for cache attributes */
+        attrindx = extract32(attrs, 2, 3);
+        mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+        assert(attrindx <= 7);
+        result->cacheattrs.is_s2_format = false;
+        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
+        }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
+    }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, the correct ordering for alignment, permission,
+     * and stage 2 faults is:
+     *  - Alignment fault caused by the memory type
+     *  - Permission fault
+     *  - A stage 2 fault on the memory access
+     * Perform the alignment check now, so that we recognize it in
+     * the correct order.  Set TLB_CHECK_ALIGNED so that any subsequent
+     * softmmu tlb hit will also check the alignment; clear along the
+     * non-device path so that tlb_fill_flags is consistent in the
+     * event of restart_atomic_update.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        unsigned a_bits = memop_atomicity_bits(memop);
+        if (address & ((1 << a_bits) - 1)) {
+            fi->type = ARMFault_Alignment;
+            goto do_fault;
+        }
+        result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+    } else {
+        result->f.tlb_fill_flags = 0;
     }
 
     if (!(result->f.prot & (1 << access_type))) {
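The hunk above moves the attribute decoding earlier so that "device" is known before the permission check, then performs the alignment test in the architecturally required order. A compilable sketch of that ordering follows; every type and helper in it is a hypothetical stand-in, and only the R_XCHFJ ordering itself comes from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { FAULT_NONE, FAULT_ALIGNMENT, FAULT_PERMISSION } Fault;

typedef struct {
    bool     device;      /* resolved from MAIR / stage-2 attributes */
    unsigned prot;        /* bit 0 = read, 1 = write, 2 = exec */
} PageDesc;

/* atom_bits: alignment exponent required for single-copy atomicity; a
 * real implementation derives it from the MemOp (memop_atomicity_bits). */
static Fault check_access(const PageDesc *pd, uint64_t addr,
                          unsigned access_type, unsigned atom_bits)
{
    /* 1. Alignment fault caused by the memory type comes first. */
    if (pd->device && (addr & ((1u << atom_bits) - 1))) {
        return FAULT_ALIGNMENT;
    }
    /* 2. Only then is the permission fault recognized. */
    if (!(pd->prot & (1u << access_type))) {
        return FAULT_PERMISSION;
    }
    return FAULT_NONE;
}

int main(void)
{
    /* Misaligned 4-byte access to read-only Device memory:
     * alignment wins over permission, per R_XCHFJ. */
    PageDesc dev = { .device = true, .prot = 1 /* read-only */ };
    printf("%d\n", check_access(&dev, 0x1002, 1 /* write */, 2));
    return 0;
}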
@@ -2130,51 +2186,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     result->f.attrs.space = out_space;
     result->f.attrs.secure = arm_space_is_secure(out_space);
 
-    if (regime_is_stage2(mmu_idx)) {
-        result->cacheattrs.is_s2_format = true;
-        result->cacheattrs.attrs = extract32(attrs, 2, 4);
-        /*
-         * Security state does not really affect HCR_EL2.FWB;
-         * we only need to filter FWB for aa32 or other FEAT.
-         */
-        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
-                                     result->cacheattrs.attrs);
-    } else {
-        /* Index into MAIR registers for cache attributes */
-        uint8_t attrindx = extract32(attrs, 2, 3);
-        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
-        assert(attrindx <= 7);
-        result->cacheattrs.is_s2_format = false;
-        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
-
-        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
-        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
-            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
-        }
-        device = S1_attrs_are_device(result->cacheattrs.attrs);
-    }
-
-    /*
-     * Enable alignment checks on Device memory.
-     *
-     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
-     * for alignment, permission, and stage 2 faults should be:
-     *  - Alignment fault caused by the memory type
-     *  - Permission fault
-     *  - A stage 2 fault on the memory access
-     * but due to the way the TCG softmmu TLB operates, we will have
-     * implicitly done the permission check and the stage2 lookup in
-     * finding the TLB entry, so the alignment check cannot be done sooner.
-     *
-     * In v7, for a CPU without the Virtualization Extensions this
-     * access is UNPREDICTABLE; we choose to make it take the alignment
-     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
-     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
-     */
-    if (device) {
-        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
-    }
-
     /*
      * For FEAT_LPA2 and effective DS, the SH field in the attributes
      * was re-purposed for output address bits.  The SH attribute in
@@ -3301,7 +3312,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
 
 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                    vaddr address,
-                                   MMUAccessType access_type,
+                                   MMUAccessType access_type, MemOp memop,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
 {
@@ -3313,7 +3324,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     ARMSecuritySpace ipa_space;
     uint64_t hcr;
 
-    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, address, access_type,
+                              memop, result, fi);
 
     /* If S1 fails, return early.  */
     if (ret) {
@@ -3339,7 +3351,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     cacheattrs1 = result->cacheattrs;
     memset(result, 0, sizeof(*result));
 
-    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
+                              memop, result, fi);
     fi->s2addr = ipa;
 
     /* Combine the S1 and S2 perms.  */
@@ -3406,7 +3419,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                 vaddr address,
-                                MMUAccessType access_type,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
 {
@@ -3469,7 +3482,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
         if (arm_feature(env, ARM_FEATURE_EL2) &&
             !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
             return get_phys_addr_twostage(env, ptw, address, access_type,
-                                          result, fi);
+                                          memop, result, fi);
         }
         /* fall through */
 
@@ -3532,7 +3545,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
+        return get_phys_addr_lpae(env, ptw, address, access_type,
+                                  memop, result, fi);
     } else if (arm_feature(env, ARM_FEATURE_V7) ||
                regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -3543,11 +3557,12 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                               vaddr address,
-                              MMUAccessType access_type,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi)
 {
-    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+    if (get_phys_addr_nogpc(env, ptw, address, access_type,
+                            memop, result, fi)) {
         return true;
     }
     if (!granule_protection_check(env, result->f.phys_addr,
@@ -3559,7 +3574,7 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
 }
 
 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
-                                    MMUAccessType access_type,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
@@ -3568,11 +3583,12 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
         .in_mmu_idx = mmu_idx,
         .in_space = space,
     };
-    return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
+    return get_phys_addr_nogpc(env, &ptw, address, access_type,
+                               memop, result, fi);
 }
 
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     S1Translate ptw = {
@@ -3641,7 +3657,8 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
     }
     ptw.in_space = ss;
 
-    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+    return get_phys_addr_gpc(env, &ptw, address, access_type,
+                             memop, result, fi);
 }
 
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
@@ -3660,7 +3677,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     ARMMMUFaultInfo fi = {};
     bool ret;
 
-    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
     *attrs = res.f.attrs;
 
     if (ret) {
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index 5496f14dc1..58e54578d6 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -242,7 +242,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_v7m_cpu_do_interrupt,
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 23d7f73035..f7354f3c6e 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
     int exc;
     bool exc_secure;
 
-    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             if (mode == STACK_LAZYFP) {
@@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
     bool exc_secure;
     uint32_t value;
 
-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
@@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
                       "...really SecureFault with SFSR.INVEP\n");
         return false;
     }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
         /* the MPU lookup failed */
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     ARMMMUFaultInfo fi = {};
     uint32_t value;
 
-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 885bf4ec14..8841f039bc 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 }
 
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr)
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo local_fi, *fi;
-    int ret;
 
     /*
      * Allow S1_ptw_translate to see any fault generated here.
@@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
 
     /*
-     * Walk the page table and (if the mapping exists) add the page
-     * to the TLB.  On success, return true.  Otherwise, if probing,
-     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
-     * register format, and signal the fault.
+     * Per R_XCHFJ, alignment fault not due to memory type has
+     * highest precedence.  Otherwise, walk the page table and
+     * collect the page description.
      */
-    ret = get_phys_addr(&cpu->env, address, access_type,
-                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &res, fi);
-    if (likely(!ret)) {
-        /*
-         * Map a single [sub]page. Regions smaller than our declared
-         * target page size are handled specially, so for those we
-         * pass in the exact addresses.
-         */
-        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
-            res.f.phys_addr &= TARGET_PAGE_MASK;
-            address &= TARGET_PAGE_MASK;
-        }
-
+    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
+        fi->type = ARMFault_Alignment;
+    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
+                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+                              &res, fi)) {
         res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
         res.f.extra.arm.shareability = res.cacheattrs.shareability;
-
-        tlb_set_page_full(cs, mmu_idx, address, &res.f);
+        *out = res.f;
         return true;
-    } else if (probe) {
+    }
+    if (probe) {
         return false;
-    } else {
-        /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
+
+    /* Now we have a real cpu fault.  */
+    cpu_restore_state(cs, ra);
+    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
 }
 #else
 void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
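The tlb_helper.c hunk above is the contract change in miniature: the old tlb_fill hook both walked and installed the TLB entry via tlb_set_page_full(), so the alignment check could only happen after an entry already existed; tlb_fill_align checks alignment first and hands the completed entry back through *out for the core to install. A standalone sketch of that control-flow difference follows, using stub types rather than QEMU's real CPUTLBEntryFull/core-TLB API.

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t phys_addr; unsigned flags; } EntryFull;

static bool walk_page_tables(uint64_t addr, EntryFull *e)
{
    e->phys_addr = addr & ~0xfffull;   /* pretend identity mapping */
    e->flags = 0;
    return true;
}

static void core_install_entry(const EntryFull *e) { (void)e; }

/* Old style: the target hook both translated and installed the entry,
 * so the core could not order an alignment check before the walk. */
static bool old_tlb_fill(uint64_t addr)
{
    EntryFull e;
    if (!walk_page_tables(addr, &e)) {
        return false;
    }
    core_install_entry(&e);            /* tlb_set_page_full() analogue */
    return true;
}

/* New style: the hook checks alignment first (highest priority per
 * R_XCHFJ), then only fills *out; the core installs the entry. */
static bool new_tlb_fill_align(EntryFull *out, uint64_t addr,
                               unsigned align_bits)
{
    if (addr & ((1u << align_bits) - 1)) {
        return false;                  /* alignment fault, before the walk */
    }
    return walk_page_tables(addr, out);
}

int main(void)
{
    EntryFull e;
    old_tlb_fill(0x1000);
    /* Misaligned 4-byte access: faults, so this returns 1. */
    return new_tlb_fill_align(&e, 0x1002, 2) ? 0 : 1;
}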
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 071b6349fc..ec0b1ee252 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -294,7 +294,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+        desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
         desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
 
         ret = tcg_temp_new_i64();
@@ -326,7 +326,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+        desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
 
         ret = tcg_temp_new_i64();
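For translate-a64.c the only change is the get_alignment_bits() to memop_alignment_bits() rename; the result is still deposited into the MTE descriptor's ALIGN field. Below is a small model of the deposit/extract idiom behind FIELD_DP32; the field offsets are hypothetical, not the real MTEDESC layout.

#include <assert.h>
#include <stdint.h>

/* Deposit a len-bit field at bit position start of val. */
static uint32_t deposit32(uint32_t val, int start, int len, uint32_t field)
{
    uint32_t mask = ((1u << len) - 1) << start;
    return (val & ~mask) | ((field << start) & mask);
}

/* Extract a len-bit field from bit position start of val. */
static uint32_t extract32(uint32_t val, int start, int len)
{
    return (val >> start) & ((1u << len) - 1);
}

int main(void)
{
    enum { ALIGN_SHIFT = 8, ALIGN_LEN = 3 };   /* hypothetical offsets */
    uint32_t desc = 0;

    /* e.g. a naturally aligned 8-byte access has alignment exponent 3 */
    desc = deposit32(desc, ALIGN_SHIFT, ALIGN_LEN, 3);
    assert(extract32(desc, ALIGN_SHIFT, ALIGN_LEN) == 3);
    return 0;
}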