Diffstat (limited to 'include')
-rw-r--r--   include/exec/exec-all.h         24
-rw-r--r--   include/exec/translator.h       11
-rw-r--r--   include/hw/core/cpu.h            4
-rw-r--r--   include/hw/core/tcg-cpu-ops.h    6
-rw-r--r--   include/qemu/atomic.h          247
-rw-r--r--   include/qemu/stats64.h           2
-rw-r--r--   include/tcg/tcg.h               80
7 files changed, 92 insertions, 282 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 754f4130c9..5d1b6d80fb 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -492,13 +492,18 @@ struct TranslationBlock {
     target_ulong cs_base; /* CS base for this block */
     uint32_t flags; /* flags defining in which context the code was generated */
     uint32_t cflags;    /* compile flags */
-#define CF_COUNT_MASK   0x00007fff
-#define CF_LAST_IO      0x00008000 /* Last insn may be an IO access.  */
-#define CF_MEMI_ONLY    0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT   0x00020000
-#define CF_INVALID      0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL     0x00080000 /* Generate code for a parallel context */
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK    0x000001ff
+#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
+#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
+#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT    0x00020000
+#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
+#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
 #define CF_CLUSTER_SHIFT 24
 
 /* Per-vCPU dynamic tracing state used to generate this TB */
@@ -563,10 +568,7 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 }
 
 /* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(CPUState *cpu)
-{
-    return cpu->tcg_cflags;
-}
+uint32_t curr_cflags(CPUState *cpu);
 
 /* TranslationBlock invalidate API */
 #if defined(CONFIG_USER_ONLY)
diff --git a/include/exec/translator.h b/include/exec/translator.h
index dd9c06d40d..d318803267 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -89,15 +89,6 @@ typedef struct DisasContextBase {
  * @insn_start:
  *      Emit the tcg_gen_insn_start opcode.
  *
- * @breakpoint_check:
- *      When called, the breakpoint has already been checked to match the PC,
- *      but the target may decide the breakpoint missed the address
- *      (e.g., due to conditions encoded in their flags).  Return true to
- *      indicate that the breakpoint did hit, in which case no more breakpoints
- *      are checked.  If the breakpoint did hit, emit any code required to
- *      signal the exception, and set db->is_jmp as necessary to terminate
- *      the main loop.
- *
  * @translate_insn:
  *      Disassemble one instruction and set db->pc_next for the start
  *      of the following instruction.  Set db->is_jmp as necessary to
@@ -113,8 +104,6 @@ typedef struct TranslatorOps {
     void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
     void (*tb_start)(DisasContextBase *db, CPUState *cpu);
     void (*insn_start)(DisasContextBase *db, CPUState *cpu);
-    bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
-                             const CPUBreakpoint *bp);
     void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
     void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
     void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
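With CF_COUNT_MASK shrunk to 9 bits and the new CF_NO_GOTO_TB / CF_NO_GOTO_PTR / CF_SINGLE_STEP bits, curr_cflags() stops being a trivial accessor and moves out of line. Its definition is not part of this include-only diff; the following is only a sketch of what the out-of-line version plausibly computes, and the "singlestep" command-line flag and log-mask test are assumptions about the accel/tcg side:

/* Sketch only: the real out-of-line definition lives in accel/tcg, not here. */
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Assumed policy: for gdbstub single-step, request one insn per TB
     * and suppress both forms of chaining so each step returns to the
     * main loop; "singlestep" is the -singlestep command line option.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

The low CF_COUNT_MASK bits double as the requested instruction count, so OR-ing in 1 limits the TB to a single instruction.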
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 4e0ea68efc..bc864564ce 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -103,6 +103,9 @@ struct SysemuCPUOps;
  *       also implement the synchronize_from_tb hook.
  * @gdb_read_register: Callback for letting GDB read a register.
  * @gdb_write_register: Callback for letting GDB write a register.
+ * @gdb_adjust_breakpoint: Callback for adjusting the address of a
+ *       breakpoint.  Used by AVR to handle a gdb mis-feature with
+ *       its Harvard architecture split code and data.
  * @gdb_num_core_regs: Number of core registers accessible to GDB.
  * @gdb_core_xml_file: File name for core registers GDB XML description.
  * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
@@ -137,6 +140,7 @@ struct CPUClass {
     void (*set_pc)(CPUState *cpu, vaddr value);
     int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
+    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
 
     const char *gdb_core_xml_file;
     gchar * (*gdb_arch_name)(CPUState *cpu);
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 72d791438c..eab27d0c03 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -89,6 +89,12 @@ struct TCGCPUOps {
     bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
 
     /**
+     * @debug_check_breakpoint: return true if the architectural
+     * breakpoint whose PC has matched should really fire.
+     */
+    bool (*debug_check_breakpoint)(CPUState *cpu);
+
+    /**
      * @io_recompile_replay_branch: Callback for cpu_io_recompile.
      *
      * The cpu has been stopped, and cpu_restore_state_from_tb has been
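Together with the gdb_adjust_breakpoint hook above, debug_check_breakpoint replaces the per-target TranslatorOps.breakpoint_check removed from translator.h: breakpoint matching is now done generically, and a target only supplies a predicate saying whether a matched architectural breakpoint should really fire. A minimal sketch of the wiring follows; the mytarget/MYTARGET names and the alias-bit constant are entirely hypothetical, only the hook signatures come from this diff:

/* Hypothetical target glue. */
static bool mytarget_debug_check_breakpoint(CPUState *cs)
{
    /* A real target would consult guest state here, e.g. condition flags. */
    return true;
}

static vaddr mytarget_gdb_adjust_breakpoint(CPUState *cs, vaddr addr)
{
    /* e.g. strip a hypothetical data-space alias bit, as AVR needs to */
    return addr & ~MYTARGET_DATA_ALIAS_BIT;
}

static const struct TCGCPUOps mytarget_tcg_ops = {
    .debug_check_breakpoint = mytarget_debug_check_breakpoint,
    /* other hooks omitted */
};

static void mytarget_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->gdb_adjust_breakpoint = mytarget_gdb_adjust_breakpoint;
    cc->tcg_ops = &mytarget_tcg_ops;
}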
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 3ccf84fd46..112a29910b 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -60,8 +60,9 @@
                                    (unsigned short)1,                  \
       (expr)+0))))))
 
-#ifdef __ATOMIC_RELAXED
-/* For C11 atomic ops */
+#ifndef __ATOMIC_RELAXED
+#error "Expecting C11 atomic ops"
+#endif
 
 /* Manual memory barriers
  *
@@ -239,193 +240,8 @@
 #define qatomic_xor(ptr, n) \
     ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
 
-#else /* __ATOMIC_RELAXED */
-
-#ifdef __alpha__
-#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-
-/*
- * Because of the strongly ordered storage model, wmb() and rmb() are nops
- * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
- * qemu memory or non-temporal load/stores from C code.
- */
-#define smp_mb_release()   barrier()
-#define smp_mb_acquire()   barrier()
-
-/*
- * __sync_lock_test_and_set() is documented to be an acquire barrier only,
- * but it is a full barrier at the hardware level.  Add a compiler barrier
- * to make it a full barrier also at the compiler level.
- */
-#define qatomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))
-
-#elif defined(_ARCH_PPC)
-
-/*
- * We use an eieio() for wmb() on powerpc.  This assumes we don't
- * need to order cacheable and non-cacheable stores with respect to
- * each other.
- *
- * smp_mb has the same problem as on x86 for not-very-new GCC
- * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
- */
-#define smp_wmb()          ({ asm volatile("eieio" ::: "memory"); (void)0; })
-#if defined(__powerpc64__)
-#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#else
-#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
-#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
-#endif
-#define smp_mb()           ({ asm volatile("sync" ::: "memory"); (void)0; })
-
-#endif /* _ARCH_PPC */
-
-/*
- * For (host) platforms we don't have explicit barrier definitions
- * for, we use the gcc __sync_synchronize() primitive to generate a
- * full barrier.  This should be safe on all platforms, though it may
- * be overkill for smp_mb_acquire() and smp_mb_release().
- */
-#ifndef smp_mb
-#define smp_mb()            __sync_synchronize()
-#endif
-
-#ifndef smp_mb_acquire
-#define smp_mb_acquire()    __sync_synchronize()
-#endif
-
-#ifndef smp_mb_release
-#define smp_mb_release()    __sync_synchronize()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()   barrier()
-#endif
-
-#ifndef signal_barrier
-#define signal_barrier()    barrier()
-#endif
-
-/* These will only be atomic if the processor does the fetch or store
- * in a single issue memory operation
- */
-#define qatomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
-#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
-
-#define qatomic_read(ptr)       qatomic_read__nocheck(ptr)
-#define qatomic_set(ptr, i)     qatomic_set__nocheck(ptr,i)
-
-/**
- * qatomic_rcu_read - reads a RCU-protected pointer to a local variable
- * into a RCU read-side critical section. The pointer can later be safely
- * dereferenced within the critical section.
- *
- * This ensures that the pointer copy is invariant thorough the whole critical
- * section.
- *
- * Inserts memory barriers on architectures that require them (currently only
- * Alpha) and documents which pointers are protected by RCU.
- *
- * qatomic_rcu_read also includes a compiler barrier to ensure that
- * value-speculative optimizations (e.g. VSS: Value Speculation
- * Scheduling) does not perform the data read before the pointer read
- * by speculating the value of the pointer.
- *
- * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
- */
-#define qatomic_rcu_read(ptr)    ({              \
-    typeof(*ptr) _val = qatomic_read(ptr);       \
-    smp_read_barrier_depends();                  \
-    _val;                                        \
-})
-
-/**
- * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
- * meant to be read by RCU read-side critical sections.
- *
- * Documents which pointers will be dereferenced by RCU read-side critical
- * sections and adds the required memory barriers on architectures requiring
- * them. It also makes sure the compiler does not reorder code initializing the
- * data structure before its publication.
- *
- * Should match qatomic_rcu_read().
- */
-#define qatomic_rcu_set(ptr, i) do {             \
-    smp_wmb();                                   \
-    qatomic_set(ptr, i);                         \
-} while (0)
-
-#define qatomic_load_acquire(ptr)    ({          \
-    typeof(*ptr) _val = qatomic_read(ptr);       \
-    smp_mb_acquire();                            \
-    _val;                                        \
-})
-
-#define qatomic_store_release(ptr, i)  do {      \
-    smp_mb_release();                            \
-    qatomic_set(ptr, i);                         \
-} while (0)
-
-#ifndef qatomic_xchg
-#if defined(__clang__)
-#define qatomic_xchg(ptr, i)    __sync_swap(ptr, i)
-#else
-/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
-#define qatomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
-#endif
-#endif
-#define qatomic_xchg__nocheck  qatomic_xchg
-
-/* Provide shorter names for GCC atomic builtins.  */
-#define qatomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
-#define qatomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
-
-#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define qatomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
-#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-
-#define qatomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
-#define qatomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
-#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define qatomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
-#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define qatomic_cmpxchg(ptr, old, new) \
-    __sync_val_compare_and_swap(ptr, old, new)
-#define qatomic_cmpxchg__nocheck(ptr, old, new)  qatomic_cmpxchg(ptr, old, new)
-
-/* And even shorter names that return void.  */
-#define qatomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
-#define qatomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
-#define qatomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
-#define qatomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
-#define qatomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
-#define qatomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
-#define qatomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))
-
-#endif /* __ATOMIC_RELAXED */
 
-#ifndef smp_wmb
 #define smp_wmb()   smp_mb_release()
-#endif
-#ifndef smp_rmb
 #define smp_rmb()   smp_mb_acquire()
-#endif
-
-/* This is more efficient than a store plus a fence.  */
-#if !defined(__SANITIZE_THREAD__)
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
-#endif
-#endif
 
 /* qatomic_mb_read/set semantics map Java volatile variables. They are
  * less expensive on some platforms (notably POWER) than fully
  *
@@ -435,16 +251,16 @@
  * use. See docs/devel/atomics.rst for more discussion.
  */
 
-#ifndef qatomic_mb_read
 #define qatomic_mb_read(ptr)                             \
     qatomic_load_acquire(ptr)
-#endif
 
-#ifndef qatomic_mb_set
-#define qatomic_mb_set(ptr, i) do {                     \
-    qatomic_store_release(ptr, i);                      \
-    smp_mb();                                           \
-} while(0)
+#if !defined(__SANITIZE_THREAD__) && \
+    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+/* This is more efficient than a store plus a fence.  */
+# define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
+#else
+# define qatomic_mb_set(ptr, i) \
+    ({ qatomic_store_release(ptr, i); smp_mb(); })
 #endif
 
 #define qatomic_fetch_inc_nonzero(ptr) ({                               \
@@ -455,28 +271,29 @@
     _oldn;                                                              \
 })
 
-/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
-#ifdef CONFIG_ATOMIC64
-static inline int64_t qatomic_read_i64(const int64_t *ptr)
-{
-    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
-    return qatomic_read__nocheck(ptr);
-}
-
-static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
-{
-    return qatomic_read__nocheck(ptr);
-}
-
-static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
-{
-    qatomic_set__nocheck(ptr, val);
-}
+/*
+ * Abstractions to access atomically (i.e. "once") i64/u64 variables.
+ *
+ * The i386 abi is odd in that by default members are only aligned to
+ * 4 bytes, which means that 8-byte types can wind up mis-aligned.
+ * Clang will then warn about this, and emit a call into libatomic.
+ *
+ * Use of these types in structures when they will be used with atomic
+ * operations can avoid this.
+ */
+typedef int64_t aligned_int64_t __attribute__((aligned(8)));
+typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
 
-static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
-{
-    qatomic_set__nocheck(ptr, val);
-}
+#ifdef CONFIG_ATOMIC64
+/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
+#define qatomic_read_i64(P) \
+    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
+#define qatomic_read_u64(P) \
+    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
+#define qatomic_set_i64(P, V) \
+    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
+#define qatomic_set_u64(P, V) \
+    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
 
 static inline void qatomic64_init(void)
 {
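The deleted half of the header was the pre-C11 fallback; with __ATOMIC_RELAXED now mandatory, qatomic_mb_set() also gains an explicit portable branch instead of inheriting a leftover per-architecture definition. A minimal sketch of the paired use described by the comment kept above, with invented names for the shared variables:

#include "qemu/osdep.h"
#include "qemu/atomic.h"

static int hypothetical_payload;
static bool hypothetical_ready;

static void publish(void)
{
    hypothetical_payload = 42;
    /* store-release plus a trailing full barrier (an xchg on x86/s390x) */
    qatomic_mb_set(&hypothetical_ready, true);
}

static int consume(void)
{
    /* load-acquire: a reader that sees the flag also sees the payload */
    if (qatomic_mb_read(&hypothetical_ready)) {
        return hypothetical_payload;
    }
    return -1;
}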
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index fdd3d1b8f9..802402254b 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -21,7 +21,7 @@
 typedef struct Stat64 {
 #ifdef CONFIG_ATOMIC64
-    uint64_t value;
+    aligned_uint64_t value;
 #else
     uint32_t low, high;
     uint32_t lock;
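Stat64 is exactly the case the new typedefs address: inside a struct on an i386 host a plain uint64_t member may end up only 4-byte aligned, which (as the comment added above notes) makes Clang warn and fall back to libatomic. The aligned_uint64_t member keeps the field naturally aligned, and the _Generic wrappers reject accesses through the wrong pointer type at compile time. An invented example of the same pattern; HypotheticalCounter is not QEMU code:

#include "qemu/osdep.h"
#include "qemu/atomic.h"

typedef struct HypotheticalCounter {
    uint32_t id;
    aligned_uint64_t bytes;   /* a plain uint64_t could be 4-aligned here */
} HypotheticalCounter;

/* Single writer; concurrent readers always observe a whole 64-bit value. */
static void counter_add(HypotheticalCounter *c, uint64_t n)
{
    qatomic_set_u64(&c->bytes, qatomic_read_u64(&c->bytes) + n);
}

static uint64_t counter_get(HypotheticalCounter *c)
{
    /* qatomic_read_u64(&c->id) would not compile: _Generic rejects it */
    return qatomic_read_u64(&c->bytes);
}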
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 25dd19d6e1..44ccd86f3e 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -1341,31 +1341,32 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 # define helper_ret_stl_mmu   helper_le_stl_mmu
 # define helper_ret_stq_mmu   helper_le_stq_mmu
 #endif
+#endif /* CONFIG_SOFTMMU */
 
-uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+                                 uint32_t cmpv, uint32_t newv,
+                                 TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint64_t cmpv, uint64_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint64_t cmpv, uint64_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 
 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
-TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
     (CPUArchState *env, target_ulong addr, TYPE val,  \
      TCGMemOpIdx oi, uintptr_t retaddr);
 
@@ -1411,31 +1412,22 @@ GEN_ATOMIC_HELPER_ALL(xchg)
 #undef GEN_ATOMIC_HELPER_ALL
 #undef GEN_ATOMIC_HELPER
-#endif /* CONFIG_SOFTMMU */
 
-/*
- * These aren't really a "proper" helpers because TCG cannot manage Int128.
- * However, use the same format as the others, for use by the backends.
- *
- * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
- * the ld/st functions are only defined if HAVE_ATOMIC128,
- * as defined by <qemu/atomic128.h>.
- */
-Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
-
-Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
-                                TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
-                                TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                              TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                              TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  TCGMemOpIdx oi, uintptr_t retaddr);
+
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           TCGMemOpIdx oi, uintptr_t retaddr);
 
 #ifdef CONFIG_DEBUG_TCG
 void tcg_assert_listed_vecop(TCGOpcode);
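Renaming helper_atomic_* to cpu_atomic_* and moving the prototypes out of the CONFIG_SOFTMMU block presents them as ordinary cpu-level entry points that target C code can call in both softmmu and user-only builds. A sketch of that calling pattern; do_cas32_be and the include list are illustrative guesses, not an interface added by this diff:

/* Fragment for a target's helper .c file; includes are approximate. */
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"

/* Invented wrapper: 32-bit big-endian compare-and-swap at a guest address. */
static uint32_t do_cas32_be(CPUArchState *env, target_ulong addr,
                            uint32_t oldv, uint32_t newv,
                            int mmu_idx, uintptr_t retaddr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, mmu_idx);

    return cpu_atomic_cmpxchgl_be_mmu(env, addr, oldv, newv, oi, retaddr);
}

A real helper would typically pass GETPC() from its outermost entry point as retaddr, so that a fault inside the access unwinds to the guest instruction.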