| author | Peter Maydell <peter.maydell@linaro.org> | 2016-09-16 16:54:50 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2016-09-16 16:54:50 +0100 |
| commit | e3571ae30cd26d19efd4554c25e32ef64d6a36b3 | |
| tree | b753d7c12df7c6c955bc93e3947c97cd7428125b | |
| parent | ebc231d7daf1f41b23d8b6a6d1234800b86e5fe2 | |
| parent | 34f939218ce78163171addd63750e1e0300376ab | |
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20160916' into staging
```text
tcg queued patches

# gpg: Signature made Fri 16 Sep 2016 16:14:20 BST
# gpg: using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg: aka "Richard Henderson <rth@redhat.com>"
# gpg: aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC 16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20160916:
  tcg: Optimize fence instructions
  target-i386: Generate fences for x86
  target-aarch64: Generate fences for aarch64
  target-arm: Generate fences in ARMv7 frontend
  target-alpha: Generate fence op
  tcg/tci: Add support for fence
  tcg/sparc: Add support for fence
  tcg/s390: Add support for fence
  tcg/ppc: Add support for fence
  tcg/mips: Add support for fence
  tcg/ia64: Add support for fence
  tcg/arm: Add support for fence
  tcg/aarch64: Add support for fence
  tcg/i386: Add support for fence
  Introduce TCGOpcode for memory barrier
  cpu-exec: Check -dfilter for -d cpu
  tcg: Merge GETPC and GETRA
  tcg: Support arbitrary size + alignment

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
```
Diffstat (limited to 'tcg/tcg.h')
| -rw-r--r-- | tcg/tcg.h | 68 |
1 file changed, 36 insertions, 32 deletions
```diff
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 9bf31bbc84..c9949aa79b 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -287,20 +287,19 @@ typedef enum TCGMemOp {
      * MO_ALIGN accesses will result in a call to the CPU's
      * do_unaligned_access hook if the guest address is not aligned.
      * The default depends on whether the target CPU defines ALIGNED_ONLY.
+     *
      * Some architectures (e.g. ARMv8) need the address which is aligned
      * to a size more than the size of the memory access.
-     * To support such check it's enough the current costless alignment
-     * check implementation in QEMU, but we need to support
-     * an alignment size specifying.
-     * MO_ALIGN supposes a natural alignment
-     * (i.e. the alignment size is the size of a memory access).
-     * Note that an alignment size must be equal or greater
-     * than an access size.
+     * Some architectures (e.g. SPARCv9) need an address which is aligned,
+     * but less strictly than the natural alignment.
+     *
+     * MO_ALIGN supposes the alignment size is the size of a memory access.
+     *
      * There are three options:
-     * - an alignment to the size of an access (MO_ALIGN);
-     * - an alignment to the specified size that is equal or greater than
-     *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
      * - unaligned access permitted (MO_UNALN).
+     * - an alignment to the size of an access (MO_ALIGN);
+     * - an alignment to a specified size, which may be more or less than
+     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
      */
     MO_ASHIFT = 4,
     MO_AMASK = 7 << MO_ASHIFT,
@@ -353,38 +352,26 @@ typedef enum TCGMemOp {
  * @memop: TCGMemOp value
  *
  * Extract the alignment size from the memop.
- *
- * Returns: 0 in case of byte access (which is always aligned);
- *          positive value - number of alignment bits;
- *          negative value if unaligned access enabled
- *          and this is not a byte access.
  */
-static inline int get_alignment_bits(TCGMemOp memop)
+static inline unsigned get_alignment_bits(TCGMemOp memop)
 {
-    int a = memop & MO_AMASK;
-    int s = memop & MO_SIZE;
-    int r;
+    unsigned a = memop & MO_AMASK;
 
     if (a == MO_UNALN) {
-        /* Negative value if unaligned access enabled,
-         * or zero value in case of byte access.
-         */
-        return -s;
+        /* No alignment required.  */
+        a = 0;
     } else if (a == MO_ALIGN) {
-        /* A natural alignment: return a number of access size bits */
-        r = s;
+        /* A natural alignment requirement.  */
+        a = memop & MO_SIZE;
     } else {
-        /* Specific alignment size. It must be equal or greater
-         * than the access size.
-         */
-        r = a >> MO_ASHIFT;
-        tcg_debug_assert(r >= s);
+        /* A specific alignment requirement.  */
+        a = a >> MO_ASHIFT;
     }
 #if defined(CONFIG_SOFTMMU)
     /* The requested alignment cannot overlap the TLB flags.  */
-    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
 #endif
-    return r;
+    return a;
 }
 
 typedef tcg_target_ulong TCGArg;
```
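The two hunks above change the MO_AMASK field from a "natural or stricter" alignment constraint into a free-form log2 alignment, and make get_alignment_bits() return an unsigned bit count (0 meaning "no requirement") instead of a signed sentinel. Below is a minimal stand-alone sketch of the new behaviour; the MO_* constants mirror tcg/tcg.h in the usual !ALIGNED_ONLY configuration, and the MO_ALIGN_16 value follows the encoding this series uses — all are re-declared here only so the example compiles on its own:

```c
#include <assert.h>
#include <stdio.h>

/* Local stand-ins for the tcg/tcg.h constants (assumes !ALIGNED_ONLY,
 * where MO_UNALN is 0 and MO_ALIGN occupies the whole AMASK field). */
enum {
    MO_SIZE     = 3,               /* mask: log2 of the access size (MO_8..MO_64) */
    MO_ASHIFT   = 4,
    MO_AMASK    = 7 << MO_ASHIFT,
    MO_UNALN    = 0,               /* unaligned access permitted */
    MO_ALIGN    = MO_AMASK,        /* natural alignment required */
    MO_ALIGN_16 = 4 << MO_ASHIFT,  /* explicit 16-byte alignment (log2 = 4) */
};

/* Mirrors the rewritten get_alignment_bits(): returns log2 of the
 * required alignment, or 0 if no alignment is required.  */
static unsigned get_alignment_bits(unsigned memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        a = 0;                     /* no alignment required */
    } else if (a == MO_ALIGN) {
        a = memop & MO_SIZE;       /* natural: alignment == access size */
    } else {
        a = a >> MO_ASHIFT;        /* explicit log2 stored in the AMASK field */
    }
    return a;
}

int main(void)
{
    /* 64-bit access (size field 3), naturally aligned: 3 bits = 8 bytes. */
    assert(get_alignment_bits(3 | MO_ALIGN) == 3);
    /* 64-bit access with a stricter, explicit 16-byte requirement. */
    assert(get_alignment_bits(3 | MO_ALIGN_16) == 4);
    /* Unaligned access now yields 0 instead of a negative sentinel. */
    assert(get_alignment_bits(3 | MO_UNALN) == 0);
    printf("alignment encoding behaves as described\n");
    return 0;
}
```

Note that the rewrite also drops the old `tcg_debug_assert(r >= s)`: an explicit alignment may now be smaller than the access size, which is exactly what the SPARCv9 case in the comment needs.

The final hunk introduces the TCGBar flags consumed by the new memory-barrier opcode: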
```diff
@@ -478,6 +465,23 @@ static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
 #define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1)
 #define TCG_CALL_DUMMY_ARG  ((TCGArg)(-1))
 
+typedef enum {
+    /* Used to indicate the type of accesses on which ordering
+       is to be ensured.  Modeled after SPARC barriers.  */
+    TCG_MO_LD_LD  = 0x01,
+    TCG_MO_ST_LD  = 0x02,
+    TCG_MO_LD_ST  = 0x04,
+    TCG_MO_ST_ST  = 0x08,
+    TCG_MO_ALL    = 0x0F,  /* OR of the above */
+
+    /* Used to indicate the kind of ordering which is to be ensured by the
+       instruction.  These types are derived from x86/aarch64 instructions.
+       It should be noted that these are different from C11 semantics.  */
+    TCG_BAR_LDAQ  = 0x10,  /* Following ops will not come forward */
+    TCG_BAR_STRL  = 0x20,  /* Previous ops will not be delayed */
+    TCG_BAR_SC    = 0x30,  /* No ops cross barrier; OR of the above */
+} TCGBar;
+
 /* Conditions.  Note that these are laid out for easy manipulation by
    the functions below:
      bit 0 is used for inverting;
```
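These flags are the operand of the tcg_gen_mb() generator added by the "Introduce TCGOpcode for memory barrier" patch in this pull: the low nibble says which pairs of accesses must stay ordered, the high nibble how strong the ordering is, so a backend can pick the cheapest host barrier that satisfies both. Below is a stand-alone sketch of how a frontend combines them; the TCGBar values are copied from the hunk above, the tcg_gen_mb() stub only records its argument (in QEMU it emits the new barrier op into the opcode stream), and the x86 mappings follow the "target-i386: Generate fences for x86" patch:

```c
#include <stdio.h>

/* Copied from the TCGBar enum added above.  */
typedef enum {
    TCG_MO_LD_LD = 0x01,
    TCG_MO_ST_LD = 0x02,
    TCG_MO_LD_ST = 0x04,
    TCG_MO_ST_ST = 0x08,
    TCG_MO_ALL   = 0x0F,
    TCG_BAR_LDAQ = 0x10,
    TCG_BAR_STRL = 0x20,
    TCG_BAR_SC   = 0x30,
} TCGBar;

/* Illustrative stub; the real tcg_gen_mb() emits a memory-barrier op
 * into the TCG opcode stream for the backend to lower.  */
static void tcg_gen_mb(TCGBar type)
{
    printf("mb: ordered accesses 0x%x, ordering kind 0x%x\n",
           type & TCG_MO_ALL, type & TCG_BAR_SC);
}

int main(void)
{
    tcg_gen_mb(TCG_MO_ALL   | TCG_BAR_SC);  /* x86 MFENCE: full barrier */
    tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);  /* x86 LFENCE: load/load only */
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);  /* x86 SFENCE: store/store only */
    return 0;
}
```

A strongly ordered host can lower most of these to nothing or a single full fence, while weakly ordered hosts map the mask onto their finer-grained barrier instructions; that lowering is the job of the per-backend "tcg/...: Add support for fence" patches listed above.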