author     Peter Maydell <peter.maydell@linaro.org>  2018-06-29 17:21:21 +0100
committer  Richard Henderson <richard.henderson@linaro.org>  2018-07-02 08:02:20 -0700
commit     334692bce7f0653a93b8d84ecde8c847b08dec38 (patch)
tree       0bb2be80e0d9e50e9afc5b4545739497881898a3 /accel/tcg/cputlb.c
parent     a688e73ba8c2e3b6f3e5e683c4a9c37d2ddc8984 (diff)
tcg: Define and use new tlb_hit() and tlb_hit_page() functions
The condition to check whether an address has hit against a particular
TLB entry is not completely trivial. We do this in various places, and
in fact in one place (get_page_addr_code()) we have got the condition
wrong. Abstract it out into new tlb_hit() and tlb_hit_page() inline
functions (one for a known-page-aligned address and one for an
arbitrary address), and use them in all the places where we had the
condition correct.

This is a no-behaviour-change patch; we leave fixing the buggy
code in get_page_addr_code() to a subsequent patch.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20180629162122.19376-2-peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
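
The helper definitions themselves are added outside this file (the diffstat
below is limited to accel/tcg/cputlb.c), but their shape follows from the
condition they replace. A minimal sketch, assuming target_ulong operands and
the existing TARGET_PAGE_MASK / TLB_INVALID_MASK definitions:

    /* Hit test for an address already masked to a page boundary. */
    static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong page)
    {
        return page == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
    }

    /* Hit test for an arbitrary address within the page. */
    static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
    {
        return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
    }

Because TLB_INVALID_MASK sits in the low bits, an invalid entry can never
compare equal to a page-aligned address, so invalid entries fail the hit test
without a separate check.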
Diffstat (limited to 'accel/tcg/cputlb.c')
 accel/tcg/cputlb.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index eebe97dabb..adb711963b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -239,12 +239,9 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page(tlb_entry->addr_read, addr) ||
+        tlb_hit_page(tlb_entry->addr_write, addr) ||
+        tlb_hit_page(tlb_entry->addr_code, addr)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
@@ -1046,8 +1043,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -1091,8 +1087,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);