author     Stefan Hajnoczi <stefanha@redhat.com>    2022-10-05 10:17:02 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>    2022-10-05 10:17:02 -0400
commit     4a9c04672a875ed00ea807ea4d552c01f6440bc7 (patch)
tree       5ae0c7c85d114c608cf6018b8dc50f4feaaa83b8 /include/exec/exec-all.h
parent     fafd35a6dab8e70a7c395aaa8e1273267cf9f3c8 (diff)
parent     ab419fd8a035a65942de4e63effcd55ccbf1a9fe (diff)
Merge tag 'pull-tcg-20221004' of https://gitlab.com/rth7680/qemu into staging
Cache CPUClass for use in hot code paths.
Add CPUTLBEntryFull, probe_access_full, tlb_set_page_full.
Add generic support for TARGET_TB_PCREL.
tcg/ppc: Optimize 26-bit jumps using STQ for POWER 2.07
target/sh4: Fix TB_FLAG_UNALIGN

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmM8jXEdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/oEggArAHK8FtydfQ4ZwnF
# SjXfpdP50OC0SZn3uBN93FZOrxz9UYG9t1oDHs39J/+b/u2nwJYch//EH2k+NtOW
# hc3iIgS9bWgs/UWZESkViKQccw7gpYlc21Br38WWwFNEFyecX0p+e9pJgld5rSv1
# mRGvCs5J2svH2tcXl/Sb/JWgcumOJoG7qy2aLyJGolR6UOfwcfFMzQXzq8qjpRKH
# Jh84qusE/rLbzBsdN6snJY4+dyvUo03lT5IJ4d+FQg2tUip+Qqt7pnMbsqq6qF6H
# R6fWU1JTbsh7GxXJwQJ83jLBnUsi8cy6FKrZ3jyiBq76+DIpR0PqoEe+PN/weInU
# TN0z4g==
# =RfXJ
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 04 Oct 2022 15:45:53 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20221004' of https://gitlab.com/rth7680/qemu:
  target/sh4: Fix TB_FLAG_UNALIGN
  tcg/ppc: Optimize 26-bit jumps
  accel/tcg: Introduce TARGET_TB_PCREL
  accel/tcg: Introduce tb_pc and log_pc
  hw/core: Add CPUClass.get_pc
  include/hw/core: Create struct CPUJumpCache
  accel/tcg: Inline tb_flush_jmp_cache
  accel/tcg: Do not align tb->page_addr[0]
  accel/tcg: Use DisasContextBase in plugin_gen_tb_start
  accel/tcg: Use bool for page_find_alloc
  accel/tcg: Remove PageDesc code_bitmap
  include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
  accel/tcg: Introduce tlb_set_page_full
  accel/tcg: Introduce probe_access_full
  accel/tcg: Suppress auto-invalidate in probe_access_internal
  accel/tcg: Drop addr member from SavedIOTLB
  accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
  cputlb: used cached CPUClass in our hot-paths
  hw/core/cpu-sysemu: used cached class in cpu_asidx_from_attrs
  cpu: cache CPUClass in CPUState for hot code paths

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'include/exec/exec-all.h')
 -rw-r--r--  include/exec/exec-all.h   75
 1 file changed, 73 insertions, 2 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bcad607c4e..e5f8b224a5 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -258,6 +258,28 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                unsigned bits);
 
 /**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @vaddr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+                       CPUTLBEntryFull *full);
+
+/**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
  * @vaddr: virtual address of page to add entry for
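As a concrete illustration of the hunk above, here is a minimal, hypothetical sketch of a target tlb_fill path handing a completed page-table walk to tlb_set_page_full(). Only the prototype comes from this patch; the example_* names, the parameter list, and the CPUTLBEntryFull field names (phys_addr, attrs, prot, lg_page_size) are my reading of the series and would need checking against the tree. The file would be compiled as target-specific code.

/* Hypothetical sketch, not part of this patch. */
#include "qemu/osdep.h"
#include "exec/memattrs.h"
#include "exec/exec-all.h"

static void example_finish_tlb_fill(CPUState *cs, target_ulong vaddr,
                                    hwaddr paddr, int prot,
                                    MemTxAttrs attrs, int mmu_idx)
{
    CPUTLBEntryFull full = {
        /* Assumed field names; xlat_section is filled in by the core. */
        .phys_addr    = paddr,
        .attrs        = attrs,
        .prot         = prot,
        .lg_page_size = TARGET_PAGE_BITS,  /* one TARGET_PAGE_SIZE region */
    };

    /* The core copies @full, so a stack-allocated structure is fine. */
    tlb_set_page_full(cs, mmu_idx, vaddr, &full);
}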
@@ -434,6 +456,21 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t retaddr);
 
+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ */
+int probe_access_full(CPUArchState *env, target_ulong addr,
+                      MMUAccessType access_type, int mmu_idx,
+                      bool nonfault, void **phost,
+                      CPUTLBEntryFull **pfull, uintptr_t retaddr);
+#endif
+
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
 
 /* Estimated block size for TB allocation.  */
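To show the intended calling pattern, and the "must be consumed or copied immediately" rule from the comment above, a hedged sketch follows. example_read_attrs() is a made-up caller; only the probe_access_full() prototype and the transient-pointer rule come from the patch.

/* Hypothetical sketch, not part of this patch. */
static MemTxAttrs example_read_attrs(CPUArchState *env, target_ulong addr,
                                     int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    void *host;

    /* nonfault == false: on failure this raises the guest exception. */
    probe_access_full(env, addr, MMU_DATA_LOAD, mmu_idx,
                      false, &host, &full, retaddr);

    /*
     * @full points into the TLB itself, so copy out what we need
     * before anything else can modify TLB @mmu_idx.
     */
    return full->attrs;
}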
@@ -459,8 +496,32 @@ struct tb_tc {
 };
 
 struct TranslationBlock {
-    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
-    target_ulong cs_base; /* CS base for this block */
+#if !TARGET_TB_PCREL
+    /*
+     * Guest PC corresponding to this block.  This must be the true
+     * virtual address.  Therefore e.g. x86 stores EIP + CS_BASE, and
+     * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
+     * privilege, must store those bits elsewhere.
+     *
+     * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
+     * written such that the TB is associated only with the physical
+     * page and may be run in any virtual address context.  In this case,
+     * PC must always be taken from ENV in a target-specific manner.
+     * Unwind information is taken as offsets from the page, to be
+     * deposited into the "current" PC.
+     */
+    target_ulong pc;
+#endif
+
+    /*
+     * Target-specific data associated with the TranslationBlock, e.g.:
+     * x86: the original user, the Code Segment virtual base,
+     * arm: an extension of tb->flags,
+     * s390x: instruction data for EXECUTE,
+     * sparc: the next pc of the instruction queue (for delay slots).
+     */
+    target_ulong cs_base;
+
     uint32_t flags; /* flags defining in which context the code was generated */
     uint32_t cflags;    /* compile flags */
 
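Since the pc field disappears under TARGET_TB_PCREL, the "current" pc must always come from the CPU state, for example through the CPUClass.get_pc hook added earlier in this pull. The sketch below is purely illustrative: ExampleCPU, EXAMPLE_CPU() and env.pc are invented names, and the class_init wiring is only an assumption about how a target would install the hook.

/* Hypothetical sketch, not part of this patch. */
static vaddr example_cpu_get_pc(CPUState *cs)
{
    ExampleCPU *cpu = EXAMPLE_CPU(cs);

    /* Return the true virtual PC, with no ISA/privilege bits folded in. */
    return cpu->env.pc;
}

static void example_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->get_pc = example_cpu_get_pc;  /* hook introduced by this series */
}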
@@ -533,6 +594,16 @@ struct TranslationBlock {
     uintptr_t jmp_dest[2];
 };
 
+/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
+static inline target_ulong tb_pc(const TranslationBlock *tb)
+{
+#if TARGET_TB_PCREL
+    qemu_build_not_reached();
+#else
+    return tb->pc;
+#endif
+}
+
 /* Hide the qatomic_read to make code a little easier on the eyes */
 static inline uint32_t tb_cflags(const TranslationBlock *tb)
 {
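For completeness, a hedged usage sketch of the two accessors above; example_log_tb() is not in the tree and the log format is arbitrary. It only shows why callers go through tb_pc()/tb_cflags() instead of touching the struct fields: the same code still compiles when TARGET_TB_PCREL removes tb->pc.

/* Hypothetical sketch, not part of this patch. */
#include "qemu/log.h"

static void example_log_tb(const TranslationBlock *tb)
{
#if !TARGET_TB_PCREL
    qemu_log("tb pc=" TARGET_FMT_lx " cflags=0x%08x\n",
             tb_pc(tb), tb_cflags(tb));
#else
    /* tb_pc() must not be used here; take the pc from env instead. */
    qemu_log("pcrel tb cflags=0x%08x\n", tb_cflags(tb));
#endif
}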