| author | Stefan Hajnoczi <stefanha@redhat.com> | 2025-03-09 11:45:00 +0800 |
|---|---|---|
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2025-03-09 11:45:00 +0800 |
| commit | d9a4282c4b690e45d25c2b933f318bb41eeb271d | |
| tree | 4992a3dc412f6d1624532e10f79b31f42848727d /include/exec/exec-all.h | |
| parent | ffbc5e661fc3b73debaec2354bf46273186bf882 | |
| parent | 9e2080766f037857fc366012aaefd6fead0a75f9 | |
Merge tag 'pull-tcg-20250308' of https://gitlab.com/rth7680/qemu into staging
include/qemu: Tidy atomic128 headers.
include/exec: Split out cpu-interrupt.h
include/exec: Split many tlb_* declarations to cputlb.h
include/accel/tcg: Split out getpc.h
accel/tcg: system: Compile some files once
linux-user/main: Allow setting tb-size

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmfMyz8dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9D/Af/Vh5PMtYjL+Mw2NQn
# Vmqbv+joiqswAxI8PmZZzEBJ06j4pCLXn+r/2nr+sEwLmrI4BI40Vxx5c5puftoZ
# GDGGclskF/pId5TE96TCEr8AoJgeNSSv4WxbINFTZRsRP4voZFHpU6mTz6B0Nnq5
# GS/k6c7+VcYbHIPD0RcIWwBlQv11uUAcnaygkNSsy+theUseOzTPTN/XGfTprf/6
# 1sxlmtt6QcQ88bBJJbiNwqbjWGxANcSUspRo0sstpVr8ApkXNl7WSkWYRBhBa5oc
# iu0tixdCIoqqcCJy9/YVyIkmmwWeRUkbQqBeKf0o5xPnhmO3kfeezvERSDvDViAH
# K9BVBw==
# =7vra
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 09 Mar 2025 06:57:03 HKT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20250308' of https://gitlab.com/rth7680/qemu: (23 commits)
  accel/tcg: Build tcg-runtime-gvec.c once
  accel/tcg: Build tcg-runtime.c once
  qemu/atomic128: Include missing 'qemu/atomic.h' header
  qemu/atomic: Rename atomic128-ldst.h headers using .h.inc suffix
  qemu/atomic: Rename atomic128-cas.h headers using .h.inc suffix
  accel/tcg: Split out getpc.h
  accel/tcg: Restrict GETPC_ADJ to 'tb-internal.h'
  accel/tcg: Build tcg-accel-ops-mttcg.c once
  accel/tcg: Build tcg-accel-ops-rr.c once
  accel/tcg: Build tcg-accel-ops-icount.c once
  accel/tcg: Build tcg-accel-ops.c once
  system: Build watchpoint.c once
  exec: Declare tlb_flush*() in 'exec/cputlb.h'
  exec: Declare tlb_hit*() in 'exec/cputlb.h'
  exec: Declare tlb_set_page() in 'exec/cputlb.h'
  exec: Declare tlb_set_page_with_attrs() in 'exec/cputlb.h'
  exec: Declare tlb_set_page_full() in 'exec/cputlb.h'
  exec: Declare tlb_reset_dirty*() in 'exec/cputlb.h'
  accel/tcg: Compile watchpoint.c once
  include/exec: Split out exec/cpu-interrupt.h
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
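For code that consumed these declarations, the practical effect of the header splits above is small but mechanical: callers that previously picked up the tlb_* prototypes via "exec/exec-all.h" must now include "exec/cputlb.h" directly. A minimal sketch of a post-split consumer, assuming a hypothetical target function (my_target_invalidate_mappings is illustrative, not from this series; tlb_flush() is the real API being relocated):

```c
/*
 * Illustrative only: after this series, the tlb_* declarations live in
 * "exec/cputlb.h" rather than "exec/exec-all.h".
 */
#include "qemu/osdep.h"
#include "exec/cputlb.h"    /* tlb_flush(), tlb_flush_page(), ... */

/* Drop every cached translation for @cs, forcing fresh TLB fills. */
static void my_target_invalidate_mappings(CPUState *cs)
{
    tlb_flush(cs);
}
```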
Diffstat (limited to 'include/exec/exec-all.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/exec/exec-all.h | 262 |
1 file changed, 1 insertion, 261 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 8eb0df48f9..dd5c40f223 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -27,247 +27,8 @@
 #include "exec/mmu-access-type.h"
 #include "exec/translation-block.h"
 
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-/* cputlb.c */
-/**
- * tlb_flush_page:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of the specified CPU, for all
- * MMU indexes.
- */
-void tlb_flush_page(CPUState *cpu, vaddr addr);
-/**
- * tlb_flush_page_all_cpus_synced:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of all CPUs, for all
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
-/**
- * tlb_flush:
- * @cpu: CPU whose TLB should be flushed
- *
- * Flush the entire TLB for the specified CPU. Most CPU architectures
- * allow the implementation to drop entries from the TLB at any time
- * so this is generally safe. If more selective flushing is required
- * use one of the other functions for efficiency.
- */
-void tlb_flush(CPUState *cpu);
-/**
- * tlb_flush_all_cpus_synced:
- * @cpu: src CPU of the flush
- *
- * Flush the entire TLB for all CPUs, for all MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_all_cpus_synced(CPUState *src_cpu);
-/**
- * tlb_flush_page_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @wait: If true ensure synchronisation by exiting the cpu_loop
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
-
-/**
- * tlb_flush_page_bits_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
- */
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
-    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
-
-/**
- * tlb_flush_range_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of the start of the range to be flushed
- * @len: length of range to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
- * comparing only the low @bits worth of each virtual page.
- */
-void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
-                               unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                               vaddr addr,
-                                               vaddr len,
-                                               uint16_t idxmap,
-                                               unsigned bits);
-
-/**
- * tlb_set_page_full:
- * @cpu: CPU context
- * @mmu_idx: mmu index of the tlb to modify
- * @addr: virtual address of the entry to add
- * @full: the details of the tlb entry
- *
- * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
- * @full must be filled, except for xlat_section, and constitute
- * the complete description of the translated page.
- *
- * This is generally called by the target tlb_fill function after
- * having performed a successful page table walk to find the physical
- * address and attributes for the translation.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
-                       CPUTLBEntryFull *full);
-
-/**
- * tlb_set_page_with_attrs:
- * @cpu: CPU to add this TLB entry for
- * @addr: virtual address of page to add entry for
- * @paddr: physical address of the page
- * @attrs: memory transaction attributes
- * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
- * @mmu_idx: MMU index to insert TLB entry for
- * @size: size of the page in bytes
- *
- * Add an entry to this CPU's TLB (a mapping from virtual address
- * @addr to physical address @paddr) with the specified memory
- * transaction attributes. This is generally called by the target CPU
- * specific code after it has been called through the tlb_fill()
- * entry point and performed a successful page table walk to find
- * the physical address and attributes for the virtual address
- * which provoked the TLB miss.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
-                             hwaddr paddr, MemTxAttrs attrs,
-                             int prot, int mmu_idx, vaddr size);
-/* tlb_set_page:
- *
- * This function is equivalent to calling tlb_set_page_with_attrs()
- * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
- * as a convenience for CPUs which don't use memory transaction attributes.
- */
-void tlb_set_page(CPUState *cpu, vaddr addr,
-                  hwaddr paddr, int prot,
-                  int mmu_idx, vaddr size);
-#else
-static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
-{
-}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
-{
-}
-static inline void tlb_flush(CPUState *cpu)
-{
-}
-static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
-{
-}
-static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
-{
-}
-
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                            vaddr addr,
-                                                            uint16_t idxmap)
-{
-}
-static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
-                                                 vaddr addr,
-                                                 uint16_t idxmap,
-                                                 unsigned bits)
-{
-}
-static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
-                                             unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                             vaddr addr,
-                                                             vaddr len,
-                                                             uint16_t idxmap,
-                                                             unsigned bits)
-{
-}
-#endif
-
 #if defined(CONFIG_TCG)
+#include "accel/tcg/getpc.h"
 
 /**
  * probe_access:
@@ -417,24 +178,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
-/* GETPC is the true target of the return instruction that we'll execute. */
-#if defined(CONFIG_TCG_INTERPRETER)
-extern __thread uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-#else
-# define GETPC() \
-    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
-#endif
-
-/* The true return address will often point to a host insn that is part of
-   the next translated guest insn. Adjust the address backward to point to
-   the middle of the call insn. Subtracting one would do the job except for
-   several compressed mode architectures (arm, mips) which set the low bit
-   to indicate the compressed mode; subtracting two works around that. It
-   is also the case that there are no host isas that contain a call insn
-   smaller than 4 bytes, so we don't worry about special-casing this. */
-#define GETPC_ADJ 2
-
 #if !defined(CONFIG_USER_ONLY)
 
 /**
@@ -486,9 +229,6 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
 
 #if !defined(CONFIG_USER_ONLY)
 
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
-
 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                   hwaddr *xlat, hwaddr *plen,
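The removed comment for tlb_set_page() states that it is equivalent to calling tlb_set_page_with_attrs() with MEMTXATTRS_UNSPECIFIED. A sketch of that documented equivalence (the real definition lives in accel/tcg/cputlb.c; this mirrors the contract rather than quoting that file):

```c
#include "qemu/osdep.h"
#include "exec/cputlb.h"     /* new home of the tlb_set_page*() family */
#include "exec/memattrs.h"   /* MEMTXATTRS_UNSPECIFIED */

/* Convenience wrapper, per the contract documented in the removed comment. */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size)
{
    tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
```

Likewise, since @idxmap in the *_by_mmuidx variants is documented as a bitmap, flushing one page from MMU indexes 0 and 1 only would be `tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 1))`.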
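The GETPC()/GETPC_ADJ machinery removed above (now split into accel/tcg/getpc.h, with GETPC_ADJ restricted to 'tb-internal.h') exists so a helper called from generated code can capture the host return address for unwinding back to the faulting guest instruction. A hedged sketch of typical use, assuming a hypothetical helper name and the header paths as of this tree; cpu_ldl_data_ra() is the existing retaddr-threading load variant:

```c
#include "qemu/osdep.h"
#include "accel/tcg/getpc.h"   /* GETPC(), after this series' split */
#include "exec/cpu_ldst.h"     /* cpu_ldl_data_ra() */

/*
 * Hypothetical TCG helper: GETPC() must be evaluated in the outermost
 * helper, where the return address still points into the translated
 * code that made the call; the _ra load variant threads it through so
 * a faulting access can unwind to the right guest instruction.
 */
uint32_t helper_demo_ldl(CPUArchState *env, vaddr addr)
{
    uintptr_t ra = GETPC();
    return cpu_ldl_data_ra(env, addr, ra);
}
```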