Diffstat (limited to 'include')
-rw-r--r--  include/accel/tcg/getpc.h     24
-rw-r--r--  include/exec/cpu-all.h        97
-rw-r--r--  include/exec/cpu-defs.h       26
-rw-r--r--  include/exec/cpu-interrupt.h  70
-rw-r--r--  include/exec/cputlb.h         263
-rw-r--r--  include/exec/exec-all.h       262
-rw-r--r--  include/exec/poison.h         17
-rw-r--r--  include/exec/ram_addr.h       1
-rw-r--r--  include/exec/target_page.h    54
-rw-r--r--  include/qemu/atomic128.h      5
10 files changed, 408 insertions, 411 deletions
diff --git a/include/accel/tcg/getpc.h b/include/accel/tcg/getpc.h
new file mode 100644
index 0000000000..8a97ce34e7
--- /dev/null
+++ b/include/accel/tcg/getpc.h
@@ -0,0 +1,24 @@
+/*
+ * Get host pc for helper unwinding.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_GETPC_H
+#define ACCEL_TCG_GETPC_H
+
+#ifndef CONFIG_TCG
+#error Can only include this header with TCG
+#endif
+
+/* GETPC is the true target of the return instruction that we'll execute. */
+#ifdef CONFIG_TCG_INTERPRETER
+extern __thread uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+#else
+# define GETPC() \
+    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+#endif /* ACCEL_TCG_GETPC_H */
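For context, a minimal sketch of how a TCG helper typically consumes GETPC(): the macro must be expanded in the outermost helper itself, so the captured host return address points into the translated block that made the call. The helper name below is hypothetical; cpu_loop_exit_restore() is QEMU's existing unwind entry point.

    #include "qemu/osdep.h"
    #include "accel/tcg/getpc.h"

    /* Hypothetical helper: raise an alignment fault and unwind guest
     * state.  GETPC() must be expanded here, in the helper called
     * directly from generated code, not in a subroutine of it. */
    void helper_demo_check_align(CPUArchState *env, uint64_t addr)
    {
        if (addr & 3) {
            cpu_loop_exit_restore(env_cpu(env), GETPC());
        }
    }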
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 09f537d06f..8cd6c00cf8
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -21,6 +21,7 @@
 #include "exec/page-protection.h"
 #include "exec/cpu-common.h"
+#include "exec/cpu-interrupt.h"
 #include "exec/memory.h"
 #include "exec/tswap.h"
 #include "hw/core/cpu.h"
@@ -105,81 +106,10 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
 
 /* page related stuff */
 #include "exec/cpu-defs.h"
-#ifdef TARGET_PAGE_BITS_VARY
-# include "exec/page-vary.h"
-extern const TargetPageBits target_page;
-# ifdef CONFIG_DEBUG_TCG
-# define TARGET_PAGE_BITS ({ assert(target_page.decided); \
-                             target_page.bits; })
-# define TARGET_PAGE_MASK ({ assert(target_page.decided); \
-                             (target_long)target_page.mask; })
-# else
-# define TARGET_PAGE_BITS target_page.bits
-# define TARGET_PAGE_MASK ((target_long)target_page.mask)
-# endif
-# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
-#else
-# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
-# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-# define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
-#endif
-
-#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
+#include "exec/target_page.h"
 
 CPUArchState *cpu_copy(CPUArchState *env);
 
-/* Flags for use in ENV->INTERRUPT_PENDING.
-
-   The numbers assigned here are non-sequential in order to preserve
-   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
-   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
-   the vmstate dump. */
-
-/* External hardware interrupt pending.  This is typically used for
-   interrupts from devices. */
-#define CPU_INTERRUPT_HARD        0x0002
-
-/* Exit the current TB.  This is typically used when some system-level device
-   makes some change to the memory mapping.  E.g. the a20 line change. */
-#define CPU_INTERRUPT_EXITTB      0x0004
-
-/* Halt the CPU. */
-#define CPU_INTERRUPT_HALT        0x0020
-
-/* Debug event pending. */
-#define CPU_INTERRUPT_DEBUG       0x0080
-
-/* Reset signal. */
-#define CPU_INTERRUPT_RESET       0x0400
-
-/* Several target-specific external hardware interrupts.  Each target/cpu.h
-   should define proper names based on these defines. */
-#define CPU_INTERRUPT_TGT_EXT_0   0x0008
-#define CPU_INTERRUPT_TGT_EXT_1   0x0010
-#define CPU_INTERRUPT_TGT_EXT_2   0x0040
-#define CPU_INTERRUPT_TGT_EXT_3   0x0200
-#define CPU_INTERRUPT_TGT_EXT_4   0x1000
-
-/* Several target-specific internal interrupts.  These differ from the
-   preceding target-specific interrupts in that they are intended to
-   originate from within the cpu itself, typically in response to some
-   instruction being executed.  These, therefore, are not masked while
-   single-stepping within the debugger. */
-#define CPU_INTERRUPT_TGT_INT_0   0x0100
-#define CPU_INTERRUPT_TGT_INT_1   0x0800
-#define CPU_INTERRUPT_TGT_INT_2   0x2000
-
-/* First unused bit: 0x4000. */
-
-/* The set of all bits that should be masked when single-stepping. */
-#define CPU_INTERRUPT_SSTEP_MASK \
-    (CPU_INTERRUPT_HARD          \
-     | CPU_INTERRUPT_TGT_EXT_0   \
-     | CPU_INTERRUPT_TGT_EXT_1   \
-     | CPU_INTERRUPT_TGT_EXT_2   \
-     | CPU_INTERRUPT_TGT_EXT_3   \
-     | CPU_INTERRUPT_TGT_EXT_4)
-
 #include "cpu.h"
 
 #ifdef CONFIG_USER_ONLY
@@ -249,29 +179,6 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
 
-/**
- * tlb_hit_page: return true if page aligned @addr is a hit against the
- * TLB entry @tlb_addr
- *
- * @addr: virtual address to test (must be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
-{
-    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
-}
-
-/**
- * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
- *
- * @addr: virtual address to test (need not be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
-{
-    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
-}
-
 #endif /* !CONFIG_USER_ONLY */
 
 /* Validate correct placement of CPUArchState. */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index ae18398fa9..9f955f53fd
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -46,30 +46,4 @@
 
 #include "exec/target_long.h"
 
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-#define CPU_TLB_DYN_MIN_BITS 6
-#define CPU_TLB_DYN_DEFAULT_BITS 8
-
-# if HOST_LONG_BITS == 32
-/* Make sure we do not require a double-word shift for the TLB load */
-# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
-# else /* HOST_LONG_BITS == 64 */
-/*
- * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
- * 2**34 == 16G of address space. This is roughly what one would expect a
- * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
- * Skylake's Level-2 STLB has 16 1G entries.
- * Also, make sure we do not size the TLB past the guest's address space.
- */
-# ifdef TARGET_PAGE_BITS_VARY
-# define CPU_TLB_DYN_MAX_BITS \
-    MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# else
-# define CPU_TLB_DYN_MAX_BITS \
-    MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# endif
-# endif
-
-#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-
 #endif
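The coverage arithmetic in the removed comment checks out; a standalone sketch, assuming TARGET_PAGE_BITS == 12 on a 64-bit host:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned tlb_bits = 22;    /* the CPU_TLB_DYN_MAX_BITS cap */
        const unsigned page_bits = 12;   /* assumed TARGET_PAGE_BITS */
        uint64_t bytes = UINT64_C(1) << (tlb_bits + page_bits);
        printf("max TLB coverage: %llu GiB\n",
               (unsigned long long)(bytes >> 30));   /* prints 16 */
        return 0;
    }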
diff --git a/include/exec/cpu-interrupt.h b/include/exec/cpu-interrupt.h
new file mode 100644
index 0000000000..40715193ca
--- /dev/null
+++ b/include/exec/cpu-interrupt.h
@@ -0,0 +1,70 @@
+/*
+ * Flags for use with cpu_interrupt()
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef CPU_INTERRUPT_H
+#define CPU_INTERRUPT_H
+
+/*
+ * The numbers assigned here are non-sequential in order to preserve binary
+ * compatibility with the vmstate dump.  Bit 0 (0x0001) was previously used
+ * for CPU_INTERRUPT_EXIT, and is cleared when loading the vmstate dump.
+ */
+
+/*
+ * External hardware interrupt pending.
+ * This is typically used for interrupts from devices.
+ */
+#define CPU_INTERRUPT_HARD        0x0002
+
+/*
+ * Exit the current TB.  This is typically used when some system-level device
+ * makes some change to the memory mapping.  E.g. the a20 line change.
+ */
+#define CPU_INTERRUPT_EXITTB      0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT        0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG       0x0080
+
+/* Reset signal. */
+#define CPU_INTERRUPT_RESET       0x0400
+
+/*
+ * Several target-specific external hardware interrupts.  Each target/cpu.h
+ * should define proper names based on these defines.
+ */
+#define CPU_INTERRUPT_TGT_EXT_0   0x0008
+#define CPU_INTERRUPT_TGT_EXT_1   0x0010
+#define CPU_INTERRUPT_TGT_EXT_2   0x0040
+#define CPU_INTERRUPT_TGT_EXT_3   0x0200
+#define CPU_INTERRUPT_TGT_EXT_4   0x1000
+
+/*
+ * Several target-specific internal interrupts.  These differ from the
+ * preceding target-specific interrupts in that they are intended to
+ * originate from within the cpu itself, typically in response to some
+ * instruction being executed.  These, therefore, are not masked while
+ * single-stepping within the debugger.
+ */
+#define CPU_INTERRUPT_TGT_INT_0   0x0100
+#define CPU_INTERRUPT_TGT_INT_1   0x0800
+#define CPU_INTERRUPT_TGT_INT_2   0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+    (CPU_INTERRUPT_HARD          \
+     | CPU_INTERRUPT_TGT_EXT_0   \
+     | CPU_INTERRUPT_TGT_EXT_1   \
+     | CPU_INTERRUPT_TGT_EXT_2   \
+     | CPU_INTERRUPT_TGT_EXT_3   \
+     | CPU_INTERRUPT_TGT_EXT_4)
+
+#endif /* CPU_INTERRUPT_H */
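As a usage sketch for the flags above: a device model latches and clears CPU_INTERRUPT_HARD through the existing cpu_interrupt()/cpu_reset_interrupt() API. The callback names below are hypothetical.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "exec/cpu-interrupt.h"

    /* Hypothetical IRQ-line callbacks for a device model. */
    static void demo_irq_raise(CPUState *cs)
    {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);        /* mark pending */
    }

    static void demo_irq_lower(CPUState *cs)
    {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);  /* clear pending */
    }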
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index ef18642a32..8125f6809c
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -21,15 +21,266 @@
 #define CPUTLB_H
 
 #include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/vaddr.h"
 
-#ifdef CONFIG_TCG
-
-#if !defined(CONFIG_USER_ONLY)
-/* cputlb.c */
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
 void tlb_protect_code(ram_addr_t ram_addr);
 void tlb_unprotect_code(ram_addr_t ram_addr);
 #endif
 
-#endif /* CONFIG_TCG */
-
+#ifndef CONFIG_USER_ONLY
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
 #endif
+
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @addr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+                       CPUTLBEntryFull *full);
+
+/**
+ * tlb_set_page_with_attrs:
+ * @cpu: CPU to add this TLB entry for
+ * @addr: virtual address of page to add entry for
+ * @paddr: physical address of the page
+ * @attrs: memory transaction attributes
+ * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @mmu_idx: MMU index to insert TLB entry for
+ * @size: size of the page in bytes
+ *
+ * Add an entry to this CPU's TLB (a mapping from virtual address
+ * @addr to physical address @paddr) with the specified memory
+ * transaction attributes. This is generally called by the target CPU
+ * specific code after it has been called through the tlb_fill()
+ * entry point and performed a successful page table walk to find
+ * the physical address and attributes for the virtual address
+ * which provoked the TLB miss.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
+                             hwaddr paddr, MemTxAttrs attrs,
+                             int prot, int mmu_idx, vaddr size);
+
+/**
+ * tlb_set_page:
+ *
+ * This function is equivalent to calling tlb_set_page_with_attrs()
+ * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
+ * as a convenience for CPUs which don't use memory transaction attributes.
+ */
+void tlb_set_page(CPUState *cpu, vaddr addr,
+                  hwaddr paddr, int prot,
+                  int mmu_idx, vaddr size);
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
+void tlb_flush_page(CPUState *cpu, vaddr addr);
+
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @cpu: src CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
+
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ *
+ * Flush the entire TLB for the specified CPU. Most CPU architectures
+ * allow the implementation to drop entries from the TLB at any time
+ * so this is generally safe. If more selective flushing is required
+ * use one of the other functions for efficiency.
+ */
+void tlb_flush(CPUState *cpu);
+
+/**
+ * tlb_flush_all_cpus_synced:
+ * @cpu: src CPU of the flush
+ *
+ * Flush the entire TLB for all CPUs, for all MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_all_cpus_synced(CPUState *src_cpu);
+
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
+                              uint16_t idxmap);
+
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+                                              uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @wait: If true ensure synchronisation by exiting the cpu_loop
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
+                                   uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+                               vaddr len, uint16_t idxmap,
+                               unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                               vaddr addr,
+                                               vaddr len,
+                                               uint16_t idxmap,
+                                               unsigned bits);
+#else
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
+{
+}
+static inline void tlb_flush(CPUState *cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+                                            vaddr addr, uint16_t idxmap)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                            vaddr addr,
+                                                            uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                       uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+                                                 vaddr addr,
+                                                 uint16_t idxmap,
+                                                 unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+                                              uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+                                             vaddr len, uint16_t idxmap,
+                                             unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                             vaddr addr,
+                                                             vaddr len,
+                                                             uint16_t idxmap,
+                                                             unsigned bits)
+{
+}
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
+#endif /* CPUTLB_H */
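To see how the two halves of this header pair up, a hedged sketch of a target's fill and invalidate paths; the walk result and mmu-index mask below are invented for illustration.

    #include "qemu/osdep.h"
    #include "exec/cputlb.h"
    #include "exec/page-protection.h"
    #include "exec/target_page.h"

    /* Hypothetical fill path: install an identity translation as if a
     * page table walk had just succeeded. */
    static void demo_tlb_fill(CPUState *cs, vaddr addr, int mmu_idx)
    {
        hwaddr paddr = addr;    /* pretend walk result */
        int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    }

    /* Hypothetical invalidate path: drop one page from mmu indexes 0-1. */
    static void demo_tlb_invalidate(CPUState *cs, vaddr addr)
    {
        tlb_flush_page_by_mmuidx(cs, addr, (1 << 0) | (1 << 1));
    }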
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 8eb0df48f9..dd5c40f223
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -27,247 +27,8 @@
 #include "exec/mmu-access-type.h"
 #include "exec/translation-block.h"
 
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-/* cputlb.c */
-/**
- * tlb_flush_page:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of the specified CPU, for all
- * MMU indexes.
- */
-void tlb_flush_page(CPUState *cpu, vaddr addr);
-/**
- * tlb_flush_page_all_cpus_synced:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of all CPUs, for all
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
-/**
- * tlb_flush:
- * @cpu: CPU whose TLB should be flushed
- *
- * Flush the entire TLB for the specified CPU. Most CPU architectures
- * allow the implementation to drop entries from the TLB at any time
- * so this is generally safe. If more selective flushing is required
- * use one of the other functions for efficiency.
- */
-void tlb_flush(CPUState *cpu);
-/**
- * tlb_flush_all_cpus_synced:
- * @cpu: src CPU of the flush
- *
- * Flush the entire TLB for all CPUs, for all MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_all_cpus_synced(CPUState *src_cpu);
-/**
- * tlb_flush_page_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @wait: If true ensure synchronisation by exiting the cpu_loop
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
-
-/**
- * tlb_flush_page_bits_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
- */
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
-    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
-
-/**
- * tlb_flush_range_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of the start of the range to be flushed
- * @len: length of range to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
- * comparing only the low @bits worth of each virtual page.
- */
-void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
-                               unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                               vaddr addr,
-                                               vaddr len,
-                                               uint16_t idxmap,
-                                               unsigned bits);
-
-/**
- * tlb_set_page_full:
- * @cpu: CPU context
- * @mmu_idx: mmu index of the tlb to modify
- * @addr: virtual address of the entry to add
- * @full: the details of the tlb entry
- *
- * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
- * @full must be filled, except for xlat_section, and constitute
- * the complete description of the translated page.
- *
- * This is generally called by the target tlb_fill function after
- * having performed a successful page table walk to find the physical
- * address and attributes for the translation.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
-                       CPUTLBEntryFull *full);
-
-/**
- * tlb_set_page_with_attrs:
- * @cpu: CPU to add this TLB entry for
- * @addr: virtual address of page to add entry for
- * @paddr: physical address of the page
- * @attrs: memory transaction attributes
- * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
- * @mmu_idx: MMU index to insert TLB entry for
- * @size: size of the page in bytes
- *
- * Add an entry to this CPU's TLB (a mapping from virtual address
- * @addr to physical address @paddr) with the specified memory
- * transaction attributes. This is generally called by the target CPU
- * specific code after it has been called through the tlb_fill()
- * entry point and performed a successful page table walk to find
- * the physical address and attributes for the virtual address
- * which provoked the TLB miss.
- *
- * At most one entry for a given virtual address is permitted. Only a
- * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
- * used by tlb_flush_page.
- */
-void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
-                             hwaddr paddr, MemTxAttrs attrs,
-                             int prot, int mmu_idx, vaddr size);
-/* tlb_set_page:
- *
- * This function is equivalent to calling tlb_set_page_with_attrs()
- * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
- * as a convenience for CPUs which don't use memory transaction attributes.
- */
-void tlb_set_page(CPUState *cpu, vaddr addr,
-                  hwaddr paddr, int prot,
-                  int mmu_idx, vaddr size);
-#else
-static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
-{
-}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
-{
-}
-static inline void tlb_flush(CPUState *cpu)
-{
-}
-static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
-{
-}
-static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
-{
-}
-
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                            vaddr addr,
-                                                            uint16_t idxmap)
-{
-}
-static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
-                                                 vaddr addr,
-                                                 uint16_t idxmap,
-                                                 unsigned bits)
-{
-}
-static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
-                                             unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                             vaddr addr,
-                                                             vaddr len,
-                                                             uint16_t idxmap,
-                                                             unsigned bits)
-{
-}
-#endif
-
 #if defined(CONFIG_TCG)
+#include "accel/tcg/getpc.h"
 
 /**
  * probe_access:
@@ -417,24 +178,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
-/* GETPC is the true target of the return instruction that we'll execute. */
-#if defined(CONFIG_TCG_INTERPRETER)
-extern __thread uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-#else
-# define GETPC() \
-    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
-#endif
-
-/* The true return address will often point to a host insn that is part of
-   the next translated guest insn. Adjust the address backward to point to
-   the middle of the call insn. Subtracting one would do the job except for
-   several compressed mode architectures (arm, mips) which set the low bit
-   to indicate the compressed mode; subtracting two works around that. It
-   is also the case that there are no host isas that contain a call insn
-   smaller than 4 bytes, so we don't worry about special-casing this. */
-#define GETPC_ADJ 2
-
 #if !defined(CONFIG_USER_ONLY)
 
 /**
@@ -486,9 +229,6 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
 
 #if !defined(CONFIG_USER_ONLY)
 
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
-
 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                   hwaddr *xlat, hwaddr *plen,
diff --git a/include/exec/poison.h b/include/exec/poison.h
index d6d4832854..8ed04b3108
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -44,25 +44,8 @@
 #pragma GCC poison TARGET_FMT_ld
 #pragma GCC poison TARGET_FMT_lu
 
-#pragma GCC poison TARGET_PAGE_SIZE
-#pragma GCC poison TARGET_PAGE_MASK
-#pragma GCC poison TARGET_PAGE_BITS
-#pragma GCC poison TARGET_PAGE_ALIGN
 #pragma GCC poison TARGET_PHYS_ADDR_SPACE_BITS
 
-#pragma GCC poison CPU_INTERRUPT_HARD
-#pragma GCC poison CPU_INTERRUPT_EXITTB
-#pragma GCC poison CPU_INTERRUPT_HALT
-#pragma GCC poison CPU_INTERRUPT_DEBUG
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
-#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
-#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
-
 #pragma GCC poison CONFIG_ALPHA_DIS
 #pragma GCC poison CONFIG_HPPA_DIS
 #pragma GCC poison CONFIG_I386_DIS
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 94bb3ccbe4..3d8df4edf1
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -23,6 +23,7 @@
 #include "cpu.h"
 #include "system/xen.h"
 #include "system/tcg.h"
+#include "exec/cputlb.h"
 #include "exec/ramlist.h"
 #include "exec/ramblock.h"
 #include "exec/exec-all.h"
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 98ffbb5c23..8e89e5cbe6
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -14,10 +14,56 @@
 #ifndef EXEC_TARGET_PAGE_H
 #define EXEC_TARGET_PAGE_H
 
-size_t qemu_target_page_size(void);
-int qemu_target_page_mask(void);
-int qemu_target_page_bits(void);
-int qemu_target_page_bits_min(void);
+/*
+ * If compiling per-target, get the real values.
+ * For generic code, reuse the mechanism for variable page size.
+ */
+#ifdef COMPILING_PER_TARGET
+#include "cpu-param.h"
+#include "exec/target_long.h"
+#define TARGET_PAGE_TYPE target_long
+#else
+#define TARGET_PAGE_BITS_VARY
+#define TARGET_PAGE_TYPE int
+#endif
+
+#ifdef TARGET_PAGE_BITS_VARY
+# include "exec/page-vary.h"
+extern const TargetPageBits target_page;
+# ifdef CONFIG_DEBUG_TCG
+# define TARGET_PAGE_BITS ({ assert(target_page.decided); \
+                             target_page.bits; })
+# define TARGET_PAGE_MASK ({ assert(target_page.decided); \
+                             (TARGET_PAGE_TYPE)target_page.mask; })
+# else
+# define TARGET_PAGE_BITS target_page.bits
+# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)target_page.mask)
+# endif
+# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
+#else
+# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)-1 << TARGET_PAGE_BITS)
+#endif
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
+
+static inline size_t qemu_target_page_size(void)
+{
+    return TARGET_PAGE_SIZE;
+}
+
+static inline int qemu_target_page_mask(void)
+{
+    return TARGET_PAGE_MASK;
+}
+
+static inline int qemu_target_page_bits(void)
+{
+    return TARGET_PAGE_BITS;
+}
+
+int qemu_target_page_bits_min(void);
 
 size_t qemu_target_pages_to_MiB(size_t pages);
+
 #endif
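With the accessors now inline, generic code can size things in target pages without compiling per-target; a small sketch with a hypothetical helper:

    #include "qemu/osdep.h"
    #include "exec/target_page.h"

    /* How many whole target pages are needed to hold @bytes? */
    static size_t demo_pages_needed(size_t bytes)
    {
        size_t psize = qemu_target_page_size();
        return (bytes + psize - 1) / psize;
    }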
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index 88af6d4ea3..31e5c48d8f
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -13,6 +13,7 @@
 #ifndef QEMU_ATOMIC128_H
 #define QEMU_ATOMIC128_H
 
+#include "qemu/atomic.h"
 #include "qemu/int128.h"
 
 /*
@@ -58,7 +59,7 @@
  * Therefore, special case each platform.
  */
 
-#include "host/atomic128-cas.h"
-#include "host/atomic128-ldst.h"
+#include "host/atomic128-cas.h.inc"
+#include "host/atomic128-ldst.h.inc"
 
 #endif /* QEMU_ATOMIC128_H */