Diffstat (limited to 'include')
-rw-r--r--   include/exec/cpu-all.h           23
-rw-r--r--   include/exec/cpu-common.h         3
-rw-r--r--   include/exec/exec-all.h           6
-rw-r--r--   include/exec/memory-internal.h   65
-rw-r--r--   include/exec/memory.h            14
-rw-r--r--   include/exec/ram_addr.h           6
-rw-r--r--   include/hw/core/cpu.h             2
-rw-r--r--   include/qemu/compiler.h          26
8 files changed, 56 insertions, 89 deletions
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index d2d443c4f9..ad9ab85eb3 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -317,26 +317,35 @@ CPUArchState *cpu_copy(CPUArchState *env);
 
 #if !defined(CONFIG_USER_ONLY)
 
-/* Flags stored in the low bits of the TLB virtual address. These are
- * defined so that fast path ram access is all zeros.
+/*
+ * Flags stored in the low bits of the TLB virtual address.
+ * These are defined so that fast path ram access is all zeros.
  * The flags all must be between TARGET_PAGE_BITS and
  * maximum address alignment bit.
+ *
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
+ * when TARGET_PAGE_BITS_VARY is in effect.
  */
 /* Zero if TLB entry is valid.  */
-#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS - 1))
+#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
 /* Set if TLB entry references a clean RAM page.  The iotlb entry will
    contain the page physical address.  */
-#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
+#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
 /* Set if TLB entry is an IO callback.  */
-#define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))
+#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
-#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS - 4))
+#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry requires byte swap.  */
+#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if TLB entry writes ignored.  */
+#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
-    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
+    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
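
The reason "fast path ram access is all zeros" matters: a valid plain-RAM TLB entry stores the page address with every flag bit clear, so one masked compare handles both the tag match and the flag test. A minimal sketch of that compare, assuming a hypothetical helper (real TCG backends emit it as host instructions):

    /* Hypothetical helper, for illustration only; 'a_mask' is the
     * alignment mask of the access (e.g. 3 for a 4-byte access). */
    static inline bool tlb_fast_path_hit(target_ulong tlb_addr,
                                         target_ulong addr,
                                         target_ulong a_mask)
    {
        /* Any set flag bit (invalid, notdirty, MMIO, watchpoint,
         * bswap, discard-write) or a misaligned address makes the
         * compare fail, diverting the access to the slow path. */
        return (addr & (TARGET_PAGE_MASK | a_mask)) == tlb_addr;
    }

Because the flags now sit below TARGET_PAGE_BITS_MIN, this compare stays valid even when TARGET_PAGE_BITS varies at runtime.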
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index f7dbe75fbc..81753bbb34 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -100,9 +100,6 @@ void qemu_flush_coalesced_mmio_buffer(void);
 
 void cpu_flush_icache_range(hwaddr start, hwaddr len);
 
-extern struct MemoryRegion io_mem_rom;
-extern struct MemoryRegion io_mem_notdirty;
-
 typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 81b02eb2fe..49db07ba0b 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -509,11 +509,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                   hwaddr *xlat, hwaddr *plen,
                                   MemTxAttrs attrs, int *prot);
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
-                                       MemoryRegionSection *section,
-                                       target_ulong vaddr,
-                                       hwaddr paddr, hwaddr xlat,
-                                       int prot,
-                                       target_ulong *address);
+                                       MemoryRegionSection *section);
 #endif
 
 /* vl.c */
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index ef4fb92371..9fcc2af25c 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -49,70 +49,5 @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
 
 void mtree_print_dispatch(struct AddressSpaceDispatch *d,
                           MemoryRegion *root);
-
-struct page_collection;
-
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
- * to memory_notdirty_write_complete(). Callers should treat all fields
- * as private, with the exception of @active.
- *
- * @active is a field which is not touched by either the prepare or
- * complete functions, but which the caller can use if it wishes to
- * track whether it has called prepare for this struct and so needs
- * to later call the complete function.
- */
-typedef struct {
-    CPUState *cpu;
-    struct page_collection *pages;
-    ram_addr_t ram_addr;
-    vaddr mem_vaddr;
-    unsigned size;
-    bool active;
-} NotDirtyInfo;
-
-/**
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
- * @ndi: pointer to opaque NotDirtyInfo struct
- * @cpu: CPU doing the write
- * @mem_vaddr: virtual address of write
- * @ram_addr: the ram address of the write
- * @size: size of write in bytes
- *
- * Any code which writes to the host memory corresponding to
- * guest RAM which has been marked as NOTDIRTY must wrap those
- * writes in calls to memory_notdirty_write_prepare() and
- * memory_notdirty_write_complete():
- *
- *  NotDirtyInfo ndi;
- *  memory_notdirty_write_prepare(&ndi, ....);
- *  ... perform write here ...
- *  memory_notdirty_write_complete(&ndi);
- *
- * These calls will ensure that we flush any TCG translated code for
- * the memory being written, update the dirty bits and (if possible)
- * remove the slowpath callback for writing to the memory.
- *
- * This must only be called if we are using TCG; it will assert otherwise.
- *
- * We may take locks in the prepare call, so callers must ensure that
- * they don't exit (via longjump or otherwise) without calling complete.
- *
- * This call must only be made inside an RCU critical section.
- * (Note that while we're executing a TCG TB we're always in an
- * RCU critical section, which is likely to be the case for callers
- * of these functions.)
- */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
-                                   CPUState *cpu,
-                                   vaddr mem_vaddr,
-                                   ram_addr_t ram_addr,
-                                   unsigned size);
-/**
- * memory_notdirty_write_complete: finish write to non-dirty memory
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
- * by memory_not_dirty_write_prepare().
- */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
-
 #endif
 #endif
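
For context on what is being deleted: the removed doc comment prescribed wrapping guest-RAM writes in a prepare/complete pair. A concrete sketch of that now-removed pattern, reconstructed from the deleted comment; the 4-byte stl_p() write and the surrounding variables are illustrative assumptions, not code from this patch:

    /* Sketch of the removed API's usage pattern.  'cpu', 'vaddr',
     * 'ram_addr', 'host' and 'val' are assumed to be in scope. */
    NotDirtyInfo ndi;

    rcu_read_lock();   /* prepare/complete must run inside RCU */
    memory_notdirty_write_prepare(&ndi, cpu, vaddr, ram_addr, 4);
    stl_p(host, val);  /* the actual write to guest RAM */
    memory_notdirty_write_complete(&ndi);
    rcu_read_unlock();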
diff --git a/include/exec/memory.h b/include/exec/memory.h
index a30245c25a..6e67043ee0 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -495,15 +495,27 @@ static inline FlatView *address_space_to_flatview(AddressSpace *as)
  * @nonvolatile: this section is non-volatile
  */
 struct MemoryRegionSection {
+    Int128 size;
     MemoryRegion *mr;
     FlatView *fv;
     hwaddr offset_within_region;
-    Int128 size;
     hwaddr offset_within_address_space;
     bool readonly;
     bool nonvolatile;
 };
 
+static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
+                                          MemoryRegionSection *b)
+{
+    return a->mr == b->mr &&
+           a->fv == b->fv &&
+           a->offset_within_region == b->offset_within_region &&
+           a->offset_within_address_space == b->offset_within_address_space &&
+           int128_eq(a->size, b->size) &&
+           a->readonly == b->readonly &&
+           a->nonvolatile == b->nonvolatile;
+}
+
 /**
  * memory_region_init: Initialize a memory region
  *
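
A hedged sketch of how the new equality helper can be used; the cached 'saved' section is an assumption for illustration, not something this patch adds:

    /* Illustrative only: check whether a previously saved section
     * still describes the same mapping.  Every field is compared,
     * so any remap, resize or permission change is caught.  Note
     * memory_region_find() takes a reference on the result's region. */
    MemoryRegionSection now = memory_region_find(mr, addr, size);
    bool unchanged = MemoryRegionSection_eq(saved, &now);
    memory_region_unref(now.mr);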
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index a327a80cfe..e96e621de5 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -44,12 +44,6 @@ struct RAMBlock {
     size_t page_size;
     /* dirty bitmap used during migration */
     unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
     /* bitmap of already received pages in postcopy */
     unsigned long *receivedmap;
 
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index c7cda65c66..031f587e51 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -338,7 +338,6 @@ struct qemu_work_item;
  * @next_cpu: Next CPU sharing TB cache.
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
  * @work_mutex: Lock to prevent multiple access to queued_work_*.
  * @queued_work_first: First asynchronous work pending.
@@ -413,7 +412,6 @@ struct CPUState {
      * we store some rarely used information in the CPU context.
      */
     uintptr_t mem_io_pc;
-    vaddr mem_io_vaddr;
     /*
      * This is only needed for the legacy cpu_unassigned_access() hook;
      * when all targets using it have been converted to use
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index 09fc44cca4..7b93c73340 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -170,6 +170,17 @@
 # define QEMU_NONSTRING
 #endif
 
+/*
+ * Forced inlining may be desired to encourage constant propagation
+ * of function parameters.  However, it can also make debugging harder,
+ * so disable it for a non-optimizing build.
+ */
+#if defined(__OPTIMIZE__)
+#define QEMU_ALWAYS_INLINE  __attribute__((always_inline))
+#else
+#define QEMU_ALWAYS_INLINE
+#endif
+
 /* Implement C11 _Generic via GCC builtins.  Example:
  *
  *    QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
@@ -210,4 +221,19 @@
 #define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
 #define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
 
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it.  If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+#ifdef __OPTIMIZE__
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
+    qemu_build_not_reached(void);
+#else
+#define qemu_build_not_reached()  g_assert_not_reached()
+#endif
+
 #endif /* COMPILER_H */
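
A sketch of how the two new compiler helpers work together; the load_bytes() accessor below is hypothetical, modeled on size-dispatched accessors. When 'size' is a build-time constant after forced inlining, the optimizer folds the switch and deletes the default arm; if it cannot prove the arm dead, QEMU_ERROR turns the surviving call into a compile-time error.

    /* Hypothetical accessor, for illustration: with a constant 'size'
     * the switch folds away and qemu_build_not_reached() must be
     * proven unreachable, or the build fails. */
    static inline uint64_t QEMU_ALWAYS_INLINE
    load_bytes(const uint8_t *p, unsigned size)
    {
        switch (size) {
        case 1:
            return ldub_p(p);
        case 2:
            return lduw_p(p);
        case 4:
            return ldl_p(p);
        case 8:
            return ldq_p(p);
        default:
            qemu_build_not_reached();
        }
    }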