Diffstat (limited to 'include')
-rw-r--r--  include/exec/cputlb.h           2
-rw-r--r--  include/exec/exec-all.h       132
-rw-r--r--  include/hw/s390x/s390_flic.h   11
-rw-r--r--  include/qemu/cutils.h          29
-rw-r--r--  include/qom/cpu.h              16
-rw-r--r--  include/sysemu/cpus.h           2
6 files changed, 154 insertions, 38 deletions
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index d454c005b7..3f941783c5 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -23,8 +23,6 @@
 /* cputlb.c */
 void tlb_protect_code(ram_addr_t ram_addr);
 void tlb_unprotect_code(ram_addr_t ram_addr);
-void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
-                           uintptr_t length);
 extern int tlb_flush_count;
 
 #endif

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 21ab7bf3fd..bcde1e6a14 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -93,6 +93,27 @@ void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
  */
 void tlb_flush_page(CPUState *cpu, target_ulong addr);
 /**
+ * tlb_flush_page_all_cpus:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ */
+void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all MMU indexes, like
+ * tlb_flush_page_all_cpus except that the source vCPU's work is
+ * scheduled as safe work, meaning all flushes will be complete once
+ * the source vCPU's safe work is complete. When that happens depends
+ * on when the guest's translation ends the TB.
+ */
+void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+/**
  * tlb_flush:
  * @cpu: CPU whose TLB should be flushed
  *
@@ -103,24 +124,87 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
  */
 void tlb_flush(CPUState *cpu);
 /**
+ * tlb_flush_all_cpus:
+ * @src_cpu: source CPU of the flush
+ *
+ * Flush the entire TLB of all CPUs, for all MMU indexes.
+ */
+void tlb_flush_all_cpus(CPUState *src_cpu);
+/**
+ * tlb_flush_all_cpus_synced:
+ * @src_cpu: source CPU of the flush
+ *
+ * Like tlb_flush_all_cpus except that the source vCPU's work is
+ * scheduled as safe work, meaning all flushes will be complete once
+ * the source vCPU's safe work is complete. When that happens depends
+ * on when the guest's translation ends the TB.
+ */
+void tlb_flush_all_cpus_synced(CPUState *src_cpu);
+/**
  * tlb_flush_page_by_mmuidx:
  * @cpu: CPU whose TLB should be flushed
  * @addr: virtual address of page to be flushed
- * @...: list of MMU indexes to flush, terminated by a negative value
+ * @idxmap: bitmap of MMU indexes to flush
  *
  * Flush one page from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+                              uint16_t idxmap);
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+                                       uint16_t idxmap);
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified MMU
+ * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
+ * source vCPU's work is scheduled as safe work, meaning all flushes
+ * will be complete once the source vCPU's safe work is complete.
+ * When that happens depends on when the guest's translation ends
+ * the TB.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+                                              uint16_t idxmap);
 /**
  * tlb_flush_by_mmuidx:
  * @cpu: CPU whose TLB should be flushed
- * @...: list of MMU indexes to flush, terminated by a negative value
+ * @idxmap: bitmap of MMU indexes to flush
  *
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, ...);
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
+ * source vCPU's work is scheduled as safe work, meaning all flushes
+ * will be complete once the source vCPU's safe work is complete.
+ * When that happens depends on when the guest's translation ends
+ * the TB.
+ */
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -162,17 +246,45 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
 static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
 {
 }
-
+static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
+                                                  target_ulong addr)
+{
+}
 static inline void tlb_flush(CPUState *cpu)
 {
 }
-
+static inline void tlb_flush_all_cpus(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            target_ulong addr, ...)
+                                            target_ulong addr, uint16_t idxmap)
 {
 }
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
+                                                     target_ulong addr,
+                                                     uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                            target_ulong addr,
+                                                            uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                       uint16_t idxmap)
+{
+}
 #endif
@@ -404,8 +516,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
 /* vl.c */
 extern int singlestep;
 
-/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern CPUState *tcg_current_cpu;
-extern bool exit_request;
-
 #endif
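The exec-all.h change above replaces the old varargs MMU-index flushes
(terminated by a negative value) with a single 16-bit bitmap, one bit
per MMU index. A minimal caller sketch; MMU_KERNEL_IDX and MMU_USER_IDX
are hypothetical stand-ins for a target's real index names, not part of
this patch:

/*
 * Illustrative caller of the new bitmap-based flush API.
 */
static void flush_page_kernel_and_user(CPUState *cpu, target_ulong addr)
{
    uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);

    /* Old API: tlb_flush_page_by_mmuidx(cpu, addr, MMU_KERNEL_IDX,
     *                                   MMU_USER_IDX, -1);
     * New API: any combination of indexes in one value, no sentinel. */
    tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
}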
diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h
index 9094edadf5..f9e6890c90 100644
--- a/include/hw/s390x/s390_flic.h
+++ b/include/hw/s390x/s390_flic.h
@@ -17,8 +17,13 @@
 #include "hw/s390x/adapter.h"
 #include "hw/virtio/virtio.h"
 
-#define ADAPTER_ROUTES_MAX_GSI 64
-#define VIRTIO_CCW_QUEUE_MAX ADAPTER_ROUTES_MAX_GSI
+/*
+ * Reserve enough gsis to accommodate all virtio devices.
+ * If any other user of adapter routes needs more of these,
+ * we need to bump the value; but virtio looks like the
+ * maximum right now.
+ */
+#define ADAPTER_ROUTES_MAX_GSI VIRTIO_QUEUE_MAX
 
 typedef struct AdapterRoutes {
     AdapterInfo adapter;
@@ -32,6 +37,8 @@ typedef struct AdapterRoutes {
 
 typedef struct S390FLICState {
     SysBusDevice parent_obj;
+    /* to limit AdapterRoutes.num_routes for compat */
+    uint32_t adapter_routes_max_batch;
 
 } S390FLICState;
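The new adapter_routes_max_batch field lets compat machine types keep
the old limit of 64 GSIs while newer machines use the larger
VIRTIO_QUEUE_MAX. A sketch of how a bounds check might consult it;
adapter_routes_in_range() is a hypothetical helper, and the assumption
that a zero field means "no compat limit" is not visible in this diff:

/*
 * Illustrative only, not part of this patch.
 */
static bool adapter_routes_in_range(S390FLICState *fs,
                                    AdapterRoutes *routes)
{
    uint32_t max = fs->adapter_routes_max_batch ?
                   fs->adapter_routes_max_batch : ADAPTER_ROUTES_MAX_GSI;

    return routes->num_routes <= max;
}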
diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h
index 8033929139..f0878eaafa 100644
--- a/include/qemu/cutils.h
+++ b/include/qemu/cutils.h
@@ -130,34 +130,19 @@ int qemu_strtol(const char *nptr, const char **endptr, int base,
                 long *result);
 int qemu_strtoul(const char *nptr, const char **endptr, int base,
                  unsigned long *result);
-int qemu_strtoll(const char *nptr, const char **endptr, int base,
-                 int64_t *result);
-int qemu_strtoull(const char *nptr, const char **endptr, int base,
+int qemu_strtoi64(const char *nptr, const char **endptr, int base,
+                  int64_t *result);
+int qemu_strtou64(const char *nptr, const char **endptr, int base,
                  uint64_t *result);
 
 int parse_uint(const char *s, unsigned long long *value, char **endptr,
                int base);
 int parse_uint_full(const char *s, unsigned long long *value, int base);
 
-/*
- * qemu_strtosz() suffixes used to specify the default treatment of an
- * argument passed to qemu_strtosz() without an explicit suffix.
- * These should be defined using upper case characters in the range
- * A-Z, as qemu_strtosz() will use qemu_toupper() on the given argument
- * prior to comparison.
- */
-#define QEMU_STRTOSZ_DEFSUFFIX_EB 'E'
-#define QEMU_STRTOSZ_DEFSUFFIX_PB 'P'
-#define QEMU_STRTOSZ_DEFSUFFIX_TB 'T'
-#define QEMU_STRTOSZ_DEFSUFFIX_GB 'G'
-#define QEMU_STRTOSZ_DEFSUFFIX_MB 'M'
-#define QEMU_STRTOSZ_DEFSUFFIX_KB 'K'
-#define QEMU_STRTOSZ_DEFSUFFIX_B 'B'
-int64_t qemu_strtosz(const char *nptr, char **end);
-int64_t qemu_strtosz_suffix(const char *nptr, char **end,
-                            const char default_suffix);
-int64_t qemu_strtosz_suffix_unit(const char *nptr, char **end,
-                            const char default_suffix, int64_t unit);
+int qemu_strtosz(const char *nptr, char **end, uint64_t *result);
+int qemu_strtosz_MiB(const char *nptr, char **end, uint64_t *result);
+int qemu_strtosz_metric(const char *nptr, char **end, uint64_t *result);
 
 #define K_BYTE     (1ULL << 10)
 #define M_BYTE     (1ULL << 20)
 #define G_BYTE     (1ULL << 30)
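The size-parsing helpers change calling convention above: instead of
returning an int64_t value directly (with a negative return doubling as
an error indicator), they now return a status code and store the parsed
size through a pointer. A sketch of the new convention;
parse_size_or_die() is an illustrative wrapper, not part of this patch,
and the negative-errno return style is assumed by analogy with the
other qemu_strto*() helpers:

#include <stdio.h>
#include <stdlib.h>
#include "qemu/cutils.h"

static uint64_t parse_size_or_die(const char *str)
{
    uint64_t value;

    /* NULL for 'end' requires the whole string to parse. */
    if (qemu_strtosz(str, NULL, &value) < 0) {
        fprintf(stderr, "invalid size: '%s'\n", str);
        exit(1);
    }
    return value;    /* e.g. "1.5G" yields 1610612736 (1.5 * 2^30) */
}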
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index f69b2407ea..3e61c880da 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -329,6 +329,7 @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
     bool exit_request;
+    /* updates protected by BQL */
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
@@ -401,6 +402,12 @@ struct CPUState {
 
     bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;
+
+    /* The pending_tlb_flush flag is set and cleared atomically to
+     * avoid potential races. The aim of the flag is to avoid
+     * unnecessary flushes.
+     */
+    uint16_t pending_tlb_flush;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);
@@ -416,6 +423,15 @@ extern struct CPUTailQ cpus;
 extern __thread CPUState *current_cpu;
 
 /**
+ * qemu_tcg_mttcg_enabled:
+ * Check whether we are running MultiThread TCG or not.
+ *
+ * Returns: %true if we are in MTTCG mode, %false otherwise.
+ */
+extern bool mttcg_enabled;
+#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
+
+/**
  * cpu_paging_enabled:
  * @cpu: The CPU whose state is to be inspected.
  *

diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index 3728a1ea7e..a73b5d4bce 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -36,4 +36,6 @@ extern int smp_threads;
 
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg);
 
+void qemu_tcg_configure(QemuOpts *opts, Error **errp);
+
 #endif
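Taken together, qemu_tcg_mttcg_enabled() and the new pending_tlb_flush
field suggest a usage pattern like the one below, based only on the
semantics documented in these headers. queue_tlb_flush() is an
illustrative helper, not part of this patch; atomic_or() is the helper
from qemu/atomic.h, and the consumer is assumed to drain the field with
an atomic exchange:

/*
 * Sketch: coalesce cross-vCPU TLB flush requests in MTTCG mode.
 */
static void queue_tlb_flush(CPUState *cpu, uint16_t idxmap)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Record the request atomically so concurrent callers can
         * merge their bits; the flush work would consume them all,
         * e.g. via atomic_xchg(&cpu->pending_tlb_flush, 0). */
        atomic_or(&cpu->pending_tlb_flush, idxmap);
        tlb_flush_by_mmuidx_all_cpus_synced(cpu, idxmap);
    } else {
        /* Single-threaded TCG: flush directly, no coordination. */
        tlb_flush_by_mmuidx(cpu, idxmap);
    }
}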