Diffstat (limited to 'include')
46 files changed, 798 insertions, 241 deletions
diff --git a/include/block/block.h b/include/block/block.h index ab80195378..2ad18775af 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -441,7 +441,6 @@ int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only, int bdrv_set_read_only(BlockDriverState *bs, bool read_only, Error **errp); bool bdrv_is_sg(BlockDriverState *bs); bool bdrv_is_inserted(BlockDriverState *bs); -int bdrv_media_changed(BlockDriverState *bs); void bdrv_lock_medium(BlockDriverState *bs, bool locked); void bdrv_eject(BlockDriverState *bs, bool eject_flag); const char *bdrv_get_format_name(BlockDriverState *bs); diff --git a/include/block/block_int.h b/include/block/block_int.h index 7571c0aaaf..ba4c383393 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -87,7 +87,11 @@ struct BlockDriver { const char *format_name; int instance_size; - /* set to true if the BlockDriver is a block filter */ + /* set to true if the BlockDriver is a block filter. Block filters pass + * certain callbacks that refer to data (see block.c) to their bs->file if + * the driver doesn't implement them. Drivers that do not wish to forward + * must implement them and return -ENOTSUP. + */ bool is_filter; /* for snapshots block filter like Quorum can implement the * following recursive callback. @@ -146,12 +150,43 @@ struct BlockDriver { int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); + + /** + * @offset: position in bytes to read at + * @bytes: number of bytes to read + * @qiov: the buffers to fill with read data + * @flags: currently unused, always 0 + * + * @offset and @bytes will be a multiple of 'request_alignment', + * but the length of individual @qiov elements does not have to + * be a multiple. + * + * @bytes will always equal the total size of @qiov, and will be + * no larger than 'max_transfer'. + * + * The buffer in @qiov may point directly to guest memory. + */ int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags); + /** + * @offset: position in bytes to write at + * @bytes: number of bytes to write + * @qiov: the buffers containing data to write + * @flags: zero or more bits allowed by 'supported_write_flags' + * + * @offset and @bytes will be a multiple of 'request_alignment', + * but the length of individual @qiov elements does not have to + * be a multiple. + * + * @bytes will always equal the total size of @qiov, and will be + * no larger than 'max_transfer'. + * + * The buffer in @qiov may point directly to guest memory. + */ int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); @@ -244,7 +279,6 @@ struct BlockDriver { /* removable device specific */ bool (*bdrv_is_inserted)(BlockDriverState *bs); - int (*bdrv_media_changed)(BlockDriverState *bs); void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag); void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked); @@ -961,6 +995,24 @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c, uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared); +/* + * Default implementation for drivers to pass bdrv_co_get_block_status() to + * their file. 
+ */ +int64_t coroutine_fn bdrv_co_get_block_status_from_file(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, + int *pnum, + BlockDriverState **file); +/* + * Default implementation for drivers to pass bdrv_co_get_block_status() to + * their backing file. + */ +int64_t coroutine_fn bdrv_co_get_block_status_from_backing(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, + int *pnum, + BlockDriverState **file); const char *bdrv_get_parent_name(const BlockDriverState *bs); void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp); bool blk_dev_has_removable_media(BlockBackend *blk); diff --git a/include/block/nbd.h b/include/block/nbd.h index 040cdd2e60..707fd37575 100644 --- a/include/block/nbd.h +++ b/include/block/nbd.h @@ -155,8 +155,6 @@ struct NBDExportInfo { }; typedef struct NBDExportInfo NBDExportInfo; -ssize_t nbd_rwv(QIOChannel *ioc, struct iovec *iov, size_t niov, size_t length, - bool do_read, Error **errp); int nbd_receive_negotiate(QIOChannel *ioc, const char *name, QCryptoTLSCreds *tlscreds, const char *hostname, QIOChannel **outioc, NBDExportInfo *info, diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h index d983d34074..e2fd0513c4 100644 --- a/include/block/throttle-groups.h +++ b/include/block/throttle-groups.h @@ -28,20 +28,58 @@ #include "qemu/throttle.h" #include "block/block_int.h" -const char *throttle_group_get_name(BlockBackend *blk); +/* The ThrottleGroupMember structure indicates membership in a ThrottleGroup + * and holds related data. + */ + +typedef struct ThrottleGroupMember { + AioContext *aio_context; + /* throttled_reqs_lock protects the CoQueues for throttled requests. */ + CoMutex throttled_reqs_lock; + CoQueue throttled_reqs[2]; + + /* Nonzero if the I/O limits are currently being ignored; generally + * it is zero. Accessed with atomic operations. + */ + unsigned int io_limits_disabled; + + /* The following fields are protected by the ThrottleGroup lock. + * See the ThrottleGroup documentation for details. + * throttle_state tells us if I/O limits are configured. 
*/ + ThrottleState *throttle_state; + ThrottleTimers throttle_timers; + unsigned pending_reqs[2]; + QLIST_ENTRY(ThrottleGroupMember) round_robin; + +} ThrottleGroupMember; + +#define TYPE_THROTTLE_GROUP "throttle-group" +#define THROTTLE_GROUP(obj) OBJECT_CHECK(ThrottleGroup, (obj), TYPE_THROTTLE_GROUP) + +const char *throttle_group_get_name(ThrottleGroupMember *tgm); ThrottleState *throttle_group_incref(const char *name); void throttle_group_unref(ThrottleState *ts); -void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg); -void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg); +void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg); +void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg); -void throttle_group_register_blk(BlockBackend *blk, const char *groupname); -void throttle_group_unregister_blk(BlockBackend *blk); -void throttle_group_restart_blk(BlockBackend *blk); +void throttle_group_register_tgm(ThrottleGroupMember *tgm, + const char *groupname, + AioContext *ctx); +void throttle_group_unregister_tgm(ThrottleGroupMember *tgm); +void throttle_group_restart_tgm(ThrottleGroupMember *tgm); -void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk, +void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm, unsigned int bytes, bool is_write); +void throttle_group_attach_aio_context(ThrottleGroupMember *tgm, + AioContext *new_context); +void throttle_group_detach_aio_context(ThrottleGroupMember *tgm); +/* + * throttle_group_exists() must be called under the global + * mutex. + */ +bool throttle_group_exists(const char *name); #endif diff --git a/include/elf.h b/include/elf.h index cd51434877..e8a515ce3d 100644 --- a/include/elf.h +++ b/include/elf.h @@ -942,8 +942,9 @@ typedef struct { #define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ #define R_390_TLS_TPOFF 56 /* Negate offset in static TLS block. */ +#define R_390_20 57 /* Keep this the last entry. */ -#define R_390_NUM 57 +#define R_390_NUM 58 /* x86-64 relocation types */ #define R_X86_64_NONE 0 /* No reloc */ diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 440fc31b37..673fc066d0 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -35,35 +35,6 @@ typedef abi_ulong tb_page_addr_t; typedef ram_addr_t tb_page_addr_t; #endif -/* DisasContext is_jmp field values - * - * is_jmp starts as DISAS_NEXT. The translator will keep processing - * instructions until an exit condition is reached. If we reach the - * exit condition and is_jmp is still DISAS_NEXT (because of some - * other condition) we simply "jump" to the next address. - * The remaining exit cases are: - * - * DISAS_JUMP - Only the PC was modified dynamically (e.g computed) - * DISAS_TB_JUMP - Only the PC was modified statically (e.g. branch) - * - * In these cases as long as the PC is updated we can chain to the - * next TB either by exiting the loop or looking up the next TB via - * the loookup helper. - * - * DISAS_UPDATE - CPU State was modified dynamically - * - * This covers any other CPU state which necessities us exiting the - * TCG code to the main run-loop. Typically this includes anything - * that might change the interrupt state. - * - * Individual translators may define additional exit cases to deal - * with per-target special conditions. 
- */ -#define DISAS_NEXT 0 /* next instruction can be analyzed */ -#define DISAS_JUMP 1 /* only pc was modified dynamically */ -#define DISAS_TB_JUMP 2 /* only pc was modified statically */ -#define DISAS_UPDATE 3 /* cpu state was modified dynamically */ - #include "qemu/log.h" void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb); @@ -330,15 +301,6 @@ static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) #define CODE_GEN_AVG_BLOCK_SIZE 150 #endif -#if defined(_ARCH_PPC) \ - || defined(__x86_64__) || defined(__i386__) \ - || defined(__sparc__) || defined(__aarch64__) \ - || defined(__s390x__) || defined(__mips__) \ - || defined(CONFIG_TCG_INTERPRETER) -/* NOTE: Direct jump patching must be atomic to be thread-safe. */ -#define USE_DIRECT_JUMP -#endif - struct TranslationBlock { target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ target_ulong cs_base; /* CS base for this block */ @@ -376,11 +338,8 @@ struct TranslationBlock { */ uint16_t jmp_reset_offset[2]; /* offset of original jump target */ #define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */ -#ifdef USE_DIRECT_JUMP - uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */ -#else - uintptr_t jmp_target_addr[2]; /* target address for indirect jump */ -#endif + uintptr_t jmp_target_arg[2]; /* target address or offset */ + /* Each TB has an assosiated circular list of TBs jumping to this one. * jmp_list_first points to the first TB jumping to this one. * jmp_list_next is used to point to the next TB in a list. @@ -402,84 +361,7 @@ void tb_flush(CPUState *cpu); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags); - -#if defined(USE_DIRECT_JUMP) - -#if defined(CONFIG_TCG_INTERPRETER) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); - /* no need to flush icache explicitly */ -} -#elif defined(_ARCH_PPC) -void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); -#define tb_set_jmp_target1 ppc_tb_set_jmp_target -#elif defined(__i386__) || defined(__x86_64__) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); - /* no need to flush icache explicitly */ -} -#elif defined(__s390x__) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - intptr_t disp = addr - (jmp_addr - 2); - atomic_set((int32_t *)jmp_addr, disp / 2); - /* no need to flush icache explicitly */ -} -#elif defined(__aarch64__) -void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); -#define tb_set_jmp_target1 aarch64_tb_set_jmp_target -#elif defined(__sparc__) || defined(__mips__) -void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr); -#else -#error tb_set_jmp_target1 is missing -#endif - -static inline void tb_set_jmp_target(TranslationBlock *tb, - int n, uintptr_t addr) -{ - uint16_t offset = tb->jmp_insn_offset[n]; - tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr); -} - -#else - -/* set the jump target */ -static inline void tb_set_jmp_target(TranslationBlock *tb, - int n, uintptr_t addr) -{ - tb->jmp_target_addr[n] = addr; -} - -#endif - -/* Called with tb_lock held. 
*/ -static inline void tb_add_jump(TranslationBlock *tb, int n, - TranslationBlock *tb_next) -{ - assert(n < ARRAY_SIZE(tb->jmp_list_next)); - if (tb->jmp_list_next[n]) { - /* Another thread has already done this while we were - * outside of the lock; nothing to do in this case */ - return; - } - qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, - "Linking TBs %p [" TARGET_FMT_lx - "] index %d -> %p [" TARGET_FMT_lx "]\n", - tb->tc_ptr, tb->pc, n, - tb_next->tc_ptr, tb_next->pc); - - /* patch the native jump address */ - tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr); - - /* add in TB jmp circular list */ - tb->jmp_list_next[n] = tb_next->jmp_list_first; - tb_next->jmp_list_first = (uintptr_t)tb | n; -} +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); /* GETPC is the true target of the return instruction that we'll execute. */ #if defined(CONFIG_TCG_INTERPRETER) diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index e601061848..d4a1642098 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -46,4 +46,14 @@ typedef struct MemTxAttrs { */ #define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 }) +/* New-style MMIO accessors can indicate that the transaction failed. + * A zero (MEMTX_OK) response means success; anything else is a failure + * of some kind. The memory subsystem will bitwise-OR together results + * if it is synthesizing an operation from multiple smaller accesses. + */ +#define MEMTX_OK 0 +#define MEMTX_ERROR (1U << 0) /* device returned an error */ +#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +typedef uint32_t MemTxResult; + #endif diff --git a/include/exec/memory.h b/include/exec/memory.h index 400dd4491b..1dcd3122d7 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -112,16 +112,6 @@ static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, n->end = end; } -/* New-style MMIO accessors can indicate that the transaction failed. - * A zero (MEMTX_OK) response means success; anything else is a failure - * of some kind. The memory subsystem will bitwise-OR together results - * if it is synthesizing an operation from multiple smaller accesses. - */ -#define MEMTX_OK 0 -#define MEMTX_ERROR (1U << 0) /* device returned an error */ -#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ -typedef uint32_t MemTxResult; - /* * Memory region callbacks */ diff --git a/include/exec/translator.h b/include/exec/translator.h new file mode 100644 index 0000000000..e2dc2a04ae --- /dev/null +++ b/include/exec/translator.h @@ -0,0 +1,144 @@ +/* + * Generic intermediate code generation. + * + * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef EXEC__TRANSLATOR_H +#define EXEC__TRANSLATOR_H + +/* + * Include this header from a target-specific file, and add a + * + * DisasContextBase base; + * + * member in your target-specific DisasContext. + */ + + +#include "exec/exec-all.h" +#include "tcg/tcg.h" + + +/** + * DisasJumpType: + * @DISAS_NEXT: Next instruction in program order. + * @DISAS_TOO_MANY: Too many instructions translated. + * @DISAS_NORETURN: Following code is dead. + * @DISAS_TARGET_*: Start of target-specific conditions. + * + * What instruction to disassemble next. 
+ */ +typedef enum DisasJumpType { + DISAS_NEXT, + DISAS_TOO_MANY, + DISAS_NORETURN, + DISAS_TARGET_0, + DISAS_TARGET_1, + DISAS_TARGET_2, + DISAS_TARGET_3, + DISAS_TARGET_4, + DISAS_TARGET_5, + DISAS_TARGET_6, + DISAS_TARGET_7, + DISAS_TARGET_8, + DISAS_TARGET_9, + DISAS_TARGET_10, + DISAS_TARGET_11, +} DisasJumpType; + +/** + * DisasContextBase: + * @tb: Translation block for this disassembly. + * @pc_first: Address of first guest instruction in this TB. + * @pc_next: Address of next guest instruction in this TB (current during + * disassembly). + * @is_jmp: What instruction to disassemble next. + * @num_insns: Number of translated instructions (including current). + * @singlestep_enabled: "Hardware" single stepping enabled. + * + * Architecture-agnostic disassembly context. + */ +typedef struct DisasContextBase { + TranslationBlock *tb; + target_ulong pc_first; + target_ulong pc_next; + DisasJumpType is_jmp; + unsigned int num_insns; + bool singlestep_enabled; +} DisasContextBase; + +/** + * TranslatorOps: + * @init_disas_context: + * Initialize the target-specific portions of DisasContext struct. + * The generic DisasContextBase has already been initialized. + * Return max_insns, modified as necessary by db->tb->flags. + * + * @tb_start: + * Emit any code required before the start of the main loop, + * after the generic gen_tb_start(). + * + * @insn_start: + * Emit the tcg_gen_insn_start opcode. + * + * @breakpoint_check: + * When called, the breakpoint has already been checked to match the PC, + * but the target may decide the breakpoint missed the address + * (e.g., due to conditions encoded in their flags). Return true to + * indicate that the breakpoint did hit, in which case no more breakpoints + * are checked. If the breakpoint did hit, emit any code required to + * signal the exception, and set db->is_jmp as necessary to terminate + * the main loop. + * + * @translate_insn: + * Disassemble one instruction and set db->pc_next for the start + * of the following instruction. Set db->is_jmp as necessary to + * terminate the main loop. + * + * @tb_stop: + * Emit any opcodes required to exit the TB, based on db->is_jmp. + * + * @disas_log: + * Print instruction disassembly to log. + */ +typedef struct TranslatorOps { + int (*init_disas_context)(DisasContextBase *db, CPUState *cpu, + int max_insns); + void (*tb_start)(DisasContextBase *db, CPUState *cpu); + void (*insn_start)(DisasContextBase *db, CPUState *cpu); + bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu, + const CPUBreakpoint *bp); + void (*translate_insn)(DisasContextBase *db, CPUState *cpu); + void (*tb_stop)(DisasContextBase *db, CPUState *cpu); + void (*disas_log)(const DisasContextBase *db, CPUState *cpu); +} TranslatorOps; + +/** + * translator_loop: + * @ops: Target-specific operations. + * @db: Disassembly context. + * @cpu: Target vCPU. + * @tb: Translation block. + * + * Generic translator loop. + * + * Translation will stop in the following cases (in order): + * - When is_jmp set by #TranslatorOps::breakpoint_check. + * - set to DISAS_TOO_MANY exits after translating one more insn + * - set to any other value than DISAS_NEXT exits immediately. + * - When is_jmp set by #TranslatorOps::translate_insn. + * - set to any value other than DISAS_NEXT exits immediately. + * - When the TCG operation buffer is full. + * - When single-stepping is enabled (system-wide or on the current vCPU). + * - When too many instructions have been translated. 
+ */ +void translator_loop(const TranslatorOps *ops, DisasContextBase *db, + CPUState *cpu, TranslationBlock *tb); + +void translator_loop_temp_check(DisasContextBase *db); + +#endif /* EXEC__TRANSLATOR_H */ diff --git a/include/hw/acpi/bios-linker-loader.h b/include/hw/acpi/bios-linker-loader.h index efe17b0b9c..a711dbced8 100644 --- a/include/hw/acpi/bios-linker-loader.h +++ b/include/hw/acpi/bios-linker-loader.h @@ -7,6 +7,8 @@ typedef struct BIOSLinker { GArray *file_list; } BIOSLinker; +bool bios_linker_loader_can_write_pointer(void); + BIOSLinker *bios_linker_loader_init(void); void bios_linker_loader_alloc(BIOSLinker *linker, diff --git a/include/hw/acpi/vmgenid.h b/include/hw/acpi/vmgenid.h index 7beb9592fb..38586ecbdf 100644 --- a/include/hw/acpi/vmgenid.h +++ b/include/hw/acpi/vmgenid.h @@ -21,7 +21,6 @@ typedef struct VmGenIdState { DeviceClass parent_obj; QemuUUID guid; /* The 128-bit GUID seen by the guest */ uint8_t vmgenid_addr_le[8]; /* Address of the GUID (little-endian) */ - bool write_pointer_available; } VmGenIdState; /* returns NULL unless there is exactly one device */ diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h index a9b3f2ab35..10eb058027 100644 --- a/include/hw/arm/armv7m.h +++ b/include/hw/arm/armv7m.h @@ -11,7 +11,7 @@ #define HW_ARM_ARMV7M_H #include "hw/sysbus.h" -#include "hw/arm/armv7m_nvic.h" +#include "hw/intc/armv7m_nvic.h" #define TYPE_BITBAND "ARM,bitband-memory" #define BITBAND(obj) OBJECT_CHECK(BitBandState, (obj), TYPE_BITBAND) diff --git a/include/hw/boards.h b/include/hw/boards.h index 3363dd19fd..7f044d101d 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -131,6 +131,16 @@ typedef struct { * size than the target architecture's minimum. (Attempting to create * such a CPU will fail.) Note that changing this is a migration * compatibility break for the machine. + * @ignore_memory_transaction_failures: + * If this is flag is true then the CPU will ignore memory transaction + * failures which should cause the CPU to take an exception due to an + * access to an unassigned physical address; the transaction will instead + * return zero (for a read) or be ignored (for a write). This should be + * set only by legacy board models which rely on the old RAZ/WI behaviour + * for handling devices that QEMU does not yet model. New board models + * should instead use "unimplemented-device" for all memory ranges where + * the guest will attempt to probe for a device that QEMU doesn't + * implement and a stub device is required. */ struct MachineClass { /*< private >*/ @@ -171,6 +181,7 @@ struct MachineClass { bool rom_file_has_mr; int minimum_page_bits; bool has_hotpluggable_cpus; + bool ignore_memory_transaction_failures; int numa_mem_align_shift; void (*numa_auto_assign_ram)(MachineClass *mc, NodeInfo *nodes, int nb_nodes, ram_addr_t size); diff --git a/include/hw/compat.h b/include/hw/compat.h index 3e101f8f67..9cc14dd798 100644 --- a/include/hw/compat.h +++ b/include/hw/compat.h @@ -153,10 +153,6 @@ .driver = "fw_cfg_io",\ .property = "dma_enabled",\ .value = "off",\ - },{\ - .driver = "vmgenid",\ - .property = "x-write-pointer-available",\ - .value = "off",\ }, #define HW_COMPAT_2_3 \ diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index a172a6068a..d192e7e2a3 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -362,6 +362,54 @@ static int glue(load_elf, SZ)(const char *name, int fd, goto fail; } } + + /* The ELF spec is somewhat vague about the purpose of the + * physical address field. 
One common use in the embedded world + * is that physical address field specifies the load address + * and the virtual address field specifies the execution address. + * Segments are packed into ROM or flash, and the relocation + * and zero-initialization of data is done at runtime. This + * means that the memsz header represents the runtime size of the + * segment, but the filesz represents the loadtime size. If + * we try to honour the memsz value for an ELF file like this + * we will end up with overlapping segments (which the + * loader.c code will later reject). + * We support ELF files using this scheme by by checking whether + * paddr + memsz for this segment would overlap with any other + * segment. If so, then we assume it's using this scheme and + * truncate the loaded segment to the filesz size. + * If the segment considered as being memsz size doesn't overlap + * then we use memsz for the segment length, to handle ELF files + * which assume that the loader will do the zero-initialization. + */ + if (mem_size > file_size) { + /* If this segment's zero-init portion overlaps another + * segment's data or zero-init portion, then truncate this one. + * Invalid ELF files where the segments overlap even when + * only file_size bytes are loaded will be rejected by + * the ROM overlap check in loader.c, so we don't try to + * explicitly detect those here. + */ + int j; + elf_word zero_start = ph->p_paddr + file_size; + elf_word zero_end = ph->p_paddr + mem_size; + + for (j = 0; j < ehdr.e_phnum; j++) { + struct elf_phdr *jph = &phdr[j]; + + if (i != j && jph->p_type == PT_LOAD) { + elf_word other_start = jph->p_paddr; + elf_word other_end = jph->p_paddr + jph->p_memsz; + + if (!(other_start >= zero_end || + zero_start >= other_end)) { + mem_size = file_size; + break; + } + } + } + } + /* address_offset is hack for kernel images that are linked at the wrong physical address. */ if (translate_fn) { @@ -403,14 +451,24 @@ static int glue(load_elf, SZ)(const char *name, int fd, *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr; } - if (load_rom) { - snprintf(label, sizeof(label), "phdr #%d: %s", i, name); - - /* rom_add_elf_program() seize the ownership of 'data' */ - rom_add_elf_program(label, data, file_size, mem_size, addr, as); - } else { - cpu_physical_memory_write(addr, data, file_size); + if (mem_size == 0) { + /* Some ELF files really do have segments of zero size; + * just ignore them rather than trying to create empty + * ROM blobs, because the zero-length blob can falsely + * trigger the overlapping-ROM-blobs check. 
+ */ g_free(data); + } else { + if (load_rom) { + snprintf(label, sizeof(label), "phdr #%d: %s", i, name); + + /* rom_add_elf_program() seize the ownership of 'data' */ + rom_add_elf_program(label, data, file_size, mem_size, + addr, as); + } else { + cpu_physical_memory_write(addr, data, file_size); + g_free(data); + } } total_size += mem_size; diff --git a/include/hw/i2c/ppc4xx_i2c.h b/include/hw/i2c/ppc4xx_i2c.h new file mode 100644 index 0000000000..e53042f6d4 --- /dev/null +++ b/include/hw/i2c/ppc4xx_i2c.h @@ -0,0 +1,61 @@ +/* + * PPC4xx I2C controller emulation + * + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef PPC4XX_I2C_H +#define PPC4XX_I2C_H + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/sysbus.h" +#include "hw/i2c/i2c.h" + +#define TYPE_PPC4xx_I2C "ppc4xx-i2c" +#define PPC4xx_I2C(obj) OBJECT_CHECK(PPC4xxI2CState, (obj), TYPE_PPC4xx_I2C) + +typedef struct PPC4xxI2CState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + I2CBus *bus; + qemu_irq irq; + MemoryRegion iomem; + uint8_t mdata; + uint8_t lmadr; + uint8_t hmadr; + uint8_t cntl; + uint8_t mdcntl; + uint8_t sts; + uint8_t extsts; + uint8_t sdata; + uint8_t lsadr; + uint8_t hsadr; + uint8_t clkdiv; + uint8_t intrmsk; + uint8_t xfrcnt; + uint8_t xtcntlss; + uint8_t directcntl; +} PPC4xxI2CState; + +#endif /* PPC4XX_I2C_H */ diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index d80859bfad..8226904524 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -369,6 +369,9 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t); int e820_get_num_entries(void); bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *); +#define PC_COMPAT_2_10 \ + HW_COMPAT_2_10 \ + #define PC_COMPAT_2_9 \ HW_COMPAT_2_9 \ {\ diff --git a/include/hw/arm/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h index 1d145fb75f..1a4cce7442 100644 --- a/include/hw/arm/armv7m_nvic.h +++ b/include/hw/intc/armv7m_nvic.h @@ -50,6 +50,7 @@ typedef struct NVICState { int exception_prio; /* group prio of the highest prio active exception */ MemoryRegion sysregmem; + MemoryRegion sysreg_ns_mem; MemoryRegion container; uint32_t num_irq; diff --git a/include/hw/loader.h b/include/hw/loader.h index 490c9ff8e6..355fe0f5a2 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -192,7 +192,7 @@ int rom_add_file(const char *file, const char *fw_dir, MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr 
addr, const char *fw_file_name, - FWCfgReadCallback fw_callback, + FWCfgCallback fw_callback, void *callback_opaque, AddressSpace *as, bool read_only); int rom_add_elf_program(const char *name, void *data, size_t datasize, diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index b77ea48abb..f50d068563 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -44,7 +44,7 @@ typedef struct FWCfgDmaAccess { uint64_t address; } QEMU_PACKED FWCfgDmaAccess; -typedef void (*FWCfgReadCallback)(void *opaque); +typedef void (*FWCfgCallback)(void *opaque); struct FWCfgState { /*< private >*/ @@ -182,7 +182,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, * fw_cfg_add_file_callback: * @s: fw_cfg device being modified * @filename: name of new fw_cfg file item - * @callback: callback function + * @select_cb: callback function when selecting * @callback_opaque: argument to be passed into callback function * @data: pointer to start of item data * @len: size of item data @@ -201,7 +201,8 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data, * with FW_CFG_DMA_CTL_SELECT). */ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, - FWCfgReadCallback callback, void *callback_opaque, + FWCfgCallback select_cb, + void *callback_opaque, void *data, size_t len, bool read_only); /** diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index 8bb6449dd7..aa7ef9cf69 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -100,6 +100,7 @@ extern bool pci_available; #define PCI_DEVICE_ID_REDHAT_PXB_PCIE 0x000b #define PCI_DEVICE_ID_REDHAT_PCIE_RP 0x000c #define PCI_DEVICE_ID_REDHAT_XHCI 0x000d +#define PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE 0x000e #define PCI_DEVICE_ID_REDHAT_QXL 0x0100 #define FMT_PCIBUS PRIx64 diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index ff7cbaa227..1acadc2c15 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -67,4 +67,29 @@ void pci_bridge_map_irq(PCIBridge *br, const char* bus_name, #define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */ #define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */ +typedef struct PCIBridgeQemuCap { + uint8_t id; /* Standard PCI capability header field */ + uint8_t next; /* Standard PCI capability header field */ + uint8_t len; /* Standard PCI vendor-specific capability header field */ + uint8_t type; /* Red Hat vendor-specific capability type. 
+ Types are defined with REDHAT_PCI_CAP_ prefix */ + + uint32_t bus_res; /* Minimum number of buses to reserve */ + uint64_t io; /* IO space to reserve */ + uint32_t mem; /* Non-prefetchable memory to reserve */ + /* At most one of the following two fields may be set to a value + * different from -1 */ + uint32_t mem_pref_32; /* Prefetchable memory to reserve (32-bit MMIO) */ + uint64_t mem_pref_64; /* Prefetchable memory to reserve (64-bit MMIO) */ +} PCIBridgeQemuCap; + +#define REDHAT_PCI_CAP_RESOURCE_RESERVE 1 + +int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, + uint32_t bus_reserve, uint64_t io_reserve, + uint32_t mem_non_pref_reserve, + uint32_t mem_pref_32_reserve, + uint64_t mem_pref_64_reserve, + Error **errp); + #endif /* QEMU_PCI_BRIDGE_H */ diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h index 5484a9b5c5..bc34fd0017 100644 --- a/include/hw/pci/pci_bus.h +++ b/include/hw/pci/pci_bus.h @@ -23,6 +23,7 @@ struct PCIBus { PCIIOMMUFunc iommu_fn; void *iommu_opaque; uint8_t devfn_min; + uint32_t slot_reserved_mask; pci_set_irq_fn set_irq; pci_map_irq_fn map_irq; pci_route_irq_fn route_intx_to_irq; diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index 13332668e8..0736014bfd 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -65,6 +65,7 @@ void pcie_chassis_del_slot(PCIESlot *s); typedef struct PCIERootPortClass { PCIDeviceClass parent_class; + DeviceRealize parent_realize; uint8_t (*aer_vector)(const PCIDevice *dev); int (*interrupts_init)(PCIDevice *dev, Error **errp); diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h index 66e57a5194..cb0bb55cec 100644 --- a/include/hw/ppc/ppc4xx.h +++ b/include/hw/ppc/ppc4xx.h @@ -53,6 +53,9 @@ void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks, hwaddr *ram_sizes, int do_init); +void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum, + qemu_irq irqs[4]); + #define TYPE_PPC4xx_PCI_HOST_BRIDGE "ppc4xx-pcihost" #endif /* PPC4XX_H */ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 2a303a705c..c1b365f564 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -99,6 +99,7 @@ struct sPAPRMachineState { uint64_t rtc_offset; /* Now used only during incoming migration */ struct PPCTimebase tb; bool has_graphics; + uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */ Notifier epow_notifier; QTAILQ_HEAD(, sPAPREventLogEntry) pending_events; @@ -662,6 +663,7 @@ void spapr_cpu_parse_features(sPAPRMachineState *spapr); int spapr_hpt_shift_for_ramsize(uint64_t ramsize); void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, Error **errp); +void spapr_clear_pending_events(sPAPRMachineState *spapr); /* CPU and LMB DRC release callbacks. 
*/ void spapr_core_release(DeviceState *dev); @@ -704,4 +706,7 @@ void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg); #define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift)) +int spapr_vcpu_id(PowerPCCPU *cpu); +PowerPCCPU *spapr_find_cpu(int vcpu_id); + #endif /* HW_SPAPR_H */ diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h index a7958d0a8d..f8d9f5b231 100644 --- a/include/hw/ppc/spapr_drc.h +++ b/include/hw/ppc/spapr_drc.h @@ -257,6 +257,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner, void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt, int fdt_start_offset, Error **errp); void spapr_drc_detach(sPAPRDRConnector *drc); +bool spapr_drc_needed(void *opaque); static inline bool spapr_drc_unplug_requested(sPAPRDRConnector *drc) { diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index ae317286a4..089146197f 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -249,7 +249,7 @@ struct Property { struct PropertyInfo { const char *name; const char *description; - const char * const *enum_table; + const QEnumLookup *enum_table; int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len); void (*set_default_value)(Object *obj, const Property *prop); void (*create)(Object *obj, Property *prop, Error **errp); diff --git a/include/hw/watchdog/wdt_aspeed.h b/include/hw/watchdog/wdt_aspeed.h index 080c223122..7de3e5c224 100644 --- a/include/hw/watchdog/wdt_aspeed.h +++ b/include/hw/watchdog/wdt_aspeed.h @@ -27,6 +27,8 @@ typedef struct AspeedWDTState { uint32_t regs[ASPEED_WDT_REGS_MAX]; uint32_t pclk_freq; + uint32_t silicon_rev; + uint32_t ext_pulse_width_mask; } AspeedWDTState; #endif /* ASPEED_WDT_H */ diff --git a/include/io/channel.h b/include/io/channel.h index db9bb022a1..3995e243a3 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -269,6 +269,88 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc, Error **errp); /** + * qio_channel_readv_all_eof: + * @ioc: the channel object + * @iov: the array of memory regions to read data into + * @niov: the length of the @iov array + * @errp: pointer to a NULL-initialized error object + * + * Read data from the IO channel, storing it in the + * memory regions referenced by @iov. Each element + * in the @iov will be fully populated with data + * before the next one is used. The @niov parameter + * specifies the total number of elements in @iov. + * + * The function will wait for all requested data + * to be read, yielding from the current coroutine + * if required. + * + * If end-of-file occurs before any data is read, + * no error is reported; otherwise, if it occurs + * before all requested data has been read, an error + * will be reported. + * + * Returns: 1 if all bytes were read, 0 if end-of-file + * occurs without data, or -1 on error + */ +int qio_channel_readv_all_eof(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp); + +/** + * qio_channel_readv_all: + * @ioc: the channel object + * @iov: the array of memory regions to read data into + * @niov: the length of the @iov array + * @errp: pointer to a NULL-initialized error object + * + * Read data from the IO channel, storing it in the + * memory regions referenced by @iov. Each element + * in the @iov will be fully populated with data + * before the next one is used. The @niov parameter + * specifies the total number of elements in @iov. 
+ * + * The function will wait for all requested data + * to be read, yielding from the current coroutine + * if required. + * + * If end-of-file occurs before all requested data + * has been read, an error will be reported. + * + * Returns: 0 if all bytes were read, or -1 on error + */ +int qio_channel_readv_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **errp); + + +/** + * qio_channel_writev_all: + * @ioc: the channel object + * @iov: the array of memory regions to write data from + * @niov: the length of the @iov array + * @errp: pointer to a NULL-initialized error object + * + * Write data to the IO channel, reading it from the + * memory regions referenced by @iov. Each element + * in the @iov will be fully sent, before the next + * one is used. The @niov parameter specifies the + * total number of elements in @iov. + * + * The function will wait for all requested data + * to be written, yielding from the current coroutine + * if required. + * + * Returns: 0 if all bytes were written, or -1 on error + */ +int qio_channel_writev_all(QIOChannel *ioc, + const struct iovec *iov, + size_t niov, + Error **erp); + +/** * qio_channel_readv: * @ioc: the channel object * @iov: the array of memory regions to read data into @@ -299,7 +381,7 @@ ssize_t qio_channel_writev(QIOChannel *ioc, Error **errp); /** - * qio_channel_readv: + * qio_channel_read: * @ioc: the channel object * @buf: the memory region to read data into * @buflen: the length of @buf @@ -331,6 +413,67 @@ ssize_t qio_channel_write(QIOChannel *ioc, Error **errp); /** + * qio_channel_read_all_eof: + * @ioc: the channel object + * @buf: the memory region to read data into + * @buflen: the number of bytes to @buf + * @errp: pointer to a NULL-initialized error object + * + * Reads @buflen bytes into @buf, possibly blocking or (if the + * channel is non-blocking) yielding from the current coroutine + * multiple times until the entire content is read. If end-of-file + * occurs immediately it is not an error, but if it occurs after + * data has been read it will return an error rather than a + * short-read. Otherwise behaves as qio_channel_read(). + * + * Returns: 1 if all bytes were read, 0 if end-of-file occurs + * without data, or -1 on error + */ +int qio_channel_read_all_eof(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp); + +/** + * qio_channel_read_all: + * @ioc: the channel object + * @buf: the memory region to read data into + * @buflen: the number of bytes to @buf + * @errp: pointer to a NULL-initialized error object + * + * Reads @buflen bytes into @buf, possibly blocking or (if the + * channel is non-blocking) yielding from the current coroutine + * multiple times until the entire content is read. If end-of-file + * occurs it will return an error rather than a short-read. Otherwise + * behaves as qio_channel_read(). + * + * Returns: 0 if all bytes were read, or -1 on error + */ +int qio_channel_read_all(QIOChannel *ioc, + char *buf, + size_t buflen, + Error **errp); + +/** + * qio_channel_write_all: + * @ioc: the channel object + * @buf: the memory region to write data into + * @buflen: the number of bytes to @buf + * @errp: pointer to a NULL-initialized error object + * + * Writes @buflen bytes from @buf, possibly blocking or (if the + * channel is non-blocking) yielding from the current coroutine + * multiple times until the entire content is written. Otherwise + * behaves as qio_channel_write(). 
+ * + * Returns: 0 if all bytes were written, or -1 on error + */ +int qio_channel_write_all(QIOChannel *ioc, + const char *buf, + size_t buflen, + Error **errp); + +/** * qio_channel_set_blocking: * @ioc: the channel object * @enabled: the blocking flag state diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h index 363e431106..6588c7f0c8 100644 --- a/include/qapi/qmp/qdict.h +++ b/include/qapi/qmp/qdict.h @@ -53,13 +53,15 @@ void qdict_destroy_obj(QObject *obj); #define qdict_put(qdict, key, obj) \ qdict_put_obj(qdict, key, QOBJECT(obj)) -/* Helpers for int, bool, and string */ +/* Helpers for int, bool, null, and string */ #define qdict_put_int(qdict, key, value) \ qdict_put(qdict, key, qnum_from_int(value)) #define qdict_put_bool(qdict, key, value) \ qdict_put(qdict, key, qbool_from_bool(value)) #define qdict_put_str(qdict, key, value) \ qdict_put(qdict, key, qstring_from_str(value)) +#define qdict_put_null(qdict, key) \ + qdict_put(qdict, key, qnull()) /* High level helpers */ double qdict_get_double(const QDict *qdict, const char *key); diff --git a/include/qapi/qmp/qlit.h b/include/qapi/qmp/qlit.h new file mode 100644 index 0000000000..b18406bce9 --- /dev/null +++ b/include/qapi/qmp/qlit.h @@ -0,0 +1,54 @@ +/* + * Copyright IBM, Corp. 2009 + * Copyright (c) 2013, 2015, 2017 Red Hat Inc. + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Markus Armbruster <armbru@redhat.com> + * Marc-André Lureau <marcandre.lureau@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ +#ifndef QLIT_H +#define QLIT_H + +#include "qapi-types.h" +#include "qobject.h" + +typedef struct QLitDictEntry QLitDictEntry; +typedef struct QLitObject QLitObject; + +struct QLitObject { + int type; + union { + bool qbool; + int64_t qnum; + const char *qstr; + QLitDictEntry *qdict; + QLitObject *qlist; + } value; +}; + +struct QLitDictEntry { + const char *key; + QLitObject value; +}; + +#define QLIT_QNULL \ + { .type = QTYPE_QNULL } +#define QLIT_QBOOL(val) \ + { .type = QTYPE_QBOOL, .value.qbool = (val) } +#define QLIT_QNUM(val) \ + { .type = QTYPE_QNUM, .value.qnum = (val) } +#define QLIT_QSTR(val) \ + { .type = QTYPE_QSTRING, .value.qstr = (val) } +#define QLIT_QDICT(val) \ + { .type = QTYPE_QDICT, .value.qdict = (val) } +#define QLIT_QLIST(val) \ + { .type = QTYPE_QLIST, .value.qlist = (val) } + +bool qlit_equal_qobject(const QLitObject *lhs, const QObject *rhs); + +#endif /* QLIT_H */ diff --git a/include/qapi/qmp/qnum.h b/include/qapi/qmp/qnum.h index 09d745c490..d6b0791139 100644 --- a/include/qapi/qmp/qnum.h +++ b/include/qapi/qmp/qnum.h @@ -23,6 +23,27 @@ typedef enum { QNUM_DOUBLE } QNumKind; +/* + * QNum encapsulates how our dialect of JSON fills in the blanks left + * by the JSON specification (RFC 7159) regarding numbers. + * + * Conceptually, we treat number as an abstract type with three + * concrete subtypes: floating-point, signed integer, unsigned + * integer. QNum implements this as a discriminated union of double, + * int64_t, uint64_t. + * + * The JSON parser picks the subtype as follows. If the number has a + * decimal point or an exponent, it is floating-point. Else if it + * fits into int64_t, it's signed integer. Else if it fits into + * uint64_t, it's unsigned integer. Else it's floating-point. + * + * Any number can serve as double: qnum_get_double() converts under + * the hood. 
+ * + * An integer can serve as signed / unsigned integer as long as it is + * in range: qnum_get_try_int() / qnum_get_try_uint() check range and + * convert under the hood. + */ typedef struct QNum { QObject base; QNumKind kind; diff --git a/include/qapi/util.h b/include/qapi/util.h index 7436ed815c..a7c3c64148 100644 --- a/include/qapi/util.h +++ b/include/qapi/util.h @@ -11,8 +11,14 @@ #ifndef QAPI_UTIL_H #define QAPI_UTIL_H -int qapi_enum_parse(const char * const lookup[], const char *buf, - int max, int def, Error **errp); +typedef struct QEnumLookup { + const char *const *array; + int size; +} QEnumLookup; + +const char *qapi_enum_lookup(const QEnumLookup *lookup, int val); +int qapi_enum_parse(const QEnumLookup *lookup, const char *buf, + int def, Error **errp); int parse_qapi_name(const char *name, bool complete); diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index 0f3b8cb459..62a51a54cb 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -469,7 +469,7 @@ bool visit_optional(Visitor *v, const char *name, bool *present); * that visit_type_str() must have no unwelcome side effects. */ void visit_type_enum(Visitor *v, const char *name, int *obj, - const char *const strings[], Error **errp); + const QEnumLookup *lookup, Error **errp); /* * Check if visitor is an input visitor. diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 95cf4f4163..5ac621cf1f 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -369,27 +369,35 @@ static inline bool is_power_of_2(uint64_t value) return !(value & (value - 1)); } -/* round down to the nearest power of 2*/ -static inline int64_t pow2floor(int64_t value) +/** + * Return @value rounded down to the nearest power of two or zero. + */ +static inline uint64_t pow2floor(uint64_t value) { - if (!is_power_of_2(value)) { - value = 0x8000000000000000ULL >> clz64(value); + if (!value) { + /* Avoid undefined shift by 64 */ + return 0; } - return value; + return 0x8000000000000000ull >> clz64(value); } -/* round up to the nearest power of 2 (0 if overflow) */ +/* + * Return @value rounded up to the nearest power of two modulo 2^64. + * This is *zero* for @value > 2^63, so be careful. + */ static inline uint64_t pow2ceil(uint64_t value) { - uint8_t nlz = clz64(value); - - if (is_power_of_2(value)) { - return value; - } - if (!nlz) { - return 0; + int n = clz64(value - 1); + + if (!n) { + /* + * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63 + * Therefore, either @value == 0 or @value > 2^63. + * If it's 0, return 1, else return 0. + */ + return !value; } - return 1ULL << (64 - nlz); + return 0x8000000000000000ull >> (n - 1); } /** diff --git a/include/qemu/iov.h b/include/qemu/iov.h index bd9fd55b0a..72d4c559b4 100644 --- a/include/qemu/iov.h +++ b/include/qemu/iov.h @@ -31,11 +31,6 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt); * Number of bytes actually copied will be returned, which is * min(bytes, iov_size(iov)-offset) * `Offset' must point to the inside of iovec. - * It is okay to use very large value for `bytes' since we're - * limited by the size of the iovec anyway, provided that the - * buffer pointed to by buf has enough space. One possible - * such "large" value is -1 (sinice size_t is unsigned), - * so specifying `-1' as `bytes' means 'up to the end of iovec'. 
*/ size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt, size_t offset, const void *buf, size_t bytes); @@ -76,7 +71,6 @@ iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, * up to the end of it, will be filled with the specified value. * Function return actual number of bytes processed, which is * min(size, iov_size(iov) - offset). - * Again, it is okay to use large value for `bytes' to mean "up to the end". */ size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt, size_t offset, int fillc, size_t bytes); diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index ef6b5591f7..4f7311b52a 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -27,18 +27,11 @@ int socket_set_fast_reuse(int fd); #define SHUT_RDWR 2 #endif -/* callback function for nonblocking connect - * valid fd on success, negative error code on failure - */ -typedef void NonBlockingConnectHandler(int fd, Error *err, void *opaque); - int inet_ai_family_from_address(InetSocketAddress *addr, Error **errp); int inet_parse(InetSocketAddress *addr, const char *str, Error **errp); int inet_connect(const char *str, Error **errp); -int inet_connect_saddr(InetSocketAddress *saddr, - NonBlockingConnectHandler *callback, void *opaque, - Error **errp); +int inet_connect_saddr(InetSocketAddress *saddr, Error **errp); NetworkAddressFamily inet_netfamily(int family); @@ -46,14 +39,14 @@ int unix_listen(const char *path, char *ostr, int olen, Error **errp); int unix_connect(const char *path, Error **errp); SocketAddress *socket_parse(const char *str, Error **errp); -int socket_connect(SocketAddress *addr, NonBlockingConnectHandler *callback, - void *opaque, Error **errp); +int socket_connect(SocketAddress *addr, Error **errp); int socket_listen(SocketAddress *addr, Error **errp); void socket_listen_cleanup(int fd, Error **errp); int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp); /* Old, ipv4 only bits. Don't use for new code. */ -int parse_host_port(struct sockaddr_in *saddr, const char *str); +int parse_host_port(struct sockaddr_in *saddr, const char *str, + Error **errp); int socket_init(void); /** diff --git a/include/qemu/throttle-options.h b/include/qemu/throttle-options.h index 3133d1ca40..3528a8f4a2 100644 --- a/include/qemu/throttle-options.h +++ b/include/qemu/throttle-options.h @@ -10,81 +10,103 @@ #ifndef THROTTLE_OPTIONS_H #define THROTTLE_OPTIONS_H +#define QEMU_OPT_IOPS_TOTAL "iops-total" +#define QEMU_OPT_IOPS_TOTAL_MAX "iops-total-max" +#define QEMU_OPT_IOPS_TOTAL_MAX_LENGTH "iops-total-max-length" +#define QEMU_OPT_IOPS_READ "iops-read" +#define QEMU_OPT_IOPS_READ_MAX "iops-read-max" +#define QEMU_OPT_IOPS_READ_MAX_LENGTH "iops-read-max-length" +#define QEMU_OPT_IOPS_WRITE "iops-write" +#define QEMU_OPT_IOPS_WRITE_MAX "iops-write-max" +#define QEMU_OPT_IOPS_WRITE_MAX_LENGTH "iops-write-max-length" +#define QEMU_OPT_BPS_TOTAL "bps-total" +#define QEMU_OPT_BPS_TOTAL_MAX "bps-total-max" +#define QEMU_OPT_BPS_TOTAL_MAX_LENGTH "bps-total-max-length" +#define QEMU_OPT_BPS_READ "bps-read" +#define QEMU_OPT_BPS_READ_MAX "bps-read-max" +#define QEMU_OPT_BPS_READ_MAX_LENGTH "bps-read-max-length" +#define QEMU_OPT_BPS_WRITE "bps-write" +#define QEMU_OPT_BPS_WRITE_MAX "bps-write-max" +#define QEMU_OPT_BPS_WRITE_MAX_LENGTH "bps-write-max-length" +#define QEMU_OPT_IOPS_SIZE "iops-size" +#define QEMU_OPT_THROTTLE_GROUP_NAME "throttle-group" + +#define THROTTLE_OPT_PREFIX "throttling." 
#define THROTTLE_OPTS \ { \ - .name = "throttling.iops-total",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,\ .type = QEMU_OPT_NUMBER,\ .help = "limit total I/O operations per second",\ },{ \ - .name = "throttling.iops-read",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,\ .type = QEMU_OPT_NUMBER,\ .help = "limit read operations per second",\ },{ \ - .name = "throttling.iops-write",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,\ .type = QEMU_OPT_NUMBER,\ .help = "limit write operations per second",\ },{ \ - .name = "throttling.bps-total",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,\ .type = QEMU_OPT_NUMBER,\ .help = "limit total bytes per second",\ },{ \ - .name = "throttling.bps-read",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,\ .type = QEMU_OPT_NUMBER,\ .help = "limit read bytes per second",\ },{ \ - .name = "throttling.bps-write",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,\ .type = QEMU_OPT_NUMBER,\ .help = "limit write bytes per second",\ },{ \ - .name = "throttling.iops-total-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "I/O operations burst",\ },{ \ - .name = "throttling.iops-read-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "I/O operations read burst",\ },{ \ - .name = "throttling.iops-write-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "I/O operations write burst",\ },{ \ - .name = "throttling.bps-total-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "total bytes burst",\ },{ \ - .name = "throttling.bps-read-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "total bytes read burst",\ },{ \ - .name = "throttling.bps-write-max",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,\ .type = QEMU_OPT_NUMBER,\ .help = "total bytes write burst",\ },{ \ - .name = "throttling.iops-total-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the iops-total-max burst period, in seconds",\ },{ \ - .name = "throttling.iops-read-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the iops-read-max burst period, in seconds",\ },{ \ - .name = "throttling.iops-write-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the iops-write-max burst period, in seconds",\ },{ \ - .name = "throttling.bps-total-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the bps-total-max burst period, in seconds",\ },{ \ - .name = "throttling.bps-read-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the bps-read-max burst period, in seconds",\ },{ \ - .name = "throttling.bps-write-max-length",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,\ .type = QEMU_OPT_NUMBER,\ .help = "length of the bps-write-max burst period, in seconds",\ },{ \ - .name = "throttling.iops-size",\ + .name = THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,\ .type = QEMU_OPT_NUMBER,\ .help = "when limiting by iops max size of an I/O in bytes",\ } diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h index 8e01885d29..8c93237866 100644 --- a/include/qemu/throttle.h +++ b/include/qemu/throttle.h @@ -152,5 +152,8 
@@ bool throttle_schedule_timer(ThrottleState *ts, bool is_write); void throttle_account(ThrottleState *ts, bool is_write, uint64_t size); +void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg, + Error **errp); +void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var); #endif diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 25eefea7ab..995a7beeb5 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -85,8 +85,11 @@ struct TranslationBlock; * @has_work: Callback for checking if there is work to do. * @do_interrupt: Callback for interrupt handling. * @do_unassigned_access: Callback for unassigned access handling. + * (this is deprecated: new targets should use do_transaction_failed instead) * @do_unaligned_access: Callback for unaligned access handling, if * the target defines #ALIGNED_ONLY. + * @do_transaction_failed: Callback for handling failed memory transactions + * (ie bus faults or external aborts; not MMU faults) * @virtio_is_big_endian: Callback to return %true if a CPU which supports * runtime configurable endianness is currently big-endian. Non-configurable * CPUs can use the default implementation of this method. This method should @@ -153,6 +156,10 @@ typedef struct CPUClass { void (*do_unaligned_access)(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); + void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr, + unsigned size, MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); bool (*virtio_is_big_endian)(CPUState *cpu); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); @@ -305,6 +312,9 @@ struct qemu_work_item; * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes * to @trace_dstate). * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). + * @ignore_memory_transaction_failures: Cached copy of the MachineState + * flag of the same name: allows the board to suppress calling of the + * CPU do_transaction_failed hook function. * * State of one CPU core or thread. */ @@ -391,6 +401,8 @@ struct CPUState { */ bool throttle_thread_scheduled; + bool ignore_memory_transaction_failures; + /* Note that this is accessed at the start of every TB via a negative offset from AREG0. Leave this field at the end so as to make the (absolute value) offset as small as possible. This reduces code @@ -755,6 +767,16 @@ CPUState *qemu_get_cpu(int index); bool cpu_exists(int64_t id); /** + * cpu_by_arch_id: + * @id: Guest-exposed CPU ID of the CPU to obtain. + * + * Get a CPU with matching @id. + * + * Returns: The CPU or %NULL if there is no matching CPU. + */ +CPUState *cpu_by_arch_id(int64_t id); + +/** * cpu_throttle_set: * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99. 
* @@ -837,6 +859,21 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); } + +static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, + uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) { + cc->do_transaction_failed(cpu, physaddr, addr, size, access_type, + mmu_idx, attrs, response, retaddr); + } +} #endif #endif /* NEED_CPU_H */ diff --git a/include/qom/object.h b/include/qom/object.h index 1b828994fa..f3e5cff37a 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -1415,14 +1415,14 @@ void object_class_property_add_bool(ObjectClass *klass, const char *name, */ void object_property_add_enum(Object *obj, const char *name, const char *typename, - const char * const *strings, + const QEnumLookup *lookup, int (*get)(Object *, Error **), void (*set)(Object *, int, Error **), Error **errp); void object_class_property_add_enum(ObjectClass *klass, const char *name, const char *typename, - const char * const *strings, + const QEnumLookup *lookup, int (*get)(Object *, Error **), void (*set)(Object *, int, Error **), Error **errp); diff --git a/include/qom/object_interfaces.h b/include/qom/object_interfaces.h index fdd7603c84..d63c1c28f8 100644 --- a/include/qom/object_interfaces.h +++ b/include/qom/object_interfaces.h @@ -51,7 +51,7 @@ typedef struct UserCreatableClass { /* <public> */ void (*complete)(UserCreatable *uc, Error **errp); - bool (*can_be_deleted)(UserCreatable *uc, Error **errp); + bool (*can_be_deleted)(UserCreatable *uc); } UserCreatableClass; /** @@ -68,12 +68,11 @@ void user_creatable_complete(Object *obj, Error **errp); /** * user_creatable_can_be_deleted: * @uc: the object whose can_be_deleted() method is called if implemented - * @errp: if an error occurs, a pointer to an area to store the error * * Wrapper to call can_be_deleted() method if one of types it's inherited * from implements USER_CREATABLE interface. */ -bool user_creatable_can_be_deleted(UserCreatable *uc, Error **errp); +bool user_creatable_can_be_deleted(UserCreatable *uc); /** * user_creatable_add_type: diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index aadc733daf..c4e52a5fa3 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -70,24 +70,10 @@ typedef struct BlockDevOps { /* This struct is embedded in (the private) BlockBackend struct and contains * fields that must be public. This is in particular for QLIST_ENTRY() and - * friends so that BlockBackends can be kept in lists outside block-backend.c */ + * friends so that BlockBackends can be kept in lists outside block-backend.c + * */ typedef struct BlockBackendPublic { - /* throttled_reqs_lock protects the CoQueues for throttled requests. */ - CoMutex throttled_reqs_lock; - CoQueue throttled_reqs[2]; - - /* Nonzero if the I/O limits are currently being ignored; generally - * it is zero. Accessed with atomic operations. - */ - unsigned int io_limits_disabled; - - /* The following fields are protected by the ThrottleGroup lock. - * See the ThrottleGroup documentation for details. - * throttle_state tells us if I/O limits are configured. 
*/ - ThrottleState *throttle_state; - ThrottleTimers throttle_timers; - unsigned pending_reqs[2]; - QLIST_ENTRY(BlockBackendPublic) round_robin; + ThrottleGroupMember throttle_group_member; } BlockBackendPublic; BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm); diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h index e6da1a4087..d2985b30ba 100644 --- a/include/sysemu/iothread.h +++ b/include/sysemu/iothread.h @@ -24,6 +24,9 @@ typedef struct { QemuThread thread; AioContext *ctx; + GMainContext *worker_context; + GMainLoop *main_loop; + GOnce once; QemuMutex init_done_lock; QemuCond init_done_cond; /* is thread initialization done? */ bool stopping; @@ -41,5 +44,6 @@ typedef struct { char *iothread_get_id(IOThread *iothread); AioContext *iothread_get_aio_context(IOThread *iothread); void iothread_stop_all(void); +GMainContext *iothread_get_g_main_context(IOThread *iothread); #endif /* IOTHREAD_H */ diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h index b58f52d39f..b0a9731aee 100644 --- a/include/sysemu/tpm_backend.h +++ b/include/sysemu/tpm_backend.h @@ -226,7 +226,7 @@ TPMVersion tpm_backend_get_tpm_version(TPMBackend *s); TPMBackend *qemu_find_tpm(const char *id); const TPMDriverOps *tpm_get_backend_driver(const char *type); -int tpm_register_model(enum TpmModel model); -int tpm_register_driver(const TPMDriverOps *tdo); +void tpm_register_model(enum TpmModel model); +void tpm_register_driver(const TPMDriverOps *tdo); #endif |
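The block_int.h hunk above documents the aligned read/write callback contract and adds bdrv_co_get_block_status_from_file()/_from_backing() so that filter drivers can forward block-status queries. Below is a hedged sketch of a pass-through filter built on those pieces; the "noop-filter" driver is hypothetical and only illustrates the documented contract, not an actual QEMU driver:

```c
#include "qemu/osdep.h"
#include "block/block_int.h"

/* Hypothetical no-op filter: every request goes straight to bs->file. */
static int coroutine_fn noop_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    /* @offset and @bytes arrive aligned to request_alignment and capped
     * at max_transfer, as documented for .bdrv_co_preadv above. */
    return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}

static int coroutine_fn noop_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}

static BlockDriver bdrv_noop_filter = {
    .format_name              = "noop-filter",
    .is_filter                = true,
    .bdrv_co_preadv           = noop_co_preadv,
    .bdrv_co_pwritev          = noop_co_pwritev,
    /* Forward "which blocks are allocated?" queries to the file child. */
    .bdrv_co_get_block_status = bdrv_co_get_block_status_from_file,
};
```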
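The throttle-groups.h changes above move the throttling state out of BlockBackendPublic into the new ThrottleGroupMember and rename the API from *_blk to *_tgm. A hedged sketch of the intended lifecycle — embed a member, register it in a named group, intercept I/O from a coroutine, unregister — where MyThrottledDevice and the function names are hypothetical:

```c
#include "qemu/osdep.h"
#include "block/throttle-groups.h"

/* Hypothetical owner embedding a ThrottleGroupMember, the way
 * BlockBackendPublic now does in sysemu/block-backend.h. */
typedef struct MyThrottledDevice {
    ThrottleGroupMember tgm;
} MyThrottledDevice;

static void my_device_enable_throttling(MyThrottledDevice *dev,
                                        const char *group,
                                        ThrottleConfig *cfg,
                                        AioContext *ctx)
{
    /* Join the named group in the given AioContext... */
    throttle_group_register_tgm(&dev->tgm, group, ctx);
    /* ...and apply the limits shared by that group. */
    throttle_group_config(&dev->tgm, cfg);
}

static void coroutine_fn my_device_co_write(MyThrottledDevice *dev,
                                            unsigned int bytes)
{
    /* Wait here if the group's write limits are currently exceeded. */
    throttle_group_co_io_limits_intercept(&dev->tgm, bytes, true);
    /* ...issue the actual write... */
}

static void my_device_disable_throttling(MyThrottledDevice *dev)
{
    throttle_group_unregister_tgm(&dev->tgm);
}
```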
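The MEMTX_* definitions moved from exec/memory.h into exec/memattrs.h keep the documented convention that results are bitwise-ORed together when an access is synthesized from several smaller ones, and that anything other than MEMTX_OK is a failure. A small standalone illustration of that convention; the per-byte device accessor is made up for the example and is not the memory subsystem's real code path:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t MemTxResult;
#define MEMTX_OK            0
#define MEMTX_ERROR         (1U << 0)   /* device returned an error */
#define MEMTX_DECODE_ERROR  (1U << 1)   /* nothing at that address */

/* Pretend per-byte accessor: nothing is mapped at address 4 and above. */
static MemTxResult dev_read_byte(uint64_t addr, uint8_t *val)
{
    if (addr >= 4) {
        return MEMTX_DECODE_ERROR;
    }
    *val = (uint8_t)(0xA0 + addr);
    return MEMTX_OK;
}

/* Synthesize a wider read from byte accesses, ORing the results
 * together exactly as the memattrs.h comment describes. */
static MemTxResult read_bytes(uint64_t addr, uint8_t *buf, unsigned len)
{
    MemTxResult res = MEMTX_OK;

    for (unsigned i = 0; i < len; i++) {
        res |= dev_read_byte(addr + i, &buf[i]);
    }
    return res;
}

int main(void)
{
    uint8_t buf[8];
    MemTxResult res = read_bytes(2, buf, 4);   /* last two bytes unmapped */

    printf("result %#x -> %s\n", res, res == MEMTX_OK ? "ok" : "failed");
    return 0;
}
```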
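The new exec/translator.h above introduces DisasContextBase, TranslatorOps and translator_loop() to replace the per-target DISAS_* macros removed from exec-all.h. A hedged skeleton of how a target might wire its gen_intermediate_code() into the new loop; "mytarget", the empty hook bodies and the fixed 4-byte instruction size are purely illustrative:

```c
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/translator.h"

typedef struct DisasContext {
    DisasContextBase base;          /* generic part, as translator.h asks */
    /* ... target-specific decode state ... */
} DisasContext;

static int mytarget_init_disas_context(DisasContextBase *db, CPUState *cpu,
                                       int max_insns)
{
    return max_insns;               /* clamp according to db->tb->flags here */
}

static void mytarget_tb_start(DisasContextBase *db, CPUState *cpu) { }

static void mytarget_insn_start(DisasContextBase *db, CPUState *cpu)
{
    /* tcg_gen_insn_start(db->pc_next); */
}

static bool mytarget_breakpoint_check(DisasContextBase *db, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    /* emit the debug exception here, then tell the loop to stop */
    db->is_jmp = DISAS_NORETURN;
    return true;
}

static void mytarget_translate_insn(DisasContextBase *db, CPUState *cpu)
{
    /* decode one instruction, emit TCG ops for it ... */
    db->pc_next += 4;               /* fixed-width insns, for illustration */
}

static void mytarget_tb_stop(DisasContextBase *db, CPUState *cpu) { }

static void mytarget_disas_log(const DisasContextBase *db, CPUState *cpu) { }

static const TranslatorOps mytarget_tr_ops = {
    .init_disas_context = mytarget_init_disas_context,
    .tb_start           = mytarget_tb_start,
    .insn_start         = mytarget_insn_start,
    .breakpoint_check   = mytarget_breakpoint_check,
    .translate_insn     = mytarget_translate_insn,
    .tb_stop            = mytarget_tb_stop,
    .disas_log          = mytarget_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc = { };

    translator_loop(&mytarget_tr_ops, &dc.base, cpu, tb);
}
```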
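The hw/elf_ops.h change above decides whether to honour p_memsz or truncate the loaded size to p_filesz by checking whether a segment's zero-initialized tail would overlap another PT_LOAD segment. The same interval test in standalone form; the Phdr type here is a simplified stand-in for the loader's elf_phdr, not the real structure:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for one ELF program header. */
typedef struct {
    uint64_t p_paddr;
    uint64_t p_filesz;
    uint64_t p_memsz;
    bool     is_load;           /* PT_LOAD? */
} Phdr;

/* Bytes to load for segment @i: p_memsz normally, truncated to p_filesz
 * if the zero-init tail would overlap another PT_LOAD segment (the
 * "packed into ROM, relocated at runtime" scheme the hunk describes). */
static uint64_t segment_load_size(const Phdr *phdr, size_t nphdr, size_t i)
{
    uint64_t mem_size = phdr[i].p_memsz;
    uint64_t file_size = phdr[i].p_filesz;

    if (mem_size > file_size) {
        uint64_t zero_start = phdr[i].p_paddr + file_size;
        uint64_t zero_end = phdr[i].p_paddr + mem_size;

        for (size_t j = 0; j < nphdr; j++) {
            if (i == j || !phdr[j].is_load) {
                continue;
            }
            uint64_t other_start = phdr[j].p_paddr;
            uint64_t other_end = phdr[j].p_paddr + phdr[j].p_memsz;

            /* The two half-open intervals intersect unless one ends
             * before the other begins. */
            if (!(other_start >= zero_end || zero_start >= other_end)) {
                mem_size = file_size;   /* assume load-address packing */
                break;
            }
        }
    }
    return mem_size;
}

int main(void)
{
    /* Segment 0's zero-init tail would collide with segment 1, so it is
     * truncated to its file size. */
    Phdr phdr[] = {
        { .p_paddr = 0x1000, .p_filesz = 0x100, .p_memsz = 0x800, .is_load = true },
        { .p_paddr = 0x1200, .p_filesz = 0x100, .p_memsz = 0x100, .is_load = true },
    };

    printf("segment 0 load size: %#llx\n",
           (unsigned long long)segment_load_size(phdr, 2, 0));
    return 0;
}
```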
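The new qio_channel_*_all helpers in io/channel.h distinguish a clean end-of-file before any data (qio_channel_read_all_eof() returns 0) from a short read (-1 with an error set). A hedged usage sketch for reading an optional fixed-size header from a channel; read_header() is a hypothetical caller built only on the declarations added above:

```c
#include "qemu/osdep.h"
#include "io/channel.h"
#include "qapi/error.h"

/* Returns 1 if a full header was read, 0 on clean EOF before any data,
 * -1 on error (including EOF in the middle of the header). */
static int read_header(QIOChannel *ioc, char *hdr, size_t hdrlen, Error **errp)
{
    int ret = qio_channel_read_all_eof(ioc, hdr, hdrlen, errp);

    if (ret < 0) {
        return -1;          /* I/O error or short read: *errp is set */
    }
    if (ret == 0) {
        return 0;           /* peer closed before sending anything */
    }
    return 1;
}
```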
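The qapi/qmp/qnum.h comment above spells out how the JSON parser picks a QNum subtype: floating-point if the token has a decimal point or exponent, else int64_t if it fits, else uint64_t if it fits, else floating-point. A standalone sketch of that decision using the C library; classify_number() is a stand-in for illustration, not QEMU's parser:

```c
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { NUM_I64, NUM_U64, NUM_DOUBLE } NumKind;

/* Pick the subtype described in qnum.h for one JSON number token. */
static NumKind classify_number(const char *tok, int64_t *i, uint64_t *u,
                               double *d)
{
    if (strpbrk(tok, ".eE")) {          /* decimal point or exponent */
        *d = strtod(tok, NULL);
        return NUM_DOUBLE;
    }

    errno = 0;
    *i = strtoll(tok, NULL, 10);
    if (!errno) {
        return NUM_I64;                 /* fits into int64_t */
    }

    errno = 0;
    *u = strtoull(tok, NULL, 10);
    if (!errno && tok[0] != '-') {
        return NUM_U64;                 /* fits into uint64_t */
    }

    *d = strtod(tok, NULL);             /* out of range either way */
    return NUM_DOUBLE;
}

int main(void)
{
    const char *toks[] = { "42", "-1", "9223372036854775808", "1e3",
                           "36893488147419103232" };

    for (size_t n = 0; n < sizeof(toks) / sizeof(toks[0]); n++) {
        int64_t i = 0; uint64_t u = 0; double d = 0;
        switch (classify_number(toks[n], &i, &u, &d)) {
        case NUM_I64:    printf("%s -> int64  %" PRId64 "\n", toks[n], i); break;
        case NUM_U64:    printf("%s -> uint64 %" PRIu64 "\n", toks[n], u); break;
        case NUM_DOUBLE: printf("%s -> double %g\n", toks[n], d); break;
        }
    }
    return 0;
}
```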
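The qemu/host-utils.h hunk rewrites pow2floor()/pow2ceil() purely in terms of clz64() and documents the edge cases (zero input, values above 2^63). The same bit tricks as a standalone test program; clz64() below is a local stand-in for QEMU's helper, which likewise returns 64 for a zero input:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int clz64(uint64_t v)
{
    return v ? __builtin_clzll(v) : 64;
}

/* Round down to the nearest power of two; 0 stays 0. */
static uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        return 0;               /* avoid an undefined shift by 64 */
    }
    return 0x8000000000000000ull >> clz64(value);
}

/* Round up to the nearest power of two, modulo 2^64 (so 0 for value > 2^63). */
static uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /* value - 1 has no leading zeroes: value is 0 or > 2^63.
         * Return 1 for 0, and 0 (wrapped around) otherwise. */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}

int main(void)
{
    uint64_t probes[] = { 0, 1, 2, 3, 5, 1ull << 63, (1ull << 63) + 1 };

    for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
        printf("%#" PRIx64 ": floor=%#" PRIx64 " ceil=%#" PRIx64 "\n",
               probes[i], pow2floor(probes[i]), pow2ceil(probes[i]));
    }
    return 0;
}
```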