120 files changed, 1896 insertions(+), 1361 deletions(-)
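A recurring change throughout this diff replaces direct tb->page_addr[]
accesses with tb_page_addr0()/tb_page_addr1() accessors, whose definitions
live in a header outside this section. A minimal sketch of their likely
shape, assuming they are thin wrappers over the existing field:

    static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
    {
        /* assumption: plain field read; the real header may differ */
        return tb->page_addr[0];
    }

    static inline void tb_set_page_addr1(TranslationBlock *tb,
                                         tb_page_addr_t addr)
    {
        tb->page_addr[1] = addr;
    }

Funneling every access through one helper lets a later patch change the
underlying representation without touching the many call sites converted
below.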
diff --git a/.gitignore b/.gitignore index 8aab671265..61fa39967b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,13 @@ /GNUmakefile /build/ /.cache/ +/.vscode/ *.pyc .sdk .stgit-* .git-submodule-status +.clang-format +.gdb_history cscope.* tags TAGS diff --git a/MAINTAINERS b/MAINTAINERS index e3d5b7e09c..32e495e165 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1915,7 +1915,7 @@ SSI M: Alistair Francis <alistair@alistair23.me> S: Maintained F: hw/ssi/* -F: hw/block/m25p80.c +F: hw/block/m25p80* F: include/hw/ssi/ssi.h X: hw/ssi/xilinx_* F: tests/qtest/m25p80-test.c diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index f9e5cc9ba0..82b06c1824 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -187,13 +187,14 @@ static bool tb_lookup_cmp(const void *p, const void *d) const struct tb_desc *desc = d; if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) && - tb->page_addr[0] == desc->page_addr0 && + tb_page_addr0(tb) == desc->page_addr0 && tb->cs_base == desc->cs_base && tb->flags == desc->flags && tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && tb_cflags(tb) == desc->cflags) { /* check next page if needed */ - if (tb->page_addr[1] == -1) { + tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb); + if (tb_phys_page1 == -1) { return true; } else { tb_page_addr_t phys_page1; @@ -210,7 +211,7 @@ static bool tb_lookup_cmp(const void *p, const void *d) */ virt_page1 = TARGET_PAGE_ALIGN(desc->pc); phys_page1 = get_page_addr_code(desc->env, virt_page1); - if (tb->page_addr[1] == phys_page1) { + if (tb_phys_page1 == phys_page1) { return true; } } @@ -304,16 +305,12 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu, } } -static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, - uint32_t *cflags) +static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc, + uint32_t *cflags) { CPUBreakpoint *bp; bool match_page = false; - if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) { - return false; - } - /* * Singlestep overrides breakpoints. * This requirement is visible in the record-replay tests, where @@ -374,6 +371,13 @@ static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, return false; } +static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc, + uint32_t *cflags) +{ + return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) && + check_for_breakpoints_slow(cpu, pc, cflags); +} + /** * helper_lookup_tb_ptr: quick check for next tb * @env: current cpu state @@ -1016,7 +1020,7 @@ int cpu_exec(CPUState *cpu) * direct jump to a TB spanning two pages because the mapping * for the second page can change. */ - if (tb->page_addr[1] != -1) { + if (tb_page_addr1(tb) != -1) { last_tb = NULL; } #endif diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h index dc800fd485..1227bb69bd 100644 --- a/accel/tcg/internal.h +++ b/accel/tcg/internal.h @@ -11,12 +11,103 @@ #include "exec/exec-all.h" +/* + * Access to the various translations structures need to be serialised + * via locks for consistency. In user-mode emulation access to the + * memory related structures are protected with mmap_lock. + * In !user-mode we use per-page locks. + */ +#ifdef CONFIG_SOFTMMU +#define assert_memory_lock() +#else +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#endif + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + uintptr_t first_tb; +#ifdef CONFIG_USER_ONLY + unsigned long flags; + void *target_data; +#endif +#ifdef CONFIG_SOFTMMU + QemuSpin lock; +#endif +} PageDesc; + +/* Size of the L2 (and L3, etc) page tables. 
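+ * The page table is a radix tree: the high bits of a page index select
+ * one of v_l1_size entries in l1_map, then v_l2_levels levels of
+ * V_L2_SIZE pointers each lead to a leaf array of PageDesc. A simplified
+ * sketch of the walk (illustration only; the real lookup also handles
+ * allocation and atomic reads):
+ *
+ *     void **lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
+ *     for (int i = v_l2_levels; i > 0; i--) {
+ *         lp = (void **)*lp + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
+ *     }
+ *     PageDesc *pd = (PageDesc *)*lp + (index & (V_L2_SIZE - 1));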
*/ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* + * L1 Mapping properties + */ +extern int v_l1_size; +extern int v_l1_shift; +extern int v_l2_levels; + +/* + * The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. + */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +extern void *l1_map[V_L1_MAX_SIZE]; + +PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc); + +static inline PageDesc *page_find(tb_page_addr_t index) +{ + return page_find_alloc(index, false); +} + +/* list iterators for lists of tagged pointers in TranslationBlock */ +#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ + for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ + tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ + tb = (TranslationBlock *)((uintptr_t)tb & ~1)) + +#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ + TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) + +#define TB_FOR_EACH_JMP(head_tb, tb, n) \ + TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) + +/* In user-mode page locks aren't used; mmap_lock is enough */ +#ifdef CONFIG_USER_ONLY +#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) +static inline void page_lock(PageDesc *pd) { } +static inline void page_unlock(PageDesc *pd) { } +#else +#ifdef CONFIG_DEBUG_TCG +void do_assert_page_locked(const PageDesc *pd, const char *file, int line); +#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) +#else +#define assert_page_locked(pd) +#endif +void page_lock(PageDesc *pd); +void page_unlock(PageDesc *pd); +#endif +#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG) +void assert_no_pages_locked(void); +#else +static inline void assert_no_pages_locked(void) { } +#endif + TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags); G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); void page_init(void); void tb_htable_init(void); +void tb_reset_jump(TranslationBlock *tb, int n); +TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2); +bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc); +int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc, bool reset_icount); /* Return the current PC from CPU, which may be cached in TB. */ static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index 7a0a79d731..75e1dffb4d 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -3,6 +3,7 @@ tcg_ss.add(files( 'tcg-all.c', 'cpu-exec-common.c', 'cpu-exec.c', + 'tb-maint.c', 'tcg-runtime-gvec.c', 'tcg-runtime.c', 'translate-all.c', diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c new file mode 100644 index 0000000000..c8e921089d --- /dev/null +++ b/accel/tcg/tb-maint.c @@ -0,0 +1,704 @@ +/* + * Translation Block Maintaince + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "exec/cputlb.h" +#include "exec/log.h" +#include "exec/exec-all.h" +#include "exec/translate-all.h" +#include "sysemu/tcg.h" +#include "tcg/tcg.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + + +static bool tb_cmp(const void *ap, const void *bp) +{ + const TranslationBlock *a = ap; + const TranslationBlock *b = bp; + + return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) && + a->cs_base == b->cs_base && + a->flags == b->flags && + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && + a->trace_vcpu_dstate == b->trace_vcpu_dstate && + tb_page_addr0(a) == tb_page_addr0(b) && + tb_page_addr1(a) == tb_page_addr1(b)); +} + +void tb_htable_init(void) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. */ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_lock(&pd[i]); + pd[i].first_tb = (uintptr_t)NULL; + page_unlock(&pd[i]); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(void) +{ + int i, l1_sz = v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(v_l2_levels, l1_map + i); + } +} + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + bool did_flush = false; + + mmap_lock(); + /* If it is already been done on request of another CPU, just retry. 
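+ * tb_flush_count is a generation number: tb_flush() snapshots it and
+ * schedules this function with the snapshot, so if another CPU's flush
+ * completed in the meantime the counts no longer match and the request
+ * degenerates to a no-op, e.g.:
+ *
+ *     unsigned gen = qatomic_mb_read(&tb_ctx.tb_flush_count);
+ *     async_safe_run_on_cpu(cpu, do_tb_flush, RUN_ON_CPU_HOST_INT(gen));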
*/ + if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + did_flush = true; + + CPU_FOREACH(cpu) { + tcg_flush_jmp_cache(cpu); + } + + qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(); + + tcg_region_reset_all(); + /* XXX: flush processor icache at this point if cache flush is expensive */ + qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); + +done: + mmap_unlock(); + if (did_flush) { + qemu_plugin_flush_cb(); + } +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); + + if (cpu_in_exclusive_context(cpu)) { + do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); + } else { + async_safe_run_on_cpu(cpu, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); + } + } +} + +/* + * user-mode: call with mmap_lock held + * !user-mode: call with @pd->lock held + */ +static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) +{ + TranslationBlock *tb1; + uintptr_t *pprev; + unsigned int n1; + + assert_page_locked(pd); + pprev = &pd->first_tb; + PAGE_FOR_EACH_TB(pd, tb1, n1) { + if (tb1 == tb) { + *pprev = tb1->page_next[n1]; + return; + } + pprev = &tb1->page_next[n1]; + } + g_assert_not_reached(); +} + +/* remove @orig from its @n_orig-th jump list */ +static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) +{ + uintptr_t ptr, ptr_locked; + TranslationBlock *dest; + TranslationBlock *tb; + uintptr_t *pprev; + int n; + + /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ + ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); + dest = (TranslationBlock *)(ptr & ~1); + if (dest == NULL) { + return; + } + + qemu_spin_lock(&dest->jmp_lock); + /* + * While acquiring the lock, the jump might have been removed if the + * destination TB was invalidated; check again. + */ + ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); + if (ptr_locked != ptr) { + qemu_spin_unlock(&dest->jmp_lock); + /* + * The only possibility is that the jump was unlinked via + * tb_jump_unlink(dest). Seeing here another destination would be a bug, + * because we set the LSB above. + */ + g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); + return; + } + /* + * We first acquired the lock, and since the destination pointer matches, + * we know for sure that @orig is in the jmp list. + */ + pprev = &dest->jmp_list_head; + TB_FOR_EACH_JMP(dest, tb, n) { + if (tb == orig && n == n_orig) { + *pprev = tb->jmp_list_next[n]; + /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ + qemu_spin_unlock(&dest->jmp_lock); + return; + } + pprev = &tb->jmp_list_next[n]; + } + g_assert_not_reached(); +} + +/* + * Reset the jump entry 'n' of a TB so that it is not chained to another TB. 
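+ * "Resetting" re-points the patched jump at tb->tc.ptr +
+ * jmp_reset_offset[n], i.e. the fallback code that exits to the main
+ * loop, rather than at a chained successor TB.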
+ */ +void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *dest) +{ + TranslationBlock *tb; + int n; + + qemu_spin_lock(&dest->jmp_lock); + + TB_FOR_EACH_JMP(dest, tb, n) { + tb_reset_jump(tb, n); + qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); + /* No need to clear the list entry; setting the dest ptr is enough */ + } + dest->jmp_list_head = (uintptr_t)NULL; + + qemu_spin_unlock(&dest->jmp_lock); +} + +static void tb_jmp_cache_inval_tb(TranslationBlock *tb) +{ + CPUState *cpu; + + if (TARGET_TB_PCREL) { + /* A TB may be at any virtual address */ + CPU_FOREACH(cpu) { + tcg_flush_jmp_cache(cpu); + } + } else { + uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb)); + + CPU_FOREACH(cpu) { + CPUJumpCache *jc = cpu->tb_jmp_cache; + + if (qatomic_read(&jc->array[h].tb) == tb) { + qatomic_set(&jc->array[h].tb, NULL); + } + } + } +} + +/* + * In user-mode, call with mmap_lock held. + * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' + * locks held. + */ +static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) +{ + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + uint32_t orig_cflags = tb_cflags(tb); + + assert_memory_lock(); + + /* make sure no further incoming jumps will be chained to this TB */ + qemu_spin_lock(&tb->jmp_lock); + qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); + qemu_spin_unlock(&tb->jmp_lock); + + /* remove the TB from the hash list */ + phys_pc = tb_page_addr0(tb); + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), + tb->flags, orig_cflags, tb->trace_vcpu_dstate); + if (!qht_remove(&tb_ctx.htable, tb, h)) { + return; + } + + /* remove the TB from the page list */ + if (rm_from_page_list) { + p = page_find(phys_pc >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + phys_pc = tb_page_addr1(tb); + if (phys_pc != -1) { + p = page_find(phys_pc >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + } + } + + /* remove the TB from the hash list */ + tb_jmp_cache_inval_tb(tb); + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + qatomic_set(&tb_ctx.tb_phys_invalidate_count, + tb_ctx.tb_phys_invalidate_count + 1); +} + +static void tb_phys_invalidate__locked(TranslationBlock *tb) +{ + qemu_thread_jit_write(); + do_tb_phys_invalidate(tb, true); + qemu_thread_jit_execute(); +} + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc) +{ + PageDesc *p1, *p2; + tb_page_addr_t page1; + tb_page_addr_t page2; + + assert_memory_lock(); + g_assert(phys1 != -1); + + page1 = phys1 >> TARGET_PAGE_BITS; + page2 = phys2 >> TARGET_PAGE_BITS; + + p1 = page_find_alloc(page1, alloc); + if (ret_p1) { + *ret_p1 = p1; + } + if (likely(phys2 == -1)) { + page_lock(p1); + return; + } else if (page1 == page2) { + page_lock(p1); + if (ret_p2) { + *ret_p2 = p1; + } + return; + } + p2 = page_find_alloc(page2, alloc); + if (ret_p2) { + *ret_p2 = p2; + } + if (page1 < page2) { + page_lock(p1); + page_lock(p2); + } else { + page_lock(p2); + page_lock(p1); + } +} + +#ifdef CONFIG_USER_ONLY +static inline void page_lock_tb(const TranslationBlock *tb) { } +static inline void page_unlock_tb(const TranslationBlock *tb) { } +#else +/* lock the page(s) of a TB in the 
correct acquisition order */ +static void page_lock_tb(const TranslationBlock *tb) +{ + page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false); +} + +static void page_unlock_tb(const TranslationBlock *tb) +{ + PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS); + + page_unlock(p1); + if (unlikely(tb_page_addr1(tb) != -1)) { + PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS); + + if (p2 != p1) { + page_unlock(p2); + } + } +} +#endif + +/* + * Invalidate one TB. + * Called with mmap_lock held in user-mode. + */ +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) +{ + if (page_addr == -1 && tb_page_addr0(tb) != -1) { + page_lock_tb(tb); + do_tb_phys_invalidate(tb, true); + page_unlock_tb(tb); + } else { + do_tb_phys_invalidate(tb, false); + } +} + +/* + * Add the tb in the target page and protect it if necessary. + * Called with mmap_lock held for user-mode emulation. + * Called with @p->lock held in !user-mode. + */ +static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + assert_page_locked(p); + + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != (uintptr_t)NULL; +#endif + p->first_tb = (uintptr_t)tb | n; + +#if defined(CONFIG_USER_ONLY) + /* translator_loop() must have made all TB pages non-writable */ + assert(!(p->flags & PAGE_WRITE)); +#else + /* + * If some code is already present, then the pages are already + * protected. So we handle the case where only the first TB is + * allocated in a physical page. + */ + if (!page_already_protected) { + tlb_protect_code(page_addr); + } +#endif +} + +/* + * Add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + * + * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. + * Note that in !user-mode, another thread might have already added a TB + * for the same block of guest code that @tb corresponds to. In that case, + * the caller should discard the original @tb, and use instead the returned TB. + */ +TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + PageDesc *p; + PageDesc *p2 = NULL; + void *existing_tb = NULL; + uint32_t h; + + assert_memory_lock(); + tcg_debug_assert(!(tb->cflags & CF_INVALID)); + + /* + * Add the TB to the page list, acquiring first the pages's locks. + * We keep the locks held until after inserting the TB in the hash table, + * so that if the insertion fails we know for sure that the TBs are still + * in the page descriptors. + * Note that inserting into the hash table first isn't an option, since + * we can only insert TBs that are fully initialized. + */ + page_lock_pair(&p, phys_pc, &p2, phys_page2, true); + tb_page_add(p, tb, 0, phys_pc); + if (p2) { + tb_page_add(p2, tb, 1, phys_page2); + } + + /* add in the hash table */ + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), + tb->flags, tb->cflags, tb->trace_vcpu_dstate); + qht_insert(&tb_ctx.htable, tb, h, &existing_tb); + + /* remove TB from the page(s) if we couldn't insert it */ + if (unlikely(existing_tb)) { + tb_page_remove(p, tb); + if (p2) { + tb_page_remove(p2, tb); + } + tb = existing_tb; + } + + if (p2 && p2 != p) { + page_unlock(p2); + } + page_unlock(p); + return tb; +} + +/* + * @p must be non-NULL. 
+ * user-mode: call with mmap_lock held. + * !user-mode: call with all @pages locked. + */ +static void +tb_invalidate_phys_page_range__locked(struct page_collection *pages, + PageDesc *p, tb_page_addr_t start, + tb_page_addr_t end, + uintptr_t retaddr) +{ + TranslationBlock *tb; + tb_page_addr_t tb_start, tb_end; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + CPUState *cpu = current_cpu; + bool current_tb_not_found = retaddr != 0; + bool current_tb_modified = false; + TranslationBlock *current_tb = NULL; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_page_locked(p); + + /* + * We remove all the TBs in the range [start, end[. + * XXX: see if in some cases it could be faster to invalidate all the code + */ + PAGE_FOR_EACH_TB(p, tb, n) { + assert_page_locked(p); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb_page_addr0(tb); + tb_end = tb_start + tb->size; + } else { + tb_start = tb_page_addr1(tb); + tb_end = tb_start + ((tb_page_addr0(tb) + tb->size) + & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = false; + /* now we have a real cpu fault */ + current_tb = tcg_tb_lookup(retaddr); + } + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop + * its execution. We could be more precise by checking + * that the modification is after the current PC, but it + * would require a specialized function to partially + * restore the CPU state. + */ + current_tb_modified = true; + cpu_restore_state_from_tb(cpu, current_tb, retaddr, true); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate__locked(tb); + } + } +#if !defined(CONFIG_USER_ONLY) + /* if no code remaining, no need to continue to use slow writes */ + if (!p->first_tb) { + tlb_unprotect_code(start); + } +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + page_collection_unlock(pages); + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); + } +#endif +} + +/* + * Invalidate all TBs which intersect with the target physical + * address page @addr. + * + * Called with mmap_lock held for user-mode emulation + */ +void tb_invalidate_phys_page(tb_page_addr_t addr) +{ + struct page_collection *pages; + tb_page_addr_t start, end; + PageDesc *p; + + assert_memory_lock(); + + p = page_find(addr >> TARGET_PAGE_BITS); + if (p == NULL) { + return; + } + + start = addr & TARGET_PAGE_MASK; + end = start + TARGET_PAGE_SIZE; + pages = page_collection_lock(start, end); + tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); + page_collection_unlock(pages); +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with mmap_lock held for user-mode emulation. 
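+ *
+ * An illustrative caller (not taken from this patch) would be code that
+ * has just rewritten a span of guest RAM:
+ *
+ *     tb_invalidate_phys_range(ram_addr, ram_addr + len);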
+ */ +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *pages; + tb_page_addr_t next; + + assert_memory_lock(); + + pages = page_collection_lock(start, end); + for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + start < end; + start = next, next += TARGET_PAGE_SIZE) { + PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); + tb_page_addr_t bound = MIN(next, end); + + if (pd == NULL) { + continue; + } + tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); + } + page_collection_unlock(pages); +} + +#ifdef CONFIG_SOFTMMU +/* + * len must be <= 8 and start must be a multiple of len. + * Called via softmmu_template.h when code areas are written to with + * iothread mutex not held. + * + * Call with all @pages in the range [@start, @start + len[ locked. + */ +void tb_invalidate_phys_page_fast(struct page_collection *pages, + tb_page_addr_t start, int len, + uintptr_t retaddr) +{ + PageDesc *p; + + assert_memory_lock(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + + assert_page_locked(p); + tb_invalidate_phys_page_range__locked(pages, p, start, start + len, + retaddr); +} +#else +/* + * Called with mmap_lock held. If pc is not 0 then it indicates the + * host PC of the faulting store instruction that caused this invalidate. + * Returns true if the caller needs to abort execution of the current + * TB (because it was modified by this store and the guest CPU has + * precise-SMC semantics). + */ +bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) +{ + TranslationBlock *tb; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = NULL; + CPUState *cpu = current_cpu; + bool current_tb_modified = false; +#endif + + assert_memory_lock(); + + addr &= TARGET_PAGE_MASK; + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return false; + } + +#ifdef TARGET_HAS_PRECISE_SMC + if (p->first_tb && pc != 0) { + current_tb = tcg_tb_lookup(pc); + } +#endif + assert_page_locked(p); + PAGE_FOR_EACH_TB(p, tb, n) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop its execution. + * We could be more precise by checking that the modification is + * after the current PC, but it would require a specialized + * function to partially restore the CPU state. + */ + current_tb_modified = true; + cpu_restore_state_from_tb(cpu, current_tb, pc, true); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(tb, addr); + } + p->first_tb = (uintptr_t)NULL; +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* Force execution of one insn next time. 
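+ * (The 1 lands in CF_COUNT_MASK, requesting a TB of exactly one
+ * instruction; CF_NOIRQ keeps an interrupt from preempting that
+ * single-insn retry.)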
*/ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + return true; + } +#endif + + return false; +} +#endif diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index ba997f6cfe..d50239e0e2 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -70,8 +70,6 @@ static void *mttcg_cpu_thread_fn(void *arg) assert(tcg_enabled()); g_assert(!icount_enabled()); - tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); - rcu_register_thread(); force_rcu.notifier.notify = mttcg_force_rcu; force_rcu.cpu = cpu; @@ -141,6 +139,9 @@ void mttcg_start_vcpu_thread(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); + cpu->thread = g_new0(QemuThread, 1); cpu->halt_cond = g_malloc0(sizeof(QemuCond)); qemu_cond_init(cpu->halt_cond); diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index cc8adc2380..290833a37f 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -51,7 +51,7 @@ void rr_kick_vcpu_thread(CPUState *unused) * * The kick timer is responsible for moving single threaded vCPU * emulation on to the next vCPU. If more than one vCPU is running a - * timer event with force a cpu->exit so the next vCPU can get + * timer event we force a cpu->exit so the next vCPU can get * scheduled. * * The timer is removed if all vCPUs are idle and restarted again once @@ -152,9 +152,7 @@ static void *rr_cpu_thread_fn(void *arg) Notifier force_rcu; CPUState *cpu = arg; - g_assert(tcg_enabled()); - tcg_cpu_init_cflags(cpu, false); - + assert(tcg_enabled()); rcu_register_thread(); force_rcu.notify = rr_force_rcu; rcu_add_force_rcu_notifier(&force_rcu); @@ -277,6 +275,9 @@ void rr_start_vcpu_thread(CPUState *cpu) static QemuCond *single_tcg_halt_cond; static QemuThread *single_tcg_cpu_thread; + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, false); + if (!single_tcg_cpu_thread) { cpu->thread = g_new0(QemuThread, 1); cpu->halt_cond = g_new0(QemuCond, 1); diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 4ed75a13e1..f185356a36 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -63,57 +63,7 @@ #include "tb-context.h" #include "internal.h" -/* #define DEBUG_TB_INVALIDATE */ -/* #define DEBUG_TB_FLUSH */ /* make various TB consistency checks */ -/* #define DEBUG_TB_CHECK */ - -#ifdef DEBUG_TB_INVALIDATE -#define DEBUG_TB_INVALIDATE_GATE 1 -#else -#define DEBUG_TB_INVALIDATE_GATE 0 -#endif - -#ifdef DEBUG_TB_FLUSH -#define DEBUG_TB_FLUSH_GATE 1 -#else -#define DEBUG_TB_FLUSH_GATE 0 -#endif - -#if !defined(CONFIG_USER_ONLY) -/* TB consistency checks only implemented for usermode emulation. */ -#undef DEBUG_TB_CHECK -#endif - -#ifdef DEBUG_TB_CHECK -#define DEBUG_TB_CHECK_GATE 1 -#else -#define DEBUG_TB_CHECK_GATE 0 -#endif - -/* Access to the various translations structures need to be serialised via locks - * for consistency. - * In user-mode emulation access to the memory related structures are protected - * with mmap_lock. - * In !user-mode we use per-page locks. 
- */ -#ifdef CONFIG_SOFTMMU -#define assert_memory_lock() -#else -#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) -#endif - -typedef struct PageDesc { - /* list of TBs intersecting this ram page */ - uintptr_t first_tb; -#ifdef CONFIG_USER_ONLY - unsigned long flags; - void *target_data; -#endif -#ifdef CONFIG_SOFTMMU - QemuSpin lock; -#endif -} PageDesc; /** * struct page_entry - page descriptor entry @@ -159,18 +109,6 @@ struct page_collection { struct page_entry *max; }; -/* list iterators for lists of tagged pointers in TranslationBlock */ -#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ - for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ - tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ - tb = (TranslationBlock *)((uintptr_t)tb & ~1)) - -#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ - TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) - -#define TB_FOR_EACH_JMP(head_tb, tb, n) \ - TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) - /* * In system mode we want L1_MAP to be based on ram offsets, * while in user mode we want it to be based on virtual addresses. @@ -188,10 +126,6 @@ struct page_collection { # define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS) #endif -/* Size of the L2 (and L3, etc) page tables. */ -#define V_L2_BITS 10 -#define V_L2_SIZE (1 << V_L2_BITS) - /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > sizeof_field(TranslationBlock, trace_vcpu_dstate) @@ -200,18 +134,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > /* * L1 Mapping properties */ -static int v_l1_size; -static int v_l1_shift; -static int v_l2_levels; +int v_l1_size; +int v_l1_shift; +int v_l2_levels; -/* The bottom level has pointers to PageDesc, and is indexed by - * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. - */ -#define V_L1_MIN_BITS 4 -#define V_L1_MAX_BITS (V_L2_BITS + 3) -#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) - -static void *l1_map[V_L1_MAX_SIZE]; +void *l1_map[V_L1_MAX_SIZE]; TBContext tb_ctx; @@ -324,12 +251,11 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) * When reset_icount is true, current TB will be interrupted and * icount should be recalculated. */ -static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, - uintptr_t searched_pc, bool reset_icount) +int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc, bool reset_icount) { - target_ulong data[TARGET_INSN_START_WORDS]; + uint64_t data[TARGET_INSN_START_WORDS]; uintptr_t host_pc = (uintptr_t)tb->tc.ptr; - CPUArchState *env = cpu->env_ptr; const uint8_t *p = tb->tc.ptr + tb->tc.size; int i, j, num_insns = tb->icount; #ifdef CONFIG_PROFILER @@ -368,7 +294,8 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, and shift if to the number of actually executed instructions */ cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; } - restore_state_to_opc(env, tb, data); + + cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data); #ifdef CONFIG_PROFILER qatomic_set(&prof->restore_time, @@ -381,6 +308,14 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) { /* + * The pc update associated with restore without exit will + * break the relative pc adjustments performed by TARGET_TB_PCREL. 
+ */ + if (TARGET_TB_PCREL) { + assert(will_exit); + } + + /* * The host_pc has to be in the rx region of the code buffer. * If it is not we will not be able to resolve it here. * The two cases where host_pc will not be correct are: @@ -471,7 +406,7 @@ void page_init(void) #endif } -static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) +PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) { PageDesc *pd; void **lp; @@ -537,31 +472,8 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) return pd + (index & (V_L2_SIZE - 1)); } -static inline PageDesc *page_find(tb_page_addr_t index) -{ - return page_find_alloc(index, false); -} - -static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, - PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc); - /* In user-mode page locks aren't used; mmap_lock is enough */ #ifdef CONFIG_USER_ONLY - -#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) - -static inline void page_lock(PageDesc *pd) -{ } - -static inline void page_unlock(PageDesc *pd) -{ } - -static inline void page_lock_tb(const TranslationBlock *tb) -{ } - -static inline void page_unlock_tb(const TranslationBlock *tb) -{ } - struct page_collection * page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) { @@ -610,8 +522,7 @@ static void page_unlock__debug(const PageDesc *pd) g_assert(removed); } -static void -do_assert_page_locked(const PageDesc *pd, const char *file, int line) +void do_assert_page_locked(const PageDesc *pd, const char *file, int line) { if (unlikely(!page_is_locked(pd))) { error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", @@ -620,8 +531,6 @@ do_assert_page_locked(const PageDesc *pd, const char *file, int line) } } -#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) - void assert_no_pages_locked(void) { ht_pages_locked_debug_init(); @@ -630,50 +539,23 @@ void assert_no_pages_locked(void) #else /* !CONFIG_DEBUG_TCG */ -#define assert_page_locked(pd) - -static inline void page_lock__debug(const PageDesc *pd) -{ -} - -static inline void page_unlock__debug(const PageDesc *pd) -{ -} +static inline void page_lock__debug(const PageDesc *pd) { } +static inline void page_unlock__debug(const PageDesc *pd) { } #endif /* CONFIG_DEBUG_TCG */ -static inline void page_lock(PageDesc *pd) +void page_lock(PageDesc *pd) { page_lock__debug(pd); qemu_spin_lock(&pd->lock); } -static inline void page_unlock(PageDesc *pd) +void page_unlock(PageDesc *pd) { qemu_spin_unlock(&pd->lock); page_unlock__debug(pd); } -/* lock the page(s) of a TB in the correct acquisition order */ -static inline void page_lock_tb(const TranslationBlock *tb) -{ - page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false); -} - -static inline void page_unlock_tb(const TranslationBlock *tb) -{ - PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); - - page_unlock(p1); - if (unlikely(tb->page_addr[1] != -1)) { - PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); - - if (p2 != p1) { - page_unlock(p2); - } - } -} - static inline struct page_entry * page_entry_new(PageDesc *pd, tb_page_addr_t index) { @@ -824,9 +706,9 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) } assert_page_locked(pd); PAGE_FOR_EACH_TB(pd, tb, n) { - if (page_trylock_add(set, tb->page_addr[0]) || - (tb->page_addr[1] != -1 && - page_trylock_add(set, tb->page_addr[1]))) { + if (page_trylock_add(set, tb_page_addr0(tb)) || + (tb_page_addr1(tb) != -1 && + page_trylock_add(set, tb_page_addr1(tb)))) { /* 
drop all locks, and reacquire in order */ g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; @@ -845,509 +727,6 @@ void page_collection_unlock(struct page_collection *set) #endif /* !CONFIG_USER_ONLY */ -static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, - PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc) -{ - PageDesc *p1, *p2; - tb_page_addr_t page1; - tb_page_addr_t page2; - - assert_memory_lock(); - g_assert(phys1 != -1); - - page1 = phys1 >> TARGET_PAGE_BITS; - page2 = phys2 >> TARGET_PAGE_BITS; - - p1 = page_find_alloc(page1, alloc); - if (ret_p1) { - *ret_p1 = p1; - } - if (likely(phys2 == -1)) { - page_lock(p1); - return; - } else if (page1 == page2) { - page_lock(p1); - if (ret_p2) { - *ret_p2 = p1; - } - return; - } - p2 = page_find_alloc(page2, alloc); - if (ret_p2) { - *ret_p2 = p2; - } - if (page1 < page2) { - page_lock(p1); - page_lock(p2); - } else { - page_lock(p2); - page_lock(p1); - } -} - -static bool tb_cmp(const void *ap, const void *bp) -{ - const TranslationBlock *a = ap; - const TranslationBlock *b = bp; - - return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) && - a->cs_base == b->cs_base && - a->flags == b->flags && - (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && - a->trace_vcpu_dstate == b->trace_vcpu_dstate && - a->page_addr[0] == b->page_addr[0] && - a->page_addr[1] == b->page_addr[1]); -} - -void tb_htable_init(void) -{ - unsigned int mode = QHT_MODE_AUTO_RESIZE; - - qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); -} - -/* Set to NULL all the 'first_tb' fields in all PageDescs. */ -static void page_flush_tb_1(int level, void **lp) -{ - int i; - - if (*lp == NULL) { - return; - } - if (level == 0) { - PageDesc *pd = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - page_lock(&pd[i]); - pd[i].first_tb = (uintptr_t)NULL; - page_unlock(&pd[i]); - } - } else { - void **pp = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - page_flush_tb_1(level - 1, pp + i); - } - } -} - -static void page_flush_tb(void) -{ - int i, l1_sz = v_l1_size; - - for (i = 0; i < l1_sz; i++) { - page_flush_tb_1(v_l2_levels, l1_map + i); - } -} - -static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) -{ - const TranslationBlock *tb = value; - size_t *size = data; - - *size += tb->tc.size; - return false; -} - -/* flush all the translation blocks */ -static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) -{ - bool did_flush = false; - - mmap_lock(); - /* If it is already been done on request of another CPU, - * just retry. - */ - if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { - goto done; - } - did_flush = true; - - if (DEBUG_TB_FLUSH_GATE) { - size_t nb_tbs = tcg_nb_tbs(); - size_t host_size = 0; - - tcg_tb_foreach(tb_host_size_iter, &host_size); - printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", - tcg_code_size(), nb_tbs, nb_tbs > 0 ? 
host_size / nb_tbs : 0); - } - - CPU_FOREACH(cpu) { - tcg_flush_jmp_cache(cpu); - } - - qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); - page_flush_tb(); - - tcg_region_reset_all(); - /* XXX: flush processor icache at this point if cache flush is - expensive */ - qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); - -done: - mmap_unlock(); - if (did_flush) { - qemu_plugin_flush_cb(); - } -} - -void tb_flush(CPUState *cpu) -{ - if (tcg_enabled()) { - unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); - - if (cpu_in_exclusive_context(cpu)) { - do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); - } else { - async_safe_run_on_cpu(cpu, do_tb_flush, - RUN_ON_CPU_HOST_INT(tb_flush_count)); - } - } -} - -/* - * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, - * so in order to prevent bit rot we compile them unconditionally in user-mode, - * and let the optimizer get rid of them by wrapping their user-only callers - * with if (DEBUG_TB_CHECK_GATE). - */ -#ifdef CONFIG_USER_ONLY - -static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) -{ - TranslationBlock *tb = p; - target_ulong addr = *(target_ulong *)userp; - - if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) || - addr >= tb_pc(tb) + tb->size)) { - printf("ERROR invalidate: address=" TARGET_FMT_lx - " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size); - } -} - -/* verify that all the pages have correct rights for code - * - * Called with mmap_lock held. - */ -static void tb_invalidate_check(target_ulong address) -{ - address &= TARGET_PAGE_MASK; - qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); -} - -static void do_tb_page_check(void *p, uint32_t hash, void *userp) -{ - TranslationBlock *tb = p; - int flags1, flags2; - - flags1 = page_get_flags(tb_pc(tb)); - flags2 = page_get_flags(tb_pc(tb) + tb->size - 1); - if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { - printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", - (long)tb_pc(tb), tb->size, flags1, flags2); - } -} - -/* verify that all the pages have correct rights for code */ -static void tb_page_check(void) -{ - qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); -} - -#endif /* CONFIG_USER_ONLY */ - -/* - * user-mode: call with mmap_lock held - * !user-mode: call with @pd->lock held - */ -static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) -{ - TranslationBlock *tb1; - uintptr_t *pprev; - unsigned int n1; - - assert_page_locked(pd); - pprev = &pd->first_tb; - PAGE_FOR_EACH_TB(pd, tb1, n1) { - if (tb1 == tb) { - *pprev = tb1->page_next[n1]; - return; - } - pprev = &tb1->page_next[n1]; - } - g_assert_not_reached(); -} - -/* remove @orig from its @n_orig-th jump list */ -static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) -{ - uintptr_t ptr, ptr_locked; - TranslationBlock *dest; - TranslationBlock *tb; - uintptr_t *pprev; - int n; - - /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ - ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); - dest = (TranslationBlock *)(ptr & ~1); - if (dest == NULL) { - return; - } - - qemu_spin_lock(&dest->jmp_lock); - /* - * While acquiring the lock, the jump might have been removed if the - * destination TB was invalidated; check again. - */ - ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); - if (ptr_locked != ptr) { - qemu_spin_unlock(&dest->jmp_lock); - /* - * The only possibility is that the jump was unlinked via - * tb_jump_unlink(dest). 
Seeing here another destination would be a bug, - * because we set the LSB above. - */ - g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); - return; - } - /* - * We first acquired the lock, and since the destination pointer matches, - * we know for sure that @orig is in the jmp list. - */ - pprev = &dest->jmp_list_head; - TB_FOR_EACH_JMP(dest, tb, n) { - if (tb == orig && n == n_orig) { - *pprev = tb->jmp_list_next[n]; - /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ - qemu_spin_unlock(&dest->jmp_lock); - return; - } - pprev = &tb->jmp_list_next[n]; - } - g_assert_not_reached(); -} - -/* reset the jump entry 'n' of a TB so that it is not chained to - another TB */ -static inline void tb_reset_jump(TranslationBlock *tb, int n) -{ - uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); - tb_set_jmp_target(tb, n, addr); -} - -/* remove any jumps to the TB */ -static inline void tb_jmp_unlink(TranslationBlock *dest) -{ - TranslationBlock *tb; - int n; - - qemu_spin_lock(&dest->jmp_lock); - - TB_FOR_EACH_JMP(dest, tb, n) { - tb_reset_jump(tb, n); - qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); - /* No need to clear the list entry; setting the dest ptr is enough */ - } - dest->jmp_list_head = (uintptr_t)NULL; - - qemu_spin_unlock(&dest->jmp_lock); -} - -static void tb_jmp_cache_inval_tb(TranslationBlock *tb) -{ - CPUState *cpu; - - if (TARGET_TB_PCREL) { - /* A TB may be at any virtual address */ - CPU_FOREACH(cpu) { - tcg_flush_jmp_cache(cpu); - } - } else { - uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb)); - - CPU_FOREACH(cpu) { - CPUJumpCache *jc = cpu->tb_jmp_cache; - - if (qatomic_read(&jc->array[h].tb) == tb) { - qatomic_set(&jc->array[h].tb, NULL); - } - } - } -} - -/* - * In user-mode, call with mmap_lock held. - * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' - * locks held. - */ -static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) -{ - PageDesc *p; - uint32_t h; - tb_page_addr_t phys_pc; - uint32_t orig_cflags = tb_cflags(tb); - - assert_memory_lock(); - - /* make sure no further incoming jumps will be chained to this TB */ - qemu_spin_lock(&tb->jmp_lock); - qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); - qemu_spin_unlock(&tb->jmp_lock); - - /* remove the TB from the hash list */ - phys_pc = tb->page_addr[0]; - h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), - tb->flags, orig_cflags, tb->trace_vcpu_dstate); - if (!qht_remove(&tb_ctx.htable, tb, h)) { - return; - } - - /* remove the TB from the page list */ - if (rm_from_page_list) { - p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); - tb_page_remove(p, tb); - if (tb->page_addr[1] != -1) { - p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); - tb_page_remove(p, tb); - } - } - - /* remove the TB from the hash list */ - tb_jmp_cache_inval_tb(tb); - - /* suppress this TB from the two jump lists */ - tb_remove_from_jmp_list(tb, 0); - tb_remove_from_jmp_list(tb, 1); - - /* suppress any remaining jumps to this TB */ - tb_jmp_unlink(tb); - - qatomic_set(&tb_ctx.tb_phys_invalidate_count, - tb_ctx.tb_phys_invalidate_count + 1); -} - -static void tb_phys_invalidate__locked(TranslationBlock *tb) -{ - qemu_thread_jit_write(); - do_tb_phys_invalidate(tb, true); - qemu_thread_jit_execute(); -} - -/* invalidate one TB - * - * Called with mmap_lock held in user-mode. 
- */ -void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) -{ - if (page_addr == -1 && tb->page_addr[0] != -1) { - page_lock_tb(tb); - do_tb_phys_invalidate(tb, true); - page_unlock_tb(tb); - } else { - do_tb_phys_invalidate(tb, false); - } -} - -/* add the tb in the target page and protect it if necessary - * - * Called with mmap_lock held for user-mode emulation. - * Called with @p->lock held in !user-mode. - */ -static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, - unsigned int n, tb_page_addr_t page_addr) -{ -#ifndef CONFIG_USER_ONLY - bool page_already_protected; -#endif - - assert_page_locked(p); - - tb->page_addr[n] = page_addr; - tb->page_next[n] = p->first_tb; -#ifndef CONFIG_USER_ONLY - page_already_protected = p->first_tb != (uintptr_t)NULL; -#endif - p->first_tb = (uintptr_t)tb | n; - -#if defined(CONFIG_USER_ONLY) - /* translator_loop() must have made all TB pages non-writable */ - assert(!(p->flags & PAGE_WRITE)); -#else - /* if some code is already present, then the pages are already - protected. So we handle the case where only the first TB is - allocated in a physical page */ - if (!page_already_protected) { - tlb_protect_code(page_addr); - } -#endif -} - -/* - * Add a new TB and link it to the physical page tables. phys_page2 is - * (-1) to indicate that only one page contains the TB. - * - * Called with mmap_lock held for user-mode emulation. - * - * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. - * Note that in !user-mode, another thread might have already added a TB - * for the same block of guest code that @tb corresponds to. In that case, - * the caller should discard the original @tb, and use instead the returned TB. - */ -static TranslationBlock * -tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, - tb_page_addr_t phys_page2) -{ - PageDesc *p; - PageDesc *p2 = NULL; - void *existing_tb = NULL; - uint32_t h; - - assert_memory_lock(); - tcg_debug_assert(!(tb->cflags & CF_INVALID)); - - /* - * Add the TB to the page list, acquiring first the pages's locks. - * We keep the locks held until after inserting the TB in the hash table, - * so that if the insertion fails we know for sure that the TBs are still - * in the page descriptors. - * Note that inserting into the hash table first isn't an option, since - * we can only insert TBs that are fully initialized. - */ - page_lock_pair(&p, phys_pc, &p2, phys_page2, true); - tb_page_add(p, tb, 0, phys_pc); - if (p2) { - tb_page_add(p2, tb, 1, phys_page2); - } else { - tb->page_addr[1] = -1; - } - - /* add in the hash table */ - h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), - tb->flags, tb->cflags, tb->trace_vcpu_dstate); - qht_insert(&tb_ctx.htable, tb, h, &existing_tb); - - /* remove TB from the page(s) if we couldn't insert it */ - if (unlikely(existing_tb)) { - tb_page_remove(p, tb); - if (p2) { - tb_page_remove(p2, tb); - } - tb = existing_tb; - } - - if (p2 && p2 != p) { - page_unlock(p2); - } - page_unlock(p); - -#ifdef CONFIG_USER_ONLY - if (DEBUG_TB_CHECK_GATE) { - tb_page_check(); - } -#endif - return tb; -} - /* Called with mmap_lock held for user mode emulation. 
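 * On a hash-table collision, tb_link_page() returns the TB that is
 * already present; tb_gen_code() then discards its own translation and
 * returns that existing TB, so callers must use the returned pointer.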
*/ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, @@ -1400,8 +779,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, tb->flags = flags; tb->cflags = cflags; tb->trace_vcpu_dstate = *cpu->trace_dstate; - tb->page_addr[0] = phys_pc; - tb->page_addr[1] = -1; + tb_set_page_addr0(tb, phys_pc); + tb_set_page_addr1(tb, -1); tcg_ctx->tb_cflags = cflags; tb_overflow: @@ -1599,7 +978,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, * a temporary one-insn TB, and we have nothing left to do. Return early * before attempting to link to other TBs or add to the lookup table. */ - if (tb->page_addr[0] == -1) { + if (tb_page_addr0(tb) == -1) { return tb; } @@ -1614,7 +993,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, * No explicit memory barrier is required -- tb_link_page() makes the * TB visible in a consistent state. */ - existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]); + existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb)); /* if the TB already exists, discard what we just translated */ if (unlikely(existing_tb != tb)) { uintptr_t orig_aligned = (uintptr_t)gen_code_buf; @@ -1627,251 +1006,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, return tb; } -/* - * @p must be non-NULL. - * user-mode: call with mmap_lock held. - * !user-mode: call with all @pages locked. - */ -static void -tb_invalidate_phys_page_range__locked(struct page_collection *pages, - PageDesc *p, tb_page_addr_t start, - tb_page_addr_t end, - uintptr_t retaddr) -{ - TranslationBlock *tb; - tb_page_addr_t tb_start, tb_end; - int n; -#ifdef TARGET_HAS_PRECISE_SMC - CPUState *cpu = current_cpu; - CPUArchState *env = NULL; - bool current_tb_not_found = retaddr != 0; - bool current_tb_modified = false; - TranslationBlock *current_tb = NULL; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - uint32_t current_flags = 0; -#endif /* TARGET_HAS_PRECISE_SMC */ - - assert_page_locked(p); - -#if defined(TARGET_HAS_PRECISE_SMC) - if (cpu != NULL) { - env = cpu->env_ptr; - } -#endif - - /* we remove all the TBs in the range [start, end[ */ - /* XXX: see if in some cases it could be faster to invalidate all - the code */ - PAGE_FOR_EACH_TB(p, tb, n) { - assert_page_locked(p); - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->page_addr[0]; - tb_end = tb_start + tb->size; - } else { - tb_start = tb->page_addr[1]; - tb_end = tb_start + ((tb->page_addr[0] + tb->size) - & ~TARGET_PAGE_MASK); - } - if (!(tb_end <= start || tb_start >= end)) { -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_not_found) { - current_tb_not_found = false; - /* now we have a real cpu fault */ - current_tb = tcg_tb_lookup(retaddr); - } - if (current_tb == tb && - (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { - /* - * If we are modifying the current TB, we must stop - * its execution. We could be more precise by checking - * that the modification is after the current PC, but it - * would require a specialized function to partially - * restore the CPU state. 
- */ - current_tb_modified = true; - cpu_restore_state_from_tb(cpu, current_tb, retaddr, true); - cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, - ¤t_flags); - } -#endif /* TARGET_HAS_PRECISE_SMC */ - tb_phys_invalidate__locked(tb); - } - } -#if !defined(CONFIG_USER_ONLY) - /* if no code remaining, no need to continue to use slow writes */ - if (!p->first_tb) { - tlb_unprotect_code(start); - } -#endif -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_modified) { - page_collection_unlock(pages); - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - mmap_unlock(); - cpu_loop_exit_noexc(cpu); - } -#endif -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end must refer to the *same* physical page. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. - * - * Called with mmap_lock held for user-mode emulation - */ -void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end) -{ - struct page_collection *pages; - PageDesc *p; - - assert_memory_lock(); - - p = page_find(start >> TARGET_PAGE_BITS); - if (p == NULL) { - return; - } - pages = page_collection_lock(start, end); - tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); - page_collection_unlock(pages); -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end may refer to *different* physical pages. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. - * - * Called with mmap_lock held for user-mode emulation. - */ -#ifdef CONFIG_SOFTMMU -void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end) -#else -void tb_invalidate_phys_range(target_ulong start, target_ulong end) -#endif -{ - struct page_collection *pages; - tb_page_addr_t next; - - assert_memory_lock(); - - pages = page_collection_lock(start, end); - for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - start < end; - start = next, next += TARGET_PAGE_SIZE) { - PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); - tb_page_addr_t bound = MIN(next, end); - - if (pd == NULL) { - continue; - } - tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); - } - page_collection_unlock(pages); -} - -#ifdef CONFIG_SOFTMMU -/* len must be <= 8 and start must be a multiple of len. - * Called via softmmu_template.h when code areas are written to with - * iothread mutex not held. - * - * Call with all @pages in the range [@start, @start + len[ locked. - */ -void tb_invalidate_phys_page_fast(struct page_collection *pages, - tb_page_addr_t start, int len, - uintptr_t retaddr) -{ - PageDesc *p; - - assert_memory_lock(); - - p = page_find(start >> TARGET_PAGE_BITS); - if (!p) { - return; - } - - assert_page_locked(p); - tb_invalidate_phys_page_range__locked(pages, p, start, start + len, - retaddr); -} -#else -/* Called with mmap_lock held. If pc is not 0 then it indicates the - * host PC of the faulting store instruction that caused this invalidate. - * Returns true if the caller needs to abort execution of the current - * TB (because it was modified by this store and the guest CPU has - * precise-SMC semantics). 
- */ -static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) -{ - TranslationBlock *tb; - PageDesc *p; - int n; -#ifdef TARGET_HAS_PRECISE_SMC - TranslationBlock *current_tb = NULL; - CPUState *cpu = current_cpu; - CPUArchState *env = NULL; - int current_tb_modified = 0; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - uint32_t current_flags = 0; -#endif - - assert_memory_lock(); - - addr &= TARGET_PAGE_MASK; - p = page_find(addr >> TARGET_PAGE_BITS); - if (!p) { - return false; - } - -#ifdef TARGET_HAS_PRECISE_SMC - if (p->first_tb && pc != 0) { - current_tb = tcg_tb_lookup(pc); - } - if (cpu != NULL) { - env = cpu->env_ptr; - } -#endif - assert_page_locked(p); - PAGE_FOR_EACH_TB(p, tb, n) { -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb == tb && - (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { - /* If we are modifying the current TB, we must stop - its execution. We could be more precise by checking - that the modification is after the current PC, but it - would require a specialized function to partially - restore the CPU state */ - - current_tb_modified = 1; - cpu_restore_state_from_tb(cpu, current_tb, pc, true); - cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, - ¤t_flags); - } -#endif /* TARGET_HAS_PRECISE_SMC */ - tb_phys_invalidate(tb, addr); - } - p->first_tb = (uintptr_t)NULL; -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_modified) { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - return true; - } -#endif - - return false; -} -#endif - /* user-mode: call with mmap_lock held */ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) { @@ -2014,7 +1148,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) if (tb->size > tst->max_target_size) { tst->max_target_size = tb->size; } - if (tb->page_addr[1] != -1) { + if (tb_page_addr1(tb) != -1) { tst->cross_page++; } if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { @@ -2226,7 +1360,7 @@ int page_get_flags(target_ulong address) void page_set_flags(target_ulong start, target_ulong end, int flags) { target_ulong addr, len; - bool reset_target_data; + bool reset, inval_tb = false; /* This function should never be called with addresses outside the guest address space. If this assert fires, it probably indicates @@ -2243,7 +1377,10 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) if (flags & PAGE_WRITE) { flags |= PAGE_WRITE_ORG; } - reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + reset = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + if (reset) { + page_reset_target_data(start, end); + } flags &= ~PAGE_RESET; for (addr = start, len = end - start; @@ -2251,68 +1388,23 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true); - /* If the write protection bit is set, then we invalidate - the code inside. */ - if (!(p->flags & PAGE_WRITE) && - (flags & PAGE_WRITE) && - p->first_tb) { - tb_invalidate_phys_page(addr, 0); - } - if (reset_target_data) { - g_free(p->target_data); - p->target_data = NULL; - p->flags = flags; - } else { - /* Using mprotect on a page does not change sticky bits. */ - p->flags = (p->flags & PAGE_STICKY) | flags; + /* + * If the page was executable, but is reset, or is no longer + * executable, or has become writable, then invalidate any code. 
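+ * For example, a guest mprotect() that adds PROT_WRITE to a page
+ * holding translated code must drop those TBs before the first write
+ * lands; likewise, dropping PAGE_EXEC means the page's cached
+ * translations can no longer be reached legitimately.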
+ */ + if ((p->flags & PAGE_EXEC) + && (reset || + !(flags & PAGE_EXEC) || + (flags & ~p->flags & PAGE_WRITE))) { + inval_tb = true; } + /* Using mprotect on a page does not change sticky bits. */ + p->flags = (reset ? 0 : p->flags & PAGE_STICKY) | flags; } -} - -void page_reset_target_data(target_ulong start, target_ulong end) -{ - target_ulong addr, len; - - /* - * This function should never be called with addresses outside the - * guest address space. If this assert fires, it probably indicates - * a missing call to h2g_valid. - */ - assert(end - 1 <= GUEST_ADDR_MAX); - assert(start < end); - assert_memory_lock(); - - start = start & TARGET_PAGE_MASK; - end = TARGET_PAGE_ALIGN(end); - - for (addr = start, len = end - start; - len != 0; - len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); - - g_free(p->target_data); - p->target_data = NULL; - } -} -void *page_get_target_data(target_ulong address) -{ - PageDesc *p = page_find(address >> TARGET_PAGE_BITS); - return p ? p->target_data : NULL; -} - -void *page_alloc_target_data(target_ulong address, size_t size) -{ - PageDesc *p = page_find(address >> TARGET_PAGE_BITS); - void *ret = NULL; - - if (p->flags & PAGE_VALID) { - ret = p->target_data; - if (!ret) { - p->target_data = ret = g_malloc0(size); - } + if (inval_tb) { + tb_invalidate_phys_range(start, end); } - return ret; } int page_check_range(target_ulong start, target_ulong len, int flags) @@ -2396,9 +1488,6 @@ void page_protect(tb_page_addr_t page_addr) } mprotect(g2h_untagged(page_addr), qemu_host_page_size, (prot & PAGE_BITS) & ~PAGE_WRITE); - if (DEBUG_TB_INVALIDATE_GATE) { - printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); - } } } @@ -2453,12 +1542,8 @@ int page_unprotect(target_ulong address, uintptr_t pc) /* and since the content will be modified, we must invalidate the corresponding translated code. */ - current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); -#ifdef CONFIG_USER_ONLY - if (DEBUG_TB_CHECK_GATE) { - tb_invalidate_check(addr); - } -#endif + current_tb_invalidated |= + tb_invalidate_phys_page_unwind(addr, pc); } mprotect((void *)g2h_untagged(host_start), qemu_host_page_size, prot & PAGE_BITS); diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 8e78fd7a9c..061519691f 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -157,7 +157,7 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db, tb = db->tb; /* Use slow path if first page is MMIO. */ - if (unlikely(tb->page_addr[0] == -1)) { + if (unlikely(tb_page_addr0(tb) == -1)) { return NULL; } @@ -169,13 +169,14 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db, host = db->host_addr[1]; base = TARGET_PAGE_ALIGN(db->pc_first); if (host == NULL) { - tb->page_addr[1] = + tb_page_addr_t phys_page = get_page_addr_code_hostp(env, base, &db->host_addr[1]); + /* We cannot handle MMIO as second page. */ + assert(phys_page != -1); + tb_set_page_addr1(tb, phys_page); #ifdef CONFIG_USER_ONLY page_protect(end); #endif - /* We cannot handle MMIO as second page. 
*/ - assert(tb->page_addr[1] != -1); host = db->host_addr[1]; } diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 521aa8b61e..fb7d6ee9e9 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -210,6 +210,48 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, return addr; } +void page_reset_target_data(target_ulong start, target_ulong end) +{ +#ifdef TARGET_PAGE_DATA_SIZE + target_ulong addr, len; + + /* + * This function should never be called with addresses outside the + * guest address space. If this assert fires, it probably indicates + * a missing call to h2g_valid. + */ + assert(end - 1 <= GUEST_ADDR_MAX); + assert(start < end); + assert_memory_lock(); + + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + + g_free(p->target_data); + p->target_data = NULL; + } +#endif +} + +#ifdef TARGET_PAGE_DATA_SIZE +void *page_get_target_data(target_ulong address) +{ + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + void *ret = p->target_data; + + if (!ret) { + ret = g_malloc0(TARGET_PAGE_DATA_SIZE); + p->target_data = ret; + } + return ret; +} +#endif + /* The softmmu versions of these helpers are in cputlb.c. */ /* diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index e54e26de17..d6c5a344c9 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -663,7 +663,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, page_dump(stdout); printf("\n"); #endif - tb_invalidate_phys_range(start, start + len); mmap_unlock(); return start; fail: @@ -769,7 +768,6 @@ int target_munmap(abi_ulong start, abi_ulong len) if (ret == 0) { page_set_flags(start, start + len, 0); - tb_invalidate_phys_range(start, start + len); } mmap_unlock(); return ret; diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c index b9fc6d230c..d77b8f98f7 100644 --- a/contrib/elf2dmp/main.c +++ b/contrib/elf2dmp/main.c @@ -125,6 +125,7 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) { eprintf("Failed to extract entire KDBG\n"); + free(kdbg); return NULL; } diff --git a/cpu.c b/cpu.c index 14365e36f3..2a09b05205 100644 --- a/cpu.c +++ b/cpu.c @@ -277,7 +277,7 @@ void list_cpus(const char *optarg) void tb_invalidate_phys_addr(target_ulong addr) { mmap_lock(); - tb_invalidate_phys_page_range(addr, addr + 1); + tb_invalidate_phys_page(addr); mmap_unlock(); } #else @@ -298,7 +298,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) return; } ram_addr = memory_region_get_ram_addr(mr) + addr; - tb_invalidate_phys_page_range(ram_addr, ram_addr + 1); + tb_invalidate_phys_page(ram_addr); } #endif diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 9bf13133e5..072cf67956 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -1803,7 +1803,7 @@ static void coroutine_fn v9fs_walk(void *opaque) err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { pdu_complete(pdu, err); - return ; + return; } offset += err; diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index bc3ecdb619..f8bc6d4a14 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -1099,7 +1099,7 @@ static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data) amc->soc_name = "ast2400-a1"; amc->hw_strap1 = PALMETTO_BMC_HW_STRAP1; amc->fmc_model = "n25q256a"; - amc->spi_model = 
"mx25l25635e"; + amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = palmetto_bmc_i2c_init; mc->default_ram_size = 256 * MiB; @@ -1150,7 +1150,7 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) amc->soc_name = "ast2500-a1"; amc->hw_strap1 = AST2500_EVB_HW_STRAP1; amc->fmc_model = "mx25l25635e"; - amc->spi_model = "mx25l25635e"; + amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = ast2500_evb_i2c_init; mc->default_ram_size = 512 * MiB; @@ -1200,7 +1200,7 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) mc->desc = "OpenPOWER Witherspoon BMC (ARM1176)"; amc->soc_name = "ast2500-a1"; amc->hw_strap1 = WITHERSPOON_BMC_HW_STRAP1; - amc->fmc_model = "mx25l25635e"; + amc->fmc_model = "mx25l25635f"; amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = witherspoon_bmc_i2c_init; @@ -1330,6 +1330,13 @@ static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */ +#if HOST_LONG_BITS == 32 +#define BLETCHLEY_BMC_RAM_SIZE (1 * GiB) +#else +#define BLETCHLEY_BMC_RAM_SIZE (2 * GiB) +#endif + static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1344,7 +1351,7 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = bletchley_bmc_i2c_init; - mc->default_ram_size = 512 * MiB; + mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE; mc->default_cpus = mc->min_cpus = mc->max_cpus = aspeed_soc_num_cpus(amc->soc_name); } diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index aa2cd90bec..cd75465c2b 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -307,6 +307,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000, &error_abort); + object_property_set_bool(OBJECT(&s->cpu[i]), "neon", false, + &error_abort); object_property_set_link(OBJECT(&s->cpu[i]), "memory", OBJECT(s->memory), &error_abort); diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index 692c94ceb4..b151113c27 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -702,7 +702,7 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) static void *mipid_init(void) { - struct mipid_s *s = (struct mipid_s *) g_malloc0(sizeof(*s)); + struct mipid_s *s = g_malloc0(sizeof(*s)); s->id = 0x838f03; mipid_reset(s); @@ -1300,7 +1300,7 @@ static int n810_atag_setup(const struct arm_boot_info *info, void *p) static void n8x0_init(MachineState *machine, struct arm_boot_info *binfo, int model) { - struct n800_s *s = (struct n800_s *) g_malloc0(sizeof(*s)); + struct n800_s *s = g_malloc0(sizeof(*s)); MachineClass *mc = MACHINE_GET_CLASS(machine); if (machine->ram_size != mc->default_ram_size) { diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index a8d2519141..02adc87527 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -35,6 +35,7 @@ #include "qapi/error.h" #include "trace.h" #include "qom/object.h" +#include "m25p80_sfdp.h" /* 16 MiB max in 3 byte address mode */ #define MAX_3BYTES_SIZE 0x1000000 @@ -72,6 +73,7 @@ typedef struct FlashPartInfo { * This field inform how many die is in the chip. 
*/ uint8_t die_cnt; + uint8_t (*sfdp_read)(uint32_t sfdp_addr); } FlashPartInfo; /* adapted from linux */ @@ -230,12 +232,16 @@ static const FlashPartInfo known_devices[] = { { INFO("mx25l6405d", 0xc22017, 0, 64 << 10, 128, 0) }, { INFO("mx25l12805d", 0xc22018, 0, 64 << 10, 256, 0) }, { INFO("mx25l12855e", 0xc22618, 0, 64 << 10, 256, 0) }, - { INFO6("mx25l25635e", 0xc22019, 0xc22019, 64 << 10, 512, 0) }, + { INFO6("mx25l25635e", 0xc22019, 0xc22019, 64 << 10, 512, + ER_4K | ER_32K), .sfdp_read = m25p80_sfdp_mx25l25635e }, + { INFO6("mx25l25635f", 0xc22019, 0xc22019, 64 << 10, 512, + ER_4K | ER_32K), .sfdp_read = m25p80_sfdp_mx25l25635f }, { INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) }, { INFO("mx66l51235f", 0xc2201a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, - { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, + { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K), + .sfdp_read = m25p80_sfdp_mx66l1g45g }, /* Micron */ { INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) }, @@ -245,13 +251,15 @@ static const FlashPartInfo known_devices[] = { { INFO("n25q128a11", 0x20bb18, 0, 64 << 10, 256, ER_4K) }, { INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) }, { INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) }, - { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K), + .sfdp_read = m25p80_sfdp_n25q256a }, { INFO("n25q512a11", 0x20bb20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q512a13", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, - ER_4K | HAS_SR_BP3_BIT6 | HAS_SR_TB) }, - { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, + ER_4K | HAS_SR_BP3_BIT6 | HAS_SR_TB), + .sfdp_read = m25p80_sfdp_n25q256a }, + { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, { INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) }, { INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024, @@ -337,9 +345,12 @@ static const FlashPartInfo known_devices[] = { { INFO("w25q64", 0xef4017, 0, 64 << 10, 128, ER_4K) }, { INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) }, { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) }, - { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K) }, - { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K) }, - { INFO("w25q01jvq", 0xef4021, 0, 64 << 10, 2048, ER_4K) }, + { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K), + .sfdp_read = m25p80_sfdp_w25q256 }, + { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K), + .sfdp_read = m25p80_sfdp_w25q512jv }, + { INFO("w25q01jvq", 0xef4021, 0, 64 << 10, 2048, ER_4K), + .sfdp_read = m25p80_sfdp_w25q01jvq }, }; typedef enum { @@ -355,6 +366,7 @@ typedef enum { BULK_ERASE = 0xc7, READ_FSR = 0x70, RDCR = 0x15, + RDSFDP = 0x5a, READ = 0x03, READ4 = 0x13, @@ -421,6 +433,7 @@ typedef enum { STATE_COLLECTING_DATA, STATE_COLLECTING_VAR_LEN_DATA, STATE_READING_DATA, + STATE_READING_SFDP, } CMDState; typedef enum { @@ -679,6 +692,8 @@ static inline int get_addr_length(Flash *s) } switch (s->cmd_in_progress) { + case RDSFDP: + return 3; case PP4: case PP4_4: case QPP_4: @@ -823,6 +838,11 @@ static void complete_collecting_data(Flash *s) " by device\n"); } break; + + case RDSFDP: + s->state = STATE_READING_SFDP; + break; + 
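    /*
     * For orientation: a guest reaches STATE_READING_SFDP by clocking
     * out the 0x5a opcode, a 3-byte SFDP address and one dummy byte
     * before reading data.  A minimal sketch of that sequence from the
     * controller side, where xfer() is a hypothetical stand-in for one
     * ssi_transfer() call on the peripheral's bus:
     *
     *     xfer(RDSFDP);         // 0x5a opcode
     *     xfer(addr >> 16);     // SFDP address, MSB first
     *     xfer(addr >> 8);
     *     xfer(addr);
     *     xfer(0);              // dummy cycle
     *     b = xfer(0);          // first table byte; the address then
     *                           // auto-increments on each transfer
     */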
default: break; } @@ -1431,6 +1451,16 @@ static void decode_new_cmd(Flash *s, uint32_t value) qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value); } break; + case RDSFDP: + if (s->pi->sfdp_read) { + s->needed_bytes = get_addr_length(s) + 1; /* SFDP addr + dummy */ + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + break; + } + /* Fallthrough */ + default: s->pos = 0; s->len = 1; @@ -1538,6 +1568,12 @@ static uint32_t m25p80_transfer8(SSIPeripheral *ss, uint32_t tx) } } break; + case STATE_READING_SFDP: + assert(s->pi->sfdp_read); + r = s->pi->sfdp_read(s->cur_addr); + trace_m25p80_read_sfdp(s, s->cur_addr, (uint8_t)r); + s->cur_addr = (s->cur_addr + 1) & (M25P80_SFDP_MAX_SIZE - 1); + break; default: case STATE_IDLE: diff --git a/hw/block/m25p80_sfdp.c b/hw/block/m25p80_sfdp.c new file mode 100644 index 0000000000..77615fa29e --- /dev/null +++ b/hw/block/m25p80_sfdp.c @@ -0,0 +1,332 @@ +/* + * M25P80 Serial Flash Discoverable Parameter (SFDP) + * + * Copyright (c) 2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "m25p80_sfdp.h" + +#define define_sfdp_read(model) \ + uint8_t m25p80_sfdp_##model(uint32_t addr) \ + { \ + assert(is_power_of_2(sizeof(sfdp_##model))); \ + return sfdp_##model[addr & (sizeof(sfdp_##model) - 1)]; \ + } + +/* + * Micron + */ +static const uint8_t sfdp_n25q256a[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x00, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x29, 0xeb, 0x27, 0x6b, 0x08, 0x3b, 0x27, 0xbb, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x27, 0xbb, + 0xff, 0xff, 0x29, 0xeb, 0x0c, 0x20, 0x10, 0xd8, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(n25q256a); + + +/* + * Matronix + */ + +/* mx25l25635e. 
No 4B opcodes */ +static const uint8_t sfdp_mx25l25635e[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x01, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x60, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x00, 0xff, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0xf7, 0x4f, 0xff, 0xff, + 0xd9, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx25l25635e) + +static const uint8_t sfdp_mx25l25635f[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x01, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x60, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x44, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0x9d, 0xf9, 0xc0, 0x64, + 0x85, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xc2, 0xf5, 0x08, 0x0a, + 0x08, 0x04, 0x03, 0x06, 0x00, 0x00, 0x07, 0x29, + 0x17, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
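+    /*
+     * The remaining bytes up to the closing brace are 0xff filler,
+     * padding the table to the power-of-2 size that define_sfdp_read()
+     * asserts on and uses as its address mask.
+     */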
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx25l25635f); + +static const uint8_t sfdp_mx66l1g45g[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x02, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff, + 0xc2, 0x00, 0x01, 0x04, 0x10, 0x01, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xc0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x04, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, + 0xff, 0xff, 0x44, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0xff, 0xd6, 0x49, 0xc5, 0x00, + 0x85, 0xdf, 0x04, 0xe3, 0x44, 0x03, 0x67, 0x38, + 0x30, 0xb0, 0x30, 0xb0, 0xf7, 0xbd, 0xd5, 0x5c, + 0x4a, 0x9e, 0x29, 0xff, 0xf0, 0x50, 0xf9, 0x85, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7f, 0xef, 0xff, 0xff, 0x21, 0x5c, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x36, 0x00, 0x27, 0x9d, 0xf9, 0xc0, 0x64, + 0x85, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc2, 0xf5, 0x08, 0x00, 0x0c, 0x04, 0x08, 0x08, + 0x01, 0x00, 0x19, 0x0f, 0x01, 0x01, 0x06, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(mx66l1g45g); + +/* + * Windbond + */ + +static const uint8_t sfdp_w25q256[] = { + 0x53, 0x46, 0x44, 0x50, 0x00, 0x01, 0x00, 0xff, + 0x00, 0x00, 0x01, 0x09, 0x80, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf3, 0xff, 0xff, 0xff, 0xff, 0x0f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x21, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q256); + +static const uint8_t sfdp_w25q512jv[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xd0, 0x00, 0x00, 0xff, + 0x03, 0x00, 0x01, 0x02, 0xf0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x40, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0x36, 0x02, 0xa6, 0x00, + 0x82, 0xea, 0x14, 0xe2, 0xe9, 0x63, 0x76, 0x33, + 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c, + 0x19, 0xf7, 0x4d, 0xff, 0xe9, 0x70, 0xf9, 0xa5, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x0a, 0xf0, 0xff, 0x21, 0xff, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q512jv); + +static const uint8_t sfdp_w25q01jvq[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0xd0, 0x00, 0x00, 0xff, + 0x03, 0x00, 0x01, 0x02, 0xf0, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x40, 0xeb, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0x36, 0x02, 0xa6, 0x00, + 0x82, 0xea, 0x14, 0xe2, 0xe9, 0x63, 0x76, 0x33, + 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c, + 0x19, 0xf7, 0x4d, 0xff, 0xe9, 0x70, 0xf9, 0xa5, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x0a, 0xf0, 0xff, 0x21, 0xff, 0xdc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q01jvq); diff --git a/hw/block/m25p80_sfdp.h b/hw/block/m25p80_sfdp.h new file mode 100644 index 0000000000..df7adfb5ce --- /dev/null +++ b/hw/block/m25p80_sfdp.h @@ -0,0 +1,29 @@ +/* + * M25P80 SFDP + * + * Copyright (c) 2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef HW_M25P80_SFDP_H +#define HW_M25P80_SFDP_H + +/* + * SFDP area has a 3 bytes address space. 
+ */ +#define M25P80_SFDP_MAX_SIZE (1 << 24) + +uint8_t m25p80_sfdp_n25q256a(uint32_t addr); + +uint8_t m25p80_sfdp_mx25l25635e(uint32_t addr); +uint8_t m25p80_sfdp_mx25l25635f(uint32_t addr); +uint8_t m25p80_sfdp_mx66l1g45g(uint32_t addr); + +uint8_t m25p80_sfdp_w25q256(uint32_t addr); +uint8_t m25p80_sfdp_w25q512jv(uint32_t addr); + +uint8_t m25p80_sfdp_w25q01jvq(uint32_t addr); + +#endif diff --git a/hw/block/meson.build b/hw/block/meson.build index 1908abd45c..b434d5654c 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -12,6 +12,7 @@ softmmu_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) softmmu_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) +softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) diff --git a/hw/block/trace-events b/hw/block/trace-events index d86b53520c..2c45a62bd5 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -80,5 +80,6 @@ m25p80_page_program(void *s, uint32_t addr, uint8_t tx) "[%p] page program cur_a m25p80_transfer(void *s, uint8_t state, uint32_t len, uint8_t needed, uint32_t pos, uint32_t cur_addr, uint8_t t) "[%p] Transfer state 0x%"PRIx8" len 0x%"PRIx32" needed 0x%"PRIx8" pos 0x%"PRIx32" addr 0x%"PRIx32" tx 0x%"PRIx8 m25p80_read_byte(void *s, uint32_t addr, uint8_t v) "[%p] Read byte 0x%"PRIx32"=0x%"PRIx8 m25p80_read_data(void *s, uint32_t pos, uint8_t v) "[%p] Read data 0x%"PRIx32"=0x%"PRIx8 +m25p80_read_sfdp(void *s, uint32_t addr, uint8_t v) "[%p] Read SFDP 0x%"PRIx32"=0x%"PRIx8 m25p80_binding(void *s) "[%p] Binding to IF_MTD drive" m25p80_binding_no_bdrv(void *s) "[%p] No BDRV - binding to RAM" diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c index addcd59b02..7b7c56b6ef 100644 --- a/hw/char/exynos4210_uart.c +++ b/hw/char/exynos4210_uart.c @@ -211,7 +211,7 @@ static void fifo_reset(Exynos4210UartFIFO *q) g_free(q->data); q->data = NULL; - q->data = (uint8_t *)g_malloc0(q->size); + q->data = g_malloc0(q->size); q->sp = 0; q->rp = 0; diff --git a/hw/core/sysbus-fdt.c b/hw/core/sysbus-fdt.c index edb0c49b19..eebcd28f9a 100644 --- a/hw/core/sysbus-fdt.c +++ b/hw/core/sysbus-fdt.c @@ -299,7 +299,8 @@ static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) void *guest_fdt = data->fdt, *host_fdt; const void *r; int i, prop_len; - uint32_t *irq_attr, *reg_attr, *host_clock_phandles; + uint32_t *irq_attr, *reg_attr; + const uint32_t *host_clock_phandles; uint64_t mmio_base, irq_number; uint32_t guest_clock_phandles[2]; @@ -339,7 +340,7 @@ static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) error_report("%s clocks property should contain 2 handles", __func__); exit(1); } - host_clock_phandles = (uint32_t *)r; + host_clock_phandles = r; guest_clock_phandles[0] = qemu_fdt_alloc_phandle(guest_fdt); guest_clock_phandles[1] = qemu_fdt_alloc_phandle(guest_fdt); diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c index 105241577d..ebe230dd0a 100644 --- a/hw/display/blizzard.c +++ b/hw/display/blizzard.c @@ -1007,7 +1007,7 @@ static const GraphicHwOps blizzard_ops = { void *s1d13745_init(qemu_irq gpio_int) { - BlizzardState *s = (BlizzardState *) g_malloc0(sizeof(*s)); + BlizzardState *s = 
g_malloc0(sizeof(*s)); DisplaySurface *surface; s->fb = g_malloc(0x180000); diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index 08e5938ec7..e5d521c329 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -1328,7 +1328,7 @@ static void pl330_debug_exec(PL330State *s) } if (!insn) { pl330_fault(ch, PL330_FAULT_UNDEF_INSTR | PL330_FAULT_DBG_INSTR); - return ; + return; } ch->stall = 0; insn->exec(ch, opcode, args, insn->size - 1); diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index 42c6d69b82..c166fd20fa 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -1131,7 +1131,9 @@ static int aspeed_i2c_bus_slave_event(I2CSlave *slave, enum i2c_event event) AspeedI2CBus *bus = ASPEED_I2C_BUS(qbus->parent); uint32_t reg_intr_sts = aspeed_i2c_bus_intr_sts_offset(bus); uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); - uint32_t value; + uint32_t reg_dev_addr = aspeed_i2c_bus_dev_addr_offset(bus); + uint32_t dev_addr = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_dev_addr, + SLAVE_DEV_ADDR1); if (aspeed_i2c_is_new_mode(bus->controller)) { return aspeed_i2c_bus_new_slave_event(bus, event); @@ -1139,8 +1141,8 @@ static int aspeed_i2c_bus_slave_event(I2CSlave *slave, enum i2c_event event) switch (event) { case I2C_START_SEND_ASYNC: - value = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_byte_buf, TX_BUF); - SHARED_ARRAY_FIELD_DP32(bus->regs, reg_byte_buf, RX_BUF, value << 1); + /* Bit[0] == 0 indicates "send". */ + SHARED_ARRAY_FIELD_DP32(bus->regs, reg_byte_buf, RX_BUF, dev_addr << 1); ARRAY_FIELD_DP32(bus->regs, I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH, 1); SHARED_ARRAY_FIELD_DP32(bus->regs, reg_intr_sts, RX_DONE, 1); diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c index 3c3721ad2d..653e8ddcd5 100644 --- a/hw/misc/cbus.c +++ b/hw/misc/cbus.c @@ -133,7 +133,7 @@ static void cbus_sel(void *opaque, int line, int level) CBus *cbus_init(qemu_irq dat) { - CBusPriv *s = (CBusPriv *) g_malloc0(sizeof(*s)); + CBusPriv *s = g_malloc0(sizeof(*s)); s->dat_out = dat; s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0); @@ -388,7 +388,7 @@ static void retu_io(void *opaque, int rw, int reg, uint16_t *val) void *retu_init(qemu_irq irq, int vilma) { - CBusRetu *s = (CBusRetu *) g_malloc0(sizeof(*s)); + CBusRetu *s = g_malloc0(sizeof(*s)); s->irq = irq; s->irqen = 0xffff; @@ -604,7 +604,7 @@ static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val) void *tahvo_init(qemu_irq irq, int betty) { - CBusTahvo *s = (CBusTahvo *) g_malloc0(sizeof(*s)); + CBusTahvo *s = g_malloc0(sizeof(*s)); s->irq = irq; s->irqen = 0xffff; diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index e0f76d3eb3..73201f9139 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -431,7 +431,7 @@ void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val, (unsigned long long)val, (unsigned int)addr); if (addr > CAN_SJA_MEM_SIZE) { - return ; + return; } if (s->clock & 0x80) { /* PeliCAN Mode */ diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index a1b9c78844..1081e2cc0d 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -315,7 +315,7 @@ eeprom_t *eeprom93xx_new(DeviceState *dev, uint16_t nwords) addrbits = 6; } - eeprom = (eeprom_t *)g_malloc0(sizeof(*eeprom) + nwords * 2); + eeprom = g_malloc0(sizeof(*eeprom) + nwords * 2); eeprom->size = nwords; eeprom->addrbits = addrbits; /* Output DO is tristate, read results in 1. 
*/ diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index faed7e0cbe..22df4be528 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -388,7 +388,7 @@ static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) static inline int aspeed_smc_flash_addr_width(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSMCClass *asc = fl->asc; if (asc->addr_width) { return asc->addr_width(s); @@ -420,7 +420,7 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; - AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSMCClass *asc = fl->asc; AspeedSegments seg; asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->cs], &seg); @@ -1234,7 +1234,6 @@ static const TypeInfo aspeed_smc_info = { static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) { AspeedSMCFlash *s = ASPEED_SMC_FLASH(dev); - AspeedSMCClass *asc; g_autofree char *name = g_strdup_printf(TYPE_ASPEED_SMC_FLASH ".%d", s->cs); if (!s->controller) { @@ -1242,14 +1241,14 @@ static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) return; } - asc = ASPEED_SMC_GET_CLASS(s->controller); + s->asc = ASPEED_SMC_GET_CLASS(s->controller); /* * Use the default segment value to size the memory region. This * can be changed by FW at runtime. */ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_flash_ops, - s, name, asc->segments[s->cs].size); + s, name, s->asc->segments[s->cs].size); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); } diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index 003931fb50..d54a109bee 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -38,9 +38,8 @@ static void ssi_cs_default(void *opaque, int n, int level) bool cs = !!level; assert(n == 0); if (s->cs != cs) { - SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(s); - if (ssc->set_cs) { - ssc->set_cs(s, cs); + if (s->spc->set_cs) { + s->spc->set_cs(s, cs); } } s->cs = cs; @@ -48,11 +47,11 @@ static void ssi_cs_default(void *opaque, int n, int level) static uint32_t ssi_transfer_raw_default(SSIPeripheral *dev, uint32_t val) { - SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(dev); + SSIPeripheralClass *ssc = dev->spc; if ((dev->cs && ssc->cs_polarity == SSI_CS_HIGH) || - (!dev->cs && ssc->cs_polarity == SSI_CS_LOW) || - ssc->cs_polarity == SSI_CS_NONE) { + (!dev->cs && ssc->cs_polarity == SSI_CS_LOW) || + ssc->cs_polarity == SSI_CS_NONE) { return ssc->transfer(dev, val); } return 0; @@ -67,6 +66,7 @@ static void ssi_peripheral_realize(DeviceState *dev, Error **errp) ssc->cs_polarity != SSI_CS_NONE) { qdev_init_gpio_in_named(dev, ssi_cs_default, SSI_GPIO_CS, 1); } + s->spc = ssc; ssc->realize(s, errp); } @@ -115,13 +115,11 @@ uint32_t ssi_transfer(SSIBus *bus, uint32_t val) { BusState *b = BUS(bus); BusChild *kid; - SSIPeripheralClass *ssc; uint32_t r = 0; QTAILQ_FOREACH(kid, &b->children, sibling) { - SSIPeripheral *peripheral = SSI_PERIPHERAL(kid->child); - ssc = SSI_PERIPHERAL_GET_CLASS(peripheral); - r |= ssc->transfer_raw(peripheral, val); + SSIPeripheral *p = SSI_PERIPHERAL(kid->child); + r |= p->spc->transfer_raw(p, val); } return r; diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c index 2e0fd21a36..69eabc678a 100644 --- a/hw/timer/renesas_cmt.c +++ b/hw/timer/renesas_cmt.c @@ -57,7 +57,7 @@ static void update_events(RCMTState *cmt, int ch) if ((cmt->cmstr & (1 << ch)) == 0) { /* count disable, so not happened next event. 
*/ - return ; + return; } next_time = cmt->cmcor[ch] - cmt->cmcnt[ch]; next_time *= NANOSECONDS_PER_SECOND; diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c index d96002e1ee..c15f654738 100644 --- a/hw/timer/renesas_tmr.c +++ b/hw/timer/renesas_tmr.c @@ -67,18 +67,18 @@ static void update_events(RTMRState *tmr, int ch) int i, event; if (tmr->tccr[ch] == 0) { - return ; + return; } if (FIELD_EX8(tmr->tccr[ch], TCCR, CSS) == 0) { /* external clock mode */ /* event not happened */ - return ; + return; } if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) == CSS_CASCADING) { /* cascading mode */ if (ch == 1) { tmr->next[ch] = none; - return ; + return; } diff[cmia] = concat_reg(tmr->tcora) - concat_reg(tmr->tcnt); diff[cmib] = concat_reg(tmr->tcorb) - concat_reg(tmr->tcnt); @@ -384,7 +384,7 @@ static void timer_events(RTMRState *tmr, int ch) tmr->tcorb[ch]) & 0xff; } else { if (ch == 1) { - return ; + return; } tcnt = issue_event(tmr, ch, 16, concat_reg(tmr->tcnt), diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c index 1ddf7297f6..ee41a81801 100644 --- a/hw/usb/ccid-card-emulated.c +++ b/hw/usb/ccid-card-emulated.c @@ -140,7 +140,7 @@ static void emulated_apdu_from_guest(CCIDCardState *base, const uint8_t *apdu, uint32_t len) { EmulatedState *card = EMULATED_CCID_CARD(base); - EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len); + EmulEvent *event = g_malloc(sizeof(EmulEvent) + len); assert(event); event->p.data.type = EMUL_GUEST_APDU; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index e7d80242b7..34db51e241 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -1675,7 +1675,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" " neither legacy nor transitional device"); - return ; + return; } /* * Legacy and transitional devices use specific subsystem IDs. diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 16b7df41bf..2eb1176538 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -281,28 +281,18 @@ void page_reset_target_data(target_ulong start, target_ulong end); int page_check_range(target_ulong start, target_ulong len, int flags); /** - * page_alloc_target_data(address, size) + * page_get_target_data(address) * @address: guest virtual address - * @size: size of data to allocate * - * Allocate @size bytes of out-of-band data to associate with the - * guest page at @address. If the page is not mapped, NULL will - * be returned. If there is existing data associated with @address, - * no new memory will be allocated. + * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate + * with the guest page at @address, allocating it if necessary. The + * caller should already have verified that the address is valid. * * The memory will be freed when the guest page is deallocated, * e.g. with the munmap system call. */ -void *page_alloc_target_data(target_ulong address, size_t size); - -/** - * page_get_target_data(address) - * @address: guest virtual address - * - * Return any out-of-bound memory assocated with the guest page - * at @address, as per page_alloc_target_data. 
- */ -void *page_get_target_data(target_ulong address); +void *page_get_target_data(target_ulong address) + __attribute__((returns_nonnull)); #endif CPUArchState *cpu_copy(CPUArchState *env); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index e5f8b224a5..e948992a80 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -39,9 +39,6 @@ typedef ram_addr_t tb_page_addr_t; #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT #endif -void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb, - target_ulong *data); - /** * cpu_restore_state: * @cpu: the vCPU state is to be restore to @@ -610,18 +607,40 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) return qatomic_read(&tb->cflags); } +static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb) +{ + return tb->page_addr[0]; +} + +static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb) +{ + return tb->page_addr[1]; +} + +static inline void tb_set_page_addr0(TranslationBlock *tb, + tb_page_addr_t addr) +{ + tb->page_addr[0] = addr; +} + +static inline void tb_set_page_addr1(TranslationBlock *tb, + tb_page_addr_t addr) +{ + tb->page_addr[1] = addr; +} + /* current cflags for hashing/comparison */ uint32_t curr_cflags(CPUState *cpu); /* TranslationBlock invalidate API */ #if defined(CONFIG_USER_ONLY) void tb_invalidate_phys_addr(target_ulong addr); -void tb_invalidate_phys_range(target_ulong start, target_ulong end); #else void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); #endif void tb_flush(CPUState *cpu); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end); void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); /* GETPC is the true target of the return instruction that we'll execute. */ @@ -642,14 +661,6 @@ extern __thread uintptr_t tci_tb_ptr; smaller than 4 bytes, so we don't worry about special-casing this. 
*/ #define GETPC_ADJ 2 -#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG) -void assert_no_pages_locked(void); -#else -static inline void assert_no_pages_locked(void) -{ -} -#endif - #if !defined(CONFIG_USER_ONLY) /** diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index f3e0c78161..1500680458 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -147,8 +147,6 @@ static inline void qemu_ram_block_writeback(RAMBlock *block) #define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1) #define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE)) -void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end); - static inline bool cpu_physical_memory_get_dirty(ram_addr_t start, ram_addr_t length, unsigned client) diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h index 9f646389af..3e9cb91565 100644 --- a/include/exec/translate-all.h +++ b/include/exec/translate-all.h @@ -29,7 +29,7 @@ void page_collection_unlock(struct page_collection *set); void tb_invalidate_phys_page_fast(struct page_collection *pages, tb_page_addr_t start, int len, uintptr_t retaddr); -void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_invalidate_phys_page(tb_page_addr_t addr); void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr); #ifdef CONFIG_USER_ONLY diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 78c6c6635d..20e3c0ffbb 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -31,6 +31,17 @@ struct TCGCPUOps { * function to restore all the state, and register it here. */ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb); + /** + * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn + * + * This is called when we unwind state in the middle of a TB, + * usually before raising an exception. Set all part of the CPU + * state which are tracked insn-by-insn in the target-specific + * arguments to start_insn, passed as @data. 
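+     *
+     * A sketch of a typical implementation, for a hypothetical target
+     * whose start_insn arguments record only the PC:
+     *
+     *     static void foo_restore_state_to_opc(CPUState *cs,
+     *                                          const TranslationBlock *tb,
+     *                                          const uint64_t *data)
+     *     {
+     *         FooCPU *cpu = FOO_CPU(cs);
+     *
+     *         cpu->env.pc = data[0];
+     *     }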
+ */ + void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb, + const uint64_t *data); + /** @cpu_exec_enter: Callback for cpu_exec preparation */ void (*cpu_exec_enter)(CPUState *cpu); /** @cpu_exec_exit: Callback for cpu_exec cleanup */ diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index 7c3b1d0f6c..fbe0b1e956 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -117,7 +117,7 @@ static void glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab, shdr_table = load_at(fd, ehdr->e_shoff, sizeof(struct elf_shdr) * ehdr->e_shnum); if (!shdr_table) { - return ; + return; } if (must_swab) { diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h index 300a89b343..adc904d6c1 100644 --- a/include/hw/i2c/aspeed_i2c.h +++ b/include/hw/i2c/aspeed_i2c.h @@ -130,6 +130,7 @@ REG32(I2CD_CMD, 0x14) /* I2CD Command/Status */ SHARED_FIELD(M_TX_CMD, 1, 1) SHARED_FIELD(M_START_CMD, 0, 1) REG32(I2CD_DEV_ADDR, 0x18) /* Slave Device Address */ + SHARED_FIELD(SLAVE_DEV_ADDR1, 0, 7) REG32(I2CD_POOL_CTRL, 0x1C) /* Pool Buffer Control */ SHARED_FIELD(RX_COUNT, 24, 5) SHARED_FIELD(RX_SIZE, 16, 5) diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 3b1b3d278e..6ea4b64fe7 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -188,7 +188,6 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, const char *serial, Error **errp); void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense); void scsi_bus_legacy_handle_cmdline(SCSIBus *bus); -void scsi_legacy_handle_cmdline(void); SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d, uint32_t tag, uint32_t lun, void *hba_private); diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index 2d5f8f3d8f..8e1dda556b 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -30,6 +30,7 @@ #include "qom/object.h" struct AspeedSMCState; +struct AspeedSMCClass; #define TYPE_ASPEED_SMC_FLASH "aspeed.smc.flash" OBJECT_DECLARE_SIMPLE_TYPE(AspeedSMCFlash, ASPEED_SMC_FLASH) @@ -37,6 +38,7 @@ struct AspeedSMCFlash { SysBusDevice parent_obj; struct AspeedSMCState *controller; + struct AspeedSMCClass *asc; uint8_t cs; MemoryRegion mmio; diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h index f411858ab0..6950f86810 100644 --- a/include/hw/ssi/ssi.h +++ b/include/hw/ssi/ssi.h @@ -59,6 +59,9 @@ struct SSIPeripheralClass { struct SSIPeripheral { DeviceState parent_obj; + /* cache the class */ + SSIPeripheralClass *spc; + /* Chip select state */ bool cs; }; diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 7e8fc8e7cd..874134fd19 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -133,7 +133,7 @@ #define qatomic_read(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_read__nocheck(ptr); \ }) @@ -141,7 +141,7 @@ __atomic_store_n(ptr, i, __ATOMIC_RELAXED) #define qatomic_set(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_set__nocheck(ptr, i); \ } while(0) @@ -159,27 +159,27 @@ #define qatomic_rcu_read(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ typeof_strip_qual(*ptr) _val; \ qatomic_rcu_read__nocheck(ptr, &_val); \ _val; \ }) #define qatomic_rcu_set(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= 
ATOMIC_REG_SIZE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) #define qatomic_load_acquire(ptr) \ ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ typeof_strip_qual(*ptr) _val; \ __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \ _val; \ }) #define qatomic_store_release(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) @@ -191,7 +191,7 @@ }) #define qatomic_xchg(ptr, i) ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_xchg__nocheck(ptr, i); \ }) @@ -204,7 +204,7 @@ }) #define qatomic_cmpxchg(ptr, old, new) ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ qatomic_cmpxchg__nocheck(ptr, old, new); \ }) diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index b1c161c035..2276094729 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -186,6 +186,14 @@ void QEMU_ERROR("code path is reachable") #define qemu_build_not_reached() g_assert_not_reached() #endif +/** + * qemu_build_assert() + * + * The compiler, during optimization, is expected to prove that the + * assertion is true. + */ +#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached() + /* * According to waitpid man page: * WCOREDUMP diff --git a/include/qemu/thread.h b/include/qemu/thread.h index af19f2b3fc..20641e5844 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -227,7 +227,7 @@ struct QemuSpin { static inline void qemu_spin_init(QemuSpin *spin) { - __sync_lock_release(&spin->value); + qatomic_set(&spin->value, 0); #ifdef CONFIG_TSAN __tsan_mutex_create(spin, __tsan_mutex_not_static); #endif @@ -246,7 +246,7 @@ static inline void qemu_spin_lock(QemuSpin *spin) #ifdef CONFIG_TSAN __tsan_mutex_pre_lock(spin, 0); #endif - while (unlikely(__sync_lock_test_and_set(&spin->value, true))) { + while (unlikely(qatomic_xchg(&spin->value, 1))) { while (qatomic_read(&spin->value)) { cpu_relax(); } @@ -261,7 +261,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin) #ifdef CONFIG_TSAN __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock); #endif - bool busy = __sync_lock_test_and_set(&spin->value, true); + bool busy = qatomic_xchg(&spin->value, true); #ifdef CONFIG_TSAN unsigned flags = __tsan_mutex_try_lock; flags |= busy ? __tsan_mutex_try_lock_failed : 0; @@ -280,7 +280,7 @@ static inline void qemu_spin_unlock(QemuSpin *spin) #ifdef CONFIG_TSAN __tsan_mutex_pre_unlock(spin, 0); #endif - __sync_lock_release(&spin->value); + qatomic_store_release(&spin->value, 0); #ifdef CONFIG_TSAN __tsan_mutex_post_unlock(spin, 0); #endif diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h index 36ff5b14f2..e644d2ef90 100644 --- a/linux-user/cpu_loop-common.h +++ b/linux-user/cpu_loop-common.h @@ -23,18 +23,9 @@ #include "exec/log.h" #include "special-errno.h" -#define EXCP_DUMP(env, fmt, ...) 
\ -do { \ - CPUState *cs = env_cpu(env); \ - fprintf(stderr, fmt , ## __VA_ARGS__); \ - fprintf(stderr, "Failing executable: %s\n", exec_path); \ - cpu_dump_state(cs, stderr, 0); \ - if (qemu_log_separate()) { \ - qemu_log(fmt, ## __VA_ARGS__); \ - qemu_log("Failing executable: %s\n", exec_path); \ - log_cpu_state(cs, 0); \ - } \ -} while (0) +void target_exception_dump(CPUArchState *env, const char *fmt, int code); +#define EXCP_DUMP(env, fmt, code) \ + target_exception_dump(env, fmt, code) void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs); #endif diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index 42837399bc..865413c08f 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -201,7 +201,6 @@ void cpu_loop(CPUX86State *env) { CPUState *cs = env_cpu(env); int trapnr; - abi_ulong pc; abi_ulong ret; for(;;) { @@ -307,9 +306,8 @@ void cpu_loop(CPUX86State *env) cpu_exec_step_atomic(cs); break; default: - pc = env->segs[R_CS].base + env->eip; - EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n", - (long)pc, trapnr); + EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); abort(); } process_pending_signals(env); diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h index f182d40190..071f7ca253 100644 --- a/linux-user/ioctls.h +++ b/linux-user/ioctls.h @@ -96,9 +96,7 @@ IOCTL(BLKROGET, IOC_R, MK_PTR(TYPE_INT)) IOCTL(BLKRRPART, 0, TYPE_NULL) IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG)) -#ifdef BLKGETSIZE64 IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG)) -#endif IOCTL(BLKFLSBUF, 0, TYPE_NULL) IOCTL(BLKRASET, 0, TYPE_INT) IOCTL(BLKRAGET, IOC_R, MK_PTR(TYPE_LONG)) @@ -107,33 +105,15 @@ IOCTL_SPECIAL(BLKPG, IOC_W, do_ioctl_blkpg, MK_PTR(MK_STRUCT(STRUCT_blkpg_ioctl_arg))) -#ifdef BLKDISCARD IOCTL(BLKDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif -#ifdef BLKIOMIN IOCTL(BLKIOMIN, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKIOOPT IOCTL(BLKIOOPT, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKALIGNOFF IOCTL(BLKALIGNOFF, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKPBSZGET IOCTL(BLKPBSZGET, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKDISCARDZEROES IOCTL(BLKDISCARDZEROES, IOC_R, MK_PTR(TYPE_INT)) -#endif -#ifdef BLKSECDISCARD IOCTL(BLKSECDISCARD, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif -#ifdef BLKROTATIONAL IOCTL(BLKROTATIONAL, IOC_R, MK_PTR(TYPE_SHORT)) -#endif -#ifdef BLKZEROOUT IOCTL(BLKZEROOUT, IOC_W, MK_PTR(MK_ARRAY(TYPE_ULONGLONG, 2))) -#endif IOCTL(FDMSGON, 0, TYPE_NULL) IOCTL(FDMSGOFF, 0, TYPE_NULL) @@ -149,17 +129,13 @@ IOCTL(FDTWADDLE, 0, TYPE_NULL) IOCTL(FDEJECT, 0, TYPE_NULL) -#ifdef FIBMAP IOCTL(FIBMAP, IOC_W | IOC_R, MK_PTR(TYPE_LONG)) -#endif #ifdef FICLONE IOCTL(FICLONE, IOC_W, TYPE_INT) IOCTL(FICLONERANGE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_file_clone_range))) #endif -#ifdef FIGETBSZ IOCTL(FIGETBSZ, IOC_R, MK_PTR(TYPE_LONG)) -#endif #ifdef CONFIG_FIEMAP IOCTL_SPECIAL(FS_IOC_FIEMAP, IOC_W | IOC_R, do_ioctl_fs_ioc_fiemap, MK_PTR(MK_STRUCT(STRUCT_fiemap))) diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 28f3bc85ed..10f5079331 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -182,7 +182,6 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } page_set_flags(start, start + len, page_flags); - tb_invalidate_phys_range(start, start + len); ret = 0; error: @@ -662,7 +661,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, qemu_log_unlock(f); } } - tb_invalidate_phys_range(start, start + len); 
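    /*
     * The hand-rolled invalidation is dropped on purpose: page_set_flags()
     * (see the translate-all.c hunk above) now notices when a formerly
     * executable page is reset or becomes writable and issues a single
     * tb_invalidate_phys_range() over the whole range, so target_mmap(),
     * target_munmap() and friends no longer need to do it themselves.
     */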
mmap_unlock(); return start; fail: @@ -766,7 +764,6 @@ int target_munmap(abi_ulong start, abi_ulong len) if (ret == 0) { page_set_flags(start, start + len, 0); - tb_invalidate_phys_range(start, start + len); } mmap_unlock(); return ret; @@ -856,7 +853,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID | PAGE_RESET); } - tb_invalidate_phys_range(new_addr, new_addr + new_size); mmap_unlock(); return new_addr; } diff --git a/linux-user/strace.c b/linux-user/strace.c index 37bc96df9b..9ae5a812cd 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -1969,7 +1969,7 @@ print_execv(CPUArchState *cpu_env, const struct syscallname *name, } #endif -#ifdef TARGET_NR_faccessat +#if defined(TARGET_NR_faccessat) || defined(TARGET_NR_faccessat2) static void print_faccessat(CPUArchState *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, @@ -3383,10 +3383,10 @@ print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name, unlock_user(p, arg2, 0); } else { - print_pointer(arg2, 1); + print_pointer(arg2, 0); } - print_raw_param("%u", arg3, 0); + print_raw_param("%u", arg3, 1); print_syscall_epilogue(name); } #endif diff --git a/linux-user/strace.list b/linux-user/strace.list index a87415bf3d..3df2184580 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -178,6 +178,9 @@ #ifdef TARGET_NR_faccessat { TARGET_NR_faccessat, "faccessat" , NULL, print_faccessat, NULL }, #endif +#ifdef TARGET_NR_faccessat2 +{ TARGET_NR_faccessat2, "faccessat2" , NULL, print_faccessat, NULL }, +#endif #ifdef TARGET_NR_fadvise64 { TARGET_NR_fadvise64, "fadvise64" , NULL, NULL, NULL }, #endif diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 2e954d8dbd..8402c1399d 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -111,6 +111,31 @@ #define FS_IOC32_SETFLAGS _IOW('f', 2, int) #define FS_IOC32_GETVERSION _IOR('v', 1, int) #define FS_IOC32_SETVERSION _IOW('v', 2, int) + +#define BLKGETSIZE64 _IOR(0x12,114,size_t) +#define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) +#define BLKDISCARDZEROES _IO(0x12,124) +#define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) +#define BLKZEROOUT _IO(0x12,127) + +#define FIBMAP _IO(0x00,1) +#define FIGETBSZ _IO(0x00,2) + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +#define FICLONE _IOW(0x94, 9, int) +#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) + #else #include <linux/fs.h> #endif @@ -158,6 +183,7 @@ #include "qapi/error.h" #include "fd-trans.h" #include "tcg/tcg.h" +#include "cpu_loop-common.h" #ifndef CLONE_IO #define CLONE_IO 0x80000000 /* Clone io context */ @@ -8144,6 +8170,33 @@ static int is_proc_myself(const char *filename, const char *entry) return 0; } +static void excp_dump_file(FILE *logfile, CPUArchState *env, + const char *fmt, int code) +{ + if (logfile) { + CPUState *cs = env_cpu(env); + + fprintf(logfile, fmt, code); + fprintf(logfile, "Failing executable: %s\n", exec_path); + cpu_dump_state(cs, logfile, 0); + open_self_maps(env, fileno(logfile)); + } +} + +void target_exception_dump(CPUArchState *env, const char *fmt, int code) +{ + /* dump to console */ + excp_dump_file(stderr, env, fmt, code); + + /* dump to log file */ + if (qemu_log_separate()) { + FILE *logfile = qemu_log_trylock(); + + 
excp_dump_file(logfile, env, fmt, code); + qemu_log_unlock(logfile); + } +} + #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \ defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) static int is_proc(const char *filename, const char *entry) @@ -8251,8 +8304,7 @@ static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int }; if (is_proc_myself(pathname, "exe")) { - int execfd = qemu_getauxval(AT_EXECFD); - return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode); + return safe_openat(dirfd, exec_path, flags, mode); } for (fake_open = fakes; fake_open->filename; fake_open++) { @@ -8679,16 +8731,21 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) case TARGET_NR_pidfd_send_signal: { - siginfo_t uinfo; + siginfo_t uinfo, *puinfo; - p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); - if (!p) { - return -TARGET_EFAULT; + if (arg3) { + p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); + if (!p) { + return -TARGET_EFAULT; + } + target_to_host_siginfo(&uinfo, p); + unlock_user(p, arg3, 0); + puinfo = &uinfo; + } else { + puinfo = NULL; } - target_to_host_siginfo(&uinfo, p); - unlock_user(p, arg3, 0); ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), - &uinfo, arg4)); + puinfo, arg4)); } return ret; #endif @@ -8855,7 +8912,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, * before the execve completes and makes it the other * program's problem. */ - ret = get_errno(safe_execve(p, argp, envp)); + if (is_proc_myself(p, "exe")) { + ret = get_errno(safe_execve(exec_path, argp, envp)); + } else { + ret = get_errno(safe_execve(p, argp, envp)); + } unlock_user(p, arg1, 0); goto execve_end; @@ -9110,6 +9171,15 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, unlock_user(p, arg2, 0); return ret; #endif +#if defined(TARGET_NR_faccessat2) + case TARGET_NR_faccessat2: + if (!(p = lock_user_string(arg2))) { + return -TARGET_EFAULT; + } + ret = get_errno(faccessat(arg1, p, arg3, arg4)); + unlock_user(p, arg2, 0); + return ret; +#endif #ifdef TARGET_NR_nice /* not on alpha */ case TARGET_NR_nice: return get_errno(nice(arg1)); @@ -11793,7 +11863,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return -host_to_target_errno(ret); #endif -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: @@ -11920,7 +11990,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return get_errno(sys_gettid()); #ifdef TARGET_NR_readahead case TARGET_NR_readahead: -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) if (regpairs_aligned(cpu_env, num)) { arg2 = arg3; arg3 = arg4; @@ -12612,7 +12682,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #endif /* CONFIG_EVENTFD */ #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) case TARGET_NR_fallocate: -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), target_offset64(arg5, arg6))); #else @@ -12623,7 +12693,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #if defined(CONFIG_SYNC_FILE_RANGE) #if defined(TARGET_NR_sync_file_range) case TARGET_NR_sync_file_range: -#if TARGET_ABI_BITS == 32 +#if 
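On the pidfd_send_signal() change above: the host syscall itself accepts a NULL siginfo pointer, in which case the kernel constructs a default SI_USER-style siginfo just as kill(2) would, so the emulation can pass NULL straight through instead of rejecting it. A hedged sketch of the raw host call, assuming Linux 5.1+ headers that define SYS_pidfd_send_signal:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

static int pidfd_kill(int pidfd, int sig)
{
    /* info == NULL, flags == 0: kernel fills in a kill(2)-style siginfo */
    return syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
}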
TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) #if defined(TARGET_MIPS) ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg7)); @@ -12645,7 +12715,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, case TARGET_NR_arm_sync_file_range: #endif /* This is like sync_file_range but the arguments are reordered */ -#if TARGET_ABI_BITS == 32 +#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg2)); #else diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index b369388360..dfeee8231a 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -367,7 +367,6 @@ def check_machine_type(s, d): if s["Name"] != d["Name"]: print("Warning: checking incompatible machine types:", end=' ') print("\"" + s["Name"] + "\", \"" + d["Name"] + "\"") - return def main(): diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 979a629d59..270ae787b1 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -40,6 +40,14 @@ static vaddr alpha_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static void alpha_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + + cpu->env.pc = data[0]; +} static bool alpha_cpu_has_work(CPUState *cs) { @@ -226,6 +234,7 @@ static const struct SysemuCPUOps alpha_sysemu_ops = { static const struct TCGCPUOps alpha_tcg_ops = { .initialize = alpha_translate_init, + .restore_state_to_opc = alpha_restore_state_to_opc, #ifdef CONFIG_USER_ONLY .record_sigsegv = alpha_cpu_record_sigsegv, diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 6766350f56..f9bcdeb717 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -3049,9 +3049,3 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); } - -void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 0bc5e9b125..0a7bfbf999 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -90,6 +90,31 @@ void arm_cpu_synchronize_from_tb(CPUState *cs, } } } + +static void arm_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + CPUARMState *env = cs->env_ptr; + + if (is_a64(env)) { + if (TARGET_TB_PCREL) { + env->pc = (env->pc & TARGET_PAGE_MASK) | data[0]; + } else { + env->pc = data[0]; + } + env->condexec_bits = 0; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; + } else { + if (TARGET_TB_PCREL) { + env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0]; + } else { + env->regs[15] = data[0]; + } + env->condexec_bits = data[1]; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; + } +} #endif /* CONFIG_TCG */ static bool arm_cpu_has_work(CPUState *cs) @@ -2152,6 +2177,7 @@ static const struct TCGCPUOps arm_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, + .restore_state_to_opc = arm_restore_state_to_opc, #ifdef CONFIG_USER_ONLY .record_sigsegv = arm_cpu_record_sigsegv, diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 64fc03214c..db9ec6a038 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ 
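The recurring `&& !defined(TARGET_ABI_MIPSN32)` guards above exist because N32 is a 32-bit ABI whose syscall arguments travel in 64-bit registers, so 64-bit file offsets arrive whole rather than split across a register pair. A sketch of the pair-joining step the guarded paths perform (join64 is an illustrative stand-in for QEMU's target_offset64 helper):

#include <stdint.h>

static inline int64_t join64(uint32_t word0, uint32_t word1, int big_endian)
{
    uint64_t hi = big_endian ? word0 : word1;   /* MSW comes first on BE */
    uint64_t lo = big_endian ? word1 : word0;

    return (int64_t)((hi << 32) | lo);
}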
-3410,6 +3410,14 @@ extern const uint64_t pred_esz_masks[5]; #define PAGE_MTE PAGE_TARGET_2 #define PAGE_TARGET_STICKY PAGE_MTE +/* We associate one allocation tag per 16 bytes, the minimum. */ +#define LOG2_TAG_GRANULE 4 +#define TAG_GRANULE (1 << LOG2_TAG_GRANULE) + +#ifdef CONFIG_USER_ONLY +#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1)) +#endif + #ifdef TARGET_TAGGED_ADDRESSES /** * cpu_untagged_addr: diff --git a/target/arm/internals.h b/target/arm/internals.h index c3c3920ded..b26c9ca17b 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1164,10 +1164,6 @@ void arm_log_exception(CPUState *cs); */ #define GMID_EL1_BS 6 -/* We associate one allocation tag per 16 bytes, the minimum. */ -#define LOG2_TAG_GRANULE 4 -#define TAG_GRANULE (1 << LOG2_TAG_GRANULE) - /* * SVE predicates are 1/8 the size of SVE vectors, and cannot use * the same simd_desc() encoding due to restrictions on size. diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c index e85208339e..86b3754838 100644 --- a/target/arm/mte_helper.c +++ b/target/arm/mte_helper.c @@ -95,11 +95,6 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, } tags = page_get_target_data(clean_ptr); - if (tags == NULL) { - size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1); - tags = page_alloc_target_data(clean_ptr, alloc_size); - assert(tags != NULL); - } index = extract32(ptr, LOG2_TAG_GRANULE + 1, TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1); diff --git a/target/arm/translate.c b/target/arm/translate.c index d1b868430e..74a903072f 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -9939,25 +9939,3 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base); } - -void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, - target_ulong *data) -{ - if (is_a64(env)) { - if (TARGET_TB_PCREL) { - env->pc = (env->pc & TARGET_PAGE_MASK) | data[0]; - } else { - env->pc = data[0]; - } - env->condexec_bits = 0; - env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; - } else { - if (TARGET_TB_PCREL) { - env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0]; - } else { - env->regs[15] = data[0]; - } - env->condexec_bits = data[1]; - env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; - } -} diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 0d2861179d..c7295b488d 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -57,6 +57,16 @@ static void avr_cpu_synchronize_from_tb(CPUState *cs, env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */ } +static void avr_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + AVRCPU *cpu = AVR_CPU(cs); + CPUAVRState *env = &cpu->env; + + env->pc_w = data[0]; +} + static void avr_cpu_reset(DeviceState *ds) { CPUState *cs = CPU(ds); @@ -202,6 +212,7 @@ static const struct SysemuCPUOps avr_sysemu_ops = { static const struct TCGCPUOps avr_tcg_ops = { .initialize = avr_cpu_tcg_init, .synchronize_from_tb = avr_cpu_synchronize_from_tb, + .restore_state_to_opc = avr_restore_state_to_opc, .cpu_exec_interrupt = avr_cpu_exec_interrupt, .tlb_fill = avr_cpu_tlb_fill, .do_interrupt = avr_cpu_do_interrupt, diff --git a/target/avr/translate.c b/target/avr/translate.c index e65b6008c0..2bed56f135 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -3055,9 +3055,3 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, 
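The TARGET_PAGE_DATA_SIZE definition above encodes the MTE storage arithmetic: one 4-bit allocation tag per 16-byte granule, two tags packed per byte, hence TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1) bytes of tag data per page. A sketch of indexing into such a packed tag array; the helper name and the nibble ordering are illustrative only, not QEMU's exact layout:

#include <stdint.h>
#include <stddef.h>

enum { LOG2_GRANULE = 4 };   /* 16-byte granule, one 4-bit tag each */

static inline uint8_t get_alloc_tag(const uint8_t *tags, size_t ofs_in_page)
{
    size_t granule = ofs_in_page >> LOG2_GRANULE;   /* which granule */
    uint8_t pair = tags[granule >> 1];              /* two tags per byte */

    return (granule & 1) ? (pair >> 4) : (pair & 0xf);
}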
DisasContext dc = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base); } - -void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc_w = data[0]; -} diff --git a/target/cris/cpu.c b/target/cris/cpu.c index 22f5c70f39..fb05dc6f9a 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -42,6 +42,15 @@ static vaddr cris_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static void cris_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + CRISCPU *cpu = CRIS_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool cris_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); @@ -212,6 +221,7 @@ static const struct SysemuCPUOps cris_sysemu_ops = { static const struct TCGCPUOps crisv10_tcg_ops = { .initialize = cris_initialize_crisv10_tcg, + .restore_state_to_opc = cris_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = cris_cpu_tlb_fill, @@ -222,6 +232,7 @@ static const struct TCGCPUOps crisv10_tcg_ops = { static const struct TCGCPUOps crisv32_tcg_ops = { .initialize = cris_initialize_tcg, + .restore_state_to_opc = cris_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = cris_cpu_tlb_fill, diff --git a/target/cris/translate.c b/target/cris/translate.c index 73385b0b3c..fbc3fd5865 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -3392,9 +3392,3 @@ void cris_initialize_tcg(void) pregnames_v32[i]); } } - -void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index fa6d722555..03221fbdc2 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -271,9 +271,13 @@ static bool hexagon_cpu_has_work(CPUState *cs) return true; } -void restore_state_to_opc(CPUHexagonState *env, TranslationBlock *tb, - target_ulong *data) +static void hexagon_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + HexagonCPU *cpu = HEXAGON_CPU(cs); + CPUHexagonState *env = &cpu->env; + env->gpr[HEX_REG_PC] = data[0]; } @@ -327,6 +331,7 @@ static void hexagon_cpu_init(Object *obj) static const struct TCGCPUOps hexagon_tcg_ops = { .initialize = hexagon_translate_init, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, + .restore_state_to_opc = hexagon_restore_state_to_opc, }; static void hexagon_cpu_class_init(ObjectClass *c, void *data) diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index e677ca09d4..55c190280e 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -68,6 +68,24 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, cpu->env.psw_n = (tb->flags & PSW_N) != 0; } +static void hppa_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + HPPACPU *cpu = HPPA_CPU(cs); + + cpu->env.iaoq_f = data[0]; + if (data[1] != (target_ureg)-1) { + cpu->env.iaoq_b = data[1]; + } + /* + * Since we were executing the instruction at IAOQ_F, and took some + * sort of action that provoked the cpu_restore_state, we can infer + * that the instruction was not nullified. 
+ */ + cpu->env.psw_n = 0; +} + static bool hppa_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); @@ -153,6 +171,7 @@ static const struct SysemuCPUOps hppa_sysemu_ops = { static const struct TCGCPUOps hppa_tcg_ops = { .initialize = hppa_translate_init, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, + .restore_state_to_opc = hppa_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = hppa_cpu_tlb_fill, diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 8b861957e0..1af77473da 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -4346,16 +4346,3 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, DisasContext ctx; translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); } - -void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->iaoq_f = data[0]; - if (data[1] != (target_ureg)-1) { - env->iaoq_b = data[1]; - } - /* Since we were executing the instruction at IAOQ_F, and took some - sort of action that provoked the cpu_restore_state, we can infer - that the instruction was not nullified. */ - env->psw_n = 0; -} diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index dac100c67c..4df0428089 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -2262,8 +2262,7 @@ static int kvm_get_supported_feature_msrs(KVMState *s) } assert(msr_list.nmsrs > 0); - kvm_feature_msrs = (struct kvm_msr_list *) \ - g_malloc0(sizeof(msr_list) + + kvm_feature_msrs = g_malloc0(sizeof(msr_list) + msr_list.nmsrs * sizeof(msr_list.indices[0])); kvm_feature_msrs->nmsrs = msr_list.nmsrs; diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 828244abe2..79ac5908f7 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -56,6 +56,24 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, } } +static void x86_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + int cc_op = data[1]; + + if (TARGET_TB_PCREL) { + env->eip = (env->eip & TARGET_PAGE_MASK) | data[0]; + } else { + env->eip = data[0] - tb->cs_base; + } + if (cc_op != CC_OP_DYNAMIC) { + env->cc_op = cc_op; + } +} + #ifndef CONFIG_USER_ONLY static bool x86_debug_check_breakpoint(CPUState *cs) { @@ -72,6 +90,7 @@ static bool x86_debug_check_breakpoint(CPUState *cs) static const struct TCGCPUOps x86_tcg_ops = { .initialize = tcg_x86_init, .synchronize_from_tb = x86_cpu_synchronize_from_tb, + .restore_state_to_opc = x86_restore_state_to_opc, .cpu_exec_enter = x86_cpu_exec_enter, .cpu_exec_exit = x86_cpu_exec_exit, #ifdef CONFIG_USER_ONLY diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 85be2e58c2..546c427c23 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -7023,18 +7023,3 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns, translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base); } - -void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, - target_ulong *data) -{ - int cc_op = data[1]; - - if (TARGET_TB_PCREL) { - env->eip = (env->eip & TARGET_PAGE_MASK) | data[0]; - } else { - env->eip = data[0] - tb->cs_base; - } - if (cc_op != CC_OP_DYNAMIC) { - env->cc_op = cc_op; - } -} diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 8e4969edeb..e738d83e81 100644 --- a/target/i386/whpx/whpx-all.c +++ 
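The refactoring running through all of these target/ hunks has one shape: each target's global restore_state_to_opc() becomes a restore_state_to_opc method in TCGCPUOps, so common accel/tcg code can reach it through the CPU's ops table without per-target linkage. A minimal model of that dispatch, with invented names standing in for the QEMU structs:

#include <stdint.h>

typedef struct TranslationBlk TranslationBlk;   /* opaque stand-in */

typedef struct CpuOps {
    void (*restore_state_to_opc)(void *cpu, const TranslationBlk *tb,
                                 const uint64_t *data);
} CpuOps;

typedef struct Cpu {
    const CpuOps *ops;
    uint64_t pc;
} Cpu;

/* common code: one indirect call, no target-specific symbol needed */
static void restore_state(Cpu *cpu, const TranslationBlk *tb,
                          const uint64_t *data)
{
    cpu->ops->restore_state_to_opc(cpu, tb, data);
}

/* a target supplies its method, typically little more than pc = data[0] */
static void toy_restore(void *cs, const TranslationBlk *tb,
                        const uint64_t *data)
{
    ((Cpu *)cs)->pc = data[0];
}

static const CpuOps toy_ops = { .restore_state_to_opc = toy_restore };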
b/target/i386/whpx/whpx-all.c @@ -1164,9 +1164,8 @@ static void whpx_translate_cpu_breakpoints( (breakpoints->breakpoints ? breakpoints->breakpoints->used : 0); struct whpx_breakpoint_collection *new_breakpoints = - (struct whpx_breakpoint_collection *)g_malloc0( - sizeof(struct whpx_breakpoint_collection) + - max_breakpoints * sizeof(struct whpx_breakpoint)); + g_malloc0(sizeof(struct whpx_breakpoint_collection) + + max_breakpoints * sizeof(struct whpx_breakpoint)); new_breakpoints->allocated = max_breakpoints; new_breakpoints->used = 0; diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index 1722ed2a4d..49393d95d8 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -319,6 +319,16 @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs, env->pc = tb_pc(tb); } + +static void loongarch_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + env->pc = data[0]; +} #endif /* CONFIG_TCG */ static bool loongarch_cpu_has_work(CPUState *cs) @@ -651,6 +661,7 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) static struct TCGCPUOps loongarch_tcg_ops = { .initialize = loongarch_translate_init, .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, + .restore_state_to_opc = loongarch_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = loongarch_cpu_tlb_fill, diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c index 95b37ea180..6091772349 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/translate.c @@ -272,9 +272,3 @@ void loongarch_translate_init(void) cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, llval), "llval"); } - -void restore_state_to_opc(CPULoongArchState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 1e902e1ef0..b67ddea2ae 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -38,6 +38,19 @@ static vaddr m68k_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static void m68k_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + M68kCPU *cpu = M68K_CPU(cs); + int cc_op = data[1]; + + cpu->env.pc = data[0]; + if (cc_op != CC_OP_DYNAMIC) { + cpu->env.cc_op = cc_op; + } +} + static bool m68k_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; @@ -524,6 +537,7 @@ static const struct SysemuCPUOps m68k_sysemu_ops = { static const struct TCGCPUOps m68k_tcg_ops = { .initialize = m68k_tcg_init, + .restore_state_to_opc = m68k_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = m68k_cpu_tlb_fill, diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 9df17aa4b2..5cbde4be34 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -6479,13 +6479,3 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags) env->mmu.mmusr, env->mmu.ar); #endif } - -void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, - target_ulong *data) -{ - int cc_op = data[1]; - env->pc = data[0]; - if (cc_op != CC_OP_DYNAMIC) { - env->cc_op = cc_op; - } -} diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index c10b8ac029..89e493f3ff 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -100,6 +100,16 @@ static void mb_cpu_synchronize_from_tb(CPUState *cs, cpu->env.iflags = tb->flags & IFLAGS_TB_MASK; } +static void mb_restore_state_to_opc(CPUState *cs, + const 
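A style note on the kvm.c and whpx-all.c hunks nearby: g_malloc0() returns void *, so the removed casts were redundant in C and could only hide type mismatches. The same header-plus-trailing-array allocation those call sites use, cast-free, as a small example:

#include <glib.h>

struct int_vec {
    size_t n;
    int items[];            /* C99 flexible array member */
};

static struct int_vec *int_vec_new(size_t n)
{
    /* g_malloc0() returns void * and aborts on OOM: no cast, no NULL check */
    struct int_vec *v = g_malloc0(sizeof(*v) + n * sizeof(v->items[0]));

    v->n = n;
    return v;
}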
TranslationBlock *tb, + const uint64_t *data) +{ + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.iflags = data[1]; +} + static bool mb_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); @@ -373,6 +383,7 @@ static const struct SysemuCPUOps mb_sysemu_ops = { static const struct TCGCPUOps mb_tcg_ops = { .initialize = mb_tcg_init, .synchronize_from_tb = mb_cpu_synchronize_from_tb, + .restore_state_to_opc = mb_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = mb_cpu_tlb_fill, diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index c5546f93aa..974f21eb31 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -1946,10 +1946,3 @@ void mb_tcg_init(void) cpu_res_addr = tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr"); } - -void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->iflags = data[1]; -} diff --git a/target/mips/cpu.c b/target/mips/cpu.c index da58eb8892..e997c1b9cb 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -538,6 +538,7 @@ static const struct SysemuCPUOps mips_sysemu_ops = { static const struct TCGCPUOps mips_tcg_ops = { .initialize = mips_tcg_init, .synchronize_from_tb = mips_cpu_synchronize_from_tb, + .restore_state_to_opc = mips_restore_state_to_opc, #if !defined(CONFIG_USER_ONLY) .tlb_fill = mips_cpu_tlb_fill, diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index 1d27fa2ff9..aef032c48d 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -21,6 +21,9 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); +void mips_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); const char *mips_exception_name(int32_t exception); diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index c3f92ea652..2f2d707a12 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -16229,9 +16229,13 @@ void mips_tcg_init(void) } } -void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, - target_ulong *data) +void mips_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + env->active_tc.PC = data[0]; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= data[1]; diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index 2b28429c08..9a5351bc81 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -42,6 +42,16 @@ static vaddr nios2_cpu_get_pc(CPUState *cs) return env->pc; } +static void nios2_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + env->pc = data[0]; +} + static bool nios2_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; @@ -346,6 +356,7 @@ static const struct SysemuCPUOps nios2_sysemu_ops = { static const struct TCGCPUOps nios2_tcg_ops = { .initialize = nios2_tcg_init, + .restore_state_to_opc = nios2_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = nios2_cpu_tlb_fill, diff --git a/target/nios2/translate.c b/target/nios2/translate.c index 8dc0a32c6c..4db8b47744 100644 --- a/target/nios2/translate.c +++ 
b/target/nios2/translate.c @@ -1110,9 +1110,3 @@ void nios2_tcg_init(void) cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPUNios2State, pc), "pc"); } - -void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index f6fd437785..de0176cd20 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -46,6 +46,18 @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs, cpu->env.pc = tb_pc(tb); } +static void openrisc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + OpenRISCCPU *cpu = OPENRISC_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.dflag = data[1] & 1; + if (data[1] & 2) { + cpu->env.ppc = cpu->env.pc - 4; + } +} static bool openrisc_cpu_has_work(CPUState *cs) { @@ -203,6 +215,7 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = { static const struct TCGCPUOps openrisc_tcg_ops = { .initialize = openrisc_translate_init, .synchronize_from_tb = openrisc_cpu_synchronize_from_tb, + .restore_state_to_opc = openrisc_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = openrisc_cpu_tlb_fill, diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index 8154f9d744..2f3d7c5fd1 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -1726,13 +1726,3 @@ void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags) (i % 4) == 3 ? '\n' : ' '); } } - -void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->dflag = data[1] & 1; - if (data[1] & 2) { - env->ppc = env->pc - 4; - } -} diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 763a8431be..335351c226 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -7221,6 +7221,15 @@ static vaddr ppc_cpu_get_pc(CPUState *cs) return cpu->env.nip; } +static void ppc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + + cpu->env.nip = data[0]; +} + static bool ppc_cpu_has_work(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -7446,6 +7455,7 @@ static const struct SysemuCPUOps ppc_sysemu_ops = { static const struct TCGCPUOps ppc_tcg_ops = { .initialize = ppc_translate_init, + .restore_state_to_opc = ppc_restore_state_to_opc, #ifdef CONFIG_USER_ONLY .record_sigsegv = ppc_cpu_record_sigsegv, diff --git a/target/ppc/translate.c b/target/ppc/translate.c index e810842925..7228857e23 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -7739,9 +7739,3 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base); } - -void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->nip = data[0]; -} diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index e6d9c706bb..d14e95c9dc 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -503,10 +503,14 @@ static bool riscv_cpu_has_work(CPUState *cs) #endif } -void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb, - target_ulong *data) +static void riscv_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); + if (xl == MXL_RV32) { env->pc = (int32_t)data[0]; } else { @@ -1138,6 +1142,7 @@ static const struct SysemuCPUOps 
riscv_sysemu_ops = { static const struct TCGCPUOps riscv_tcg_ops = { .initialize = riscv_translate_init, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, + .restore_state_to_opc = riscv_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = riscv_cpu_tlb_fill, diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index b94f809eb3..0020b9a95d 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -211,7 +211,7 @@ static void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt, return; } if (tot - cnt == 0) { - return ; + return; } memset(base + cnt, -1, tot - cnt); } diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 2f28099723..9003c6e9fe 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -47,6 +47,15 @@ static void rx_cpu_synchronize_from_tb(CPUState *cs, cpu->env.pc = tb_pc(tb); } +static void rx_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + RXCPU *cpu = RX_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool rx_cpu_has_work(CPUState *cs) { return cs->interrupt_request & @@ -192,6 +201,7 @@ static const struct SysemuCPUOps rx_sysemu_ops = { static const struct TCGCPUOps rx_tcg_ops = { .initialize = rx_translate_init, .synchronize_from_tb = rx_cpu_synchronize_from_tb, + .restore_state_to_opc = rx_restore_state_to_opc, .tlb_fill = rx_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c index 9ca32dcc82..acce650185 100644 --- a/target/rx/op_helper.c +++ b/target/rx/op_helper.c @@ -286,7 +286,7 @@ void helper_suntil(CPURXState *env, uint32_t sz) uint32_t tmp; tcg_debug_assert(sz < 3); if (env->regs[3] == 0) { - return ; + return; } do { tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); @@ -305,7 +305,7 @@ void helper_swhile(CPURXState *env, uint32_t sz) uint32_t tmp; tcg_debug_assert(sz < 3); if (env->regs[3] == 0) { - return ; + return; } do { tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); diff --git a/target/rx/translate.c b/target/rx/translate.c index ea5653bc95..87a3f54adb 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -2371,12 +2371,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base); } -void restore_state_to_opc(CPURXState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} - #define ALLOC_REGISTER(sym, name) \ cpu_##sym = tcg_global_mem_new_i32(cpu_env, \ offsetof(CPURXState, sym), name) diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index df00040e95..96562c516d 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -272,6 +272,7 @@ static void s390_cpu_reset_full(DeviceState *dev) static const struct TCGCPUOps s390_tcg_ops = { .initialize = s390x_translate_init, + .restore_state_to_opc = s390x_restore_state_to_opc, #ifdef CONFIG_USER_ONLY .record_sigsegv = s390_cpu_record_sigsegv, diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 04cae0b999..3ac7ec9acf 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -1035,7 +1035,7 @@ int kvm_arch_remove_hw_breakpoint(target_ulong addr, } size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); hw_breakpoints = - (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); + g_realloc(hw_breakpoints, size); } else { g_free(hw_breakpoints); hw_breakpoints = NULL; diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index b5ae0ae364..5d4361d35b 100644 --- 
a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -398,7 +398,9 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, /* translate.c */ void s390x_translate_init(void); - +void s390x_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); /* sigp.c */ int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3); diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 1d2dddab1c..5798928473 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -6691,9 +6691,12 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base); } -void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, - target_ulong *data) +void s390x_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; int cc_op = data[1]; env->psw.addr = data[0]; diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 56c50530da..453268392b 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -50,6 +50,21 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, cpu->env.flags = tb->flags; } +static void superh_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + SuperHCPU *cpu = SUPERH_CPU(cs); + + cpu->env.pc = data[0]; + cpu->env.flags = data[1]; + /* + * Theoretically delayed_pc should also be restored. In practice the + * branch instruction is re-executed after exception, so the delayed + * branch target will be recomputed. + */ +} + #ifndef CONFIG_USER_ONLY static bool superh_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) @@ -243,6 +258,7 @@ static const struct SysemuCPUOps sh4_sysemu_ops = { static const struct TCGCPUOps superh_tcg_ops = { .initialize = sh4_translate_init, .synchronize_from_tb = superh_cpu_synchronize_from_tb, + .restore_state_to_opc = superh_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = superh_cpu_tlb_fill, diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 26231b2a5a..7db3468b01 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -2381,13 +2381,3 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base); } - -void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; - env->flags = data[1]; - /* Theoretically delayed_pc should also be restored. In practice the - branch instruction is re-executed after exception, so the delayed - branch target will be recomputed. 
*/ -} diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 1f9ef7afd8..4c3d08a875 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -872,6 +872,7 @@ static const struct SysemuCPUOps sparc_sysemu_ops = { static const struct TCGCPUOps sparc_tcg_ops = { .initialize = sparc_tcg_init, .synchronize_from_tb = sparc_cpu_synchronize_from_tb, + .restore_state_to_opc = sparc_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = sparc_cpu_tlb_fill, diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index f80ea2e8cf..e478c5eb16 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -600,6 +600,9 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, /* translate.c */ void sparc_tcg_init(void); +void sparc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data); /* cpu-exec.c */ diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 2cbbe2396a..34858eb95f 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -6011,9 +6011,12 @@ void sparc_tcg_init(void) } } -void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, - target_ulong *data) +void sparc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) { + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; target_ulong pc = data[0]; target_ulong npc = data[1]; diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index ab7a1e3a6d..2c54a2825f 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -58,6 +58,16 @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs, env->PC = tb_pc(tb); } +static void tricore_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = data[0]; +} + static void tricore_cpu_reset(DeviceState *dev) { CPUState *s = CPU(dev); @@ -161,6 +171,7 @@ static const struct SysemuCPUOps tricore_sysemu_ops = { static const struct TCGCPUOps tricore_tcg_ops = { .initialize = tricore_tcg_init, .synchronize_from_tb = tricore_cpu_synchronize_from_tb, + .restore_state_to_opc = tricore_restore_state_to_opc, .tlb_fill = tricore_cpu_tlb_fill, }; diff --git a/target/tricore/translate.c b/target/tricore/translate.c index a0558ead71..c5b7bfbf20 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -8886,12 +8886,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns, &tricore_tr_ops, &ctx.base); } -void -restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->PC = data[0]; -} /* * * Initialization diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index cbbe0e84a2..09923301c4 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -51,6 +51,15 @@ static vaddr xtensa_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static void xtensa_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + XtensaCPU *cpu = XTENSA_CPU(cs); + + cpu->env.pc = data[0]; +} + static bool xtensa_cpu_has_work(CPUState *cs) { #ifndef CONFIG_USER_ONLY @@ -215,6 +224,7 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = { static const struct TCGCPUOps xtensa_tcg_ops = { .initialize = xtensa_translate_init, .debug_excp_handler = xtensa_breakpoint_handler, + .restore_state_to_opc = xtensa_restore_state_to_opc, #ifndef CONFIG_USER_ONLY .tlb_fill = xtensa_cpu_tlb_fill, diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 
bdd4690a5c..77bcd71030 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -1355,12 +1355,6 @@ void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } -void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, - target_ulong *data) -{ - env->pc = data[0]; -} - static void translate_abs(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index d997f7922a..344b63e20f 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1916,24 +1916,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_goto_tb: - if (s->tb_jmp_insn_offset != NULL) { - /* TCG_TARGET_HAS_direct_jump */ - /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic - write can be used to patch the target address. */ - if ((uintptr_t)s->code_ptr & 7) { - tcg_out32(s, NOP); - } - s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); - /* actual branch destination will be patched by - tb_target_set_jmp_target later. */ - tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); - } else { - /* !TCG_TARGET_HAS_direct_jump */ - tcg_debug_assert(s->tb_jmp_target_addr != NULL); - intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2; - tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP); + tcg_debug_assert(s->tb_jmp_insn_offset != NULL); + /* + * Ensure that ADRP+ADD are 8-byte aligned so that an atomic + * write can be used to patch the target address. + */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out32(s, NOP); } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* + * actual branch destination will be patched by + * tb_target_set_jmp_target later + */ + tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); tcg_out_insn(s, 3207, BR, TCG_REG_TMP); set_jmp_reset_offset(s, a0); break; diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index a3debf6da7..d326e28740 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -1031,6 +1031,36 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) #endif } +/* LoongArch uses `andi zero, zero, 0` as NOP. 
*/ +#define NOP OPC_ANDI +static void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, NOP); +} + +void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, + uintptr_t jmp_rw, uintptr_t addr) +{ + tcg_insn_unit i1, i2; + ptrdiff_t upper, lower; + ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2; + + if (offset == sextreg(offset, 0, 26)) { + i1 = encode_sd10k16_insn(OPC_B, offset); + i2 = NOP; + } else { + tcg_debug_assert(offset == sextreg(offset, 0, 36)); + lower = (int16_t)offset; + upper = (offset - lower) >> 16; + + i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper); + i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower); + } + uint64_t pair = ((uint64_t)i2 << 32) | i1; + qatomic_set((uint64_t *)jmp_rw, pair); + flush_idcache_range(jmp_rx, jmp_rw, 8); +} + /* * Entry-points */ @@ -1058,10 +1088,20 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_goto_tb: - assert(s->tb_jmp_insn_offset == 0); - /* indirect jump method */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO, - (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_debug_assert(s->tb_jmp_insn_offset != NULL); + /* + * Ensure that patch area is 8-byte aligned so that an + * atomic write can be used to patch the target address. + */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out_nop(s); + } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* + * actual branch destination will be patched by + * tb_target_set_jmp_target later + */ + tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0); tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); set_jmp_reset_offset(s, a0); break; diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index d58a6162f2..a659c8d6fd 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -42,7 +42,11 @@ #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_NB_REGS 32 -#define MAX_CODE_GEN_BUFFER_SIZE SIZE_MAX +/* + * PCADDU18I + JIRL sequence can give 20 + 16 + 2 = 38 bits + * signed offset, which is +/- 128 GiB. 
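Both tcg-target hunks above implement the same direct-jump contract: keep the two-instruction jump slot 8-byte aligned (padding with a NOP when needed) so retranslation can replace both instructions with a single atomic 64-bit store, then flush the instruction-cache alias of the patched bytes. A standalone sketch of that patch step, assuming a little-endian host; patch_insn_pair is an illustrative name, not the QEMU entry point:

#include <stdint.h>

static void patch_insn_pair(uint64_t *rw, uintptr_t rx,
                            uint32_t first, uint32_t second)
{
    /* first instruction lands in the low half on a little-endian host */
    uint64_t pair = ((uint64_t)second << 32) | first;

    __atomic_store_n(rw, pair, __ATOMIC_RELAXED);   /* one 8-byte store */
    /* invalidate the stale copy seen through the executable mapping */
    __builtin___clear_cache((char *)rx, (char *)(rx + 8));
}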
+ */ +#define MAX_CODE_GEN_BUFFER_SIZE (128 * GiB) typedef enum { TCG_REG_ZERO, @@ -123,7 +127,7 @@ typedef enum { #define TCG_TARGET_HAS_clz_i32 1 #define TCG_TARGET_HAS_ctz_i32 1 #define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_direct_jump 0 +#define TCG_TARGET_HAS_direct_jump 1 #define TCG_TARGET_HAS_brcond2 0 #define TCG_TARGET_HAS_setcond2 0 #define TCG_TARGET_HAS_qemu_st8_i32 0 @@ -166,7 +170,6 @@ typedef enum { #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 -/* not defined -- call should be eliminated at compile time */ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); #define TCG_TARGET_DEFAULT_MO (0) diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py index 124649a24b..fba6527026 100644 --- a/tests/avocado/machine_aspeed.py +++ b/tests/avocado/machine_aspeed.py @@ -92,7 +92,7 @@ class AST2x00Machine(QemuSystemTest): self.do_test_arm_aspeed(image_path) - def do_test_arm_aspeed_buidroot_start(self, image, cpu_id): + def do_test_arm_aspeed_buildroot_start(self, image, cpu_id): self.require_netdev('user') self.vm.set_console() @@ -111,11 +111,11 @@ class AST2x00Machine(QemuSystemTest): exec_command(self, 'root') time.sleep(0.1) - def do_test_arm_aspeed_buidroot_poweroff(self): + def do_test_arm_aspeed_buildroot_poweroff(self): exec_command_and_wait_for_pattern(self, 'poweroff', 'reboot: System halted'); - def test_arm_ast2500_evb_builroot(self): + def test_arm_ast2500_evb_buildroot(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast2500-evb @@ -129,7 +129,7 @@ class AST2x00Machine(QemuSystemTest): self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); - self.do_test_arm_aspeed_buidroot_start(image_path, '0x0') + self.do_test_arm_aspeed_buildroot_start(image_path, '0x0') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', @@ -141,9 +141,9 @@ class AST2x00Machine(QemuSystemTest): exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') - self.do_test_arm_aspeed_buidroot_poweroff() + self.do_test_arm_aspeed_buildroot_poweroff() - def test_arm_ast2600_evb_builroot(self): + def test_arm_ast2600_evb_buildroot(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast2600-evb @@ -159,7 +159,7 @@ class AST2x00Machine(QemuSystemTest): 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); self.vm.add_args('-device', 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); - self.do_test_arm_aspeed_buidroot_start(image_path, '0xf00') + self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', @@ -177,7 +177,7 @@ class AST2x00Machine(QemuSystemTest): year = time.strftime("%Y") exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); - self.do_test_arm_aspeed_buidroot_poweroff() + self.do_test_arm_aspeed_buildroot_poweroff() class AST2x00MachineSDK(QemuSystemTest): diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index ef4427ff4d..aa1ba179fa 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -2481,8 +2481,8 @@ int main(int argc, char **argv) tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err); if (!tmpfs) { - g_test_message("g_dir_make_tmp on path (%s): %s", tmpfs, - err->message); + g_test_message("Can't create temporary directory in %s: %s", + g_get_tmp_dir(), err->message); } g_assert(tmpfs); diff --git 
a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c index e8d2da7228..bf9f7c4248 100644 --- a/tests/qtest/vhost-user-test.c +++ b/tests/qtest/vhost-user-test.c @@ -571,8 +571,8 @@ static TestServer *test_server_new(const gchar *name, tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err); if (!tmpfs) { - g_test_message("g_dir_make_tmp on path (%s): %s", tmpfs, - err->message); + g_test_message("Can't create temporary directory in %s: %s", + g_get_tmp_dir(), err->message); g_error_free(err); } g_assert(tmpfs); diff --git a/ui/console.c b/ui/console.c index 49da6a91df..65c117874c 100644 --- a/ui/console.c +++ b/ui/console.c @@ -1297,7 +1297,7 @@ static void kbd_send_chars(QemuConsole *s) uint32_t size; buf = fifo8_pop_buf(&s->out_fifo, MIN(len, avail), &size); - qemu_chr_be_write(s->chr, (uint8_t *)buf, size); + qemu_chr_be_write(s->chr, buf, size); len = qemu_chr_be_can_write(s->chr); avail -= size; } diff --git a/ui/gtk.c b/ui/gtk.c index 92daaa6a6e..7ec21f7798 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -1763,7 +1763,7 @@ static void gd_vc_send_chars(VirtualConsole *vc) uint32_t size; buf = fifo8_pop_buf(&vc->vte.out_fifo, MIN(len, avail), &size); - qemu_chr_be_write(vc->vte.chr, (uint8_t *)buf, size); + qemu_chr_be_write(vc->vte.chr, buf, size); len = qemu_chr_be_can_write(vc->vte.chr); avail -= size; } diff --git a/ui/vnc-enc-hextile.c b/ui/vnc-enc-hextile.c index 4215bd7daf..c763256f29 100644 --- a/ui/vnc-enc-hextile.c +++ b/ui/vnc-enc-hextile.c @@ -50,8 +50,8 @@ int vnc_hextile_send_framebuffer_update(VncState *vs, int x, int has_fg, has_bg; uint8_t *last_fg, *last_bg; - last_fg = (uint8_t *) g_malloc(VNC_SERVER_FB_BYTES); - last_bg = (uint8_t *) g_malloc(VNC_SERVER_FB_BYTES); + last_fg = g_malloc(VNC_SERVER_FB_BYTES); + last_bg = g_malloc(VNC_SERVER_FB_BYTES); has_fg = has_bg = 0; for (j = y; j < (y + h); j += 16) { for (i = x; i < (x + w); i += 16) { diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c index 4562bf8928..886f9bf611 100644 --- a/ui/vnc-jobs.c +++ b/ui/vnc-jobs.c @@ -373,7 +373,7 @@ void vnc_start_worker_thread(void) VncJobQueue *q; if (vnc_worker_thread_running()) - return ; + return; q = vnc_queue_init(); qemu_thread_create(&q->thread, "vnc_worker", vnc_worker_thread, q, diff --git a/ui/vnc.c b/ui/vnc.c index acb3629cd8..88f55cbf3c 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -3085,7 +3085,7 @@ static void vnc_rect_updated(VncDisplay *vd, int x, int y, struct timeval * tv) rect = vnc_stat_rect(vd, x, y); if (rect->updated) { - return ; + return; } rect->times[rect->idx] = *tv; rect->idx = (rect->idx + 1) % ARRAY_SIZE(rect->times);
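The two qtest fixes above correct an error message that tried to print the very pointer g_dir_make_tmp() had just returned as NULL. For reference, minimal correct usage of that GLib call, reporting the parent directory on failure as the fixed messages do:

#include <glib.h>

static gchar *make_scratch_dir(void)
{
    g_autoptr(GError) err = NULL;
    gchar *dir = g_dir_make_tmp("scratch-XXXXXX", &err);

    if (!dir) {
        /* dir is NULL here, so name the parent directory instead */
        g_warning("Can't create temporary directory in %s: %s",
                  g_get_tmp_dir(), err->message);
    }
    return dir;   /* caller releases the path with g_free() */
}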