101 files changed, 3820 insertions, 1161 deletions
diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 5fb00da73d..0000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,111 +0,0 @@ -env: - CIRRUS_CLONE_DEPTH: 1 - -windows_msys2_task: - timeout_in: 90m - windows_container: - image: cirrusci/windowsservercore:2019 - os_version: 2019 - cpu: 8 - memory: 8G - env: - CIRRUS_SHELL: powershell - MSYS: winsymlinks:native - MSYSTEM: MINGW64 - MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe - MSYS2_FINGERPRINT: 0 - MSYS2_PACKAGES: " - diffutils git grep make pkg-config sed - mingw-w64-x86_64-python - mingw-w64-x86_64-python-sphinx - mingw-w64-x86_64-toolchain - mingw-w64-x86_64-SDL2 - mingw-w64-x86_64-SDL2_image - mingw-w64-x86_64-gtk3 - mingw-w64-x86_64-glib2 - mingw-w64-x86_64-ninja - mingw-w64-x86_64-jemalloc - mingw-w64-x86_64-lzo2 - mingw-w64-x86_64-zstd - mingw-w64-x86_64-libjpeg-turbo - mingw-w64-x86_64-pixman - mingw-w64-x86_64-libgcrypt - mingw-w64-x86_64-libpng - mingw-w64-x86_64-libssh - mingw-w64-x86_64-snappy - mingw-w64-x86_64-libusb - mingw-w64-x86_64-usbredir - mingw-w64-x86_64-libtasn1 - mingw-w64-x86_64-nettle - mingw-w64-x86_64-cyrus-sasl - mingw-w64-x86_64-curl - mingw-w64-x86_64-gnutls - mingw-w64-x86_64-libnfs - " - CHERE_INVOKING: 1 - msys2_cache: - folder: C:\tools\archive - reupload_on_changes: false - # These env variables are used to generate fingerprint to trigger the cache procedure - # If wanna to force re-populate msys2, increase MSYS2_FINGERPRINT - fingerprint_script: - - | - echo $env:CIRRUS_TASK_NAME - echo $env:MSYS2_URL - echo $env:MSYS2_FINGERPRINT - echo $env:MSYS2_PACKAGES - populate_script: - - | - md -Force C:\tools\archive\pkg - $start_time = Get-Date - bitsadmin /transfer msys_download /dynamic /download /priority FOREGROUND $env:MSYS2_URL C:\tools\archive\base.exe - Write-Output "Download time taken: $((Get-Date).Subtract($start_time))" - cd C:\tools - C:\tools\archive\base.exe -y - del -Force C:\tools\archive\base.exe - Write-Output "Base install time taken: $((Get-Date).Subtract($start_time))" - $start_time = Get-Date - - ((Get-Content -path C:\tools\msys64\etc\\post-install\\07-pacman-key.post -Raw) -replace '--refresh-keys', '--version') | Set-Content -Path C:\tools\msys64\etc\\post-install\\07-pacman-key.post - C:\tools\msys64\usr\bin\bash.exe -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf" - C:\tools\msys64\usr\bin\bash.exe -lc "export" - C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Sy - echo Y | C:\tools\msys64\usr\bin\pacman.exe --noconfirm -Suu --overwrite=* - taskkill /F /FI "MODULES eq msys-2.0.dll" - tasklist - C:\tools\msys64\usr\bin\bash.exe -lc "mv -f /etc/pacman.conf.pacnew /etc/pacman.conf || true" - C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -Syuu --overwrite=*" - Write-Output "Core install time taken: $((Get-Date).Subtract($start_time))" - $start_time = Get-Date - - C:\tools\msys64\usr\bin\bash.exe -lc "pacman --noconfirm -S --needed $env:MSYS2_PACKAGES" - Write-Output "Package install time taken: $((Get-Date).Subtract($start_time))" - $start_time = Get-Date - - del -Force -ErrorAction SilentlyContinue C:\tools\msys64\etc\mtab - del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\fd - del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stderr - del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdin - del -Force -ErrorAction SilentlyContinue C:\tools\msys64\dev\stdout - del -Force -Recurse -ErrorAction SilentlyContinue 
C:\tools\msys64\var\cache\pacman\pkg - tar cf C:\tools\archive\msys64.tar -C C:\tools\ msys64 - - Write-Output "Package archive time taken: $((Get-Date).Subtract($start_time))" - del -Force -Recurse -ErrorAction SilentlyContinue c:\tools\msys64 - install_script: - - | - $start_time = Get-Date - cd C:\tools - ls C:\tools\archive\msys64.tar - tar xf C:\tools\archive\msys64.tar - Write-Output "Extract msys2 time taken: $((Get-Date).Subtract($start_time))" - script: - - mkdir build - - cd build - - C:\tools\msys64\usr\bin\bash.exe -lc "../configure --python=python3 - --target-list-exclude=i386-softmmu,ppc64-softmmu,aarch64-softmmu,mips64-softmmu,mipsel-softmmu,sh4-softmmu" - - C:\tools\msys64\usr\bin\bash.exe -lc "make -j8" - - exit $LastExitCode - test_script: - - C:\tools\msys64\usr\bin\bash.exe -lc "cd build && make V=1 check" - - exit $LastExitCode diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..93718ef425 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,21 @@ +# +# List of code-formatting clean ups the git blame can ignore +# +# git blame --ignore-revs-file .git-blame-ignore-revs +# +# or +# +# git config blame.ignoreRevsFile .git-blame-ignore-revs +# + +# gdbstub: clean-up indents +ad9e4585b3c7425759d3eea697afbca71d2c2082 + +# e1000e: fix code style +0eadd56bf53ab196a16d492d7dd31c62e1c24c32 + +# target/riscv: coding style fixes +8c7feddddd9218b407792120bcfda0347ed16205 + +# replace TABs with spaces +48805df9c22a0700fba4b3b548fafaa21726ca68 diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml index 0274228de8..2fbb58d2a3 100644 --- a/.gitlab-ci.d/base.yml +++ b/.gitlab-ci.d/base.yml @@ -75,5 +75,5 @@ - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"' when: manual - # Jobs can run if any jobs they depend on were successfull + # Jobs can run if any jobs they depend on were successful - when: on_success diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml index 87235e43b4..472bacd2e2 100644 --- a/.gitlab-ci.d/windows.yml +++ b/.gitlab-ci.d/windows.yml @@ -59,6 +59,7 @@ msys2-64bit: mingw-w64-x86_64-SDL2 mingw-w64-x86_64-SDL2_image mingw-w64-x86_64-snappy + mingw-w64-x86_64-spice mingw-w64-x86_64-usbredir mingw-w64-x86_64-zstd " - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory @@ -108,6 +109,7 @@ msys2-32bit: mingw-w64-i686-SDL2 mingw-w64-i686-SDL2_image mingw-w64-i686-snappy + mingw-w64-i686-spice mingw-w64-i686-usbredir mingw-w64-i686-zstd " - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory diff --git a/MAINTAINERS b/MAINTAINERS index 9b56ccdd92..2c2068ea5c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -64,6 +64,20 @@ L: qemu-devel@nongnu.org F: * F: */ +Project policy and developer guides +R: Alex Bennée <alex.bennee@linaro.org> +R: Daniel P. Berrangé <berrange@redhat.com> +R: Thomas Huth <thuth@redhat.com> +R: Markus Armbruster <armbru@redhat.com> +R: Philippe Mathieu-Daudé <philmd@linaro.org> +W: https://www.qemu.org/docs/master/devel/index.html +S: Odd Fixes +F: docs/devel/style.rst +F: docs/devel/code-of-conduct.rst +F: docs/devel/conflict-resolution.rst +F: docs/devel/submitting-a-patch.rst +F: docs/devel/submitting-a-pull-request.rst + Responsible Disclosure, Reporting Security Issues ------------------------------------------------- W: https://wiki.qemu.org/SecurityProcess @@ -2119,7 +2133,6 @@ T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org virtiofs -M: Dr. 
David Alan Gilbert <dgilbert@redhat.com> M: Stefan Hajnoczi <stefanha@redhat.com> S: Supported F: hw/virtio/vhost-user-fs* @@ -2252,6 +2265,7 @@ F: tests/qtest/libqos/e1000e.* igb M: Akihiko Odaki <akihiko.odaki@daynix.com> +R: Sriram Yagnaraman <sriram.yagnaraman@est.tech> S: Maintained F: docs/system/devices/igb.rst F: hw/net/igb* @@ -2862,7 +2876,7 @@ F: tests/unit/test-rcu-*.c F: util/rcu.c Human Monitor (HMP) -M: Dr. David Alan Gilbert <dgilbert@redhat.com> +M: Dr. David Alan Gilbert <dave@treblig.org> S: Maintained F: monitor/monitor-internal.h F: monitor/misc.c @@ -3135,7 +3149,6 @@ F: scripts/checkpatch.pl Migration M: Juan Quintela <quintela@redhat.com> -M: Dr. David Alan Gilbert <dgilbert@redhat.com> S: Maintained F: hw/core/vmstate-if.c F: include/hw/vmstate-if.h @@ -3818,8 +3831,7 @@ W: https://cirrus-ci.com/github/qemu/qemu Windows Hosted Continuous Integration M: Yonggang Luo <luoyonggang@gmail.com> S: Maintained -F: .cirrus.yml -W: https://cirrus-ci.com/github/qemu/qemu +F: .gitlab-ci.d/windows.yml Guest Test Compilation Support M: Alex Bennée <alex.bennee@linaro.org> @@ -3908,3 +3920,8 @@ Performance Tools and Tests M: Ahmed Karaman <ahmedkhaledkaraman@gmail.com> S: Maintained F: scripts/performance/ + +Code Coverage Tools +M: Alex Bennée <alex.bennee@linaro.org> +S: Odd Fixes +F: scripts/coverage/ diff --git a/VERSION b/VERSION index 2b20514daf..a5011b8814 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -7.2.91 +7.2.93 diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index f2a6ea6a68..cf3a88d90e 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -685,6 +685,15 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) uint32_t ring_size = s->kvm_dirty_ring_size; uint32_t count = 0, fetch = cpu->kvm_fetch_index; + /* + * It's possible that we race with vcpu creation code where the vcpu is + * put onto the vcpus list but not yet initialized the dirty ring + * structures. If so, skip it. + */ + if (!cpu->created) { + return 0; + } + assert(dirty_gfns && ring_size); trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index c815f2dbfd..8370c92c05 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -257,7 +257,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, if (cflags & CF_PCREL) { /* Use acquire to ensure current load of pc from jc. */ - tb = qatomic_load_acquire(&jc->array[hash].tb); + tb = qatomic_load_acquire(&jc->array[hash].tb); if (likely(tb && jc->array[hash].pc == pc && @@ -272,7 +272,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, return NULL; } jc->array[hash].pc = pc; - /* Use store_release on tb to ensure pc is written first. */ + /* Ensure pc is written first. */ qatomic_store_release(&jc->array[hash].tb, tb); } else { /* Use rcu_read to ensure current load of pc from *tb. */ @@ -971,18 +971,27 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) tb = tb_lookup(cpu, pc, cs_base, flags, cflags); if (tb == NULL) { + CPUJumpCache *jc; uint32_t h; mmap_lock(); tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); mmap_unlock(); + /* * We add the TB in the virtual pc hash table * for the fast lookup */ h = tb_jmp_cache_hash_func(pc); - /* Use the pc value already stored in tb->pc. */ - qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb); + jc = cpu->tb_jmp_cache; + if (cflags & CF_PCREL) { + jc->array[h].pc = pc; + /* Ensure pc is written first. 
*/ + qatomic_store_release(&jc->array[h].tb, tb); + } else { + /* Use the pc value already stored in tb->pc. */ + qatomic_set(&jc->array[h].tb, tb); + } } #ifndef CONFIG_USER_ONLY diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index 7246c1c46b..cb1f806f00 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu/interval-tree.h" +#include "qemu/qtree.h" #include "exec/cputlb.h" #include "exec/log.h" #include "exec/exec-all.h" @@ -126,29 +127,29 @@ static void tb_remove(TranslationBlock *tb) } /* TODO: For now, still shared with translate-all.c for system mode. */ -#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \ - for (T = foreach_tb_first(start, end), \ - N = foreach_tb_next(T, start, end); \ +#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \ + for (T = foreach_tb_first(start, last), \ + N = foreach_tb_next(T, start, last); \ T != NULL; \ - T = N, N = foreach_tb_next(N, start, end)) + T = N, N = foreach_tb_next(N, start, last)) typedef TranslationBlock *PageForEachNext; static PageForEachNext foreach_tb_first(tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { - IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1); + IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last); return n ? container_of(n, TranslationBlock, itree) : NULL; } static PageForEachNext foreach_tb_next(PageForEachNext tb, tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { IntervalTreeNode *n; if (tb) { - n = interval_tree_iter_next(&tb->itree, start, end - 1); + n = interval_tree_iter_next(&tb->itree, start, last); if (n) { return container_of(n, TranslationBlock, itree); } @@ -314,12 +315,12 @@ struct page_entry { * See also: page_collection_lock(). */ struct page_collection { - GTree *tree; + QTree *tree; struct page_entry *max; }; typedef int PageForEachNext; -#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \ +#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \ TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) #ifdef CONFIG_DEBUG_TCG @@ -467,7 +468,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) struct page_entry *pe; PageDesc *pd; - pe = g_tree_lookup(set->tree, &index); + pe = q_tree_lookup(set->tree, &index); if (pe) { return false; } @@ -478,7 +479,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) } pe = page_entry_new(pd, index); - g_tree_insert(set->tree, &pe->index, pe); + q_tree_insert(set->tree, &pe->index, pe); /* * If this is either (1) the first insertion or (2) a page whose index @@ -510,30 +511,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) } /* - * Lock a range of pages ([@start,@end[) as well as the pages of all + * Lock a range of pages ([@start,@last]) as well as the pages of all * intersecting TBs. * Locking order: acquire locks in ascending order of page index. 
*/ static struct page_collection *page_collection_lock(tb_page_addr_t start, - tb_page_addr_t end) + tb_page_addr_t last) { struct page_collection *set = g_malloc(sizeof(*set)); tb_page_addr_t index; PageDesc *pd; start >>= TARGET_PAGE_BITS; - end >>= TARGET_PAGE_BITS; - g_assert(start <= end); + last >>= TARGET_PAGE_BITS; + g_assert(start <= last); - set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, + set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL, page_entry_destroy); set->max = NULL; assert_no_pages_locked(); retry: - g_tree_foreach(set->tree, page_entry_lock, NULL); + q_tree_foreach(set->tree, page_entry_lock, NULL); - for (index = start; index <= end; index++) { + for (index = start; index <= last; index++) { TranslationBlock *tb; PageForEachNext n; @@ -542,7 +543,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, continue; } if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { - g_tree_foreach(set->tree, page_entry_unlock, NULL); + q_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } assert_page_locked(pd); @@ -551,7 +552,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, (tb_page_addr1(tb) != -1 && page_trylock_add(set, tb_page_addr1(tb)))) { /* drop all locks, and reacquire in order */ - g_tree_foreach(set->tree, page_entry_unlock, NULL); + q_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } } @@ -562,7 +563,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start, static void page_collection_unlock(struct page_collection *set) { /* entries are unlocked and freed via page_entry_destroy */ - g_tree_destroy(set->tree); + q_tree_destroy(set->tree); g_free(set); } @@ -990,14 +991,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, * Called with mmap_lock held for user-mode emulation. * NOTE: this function must not be called while a TB is running. 
*/ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) { TranslationBlock *tb; PageForEachNext n; assert_memory_lock(); - PAGE_FOR_EACH_TB(start, end, unused, tb, n) { + PAGE_FOR_EACH_TB(start, last, unused, tb, n) { tb_phys_invalidate__locked(tb); } } @@ -1009,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) */ void tb_invalidate_phys_page(tb_page_addr_t addr) { - tb_page_addr_t start, end; + tb_page_addr_t start, last; start = addr & TARGET_PAGE_MASK; - end = start + TARGET_PAGE_SIZE; - tb_invalidate_phys_range(start, end); + last = addr | ~TARGET_PAGE_MASK; + tb_invalidate_phys_range(start, last); } /* @@ -1029,6 +1030,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) bool current_tb_modified; TranslationBlock *tb; PageForEachNext n; + tb_page_addr_t last; /* * Without precise smc semantics, or when outside of a TB, @@ -1045,10 +1047,11 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) assert_memory_lock(); current_tb = tcg_tb_lookup(pc); + last = addr | ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; current_tb_modified = false; - PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) { + PAGE_FOR_EACH_TB(addr, last, unused, tb, n) { if (current_tb == tb && (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { /* @@ -1080,11 +1083,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) static void tb_invalidate_phys_page_range__locked(struct page_collection *pages, PageDesc *p, tb_page_addr_t start, - tb_page_addr_t end, + tb_page_addr_t last, uintptr_t retaddr) { TranslationBlock *tb; - tb_page_addr_t tb_start, tb_end; PageForEachNext n; #ifdef TARGET_HAS_PRECISE_SMC bool current_tb_modified = false; @@ -1092,22 +1094,22 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, #endif /* TARGET_HAS_PRECISE_SMC */ /* - * We remove all the TBs in the range [start, end[. + * We remove all the TBs in the range [start, last]. 
* XXX: see if in some cases it could be faster to invalidate all the code */ - PAGE_FOR_EACH_TB(start, end, p, tb, n) { + PAGE_FOR_EACH_TB(start, last, p, tb, n) { + tb_page_addr_t tb_start, tb_last; + /* NOTE: this is subtle as a TB may span two physical pages */ + tb_start = tb_page_addr0(tb); + tb_last = tb_start + tb->size - 1; if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb_page_addr0(tb); - tb_end = tb_start + tb->size; + tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK); } else { tb_start = tb_page_addr1(tb); - tb_end = tb_start + ((tb_page_addr0(tb) + tb->size) - & ~TARGET_PAGE_MASK); + tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK); } - if (!(tb_end <= start || tb_start >= end)) { + if (!(tb_last < start || tb_start > last)) { #ifdef TARGET_HAS_PRECISE_SMC if (current_tb == tb && (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { @@ -1149,7 +1151,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, void tb_invalidate_phys_page(tb_page_addr_t addr) { struct page_collection *pages; - tb_page_addr_t start, end; + tb_page_addr_t start, last; PageDesc *p; p = page_find(addr >> TARGET_PAGE_BITS); @@ -1158,35 +1160,37 @@ void tb_invalidate_phys_page(tb_page_addr_t addr) } start = addr & TARGET_PAGE_MASK; - end = start + TARGET_PAGE_SIZE; - pages = page_collection_lock(start, end); - tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); + last = addr | ~TARGET_PAGE_MASK; + pages = page_collection_lock(start, last); + tb_invalidate_phys_page_range__locked(pages, p, start, last, 0); page_collection_unlock(pages); } /* * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end may refer to *different* physical pages. + * [start;last]. NOTE: start and end may refer to *different* physical pages. * 'is_cpu_write_access' should be true if called from a real cpu write * access: the virtual CPU will exit the current TB if code is modified inside * this TB. 
*/ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) { struct page_collection *pages; - tb_page_addr_t next; + tb_page_addr_t index, index_last; + + pages = page_collection_lock(start, last); - pages = page_collection_lock(start, end); - for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - start < end; - start = next, next += TARGET_PAGE_SIZE) { - PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); - tb_page_addr_t bound = MIN(next, end); + index_last = last >> TARGET_PAGE_BITS; + for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) { + PageDesc *pd = page_find(index); + tb_page_addr_t bound; if (pd == NULL) { continue; } assert_page_locked(pd); + bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK; + bound = MIN(bound, last); tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); } page_collection_unlock(pages); @@ -1207,7 +1211,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages, } assert_page_locked(p); - tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra); + tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra); } /* @@ -1221,7 +1225,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, { struct page_collection *pages; - pages = page_collection_lock(ram_addr, ram_addr + size); + pages = page_collection_lock(ram_addr, ram_addr + size - 1); tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); page_collection_unlock(pages); } diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index af35e0d092..58c8e64096 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -59,7 +59,7 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) cflags |= parallel ? CF_PARALLEL : 0; cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; - cpu->tcg_cflags = cflags; + cpu->tcg_cflags |= cflags; } void tcg_cpus_destroy(CPUState *cpu) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 74deb18bd0..5b13281119 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -572,7 +572,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); addr = get_page_addr_code(env, pc); if (addr != -1) { - tb_invalidate_phys_range(addr, addr + 1); + tb_invalidate_phys_range(addr, addr); } } } diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 7b37fd229e..a7e0c3e2f4 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -480,24 +480,22 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last, * The flag PAGE_WRITE_ORG is positioned automatically depending * on PAGE_WRITE. The mmap_lock should already be held. */ -void page_set_flags(target_ulong start, target_ulong end, int flags) +void page_set_flags(target_ulong start, target_ulong last, int flags) { - target_ulong last; bool reset = false; bool inval_tb = false; /* This function should never be called with addresses outside the guest address space. If this assert fires, it probably indicates a missing call to h2g_valid. */ - assert(start < end); - assert(end - 1 <= GUEST_ADDR_MAX); + assert(start <= last); + assert(last <= GUEST_ADDR_MAX); /* Only set PAGE_ANON with new mappings. 
*/ assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); assert_memory_lock(); - start = start & TARGET_PAGE_MASK; - end = TARGET_PAGE_ALIGN(end); - last = end - 1; + start &= TARGET_PAGE_MASK; + last |= ~TARGET_PAGE_MASK; if (!(flags & PAGE_VALID)) { flags = 0; @@ -510,7 +508,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) } if (!flags || reset) { - page_reset_target_data(start, end); + page_reset_target_data(start, last); inval_tb |= pageflags_unset(start, last); } if (flags) { @@ -518,7 +516,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) ~(reset ? 0 : PAGE_STICKY)); } if (inval_tb) { - tb_invalidate_phys_range(start, end); + tb_invalidate_phys_range(start, last); } } @@ -816,15 +814,14 @@ typedef struct TargetPageDataNode { static IntervalTreeRoot targetdata_root; -void page_reset_target_data(target_ulong start, target_ulong end) +void page_reset_target_data(target_ulong start, target_ulong last) { IntervalTreeNode *n, *next; - target_ulong last; assert_memory_lock(); - start = start & TARGET_PAGE_MASK; - last = TARGET_PAGE_ALIGN(end) - 1; + start &= TARGET_PAGE_MASK; + last |= ~TARGET_PAGE_MASK; for (n = interval_tree_iter_first(&targetdata_root, start, last), next = n ? interval_tree_iter_next(n, start, last) : NULL; @@ -887,7 +884,7 @@ void *page_get_target_data(target_ulong address) return t->data[(page - region) >> TARGET_PAGE_BITS]; } #else -void page_reset_target_data(target_ulong start, target_ulong end) { } +void page_reset_target_data(target_ulong start, target_ulong last) { } #endif /* TARGET_PAGE_DATA_SIZE */ /* The softmmu versions of these helpers are in cputlb.c. */ diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c index 00221e23c5..5ff0cb8bd9 100644 --- a/accel/xen/xen-all.c +++ b/accel/xen/xen-all.c @@ -32,28 +32,13 @@ xendevicemodel_handle *xen_dmod; static void xenstore_record_dm_state(const char *state) { - struct xs_handle *xs; char path[50]; - /* We now have everything we need to set the xenstore entry. */ - xs = xs_open(0); - if (xs == NULL) { - fprintf(stderr, "Could not contact XenStore\n"); - exit(1); - } - snprintf(path, sizeof (path), "device-model/%u/state", xen_domid); - /* - * This call may fail when running restricted so don't make it fatal in - * that case. Toolstacks should instead use QMP to listen for state changes. - */ - if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) && - !xen_domid_restrict) { + if (!qemu_xen_xs_write(xenstore, XBT_NULL, path, state, strlen(state))) { error_report("error recording dm state"); exit(1); } - - xs_close(xs); } @@ -111,7 +96,15 @@ static int xen_init(MachineState *ms) xc_interface_close(xen_xc); return -1; } - qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); + + /* + * The XenStore write would fail when running restricted so don't attempt + * it in that case. Toolstacks should instead use QMP to listen for state + * changes. 
+ */ + if (!xen_domid_restrict) { + qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); + } /* * opt out of system RAM being allocated by generic code */ diff --git a/block.c b/block.c index 0dd604d0f6..e0c6c648b1 100644 --- a/block.c +++ b/block.c @@ -5879,9 +5879,10 @@ int64_t coroutine_fn bdrv_co_getlength(BlockDriverState *bs) } /* return 0 as number of sectors if no device present or error */ -void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) +void coroutine_fn bdrv_co_get_geometry(BlockDriverState *bs, + uint64_t *nb_sectors_ptr) { - int64_t nb_sectors = bdrv_nb_sectors(bs); + int64_t nb_sectors = bdrv_co_nb_sectors(bs); IO_CODE(); *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors; diff --git a/block/block-backend.c b/block/block-backend.c index 278b04ce69..2ee39229e4 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -1615,13 +1615,16 @@ int64_t coroutine_fn blk_co_getlength(BlockBackend *blk) return bdrv_co_getlength(blk_bs(blk)); } -void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr) +void coroutine_fn blk_co_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr) { IO_CODE(); + GRAPH_RDLOCK_GUARD(); + if (!blk_bs(blk)) { *nb_sectors_ptr = 0; } else { - bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr); + bdrv_co_get_geometry(blk_bs(blk), nb_sectors_ptr); } } diff --git a/block/dmg-lzfse.c b/block/dmg-lzfse.c index 6798cf4fbf..4ea0b9b20d 100644 --- a/block/dmg-lzfse.c +++ b/block/dmg-lzfse.c @@ -23,7 +23,12 @@ */ #include "qemu/osdep.h" #include "dmg.h" + +/* Work around a -Wstrict-prototypes warning in LZFSE headers */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-prototypes" #include <lzfse.h> +#pragma GCC diagnostic pop static int dmg_uncompress_lzfse_do(char *next_in, unsigned int avail_in, char *next_out, unsigned int avail_out) diff --git a/block/export/virtio-blk-handler.c b/block/export/virtio-blk-handler.c index 313666e8ab..bc1cec6757 100644 --- a/block/export/virtio-blk-handler.c +++ b/block/export/virtio-blk-handler.c @@ -22,8 +22,9 @@ struct virtio_blk_inhdr { unsigned char status; }; -static bool virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, - uint64_t sector, size_t size) +static bool coroutine_fn +virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, + uint64_t sector, size_t size) { uint64_t nb_sectors; uint64_t total_sectors; @@ -41,7 +42,7 @@ static bool virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size, if ((sector << VIRTIO_BLK_SECTOR_BITS) % block_size) { return false; } - blk_get_geometry(blk, &total_sectors); + blk_co_get_geometry(blk, &total_sectors); if (sector > total_sectors || nb_sectors > total_sectors - sector) { return false; } diff --git a/bsd-user/main.c b/bsd-user/main.c index 89f225dead..babc3b009b 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -68,13 +68,9 @@ bool have_guest_base; # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \ (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) -/* - * There are a number of places where we assign reserved_va to a variable - * of type abi_ulong and expect it to fit. Avoid the last page. 
- */ -# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK) +# define MAX_RESERVED_VA 0xfffffffful # else -# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS) +# define MAX_RESERVED_VA ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) # endif # else # define MAX_RESERVED_VA 0 @@ -466,7 +462,7 @@ int main(int argc, char **argv) envlist_free(envlist); if (reserved_va) { - mmap_next_start = reserved_va; + mmap_next_start = reserved_va + 1; } { diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index d6c5a344c9..565b9f97ed 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -118,7 +118,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) if (ret != 0) goto error; } - page_set_flags(start, start + len, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot | PAGE_VALID); mmap_unlock(); return 0; error: @@ -234,7 +234,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, size = HOST_PAGE_ALIGN(size) + alignment; end_addr = start + size; if (end_addr > reserved_va) { - end_addr = reserved_va; + end_addr = reserved_va + 1; } addr = end_addr - qemu_host_page_size; @@ -243,7 +243,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, if (looped) { return (abi_ulong)-1; } - end_addr = reserved_va; + end_addr = reserved_va + 1; addr = end_addr - qemu_host_page_size; looped = 1; continue; @@ -656,7 +656,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, } } the_end1: - page_set_flags(start, start + len, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot | PAGE_VALID); the_end: #ifdef DEBUG_MMAP printf("ret=0x" TARGET_ABI_FMT_lx "\n", start); @@ -767,7 +767,7 @@ int target_munmap(abi_ulong start, abi_ulong len) } if (ret == 0) { - page_set_flags(start, start + len, 0); + page_set_flags(start, start + len - 1, 0); } mmap_unlock(); return ret; diff --git a/configure b/configure index 05bed4f4a1..800b5850f4 100755 --- a/configure +++ b/configure @@ -231,6 +231,7 @@ safe_stack="" use_containers="yes" gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb") gdb_arches="" +glib_has_gslice="no" if test -e "$source_path/.git" then @@ -1494,6 +1495,17 @@ for i in $glib_modules; do fi done +# Check whether glib has gslice, which we have to avoid for correctness. +# TODO: remove this check and the corresponding workaround (qtree) when +# the minimum supported glib is >= $glib_dropped_gslice_version. +glib_dropped_gslice_version=2.75.3 +for i in $glib_modules; do + if ! $pkg_config --atleast-version=$glib_dropped_gslice_version $i; then + glib_has_gslice="yes" + break + fi +done + glib_bindir="$($pkg_config --variable=bindir glib-2.0)" if test -z "$glib_bindir" ; then glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin @@ -2420,6 +2432,9 @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak echo "GLIB_LIBS=$glib_libs" >> $config_host_mak echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak +if test "$glib_has_gslice" = "yes" ; then + echo "HAVE_GLIB_WITH_SLICE_ALLOCATOR=y" >> $config_host_mak +fi echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 33b942283f..1ca9dc33d6 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -206,15 +206,6 @@ be an effective use of its limited resources, and thus intends to discontinue it. 
Since all recent x86 hardware from the past >10 years is capable of the 64-bit x86 extensions, a corresponding 64-bit OS should be used instead. -System emulation on 32-bit arm hosts (since 8.0) -'''''''''''''''''''''''''''''''''''''''''''''''' - -Since QEMU needs a strong host machine for running full system emulation, and -all recent powerful arm hosts support 64-bit, the QEMU project deprecates the -support for running any system emulation on 32-bit arm hosts in general. Use -64-bit arm hosts for system emulation instead. (Note: "user" mode emulation -continues to be supported on 32-bit arm hosts, too) - QEMU API (QAPI) events ---------------------- diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c index 2a66371aa5..0760d78685 100644 --- a/gdbstub/gdbstub.c +++ b/gdbstub/gdbstub.c @@ -1468,7 +1468,7 @@ static void handle_query_supported(GArray *params, void *user_ctx) ";ReverseStep+;ReverseContinue+"); } -#ifdef CONFIG_USER_ONLY +#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX) if (gdbserver_state.c_cpu->opaque) { g_string_append(gdbserver_state.str_buf, ";qXfer:auxv:read+"); } diff --git a/gdbstub/meson.build b/gdbstub/meson.build index bd5c5cd67d..cdb4d28691 100644 --- a/gdbstub/meson.build +++ b/gdbstub/meson.build @@ -20,11 +20,13 @@ gdb_softmmu_ss = gdb_softmmu_ss.apply(config_host, strict: false) libgdb_user = static_library('gdb_user', gdb_user_ss.sources() + genh, name_suffix: 'fa', - c_args: '-DCONFIG_USER_ONLY') + c_args: '-DCONFIG_USER_ONLY', + build_by_default: have_user) libgdb_softmmu = static_library('gdb_softmmu', gdb_softmmu_ss.sources() + genh, - name_suffix: 'fa') + name_suffix: 'fa', + build_by_default: have_system) gdb_user = declare_dependency(link_whole: libgdb_user) user_ss.add(gdb_user) diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 50e5141116..54f6a3e0b3 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -689,7 +689,10 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, rom_ptr_for_as(as, addr, size)); - g_free(fdt); + if (fdt != ms->fdt) { + g_free(ms->fdt); + ms->fdt = fdt; + } return size; diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 64ae4a6899..d7cc4d3ec1 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -601,8 +601,8 @@ enum { }; enum { - FD_STATE_MULTI = 0x01, /* multi track flag */ - FD_STATE_FORMAT = 0x02, /* format flag */ + FD_STATE_MULTI = 0x01, /* multi track flag */ + FD_STATE_FORMAT = 0x02, /* format flag */ }; enum { diff --git a/hw/block/nand.c b/hw/block/nand.c index 1aee1cb2b1..9c1b89cfa6 100644 --- a/hw/block/nand.c +++ b/hw/block/nand.c @@ -30,33 +30,33 @@ #include "qemu/module.h" #include "qom/object.h" -# define NAND_CMD_READ0 0x00 -# define NAND_CMD_READ1 0x01 -# define NAND_CMD_READ2 0x50 -# define NAND_CMD_LPREAD2 0x30 -# define NAND_CMD_NOSERIALREAD2 0x35 -# define NAND_CMD_RANDOMREAD1 0x05 -# define NAND_CMD_RANDOMREAD2 0xe0 -# define NAND_CMD_READID 0x90 -# define NAND_CMD_RESET 0xff -# define NAND_CMD_PAGEPROGRAM1 0x80 -# define NAND_CMD_PAGEPROGRAM2 0x10 -# define NAND_CMD_CACHEPROGRAM2 0x15 -# define NAND_CMD_BLOCKERASE1 0x60 -# define NAND_CMD_BLOCKERASE2 0xd0 -# define NAND_CMD_READSTATUS 0x70 -# define NAND_CMD_COPYBACKPRG1 0x85 - -# define NAND_IOSTATUS_ERROR (1 << 0) -# define NAND_IOSTATUS_PLANE0 (1 << 1) -# define NAND_IOSTATUS_PLANE1 (1 << 2) -# define NAND_IOSTATUS_PLANE2 (1 << 3) -# define NAND_IOSTATUS_PLANE3 (1 << 4) +# define NAND_CMD_READ0 0x00 +# define NAND_CMD_READ1 0x01 +# define NAND_CMD_READ2 0x50 +# define 
NAND_CMD_LPREAD2 0x30 +# define NAND_CMD_NOSERIALREAD2 0x35 +# define NAND_CMD_RANDOMREAD1 0x05 +# define NAND_CMD_RANDOMREAD2 0xe0 +# define NAND_CMD_READID 0x90 +# define NAND_CMD_RESET 0xff +# define NAND_CMD_PAGEPROGRAM1 0x80 +# define NAND_CMD_PAGEPROGRAM2 0x10 +# define NAND_CMD_CACHEPROGRAM2 0x15 +# define NAND_CMD_BLOCKERASE1 0x60 +# define NAND_CMD_BLOCKERASE2 0xd0 +# define NAND_CMD_READSTATUS 0x70 +# define NAND_CMD_COPYBACKPRG1 0x85 + +# define NAND_IOSTATUS_ERROR (1 << 0) +# define NAND_IOSTATUS_PLANE0 (1 << 1) +# define NAND_IOSTATUS_PLANE1 (1 << 2) +# define NAND_IOSTATUS_PLANE2 (1 << 3) +# define NAND_IOSTATUS_PLANE3 (1 << 4) # define NAND_IOSTATUS_READY (1 << 6) -# define NAND_IOSTATUS_UNPROTCT (1 << 7) +# define NAND_IOSTATUS_UNPROTCT (1 << 7) -# define MAX_PAGE 0x800 -# define MAX_OOB 0x40 +# define MAX_PAGE 0x800 +# define MAX_OOB 0x40 typedef struct NANDFlashState NANDFlashState; struct NANDFlashState { @@ -102,40 +102,40 @@ static void mem_and(uint8_t *dest, const uint8_t *src, size_t n) } } -# define NAND_NO_AUTOINCR 0x00000001 -# define NAND_BUSWIDTH_16 0x00000002 -# define NAND_NO_PADDING 0x00000004 -# define NAND_CACHEPRG 0x00000008 -# define NAND_COPYBACK 0x00000010 -# define NAND_IS_AND 0x00000020 -# define NAND_4PAGE_ARRAY 0x00000040 -# define NAND_NO_READRDY 0x00000100 -# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) +# define NAND_NO_AUTOINCR 0x00000001 +# define NAND_BUSWIDTH_16 0x00000002 +# define NAND_NO_PADDING 0x00000004 +# define NAND_CACHEPRG 0x00000008 +# define NAND_COPYBACK 0x00000010 +# define NAND_IS_AND 0x00000020 +# define NAND_4PAGE_ARRAY 0x00000040 +# define NAND_NO_READRDY 0x00000100 +# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) # define NAND_IO -# define PAGE(addr) ((addr) >> ADDR_SHIFT) -# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) -# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) -# define OOB_SHIFT (PAGE_SHIFT - 5) -# define OOB_SIZE (1 << OOB_SHIFT) -# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) -# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) - -# define NAND_PAGE_SIZE 256 -# define PAGE_SHIFT 8 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 +# define PAGE(addr) ((addr) >> ADDR_SHIFT) +# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) +# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) +# define OOB_SHIFT (PAGE_SHIFT - 5) +# define OOB_SIZE (1 << OOB_SHIFT) +# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) +# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) + +# define NAND_PAGE_SIZE 256 +# define PAGE_SHIFT 8 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 # include "nand.c" -# define NAND_PAGE_SIZE 512 -# define PAGE_SHIFT 9 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 +# define NAND_PAGE_SIZE 512 +# define PAGE_SHIFT 9 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 # include "nand.c" -# define NAND_PAGE_SIZE 2048 -# define PAGE_SHIFT 11 -# define PAGE_SECTORS 4 -# define ADDR_SHIFT 16 +# define NAND_PAGE_SIZE 2048 +# define PAGE_SHIFT 11 +# define PAGE_SECTORS 4 +# define ADDR_SHIFT 16 # include "nand.c" /* Information based on Linux drivers/mtd/nand/raw/nand_ids.c */ @@ -148,79 +148,79 @@ static const struct { } nand_flash_ids[0x100] = { [0 ... 
0xff] = { 0 }, - [0x6b] = { 4, 8, 9, 4, 0 }, - [0xe3] = { 4, 8, 9, 4, 0 }, - [0xe5] = { 4, 8, 9, 4, 0 }, - [0xd6] = { 8, 8, 9, 4, 0 }, - [0xe6] = { 8, 8, 9, 4, 0 }, - - [0x33] = { 16, 8, 9, 5, 0 }, - [0x73] = { 16, 8, 9, 5, 0 }, - [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x35] = { 32, 8, 9, 5, 0 }, - [0x75] = { 32, 8, 9, 5, 0 }, - [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x36] = { 64, 8, 9, 5, 0 }, - [0x76] = { 64, 8, 9, 5, 0 }, - [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x78] = { 128, 8, 9, 5, 0 }, - [0x39] = { 128, 8, 9, 5, 0 }, - [0x79] = { 128, 8, 9, 5, 0 }, - [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x71] = { 256, 8, 9, 5, 0 }, + [0x6b] = { 4, 8, 9, 4, 0 }, + [0xe3] = { 4, 8, 9, 4, 0 }, + [0xe5] = { 4, 8, 9, 4, 0 }, + [0xd6] = { 8, 8, 9, 4, 0 }, + [0xe6] = { 8, 8, 9, 4, 0 }, + + [0x33] = { 16, 8, 9, 5, 0 }, + [0x73] = { 16, 8, 9, 5, 0 }, + [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x35] = { 32, 8, 9, 5, 0 }, + [0x75] = { 32, 8, 9, 5, 0 }, + [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x36] = { 64, 8, 9, 5, 0 }, + [0x76] = { 64, 8, 9, 5, 0 }, + [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x78] = { 128, 8, 9, 5, 0 }, + [0x39] = { 128, 8, 9, 5, 0 }, + [0x79] = { 128, 8, 9, 5, 0 }, + [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x71] = { 256, 8, 9, 5, 0 }, /* * These are the new chips with large page size. 
The pagesize and the * erasesize is determined from the extended id bytes */ -# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) -# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) +# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) +# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ - [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, - [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, /* 1 Gigabit */ - [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, - [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, /* 2 Gigabit */ - [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, - [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, + [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, + [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, /* 4 Gigabit */ - [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, - [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, /* 8 Gigabit */ - [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, - [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, /* 16 Gigabit */ - [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, - [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, + [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, + [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, }; static void nand_reset(DeviceState *dev) @@ -812,4 +812,4 @@ static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s) # undef PAGE_SHIFT # undef PAGE_SECTORS # undef ADDR_SHIFT -#endif /* NAND_IO */ +#endif /* NAND_IO */ diff --git a/hw/block/onenand.c b/hw/block/onenand.c index 1fde975024..50d3d1c985 100644 --- a/hw/block/onenand.c +++ b/hw/block/onenand.c @@ -35,10 +35,10 @@ #include "qom/object.h" /* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */ -#define PAGE_SHIFT 11 +#define PAGE_SHIFT 11 /* Fixed */ -#define BLOCK_SHIFT (PAGE_SHIFT + 6) +#define BLOCK_SHIFT (PAGE_SHIFT + 6) #define TYPE_ONE_NAND "onenand" OBJECT_DECLARE_SIMPLE_TYPE(OneNANDState, ONE_NAND) @@ -408,23 +408,23 @@ static void onenand_command(OneNANDState *s) int b; int sec; void *buf; -#define SETADDR(block, page) \ - sec = (s->addr[page] & 3) + \ - ((((s->addr[page] >> 2) & 0x3f) + \ - (((s->addr[block] & 0xfff) | \ - (s->addr[block] >> 15 ? \ - s->density_mask : 0)) << 6)) << (PAGE_SHIFT - 9)); -#define SETBUF_M() \ - buf = (s->bufaddr & 8) ? 
\ - s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \ +#define SETADDR(block, page) \ + sec = (s->addr[page] & 3) + \ + ((((s->addr[page] >> 2) & 0x3f) + \ + (((s->addr[block] & 0xfff) | \ + (s->addr[block] >> 15 ? s->density_mask : 0)) \ + << 6)) \ + << (PAGE_SHIFT - 9)); +#define SETBUF_M() \ + buf = (s->bufaddr & 8) ? s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \ buf += (s->bufaddr & 3) << 9; -#define SETBUF_S() \ - buf = (s->bufaddr & 8) ? \ - s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \ +#define SETBUF_S() \ + buf = (s->bufaddr & 8) ? \ + s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \ buf += (s->bufaddr & 3) << 4; switch (s->command) { - case 0x00: /* Load single/multiple sector data unit into buffer */ + case 0x00: /* Load single/multiple sector data unit into buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_M() @@ -443,7 +443,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_LOAD; break; - case 0x13: /* Load single/multiple spare sector into buffer */ + case 0x13: /* Load single/multiple spare sector into buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_S() @@ -456,7 +456,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_LOAD; break; - case 0x80: /* Program single/multiple sector data unit from buffer */ + case 0x80: /* Program single/multiple sector data unit from buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_M() @@ -475,7 +475,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x1a: /* Program single/multiple spare area sector from buffer */ + case 0x1a: /* Program single/multiple spare area sector from buffer */ SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) SETBUF_S() @@ -488,7 +488,7 @@ static void onenand_command(OneNANDState *s) */ s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x1b: /* Copy-back program */ + case 0x1b: /* Copy-back program */ SETBUF_S() SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) @@ -504,7 +504,7 @@ static void onenand_command(OneNANDState *s) s->intstatus |= ONEN_INT | ONEN_INT_PROG; break; - case 0x23: /* Unlock NAND array block(s) */ + case 0x23: /* Unlock NAND array block(s) */ s->intstatus |= ONEN_INT; /* XXX the previous (?) area should be locked automatically */ @@ -519,7 +519,7 @@ static void onenand_command(OneNANDState *s) s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; } break; - case 0x27: /* Unlock All NAND array blocks */ + case 0x27: /* Unlock All NAND array blocks */ s->intstatus |= ONEN_INT; for (b = 0; b < s->blocks; b ++) { @@ -530,7 +530,7 @@ static void onenand_command(OneNANDState *s) } break; - case 0x2a: /* Lock NAND array block(s) */ + case 0x2a: /* Lock NAND array block(s) */ s->intstatus |= ONEN_INT; for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { @@ -544,7 +544,7 @@ static void onenand_command(OneNANDState *s) s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKED; } break; - case 0x2c: /* Lock-tight NAND array block(s) */ + case 0x2c: /* Lock-tight NAND array block(s) */ s->intstatus |= ONEN_INT; for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { @@ -559,13 +559,13 @@ static void onenand_command(OneNANDState *s) } break; - case 0x71: /* Erase-Verify-Read */ + case 0x71: /* Erase-Verify-Read */ s->intstatus |= ONEN_INT; break; - case 0x95: /* Multi-block erase */ + case 0x95: /* Multi-block erase */ qemu_irq_pulse(s->intr); /* Fall through. 
*/ - case 0x94: /* Block erase */ + case 0x94: /* Block erase */ sec = ((s->addr[ONEN_BUF_BLOCK] & 0xfff) | (s->addr[ONEN_BUF_BLOCK] >> 15 ? s->density_mask : 0)) << (BLOCK_SHIFT - 9); @@ -574,20 +574,20 @@ static void onenand_command(OneNANDState *s) s->intstatus |= ONEN_INT | ONEN_INT_ERASE; break; - case 0xb0: /* Erase suspend */ + case 0xb0: /* Erase suspend */ break; - case 0x30: /* Erase resume */ + case 0x30: /* Erase resume */ s->intstatus |= ONEN_INT | ONEN_INT_ERASE; break; - case 0xf0: /* Reset NAND Flash core */ + case 0xf0: /* Reset NAND Flash core */ onenand_reset(s, 0); break; - case 0xf3: /* Reset OneNAND */ + case 0xf3: /* Reset OneNAND */ onenand_reset(s, 0); break; - case 0x65: /* OTP Access */ + case 0x65: /* OTP Access */ s->intstatus |= ONEN_INT; s->blk_cur = NULL; s->current = s->otp; @@ -616,52 +616,52 @@ static uint64_t onenand_read(void *opaque, hwaddr addr, case 0x0000 ... 0xbffe: return lduw_le_p(s->boot[0] + addr); - case 0xf000: /* Manufacturer ID */ + case 0xf000: /* Manufacturer ID */ return s->id.man; - case 0xf001: /* Device ID */ + case 0xf001: /* Device ID */ return s->id.dev; - case 0xf002: /* Version ID */ + case 0xf002: /* Version ID */ return s->id.ver; /* TODO: get the following values from a real chip! */ - case 0xf003: /* Data Buffer size */ + case 0xf003: /* Data Buffer size */ return 1 << PAGE_SHIFT; - case 0xf004: /* Boot Buffer size */ + case 0xf004: /* Boot Buffer size */ return 0x200; - case 0xf005: /* Amount of buffers */ + case 0xf005: /* Amount of buffers */ return 1 | (2 << 8); - case 0xf006: /* Technology */ + case 0xf006: /* Technology */ return 0; - case 0xf100 ... 0xf107: /* Start addresses */ + case 0xf100 ... 0xf107: /* Start addresses */ return s->addr[offset - 0xf100]; - case 0xf200: /* Start buffer */ + case 0xf200: /* Start buffer */ return (s->bufaddr << 8) | ((s->count - 1) & (1 << (PAGE_SHIFT - 10))); - case 0xf220: /* Command */ + case 0xf220: /* Command */ return s->command; - case 0xf221: /* System Configuration 1 */ + case 0xf221: /* System Configuration 1 */ return s->config[0] & 0xffe0; - case 0xf222: /* System Configuration 2 */ + case 0xf222: /* System Configuration 2 */ return s->config[1]; - case 0xf240: /* Controller Status */ + case 0xf240: /* Controller Status */ return s->status; - case 0xf241: /* Interrupt */ + case 0xf241: /* Interrupt */ return s->intstatus; - case 0xf24c: /* Unlock Start Block Address */ + case 0xf24c: /* Unlock Start Block Address */ return s->unladdr[0]; - case 0xf24d: /* Unlock End Block Address */ + case 0xf24d: /* Unlock End Block Address */ return s->unladdr[1]; - case 0xf24e: /* Write Protection Status */ + case 0xf24e: /* Write Protection Status */ return s->wpstatus; - case 0xff00: /* ECC Status */ + case 0xff00: /* ECC Status */ return 0x00; - case 0xff01: /* ECC Result of main area data */ - case 0xff02: /* ECC Result of spare area data */ - case 0xff03: /* ECC Result of main area data */ - case 0xff04: /* ECC Result of spare area data */ + case 0xff01: /* ECC Result of main area data */ + case 0xff02: /* ECC Result of spare area data */ + case 0xff03: /* ECC Result of main area data */ + case 0xff04: /* ECC Result of spare area data */ qemu_log_mask(LOG_UNIMP, "onenand: ECC result registers unimplemented\n"); return 0x0000; @@ -696,15 +696,15 @@ static void onenand_write(void *opaque, hwaddr addr, } switch (value) { - case 0x00f0: /* Reset OneNAND */ + case 0x00f0: /* Reset OneNAND */ onenand_reset(s, 0); break; - case 0x00e0: /* Load Data into Buffer */ + case 0x00e0: /* Load 
Data into Buffer */ s->cycle = 1; break; - case 0x0090: /* Read Identification Data */ + case 0x0090: /* Read Identification Data */ memset(s->boot[0], 0, 3 << s->shift); s->boot[0][0 << s->shift] = s->id.man & 0xff; s->boot[0][1 << s->shift] = s->id.dev & 0xff; @@ -718,11 +718,11 @@ static void onenand_write(void *opaque, hwaddr addr, } break; - case 0xf100 ... 0xf107: /* Start addresses */ + case 0xf100 ... 0xf107: /* Start addresses */ s->addr[offset - 0xf100] = value; break; - case 0xf200: /* Start buffer */ + case 0xf200: /* Start buffer */ s->bufaddr = (value >> 8) & 0xf; if (PAGE_SHIFT == 11) s->count = (value & 3) ?: 4; @@ -730,36 +730,36 @@ static void onenand_write(void *opaque, hwaddr addr, s->count = (value & 1) ?: 2; break; - case 0xf220: /* Command */ + case 0xf220: /* Command */ if (s->intstatus & (1 << 15)) break; s->command = value; onenand_command(s); break; - case 0xf221: /* System Configuration 1 */ + case 0xf221: /* System Configuration 1 */ s->config[0] = value; onenand_intr_update(s); qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1); break; - case 0xf222: /* System Configuration 2 */ + case 0xf222: /* System Configuration 2 */ s->config[1] = value; break; - case 0xf241: /* Interrupt */ + case 0xf241: /* Interrupt */ s->intstatus &= value; if ((1 << 15) & ~s->intstatus) s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE | ONEN_ERR_PROG | ONEN_ERR_LOAD); onenand_intr_update(s); break; - case 0xf24c: /* Unlock Start Block Address */ + case 0xf24c: /* Unlock Start Block Address */ s->unladdr[0] = value & (s->blocks - 1); /* For some reason we have to set the end address to by default * be same as start because the software forgets to write anything * in there. */ s->unladdr[1] = value & (s->blocks - 1); break; - case 0xf24d: /* Unlock End Block Address */ + case 0xf24d: /* Unlock End Block Address */ s->unladdr[1] = value & (s->blocks - 1); break; diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c index bfc27ad899..d350126b27 100644 --- a/hw/block/tc58128.c +++ b/hw/block/tc58128.c @@ -62,24 +62,24 @@ static void init_dev(tc58128_dev * dev, const char *filename) dev->flash_contents = g_malloc(FLASH_SIZE); memset(dev->flash_contents, 0xff, FLASH_SIZE); if (filename) { - /* Load flash image skipping the first block */ + /* Load flash image skipping the first block */ ret = load_image_size(filename, dev->flash_contents + 528 * 32, FLASH_SIZE - 528 * 32); - if (ret < 0) { + if (ret < 0) { if (!qtest_enabled()) { error_report("Could not load flash image %s", filename); exit(1); } - } else { - /* Build first block with number of blocks */ + } else { + /* Build first block with number of blocks */ blocks = DIV_ROUND_UP(ret, 528 * 32); - dev->flash_contents[0] = blocks & 0xff; - dev->flash_contents[1] = (blocks >> 8) & 0xff; - dev->flash_contents[2] = (blocks >> 16) & 0xff; - dev->flash_contents[3] = (blocks >> 24) & 0xff; - fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, - filename); - } + dev->flash_contents[0] = blocks & 0xff; + dev->flash_contents[1] = (blocks >> 8) & 0xff; + dev->flash_contents[2] = (blocks >> 16) & 0xff; + dev->flash_contents[3] = (blocks >> 24) & 0xff; + fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, + filename); + } } } @@ -87,26 +87,26 @@ static void handle_command(tc58128_dev * dev, uint8_t command) { switch (command) { case 0xff: - fprintf(stderr, "reset flash device\n"); - dev->state = WAIT; - break; + fprintf(stderr, "reset flash device\n"); + dev->state = WAIT; + break; case 0x00: - fprintf(stderr, "read mode 1\n"); - dev->state = 
READ1; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 1\n"); + dev->state = READ1; + dev->address_cycle = 0; + break; case 0x01: - fprintf(stderr, "read mode 2\n"); - dev->state = READ2; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 2\n"); + dev->state = READ2; + dev->address_cycle = 0; + break; case 0x50: - fprintf(stderr, "read mode 3\n"); - dev->state = READ3; - dev->address_cycle = 0; - break; + fprintf(stderr, "read mode 3\n"); + dev->state = READ3; + dev->address_cycle = 0; + break; default: - fprintf(stderr, "unknown flash command 0x%02x\n", command); + fprintf(stderr, "unknown flash command 0x%02x\n", command); abort(); } } @@ -117,28 +117,28 @@ static void handle_address(tc58128_dev * dev, uint8_t data) case READ1: case READ2: case READ3: - switch (dev->address_cycle) { - case 0: - dev->address = data; - if (dev->state == READ2) - dev->address |= 0x100; - else if (dev->state == READ3) - dev->address |= 0x200; - break; - case 1: - dev->address += data * 528 * 0x100; - break; - case 2: - dev->address += data * 528; - fprintf(stderr, "address pointer in flash: 0x%08x\n", - dev->address); - break; - default: - /* Invalid data */ + switch (dev->address_cycle) { + case 0: + dev->address = data; + if (dev->state == READ2) + dev->address |= 0x100; + else if (dev->state == READ3) + dev->address |= 0x200; + break; + case 1: + dev->address += data * 528 * 0x100; + break; + case 2: + dev->address += data * 528; + fprintf(stderr, "address pointer in flash: 0x%08x\n", + dev->address); + break; + default: + /* Invalid data */ abort(); - } - dev->address_cycle++; - break; + } + dev->address_cycle++; + break; default: abort(); } @@ -148,7 +148,7 @@ static uint8_t handle_read(tc58128_dev * dev) { #if 0 if (dev->address % 0x100000 == 0) - fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); + fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); #endif return dev->flash_contents[dev->address++]; } @@ -163,31 +163,31 @@ static int tc58128_cb(uint16_t porta, uint16_t portb, int dev; if ((porta & CE1) == 0) - dev = 0; + dev = 0; else if ((porta & CE2) == 0) - dev = 1; + dev = 1; else - return 0; /* No device selected */ + return 0; /* No device selected */ if ((porta & RE) && (porta & WE)) { - /* Nothing to do, assert ready and return to input state */ - *periph_portadir &= 0xff00; - *periph_portadir |= RDY(dev); - *periph_pdtra |= RDY(dev); - return 1; + /* Nothing to do, assert ready and return to input state */ + *periph_portadir &= 0xff00; + *periph_portadir |= RDY(dev); + *periph_pdtra |= RDY(dev); + return 1; } if (porta & CLE) { - /* Command */ - assert((porta & WE) == 0); - handle_command(&tc58128_devs[dev], porta & 0x00ff); + /* Command */ + assert((porta & WE) == 0); + handle_command(&tc58128_devs[dev], porta & 0x00ff); } else if (porta & ALE) { - assert((porta & WE) == 0); - handle_address(&tc58128_devs[dev], porta & 0x00ff); + assert((porta & WE) == 0); + handle_address(&tc58128_devs[dev], porta & 0x00ff); } else if ((porta & RE) == 0) { - *periph_portadir |= 0x00ff; - *periph_pdtra &= 0xff00; - *periph_pdtra |= handle_read(&tc58128_devs[dev]); + *periph_portadir |= 0x00ff; + *periph_pdtra &= 0xff00; + *periph_pdtra |= handle_read(&tc58128_devs[dev]); } else { abort(); } @@ -195,9 +195,9 @@ static int tc58128_cb(uint16_t porta, uint16_t portb, } static sh7750_io_device tc58128 = { - RE | WE, /* Port A triggers */ - 0, /* Port B triggers */ - tc58128_cb /* Callback */ + RE | WE, /* Port A triggers */ + 0, /* Port B triggers */ + 
tc58128_cb /* Callback */ }; int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2) diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index c3d6046784..44fe4eddbb 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -94,6 +94,13 @@ void pmbus_send64(PMBusDevice *pmdev, uint64_t data) void pmbus_send_string(PMBusDevice *pmdev, const char *data) { + if (!data) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: %s: uninitialised read from 0x%02x\n", + __func__, DEVICE(pmdev)->canonical_path, pmdev->code); + return; + } + size_t len = strlen(data); g_assert(len > 0); g_assert(len + pmdev->out_buf_len < SMBUS_DATA_MAX_LEN); diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index faade7def8..a62896759c 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -3850,7 +3850,7 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid), }; - vtd_page_walk(s, &ce, n->start, n->end, &info, vtd_as->pasid); + vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid); } } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c index 26a90ed45f..a68357c1c5 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -36,20 +36,20 @@ #include "trace.h" /* CMD646 specific */ -#define CFR 0x50 -#define CFR_INTR_CH0 0x04 -#define CNTRL 0x51 -#define CNTRL_EN_CH0 0x04 -#define CNTRL_EN_CH1 0x08 -#define ARTTIM23 0x57 -#define ARTTIM23_INTR_CH1 0x10 -#define MRDMODE 0x71 -#define MRDMODE_INTR_CH0 0x04 -#define MRDMODE_INTR_CH1 0x08 -#define MRDMODE_BLK_CH0 0x10 -#define MRDMODE_BLK_CH1 0x20 -#define UDIDETCR0 0x73 -#define UDIDETCR1 0x7B +#define CFR 0x50 +#define CFR_INTR_CH0 0x04 +#define CNTRL 0x51 +#define CNTRL_EN_CH0 0x04 +#define CNTRL_EN_CH1 0x08 +#define ARTTIM23 0x57 +#define ARTTIM23_INTR_CH1 0x10 +#define MRDMODE 0x71 +#define MRDMODE_INTR_CH0 0x04 +#define MRDMODE_INTR_CH1 0x08 +#define MRDMODE_BLK_CH0 0x10 +#define MRDMODE_BLK_CH1 0x20 +#define UDIDETCR0 0x73 +#define UDIDETCR1 0x7B static void cmd646_update_irq(PCIDevice *pd); diff --git a/hw/ide/core.c b/hw/ide/core.c index 2d034731cf..45d14a25e9 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -318,52 +318,52 @@ static void ide_cfata_identify(IDEState *s) cur_sec = s->cylinders * s->heads * s->sectors; - put_le16(p + 0, 0x848a); /* CF Storage Card signature */ - put_le16(p + 1, s->cylinders); /* Default cylinders */ - put_le16(p + 3, s->heads); /* Default heads */ - put_le16(p + 6, s->sectors); /* Default sectors per track */ + put_le16(p + 0, 0x848a); /* CF Storage Card signature */ + put_le16(p + 1, s->cylinders); /* Default cylinders */ + put_le16(p + 3, s->heads); /* Default heads */ + put_le16(p + 6, s->sectors); /* Default sectors per track */ /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */ /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */ padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ - put_le16(p + 22, 0x0004); /* ECC bytes */ - padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ + put_le16(p + 22, 0x0004); /* ECC bytes */ + padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */ #if MAX_MULT_SECTORS > 1 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); #else put_le16(p + 47, 0x0000); #endif - put_le16(p + 49, 0x0f00); /* Capabilities */ - put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ - put_le16(p + 52, 0x0001); /* DMA 
cycle timing mode */ - put_le16(p + 53, 0x0003); /* Translation params valid */ - put_le16(p + 54, s->cylinders); /* Current cylinders */ - put_le16(p + 55, s->heads); /* Current heads */ - put_le16(p + 56, s->sectors); /* Current sectors */ - put_le16(p + 57, cur_sec); /* Current capacity */ - put_le16(p + 58, cur_sec >> 16); /* Current capacity */ - if (s->mult_sectors) /* Multiple sector setting */ + put_le16(p + 49, 0x0f00); /* Capabilities */ + put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ + put_le16(p + 52, 0x0001); /* DMA cycle timing mode */ + put_le16(p + 53, 0x0003); /* Translation params valid */ + put_le16(p + 54, s->cylinders); /* Current cylinders */ + put_le16(p + 55, s->heads); /* Current heads */ + put_le16(p + 56, s->sectors); /* Current sectors */ + put_le16(p + 57, cur_sec); /* Current capacity */ + put_le16(p + 58, cur_sec >> 16); /* Current capacity */ + if (s->mult_sectors) /* Multiple sector setting */ put_le16(p + 59, 0x100 | s->mult_sectors); /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */ /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */ - put_le16(p + 63, 0x0203); /* Multiword DMA capability */ - put_le16(p + 64, 0x0001); /* Flow Control PIO support */ - put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ - put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ - put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */ - put_le16(p + 82, 0x400c); /* Command Set supported */ - put_le16(p + 83, 0x7068); /* Command Set supported */ - put_le16(p + 84, 0x4000); /* Features supported */ - put_le16(p + 85, 0x000c); /* Command Set enabled */ - put_le16(p + 86, 0x7044); /* Command Set enabled */ - put_le16(p + 87, 0x4000); /* Features enabled */ - put_le16(p + 91, 0x4060); /* Current APM level */ - put_le16(p + 129, 0x0002); /* Current features option */ - put_le16(p + 130, 0x0005); /* Reassigned sectors */ - put_le16(p + 131, 0x0001); /* Initial power mode */ - put_le16(p + 132, 0x0000); /* User signature */ - put_le16(p + 160, 0x8100); /* Power requirement */ - put_le16(p + 161, 0x8001); /* CF command set */ + put_le16(p + 63, 0x0203); /* Multiword DMA capability */ + put_le16(p + 64, 0x0001); /* Flow Control PIO support */ + put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ + put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ + put_le16(p + 68, 0x00b4); /* Min. 
PIO cycle time */ + put_le16(p + 82, 0x400c); /* Command Set supported */ + put_le16(p + 83, 0x7068); /* Command Set supported */ + put_le16(p + 84, 0x4000); /* Features supported */ + put_le16(p + 85, 0x000c); /* Command Set enabled */ + put_le16(p + 86, 0x7044); /* Command Set enabled */ + put_le16(p + 87, 0x4000); /* Features enabled */ + put_le16(p + 91, 0x4060); /* Current APM level */ + put_le16(p + 129, 0x0002); /* Current features option */ + put_le16(p + 130, 0x0005); /* Reassigned sectors */ + put_le16(p + 131, 0x0001); /* Initial power mode */ + put_le16(p + 132, 0x0000); /* User signature */ + put_le16(p + 160, 0x8100); /* Power requirement */ + put_le16(p + 161, 0x8001); /* CF command set */ ide_cfata_identify_size(s); s->identify_set = 1; @@ -1131,13 +1131,13 @@ static void ide_cfata_metadata_inquiry(IDEState *s) memset(p, 0, 0x200); spd = ((s->mdata_size - 1) >> 9) + 1; - put_le16(p + 0, 0x0001); /* Data format revision */ - put_le16(p + 1, 0x0000); /* Media property: silicon */ - put_le16(p + 2, s->media_changed); /* Media status */ - put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ - put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ - put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ - put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ + put_le16(p + 0, 0x0001); /* Data format revision */ + put_le16(p + 1, 0x0000); /* Media property: silicon */ + put_le16(p + 2, s->media_changed); /* Media status */ + put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ + put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ + put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ + put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ } static void ide_cfata_metadata_read(IDEState *s) @@ -1153,7 +1153,7 @@ static void ide_cfata_metadata_read(IDEState *s) p = (uint16_t *) s->io_buffer; memset(p, 0, 0x200); - put_le16(p + 0, s->media_changed); /* Media status */ + put_le16(p + 0, s->media_changed); /* Media status */ memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), s->nsector << 9), 0x200 - 2)); diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index f1017f7333..981cfbd97f 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -40,7 +40,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE) /***********************************************************/ /* CF-ATA Microdrive */ -#define METADATA_SIZE 0x20 +#define METADATA_SIZE 0x20 /* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. 
*/ @@ -65,29 +65,29 @@ struct MicroDriveState { /* Register bitfields */ enum md_opt { - OPT_MODE_MMAP = 0, - OPT_MODE_IOMAP16 = 1, - OPT_MODE_IOMAP1 = 2, - OPT_MODE_IOMAP2 = 3, - OPT_MODE = 0x3f, - OPT_LEVIREQ = 0x40, - OPT_SRESET = 0x80, + OPT_MODE_MMAP = 0, + OPT_MODE_IOMAP16 = 1, + OPT_MODE_IOMAP1 = 2, + OPT_MODE_IOMAP2 = 3, + OPT_MODE = 0x3f, + OPT_LEVIREQ = 0x40, + OPT_SRESET = 0x80, }; enum md_cstat { - STAT_INT = 0x02, - STAT_PWRDWN = 0x04, - STAT_XE = 0x10, - STAT_IOIS8 = 0x20, - STAT_SIGCHG = 0x40, - STAT_CHANGED = 0x80, + STAT_INT = 0x02, + STAT_PWRDWN = 0x04, + STAT_XE = 0x10, + STAT_IOIS8 = 0x20, + STAT_SIGCHG = 0x40, + STAT_CHANGED = 0x80, }; enum md_pins { - PINS_MRDY = 0x02, - PINS_CRDY = 0x20, + PINS_MRDY = 0x02, + PINS_CRDY = 0x20, }; enum md_ctrl { - CTRL_IEN = 0x02, - CTRL_SRST = 0x04, + CTRL_IEN = 0x02, + CTRL_SRST = 0x04, }; static inline void md_interrupt_update(MicroDriveState *s) @@ -99,7 +99,7 @@ static inline void md_interrupt_update(MicroDriveState *s) } qemu_set_irq(card->slot->irq, - !(s->stat & STAT_INT) && /* Inverted */ + !(s->stat & STAT_INT) && /* Inverted */ !(s->ctrl & (CTRL_IEN | CTRL_SRST)) && !(s->opt & OPT_SRESET)); } @@ -145,17 +145,17 @@ static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at) at -= s->attr_base; switch (at) { - case 0x00: /* Configuration Option Register */ + case 0x00: /* Configuration Option Register */ return s->opt; - case 0x02: /* Card Configuration Status Register */ + case 0x02: /* Card Configuration Status Register */ if (s->ctrl & CTRL_IEN) { return s->stat & ~STAT_INT; } else { return s->stat; } - case 0x04: /* Pin Replacement Register */ + case 0x04: /* Pin Replacement Register */ return (s->pins & PINS_CRDY) | 0x0c; - case 0x06: /* Socket and Copy Register */ + case 0x06: /* Socket and Copy Register */ return 0x00; #ifdef VERBOSE default: @@ -173,14 +173,14 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) at -= s->attr_base; switch (at) { - case 0x00: /* Configuration Option Register */ + case 0x00: /* Configuration Option Register */ s->opt = value & 0xcf; if (value & OPT_SRESET) { device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; - case 0x02: /* Card Configuration Status Register */ + case 0x02: /* Card Configuration Status Register */ if ((s->stat ^ value) & STAT_PWRDWN) { s->pins |= PINS_CRDY; } @@ -189,11 +189,11 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) md_interrupt_update(s); /* Word 170 in Identify Device must be equal to STAT_XE */ break; - case 0x04: /* Pin Replacement Register */ + case 0x04: /* Pin Replacement Register */ s->pins &= PINS_CRDY; s->pins |= value & PINS_MRDY; break; - case 0x06: /* Socket and Copy Register */ + case 0x06: /* Socket and Copy Register */ break; default: printf("%s: Bad attribute space register %02x\n", __func__, at); @@ -232,7 +232,7 @@ static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) } switch (at) { - case 0x0: /* Even RD Data */ + case 0x0: /* Even RD Data */ case 0x8: return ide_data_readw(&s->bus, 0); @@ -245,18 +245,18 @@ static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) } s->cycle = !s->cycle; return ret; - case 0x9: /* Odd RD Data */ + case 0x9: /* Odd RD Data */ return s->io >> 8; - case 0xd: /* Error */ + case 0xd: /* Error */ return ide_ioport_read(&s->bus, 0x1); - case 0xe: /* Alternate Status */ + case 0xe: /* Alternate Status */ ifs = ide_bus_active_if(&s->bus); if (ifs->blk) { return ifs->status; } else { return 0; } - case 0xf: /* Device 
Address */ + case 0xf: /* Device Address */ ifs = ide_bus_active_if(&s->bus); return 0xc2 | ((~ifs->select << 2) & 0x3c); default: @@ -296,7 +296,7 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) } switch (at) { - case 0x0: /* Even WR Data */ + case 0x0: /* Even WR Data */ case 0x8: ide_data_writew(&s->bus, 0, value); break; @@ -313,10 +313,10 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) s->io = value & 0xff; s->cycle = !s->cycle; break; - case 0xd: /* Features */ + case 0xd: /* Features */ ide_ioport_write(&s->bus, 0x1, value); break; - case 0xe: /* Device Control */ + case 0xe: /* Device Control */ s->ctrl = value; if (value & CTRL_SRST) { device_cold_reset(DEVICE(s)); @@ -350,35 +350,35 @@ static const VMStateDescription vmstate_microdrive = { }; static const uint8_t dscm1xxxx_cis[0x14a] = { - [0x000] = CISTPL_DEVICE, /* 5V Device Information */ - [0x002] = 0x03, /* Tuple length = 4 bytes */ - [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x006] = 0x01, /* Size = 2K bytes */ + [0x000] = CISTPL_DEVICE, /* 5V Device Information */ + [0x002] = 0x03, /* Tuple length = 4 bytes */ + [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x006] = 0x01, /* Size = 2K bytes */ [0x008] = CISTPL_ENDMARK, - [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ - [0x00c] = 0x04, /* Tuple length = 4 byest */ - [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ - [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x012] = 0x01, /* Size = 2K bytes */ + [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ + [0x00c] = 0x04, /* Tuple length = 4 byest */ + [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ + [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x012] = 0x01, /* Size = 2K bytes */ [0x014] = CISTPL_ENDMARK, - [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ - [0x018] = 0x02, /* Tuple length = 2 bytes */ - [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ + [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ + [0x018] = 0x02, /* Tuple length = 2 bytes */ + [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ [0x01c] = 0x01, - [0x01e] = CISTPL_MANFID, /* Manufacture ID */ - [0x020] = 0x04, /* Tuple length = 4 bytes */ - [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ + [0x01e] = CISTPL_MANFID, /* Manufacture ID */ + [0x020] = 0x04, /* Tuple length = 4 bytes */ + [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ [0x024] = 0x00, - [0x026] = 0x00, /* PLMID_CARD = 0000 */ + [0x026] = 0x00, /* PLMID_CARD = 0000 */ [0x028] = 0x00, - [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ - [0x02c] = 0x12, /* Tuple length = 23 bytes */ - [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ - [0x030] = 0x01, /* Minor Version = 1 */ + [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ + [0x02c] = 0x12, /* Tuple length = 23 bytes */ + [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ + [0x030] = 0x01, /* Minor Version = 1 */ [0x032] = 'I', [0x034] = 'B', [0x036] = 'M', @@ -396,142 +396,142 @@ static const uint8_t dscm1xxxx_cis[0x14a] = { [0x04e] = 0x00, [0x050] = CISTPL_ENDMARK, - [0x052] = CISTPL_FUNCID, /* Function ID */ - [0x054] = 0x02, /* Tuple length = 2 bytes */ - [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ - [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ - - [0x05a] = CISTPL_FUNCE, /* Function Extension */ - [0x05c] = 0x02, /* Tuple length = 2 bytes */ - [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ - 
[0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ - - [0x062] = CISTPL_FUNCE, /* Function Extension */ - [0x064] = 0x03, /* Tuple length = 3 bytes */ - [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ - [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ - [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ - - [0x06c] = CISTPL_CONFIG, /* Configuration */ - [0x06e] = 0x05, /* Tuple length = 5 bytes */ - [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ - [0x072] = 0x07, /* TPCC_LAST = 7 */ - [0x074] = 0x00, /* TPCC_RADR = 0200 */ + [0x052] = CISTPL_FUNCID, /* Function ID */ + [0x054] = 0x02, /* Tuple length = 2 bytes */ + [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ + [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ + + [0x05a] = CISTPL_FUNCE, /* Function Extension */ + [0x05c] = 0x02, /* Tuple length = 2 bytes */ + [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ + [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ + + [0x062] = CISTPL_FUNCE, /* Function Extension */ + [0x064] = 0x03, /* Tuple length = 3 bytes */ + [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ + [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ + [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ + + [0x06c] = CISTPL_CONFIG, /* Configuration */ + [0x06e] = 0x05, /* Tuple length = 5 bytes */ + [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ + [0x072] = 0x07, /* TPCC_LAST = 7 */ + [0x074] = 0x00, /* TPCC_RADR = 0200 */ [0x076] = 0x02, - [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ - - [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x07c] = 0x0b, /* Tuple length = 11 bytes */ - [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ - [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ - [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ - [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x086] = 0x55, /* NomV: 5.0 V */ - [0x088] = 0x4d, /* MinV: 4.5 V */ - [0x08a] = 0x5d, /* MaxV: 5.5 V */ - [0x08c] = 0x4e, /* Peakl: 450 mA */ - [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ - [0x090] = 0x00, /* Window descriptor: Window length = 0 */ - [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ - - [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x096] = 0x06, /* Tuple length = 6 bytes */ - [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */ - [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x09e] = 0xb5, /* NomV: 3.3 V */ + [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ + + [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x07c] = 0x0b, /* Tuple length = 11 bytes */ + [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ + [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ + [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ + [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x086] = 0x55, /* NomV: 5.0 V */ + [0x088] = 0x4d, /* MinV: 4.5 V */ + [0x08a] = 0x5d, /* MaxV: 5.5 V */ + [0x08c] = 0x4e, /* Peakl: 450 mA */ + [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ + [0x090] = 0x00, /* Window descriptor: Window length = 0 */ + [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ + + [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x096] = 0x06, /* Tuple length = 6 bytes */ + [0x098] = 0x00, 
/* TPCE_INDX = Memory Mode, no Default */ + [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x09e] = 0xb5, /* NomV: 3.3 V */ [0x0a0] = 0x1e, - [0x0a2] = 0x3e, /* Peakl: 350 mA */ - - [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ - [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ - [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0b0] = 0x55, /* NomV: 5.0 V */ - [0x0b2] = 0x4d, /* MinV: 4.5 V */ - [0x0b4] = 0x5d, /* MaxV: 5.5 V */ - [0x0b6] = 0x4e, /* Peakl: 450 mA */ - [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ - [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ - [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ - [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ - [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ - - [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0c4] = 0x06, /* Tuple length = 6 bytes */ - [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ - [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x0cc] = 0xb5, /* NomV: 3.3 V */ + [0x0a2] = 0x3e, /* Peakl: 350 mA */ + + [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ + [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ + [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0b0] = 0x55, /* NomV: 5.0 V */ + [0x0b2] = 0x4d, /* MinV: 4.5 V */ + [0x0b4] = 0x5d, /* MaxV: 5.5 V */ + [0x0b6] = 0x4e, /* Peakl: 450 mA */ + [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ + [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ + [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ + [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ + [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0c4] = 0x06, /* Tuple length = 6 bytes */ + [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ + [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x0cc] = 0xb5, /* NomV: 3.3 V */ [0x0ce] = 0x1e, - [0x0d0] = 0x3e, /* Peakl: 350 mA */ - - [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0d4] = 0x12, /* Tuple length = 18 bytes */ - [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ - [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0de] = 0x55, /* NomV: 5.0 V */ - [0x0e0] = 0x4d, /* MinV: 4.5 V */ - [0x0e2] = 0x5d, /* MaxV: 5.5 V */ - [0x0e4] = 0x4e, /* Peakl: 450 mA */ - [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ - [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ + [0x0d0] = 0x3e, /* Peakl: 350 mA */ + + [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0d4] = 0x12, /* Tuple length = 18 bytes */ + [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ + [0x0d8] = 0x41, /* TPCE_IF = I/O 
and Memory, no BVD, no WP */ + [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0de] = 0x55, /* NomV: 5.0 V */ + [0x0e0] = 0x4d, /* MinV: 4.5 V */ + [0x0e2] = 0x5d, /* MaxV: 5.5 V */ + [0x0e4] = 0x4e, /* Peakl: 450 mA */ + [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ + [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ [0x0ec] = 0x01, - [0x0ee] = 0x07, /* Address block length = 8 */ - [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ + [0x0ee] = 0x07, /* Address block length = 8 */ + [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ [0x0f2] = 0x03, - [0x0f4] = 0x01, /* Address block length = 2 */ - [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ - [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ - - [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0fc] = 0x06, /* Tuple length = 6 bytes */ - [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ - [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x104] = 0xb5, /* NomV: 3.3 V */ + [0x0f4] = 0x01, /* Address block length = 2 */ + [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0fc] = 0x06, /* Tuple length = 6 bytes */ + [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ + [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x104] = 0xb5, /* NomV: 3.3 V */ [0x106] = 0x1e, - [0x108] = 0x3e, /* Peakl: 350 mA */ - - [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x10c] = 0x12, /* Tuple length = 18 bytes */ - [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ - [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x116] = 0x55, /* NomV: 5.0 V */ - [0x118] = 0x4d, /* MinV: 4.5 V */ - [0x11a] = 0x5d, /* MaxV: 5.5 V */ - [0x11c] = 0x4e, /* Peakl: 450 mA */ - [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ - [0x122] = 0x70, /* Field 1 address = 0x0170 */ + [0x108] = 0x3e, /* Peakl: 350 mA */ + + [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x10c] = 0x12, /* Tuple length = 18 bytes */ + [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ + [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x116] = 0x55, /* NomV: 5.0 V */ + [0x118] = 0x4d, /* MinV: 4.5 V */ + [0x11a] = 0x5d, /* MaxV: 5.5 V */ + [0x11c] = 0x4e, /* Peakl: 450 mA */ + [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ + [0x122] = 0x70, /* Field 1 address = 0x0170 */ [0x124] = 0x01, - [0x126] = 0x07, /* Address block length = 8 */ - [0x128] = 0x76, /* Field 2 address = 0x0376 */ + [0x126] = 0x07, /* Address block length = 8 */ + [0x128] = 0x76, /* Field 2 address = 0x0376 */ [0x12a] = 0x03, - [0x12c] = 0x01, /* Address block length = 2 */ - [0x12e] = 0xee, /* TPCE_IR = IRQ E, 
Level, Pulse, Share */ - [0x130] = 0x20, /* TPCE_MI = support power down mode */ - - [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x134] = 0x06, /* Tuple length = 6 bytes */ - [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ - [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x13c] = 0xb5, /* NomV: 3.3 V */ + [0x12c] = 0x01, /* Address block length = 2 */ + [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x130] = 0x20, /* TPCE_MI = support power down mode */ + + [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x134] = 0x06, /* Tuple length = 6 bytes */ + [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ + [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x13c] = 0xb5, /* NomV: 3.3 V */ [0x13e] = 0x1e, - [0x140] = 0x3e, /* Peakl: 350 mA */ + [0x140] = 0x3e, /* Peakl: 350 mA */ - [0x142] = CISTPL_NO_LINK, /* No Link */ - [0x144] = 0x00, /* Tuple length = 0 bytes */ + [0x142] = CISTPL_NO_LINK, /* No Link */ + [0x144] = 0x00, /* Tuple length = 0 bytes */ - [0x146] = CISTPL_END, /* Tuple End */ + [0x146] = CISTPL_END, /* Tuple End */ }; #define TYPE_DSCM1XXXX "dscm1xxxx" diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index b702c3f51e..f4bf14c1c8 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -399,7 +399,7 @@ static struct _loaderparams { static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr) { - return addr & 0x1fffffffll; + return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS); } static int64_t load_kernel_info(void) diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 4d9679ca0b..c0c09b6965 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -765,7 +765,7 @@ e1000e_process_tx_desc(E1000ECore *core, } tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt); + net_tx_pkt_reset(tx->tx_pkt, core->owner); tx->sum_needed = 0; tx->cptse = 0; @@ -3447,7 +3447,7 @@ e1000e_core_pci_uninit(E1000ECore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt); + net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3572,7 +3572,7 @@ static void e1000e_reset(E1000ECore *core, bool sw) e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); for (i = 0; i < ARRAY_SIZE(core->tx); i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt); + net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); memset(&core->tx[i].props, 0, sizeof(core->tx[i].props)); core->tx[i].skip_cp = false; } diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h index c0832fa23d..6d3c4c6d3a 100644 --- a/hw/net/e1000x_regs.h +++ b/hw/net/e1000x_regs.h @@ -335,6 +335,7 @@ #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ #define E1000_ICR_RXO 0x00000040 /* rx overrun */ #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_RXDW 0x00000080 /* rx desc written back */ #define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ #define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ #define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ @@ -378,6 +379,7 @@ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ #define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ #define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_RXDW E1000_ICR_RXDW /* rx desc written back */ #define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ #define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ #define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ @@ -407,6 +409,7 @@ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_RXDW E1000_ICR_RXDW /* rx desc written back */ #define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ #define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ #define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ @@ -441,6 +444,7 @@ #define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ #define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_RXDW E1000_ICR_RXDW /* rx desc written back */ #define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ #define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ #define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ diff --git a/hw/net/igb.c b/hw/net/igb.c index c6d753df87..51a7e9133e 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -502,16 +502,28 @@ static int igb_post_load(void *opaque, int version_id) return igb_core_post_load(&s->core); } -static const VMStateDescription igb_vmstate_tx = { - .name = "igb-tx", +static const VMStateDescription igb_vmstate_tx_ctx = { + .name = "igb-tx-ctx", .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINT16(vlan, struct igb_tx), - VMSTATE_UINT16(mss, struct igb_tx), - VMSTATE_BOOL(tse, struct igb_tx), - VMSTATE_BOOL(ixsm, struct igb_tx), - VMSTATE_BOOL(txsm, struct igb_tx), + VMSTATE_UINT32(vlan_macip_lens, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(seqnum_seed, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(type_tucmd_mlhl, struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(mss_l4len_idx, struct e1000_adv_tx_context_desc), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription igb_vmstate_tx = { + .name = "igb-tx", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(ctx, struct igb_tx, 2, 0, igb_vmstate_tx_ctx, + struct e1000_adv_tx_context_desc), + VMSTATE_UINT32(first_cmd_type_len, struct igb_tx), + VMSTATE_UINT32(first_olinfo_status, struct igb_tx), VMSTATE_BOOL(first, struct igb_tx), VMSTATE_BOOL(skip_cp, struct igb_tx), VMSTATE_END_OF_LIST() diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index a7c7bfdc75..d733fed6cf 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -386,11 +386,35 @@ igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx, info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); } +static void +igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, + uint16_t vlan, bool insert_vlan) +{ + if (core->mac[MRQC] & 1) { + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) { + /* always insert default VLAN */ + insert_vlan = true; + vlan = core->mac[VMVIR0 + pool] & 0xffff; + } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) { + insert_vlan = false; + } + } + + if (insert_vlan && e1000x_vlan_enabled(core->mac)) { + net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, + 
core->mac[VET] & 0xffff); + } +} + static bool igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) { - if (tx->tse) { - if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->mss)) { + if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) { + uint32_t idx = (tx->first_olinfo_status >> 4) & 1; + uint32_t mss = tx->ctx[idx].mss_l4len_idx >> 16; + if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) { return false; } @@ -399,13 +423,13 @@ igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) return true; } - if (tx->txsm) { + if (tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) { if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) { return false; } } - if (tx->ixsm) { + if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) { net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); } @@ -490,7 +514,7 @@ igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index) } static void -igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt) +igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn) { static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, PTC1023, PTC1522 }; @@ -517,17 +541,25 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt) core->mac[GPTC] = core->mac[TPT]; core->mac[GOTCL] = core->mac[TOTL]; core->mac[GOTCH] = core->mac[TOTH]; + + if (core->mac[MRQC] & 1) { + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + core->mac[PVFGOTC0 + (pool * 64)] += tot_len; + core->mac[PVFGPTC0 + (pool * 64)]++; + } } static void igb_process_tx_desc(IGBCore *core, + PCIDevice *dev, struct igb_tx *tx, union e1000_adv_tx_desc *tx_desc, int queue_index) { struct e1000_adv_tx_context_desc *tx_ctx_desc; uint32_t cmd_type_len; - uint32_t olinfo_status; + uint32_t idx; uint64_t buffer_addr; uint16_t length; @@ -538,20 +570,19 @@ igb_process_tx_desc(IGBCore *core, E1000_ADVTXD_DTYP_DATA) { /* advanced transmit data descriptor */ if (tx->first) { - olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status); - - tx->tse = !!(cmd_type_len & E1000_ADVTXD_DCMD_TSE); - tx->ixsm = !!(olinfo_status & E1000_ADVTXD_POTS_IXSM); - tx->txsm = !!(olinfo_status & E1000_ADVTXD_POTS_TXSM); - + tx->first_cmd_type_len = cmd_type_len; + tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status); tx->first = false; } } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) == E1000_ADVTXD_DTYP_CTXT) { /* advanced transmit context descriptor */ tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc; - tx->vlan = le32_to_cpu(tx_ctx_desc->vlan_macip_lens) >> 16; - tx->mss = le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 16; + idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1; + tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens); + tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed); + tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl); + tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx); return; } else { /* unknown descriptor type */ @@ -574,18 +605,19 @@ igb_process_tx_desc(IGBCore *core, if (cmd_type_len & E1000_TXD_CMD_EOP) { if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { - if (cmd_type_len & E1000_TXD_CMD_VLE) { - net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan, - core->mac[VET] & 0xffff); - } + idx = (tx->first_olinfo_status >> 4) & 1; + igb_tx_insert_vlan(core, queue_index, tx, + tx->ctx[idx].vlan_macip_lens >> 16, + !!(cmd_type_len & E1000_TXD_CMD_VLE)); + if (igb_tx_pkt_send(core, tx, queue_index)) { - igb_on_tx_done_update_stats(core, tx->tx_pkt); + 
igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index); } } tx->first = true; tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt); + net_tx_pkt_reset(tx->tx_pkt, dev); } } @@ -780,6 +812,18 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base, return igb_tx_wb_eic(core, txi->idx); } +static inline bool +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi) +{ + bool vmdq = core->mac[MRQC] & 1; + uint16_t qn = txi->idx; + uint16_t pool = qn % IGB_NUM_VM_POOLS; + + return (core->mac[TCTL] & E1000_TCTL_EN) && + (!vmdq || core->mac[VFTE] & BIT(pool)) && + (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE); +} + static void igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) { @@ -789,8 +833,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) const E1000E_RingInfo *txi = txr->i; uint32_t eic = 0; - /* TODO: check if the queue itself is enabled too. */ - if (!(core->mac[TCTL] & E1000_TCTL_EN)) { + if (!igb_tx_enabled(core, txi)) { trace_e1000e_tx_disabled(); return; } @@ -800,6 +843,8 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) d = core->owner; } + net_tx_pkt_reset(txr->tx->tx_pkt, d); + while (!igb_ring_empty(core, txi)) { base = igb_ring_head_descr(core, txi); @@ -808,7 +853,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr, desc.read.cmd_type_len, desc.wb.status); - igb_process_tx_desc(core, txr->tx, &desc, txi->idx); + igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx); igb_ring_advance(core, txi, 1); eic |= igb_txdesc_writeback(core, base, &desc, txi); } @@ -866,6 +911,9 @@ igb_can_receive(IGBCore *core) for (i = 0; i < IGB_NUM_QUEUES; i++) { E1000E_RxRing rxr; + if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) { + continue; + } igb_rx_ring_init(core, &rxr, i); if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) { @@ -901,12 +949,26 @@ igb_rx_l4_cso_enabled(IGBCore *core) return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); } +static bool +igb_rx_is_oversized(IGBCore *core, uint16_t qn, size_t size) +{ + uint16_t pool = qn % IGB_NUM_VM_POOLS; + bool lpe = !!(core->mac[VMOLR0 + pool] & E1000_VMOLR_LPE); + int max_ethernet_lpe_size = + core->mac[VMOLR0 + pool] & E1000_VMOLR_RLPML_MASK; + int max_ethernet_vlan_size = 1522; + + return size > (lpe ? 
max_ethernet_lpe_size : max_ethernet_vlan_size); +} + static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, - E1000E_RSSInfo *rss_info, bool *external_tx) + size_t size, E1000E_RSSInfo *rss_info, + bool *external_tx) { static const int ta_shift[] = { 4, 3, 2, 0 }; uint32_t f, ra[2], *macp, rctl = core->mac[RCTL]; uint16_t queues = 0; + uint16_t oversized = 0; uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(ehdr)->h_tci) & VLAN_VID_MASK; bool accepted = false; int i; @@ -932,7 +994,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, if (core->mac[MRQC] & 1) { if (is_broadcast_ether_addr(ehdr->h_dest)) { - for (i = 0; i < 8; i++) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) { queues |= BIT(i); } @@ -966,7 +1028,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff; if (macp[f >> 5] & (1 << (f & 0x1f))) { - for (i = 0; i < 8; i++) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) { queues |= BIT(i); } @@ -989,7 +1051,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, } } } else { - for (i = 0; i < 8; i++) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) { mask |= BIT(i); } @@ -1005,9 +1067,34 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT); } - igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info); - if (rss_info->queue & 1) { - queues <<= 8; + queues &= core->mac[VFRE]; + if (queues) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if ((queues & BIT(i)) && igb_rx_is_oversized(core, i, size)) { + oversized |= BIT(i); + } + } + /* 8.19.37 increment ROC if packet is oversized for all queues */ + if (oversized == queues) { + trace_e1000x_rx_oversized(size); + e1000x_inc_reg_if_not_full(core->mac, ROC); + } + queues &= ~oversized; + } + + if (queues) { + igb_rss_parse_packet(core, core->rx_pkt, + external_tx != NULL, rss_info); + /* Sec 8.26.1: PQn = VFn + VQn*8 */ + if (rss_info->queue & 1) { + for (i = 0; i < IGB_NUM_VM_POOLS; i++) { + if ((queues & BIT(i)) && + (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) { + queues |= BIT(i + IGB_NUM_VM_POOLS); + queues &= ~BIT(i); + } + } + } } } else { switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { @@ -1350,7 +1437,8 @@ igb_write_to_rx_buffers(IGBCore *core, } static void -igb_update_rx_stats(IGBCore *core, size_t data_size, size_t data_fcs_size) +igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, + size_t data_size, size_t data_fcs_size) { e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); @@ -1366,6 +1454,16 @@ igb_update_rx_stats(IGBCore *core, size_t data_size, size_t data_fcs_size) default: break; } + + if (core->mac[MRQC] & 1) { + uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; + + core->mac[PVFGORC0 + (pool * 64)] += data_size + 4; + core->mac[PVFGPRC0 + (pool * 64)]++; + if (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) { + core->mac[PVFMPRC0 + (pool * 64)]++; + } + } } static inline bool @@ -1467,7 +1565,21 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, } while (desc_offset < total_size); - igb_update_rx_stats(core, size, total_size); + igb_update_rx_stats(core, rxi, size, total_size); +} + +static bool 
+igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi) +{ + if (core->mac[MRQC] & 1) { + uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; + /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */ + return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ? + core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN : + core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN; + } + + return e1000x_vlan_enabled(core->mac); } static inline void @@ -1550,34 +1662,36 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, ehdr = PKT_GET_ETH_HDR(filter_buf); net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr)); + net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size); - net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - e1000x_vlan_enabled(core->mac), - core->mac[VET] & 0xffff); - - queues = igb_receive_assign(core, ehdr, &rss_info, external_tx); + queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx); if (!queues) { trace_e1000e_rx_flt_dropped(); return orig_size; } - total_size = net_rx_pkt_get_total_len(core->rx_pkt) + - e1000x_fcs_len(core->mac); - for (i = 0; i < IGB_NUM_QUEUES; i++) { - if (!(queues & BIT(i))) { + if (!(queues & BIT(i)) || + !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) { continue; } igb_rx_ring_init(core, &rxr, i); + net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, + igb_rx_strip_vlan(core, rxr.i), + core->mac[VET] & 0xffff); + + total_size = net_rx_pkt_get_total_len(core->rx_pkt) + + e1000x_fcs_len(core->mac); + if (!igb_has_rxbufs(core, rxr.i, total_size)) { n |= E1000_ICS_RXO; trace_e1000e_rx_not_written_to_guest(rxr.i->idx); continue; } - n |= E1000_ICR_RXT0; + n |= E1000_ICR_RXDW; igb_rx_fix_l4_csum(core, core->rx_pkt); igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); @@ -1892,14 +2006,6 @@ static void igb_set_eims(IGBCore *core, int index, uint32_t val) igb_update_interrupt_state(core); } -static void igb_vf_reset(IGBCore *core, uint16_t vfn) -{ - /* TODO: Reset of the queue enable and the interrupt registers of the VF. 
*/ - - core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI; - core->mac[V2PMAILBOX0 + vfn] = E1000_V2PMAILBOX_RSTD; -} - static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn) { uint32_t ent = core->mac[VTIVAR_MISC + vfn]; @@ -1977,6 +2083,24 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val) } } +static void igb_vf_reset(IGBCore *core, uint16_t vfn) +{ + uint16_t qn0 = vfn; + uint16_t qn1 = vfn + IGB_NUM_VM_POOLS; + + /* disable Rx and Tx for the VF*/ + core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; + core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; + core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; + core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; + core->mac[VFRE] &= ~BIT(vfn); + core->mac[VFTE] &= ~BIT(vfn); + /* indicate VF reset to PF */ + core->mac[VFLRE] |= BIT(vfn); + /* VFLRE and mailbox use the same interrupt cause */ + mailbox_interrupt_to_pf(core); +} + static void igb_w1c(IGBCore *core, int index, uint32_t val) { core->mac[index] &= ~val; @@ -2231,14 +2355,20 @@ igb_set_status(IGBCore *core, int index, uint32_t val) static void igb_set_ctrlext(IGBCore *core, int index, uint32_t val) { - trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK), - !!(val & E1000_CTRL_EXT_SPD_BYPS)); - - /* TODO: PFRSTD */ + trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK), + !!(val & E1000_CTRL_EXT_SPD_BYPS), + !!(val & E1000_CTRL_EXT_PFRSTD)); /* Zero self-clearing bits */ val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST); core->mac[CTRL_EXT] = val; + + if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) { + for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) { + core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI; + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD; + } + } } static void @@ -3825,7 +3955,7 @@ igb_core_pci_realize(IGBCore *core, core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS); + net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS); } net_rx_pkt_init(&core->rx_pkt); @@ -3850,7 +3980,7 @@ igb_core_pci_uninit(IGBCore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt); + net_tx_pkt_reset(core->tx[i].tx_pkt, NULL); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3899,6 +4029,7 @@ igb_phy_reg_init[] = { static const uint32_t igb_mac_reg_init[] = { [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24), [EEMNGCTL] = BIT(31), + [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE, [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16), [RXDCTL1] = 1 << 16, [RXDCTL2] = 1 << 16, @@ -4021,14 +4152,15 @@ static void igb_reset(IGBCore *core, bool sw) e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); + for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) { + /* Set RSTI, so VF can identify a PF reset is in progress */ + core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI; + } + for (i = 0; i < ARRAY_SIZE(core->tx); i++) { tx = &core->tx[i]; - net_tx_pkt_reset(tx->tx_pkt); - tx->vlan = 0; - tx->mss = 0; - tx->tse = false; - tx->ixsm = false; - tx->txsm = false; + net_tx_pkt_reset(tx->tx_pkt, NULL); + memset(tx->ctx, 0, sizeof(tx->ctx)); tx->first = true; tx->skip_cp = false; } diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h index 814c1e264b..9cbbfd516b 100644 --- a/hw/net/igb_core.h +++ b/hw/net/igb_core.h @@ -47,6 +47,7 @@ 
#define IGB_MSIX_VEC_NUM (10) #define IGBVF_MSIX_VEC_NUM (3) #define IGB_NUM_QUEUES (16) +#define IGB_NUM_VM_POOLS (8) typedef struct IGBCore IGBCore; @@ -72,11 +73,9 @@ struct IGBCore { QEMUTimer *autoneg_timer; struct igb_tx { - uint16_t vlan; /* VLAN Tag */ - uint16_t mss; /* Maximum Segment Size */ - bool tse; /* TCP/UDP Segmentation Enable */ - bool ixsm; /* Insert IP Checksum */ - bool txsm; /* Insert TCP/UDP Checksum */ + struct e1000_adv_tx_context_desc ctx[2]; + uint32_t first_cmd_type_len; + uint32_t first_olinfo_status; bool first; bool skip_cp; diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index 00934d4f20..c5c5b3c3b8 100644 --- a/hw/net/igb_regs.h +++ b/hw/net/igb_regs.h @@ -160,6 +160,9 @@ union e1000_adv_rx_desc { #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ + /* Additional Receive Descriptor Control definitions */ #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ @@ -240,6 +243,9 @@ union e1000_adv_rx_desc { /* from igb/e1000_defines.h */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 + #define E1000_IVAR_VALID 0x80 #define E1000_GPIE_NSICR 0x00000001 #define E1000_GPIE_MSIX_MODE 0x00000010 diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 986a3adfe9..8dc8568ba2 100644 --- a/hw/net/net_tx_pkt.c +++ b/hw/net/net_tx_pkt.c @@ -43,7 +43,11 @@ struct NetTxPkt { struct iovec *vec; uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; - uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN]; + union { + struct ip_header ip; + struct ip6_header ip6; + uint8_t octets[ETH_MAX_IP_DGRAM_LEN]; + } l3_hdr; uint32_t payload_len; @@ -89,16 +93,14 @@ void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt) { uint16_t csum; assert(pkt); - struct ip_header *ip_hdr; - ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; - ip_hdr->ip_len = cpu_to_be16(pkt->payload_len + + pkt->l3_hdr.ip.ip_len = cpu_to_be16(pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); - ip_hdr->ip_sum = 0; - csum = net_raw_checksum((uint8_t *)ip_hdr, + pkt->l3_hdr.ip.ip_sum = 0; + csum = net_raw_checksum(pkt->l3_hdr.octets, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); - ip_hdr->ip_sum = cpu_to_be16(csum); + pkt->l3_hdr.ip.ip_sum = cpu_to_be16(csum); } void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) @@ -443,7 +445,7 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt) #endif } -void net_tx_pkt_reset(struct NetTxPkt *pkt) +void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev) { int i; @@ -467,6 +469,7 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt) pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0); } } + pkt->pci_dev = pci_dev; pkt->raw_frags = 0; pkt->hdr_len = 0; @@ -795,11 +798,13 @@ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, { assert(pkt); + uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; + /* * Since underlying infrastructure does not support IP datagrams longer * than 64K we should drop such packets and don't even try to send */ - if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) { + if (VIRTIO_NET_HDR_GSO_NONE != gso_type) { if (pkt->payload_len > ETH_MAX_IP_DGRAM_LEN - pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) { @@ -807,7 +812,7 @@ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, } } - if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) { + if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) 
{ if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG], pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1, @@ -829,15 +834,14 @@ void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt) { struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) { - struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr; /* * TODO: if qemu would support >64K packets - add jumbo option check * something like that: * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {' */ - if (ip6->ip6_plen == 0) { + if (pkt->l3_hdr.ip6.ip6_plen == 0) { if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) { - ip6->ip6_plen = htons(pkt->payload_len); + pkt->l3_hdr.ip6.ip6_plen = htons(pkt->payload_len); } /* * TODO: if qemu would support >64K packets diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h index f57b4e034b..e5ce6f20bc 100644 --- a/hw/net/net_tx_pkt.h +++ b/hw/net/net_tx_pkt.h @@ -148,9 +148,10 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt); * reset tx packet private context (needed to be called between packets) * * @pkt: packet + * @dev: PCI device processing the next packet * */ -void net_tx_pkt_reset(struct NetTxPkt *pkt); +void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev); /** * Send packet to qemu. handles sw offloads if vhdr is not supported. diff --git a/hw/net/trace-events b/hw/net/trace-events index 65753411fc..d35554fce8 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -280,6 +280,8 @@ igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED" igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x" igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED" +igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d" + igb_rx_desc_buff_size(uint32_t b) "buffer size: %u" igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint32_t len) "addr: 0x%"PRIx64", offset: %u, from: %p, length: %u" diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 1068b80868..f7b874c139 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -678,7 +678,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) vmxnet3_complete_packet(s, qidx, txd_idx); s->tx_sop = true; s->skip_current_tx_pkt = false; - net_tx_pkt_reset(s->tx_pkt); + net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); } } } @@ -1159,7 +1159,7 @@ static void vmxnet3_deactivate_device(VMXNET3State *s) { if (s->device_active) { VMW_CBPRN("Deactivating vmxnet3..."); - net_tx_pkt_reset(s->tx_pkt); + net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); net_tx_pkt_uninit(s->tx_pkt); net_rx_pkt_uninit(s->rx_pkt); s->device_active = false; diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 49c1210fce..8b7be14209 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -1434,26 +1434,26 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, } static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); } } static inline 
void nvme_blk_write(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); } @@ -2207,10 +2207,10 @@ static void nvme_rw_cb(void *opaque, int ret) } if (req->cmd.opcode == NVME_CMD_READ) { - return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req); } - return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req); } } @@ -2378,7 +2378,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } } @@ -2387,7 +2387,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) } if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -2436,7 +2436,7 @@ static void nvme_compare_data_cb(void *opaque, int ret) } if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -3437,7 +3437,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_READ); - nvme_blk_read(blk, data_offset, nvme_rw_cb, req); + nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); return NVME_NO_COMPLETE; invalid: @@ -3607,7 +3607,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_WRITE); - nvme_blk_write(blk, data_offset, nvme_rw_cb, req); + nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); } else { req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, BDRV_REQ_MAY_UNMAP, nvme_rw_cb, diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index f226d03420..82c15edb46 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -321,9 +321,6 @@ static void gt64120_isd_mapping(GT64120State *s) static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) { /* Indexed on MByteSwap bit, see Table 158: PCI_0 Command, Offset: 0xc00 */ - static const MemoryRegionOps *pci_host_conf_ops[] = { - &pci_host_conf_be_ops, &pci_host_conf_le_ops - }; static const MemoryRegionOps *pci_host_data_ops[] = { &pci_host_data_be_ops, &pci_host_data_le_ops }; @@ -339,15 +336,6 @@ static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) * - Table 16: 32-bit PCI Transaction Endianess * - Table 158: PCI_0 Command, Offset: 0xc00 */ - if (memory_region_is_mapped(&phb->conf_mem)) { - memory_region_del_subregion(&s->ISD_mem, &phb->conf_mem); - object_unparent(OBJECT(&phb->conf_mem)); - } - memory_region_init_io(&phb->conf_mem, OBJECT(phb), - pci_host_conf_ops[s->regs[GT_PCI0_CMD] & 1], - s, "pci-conf-idx", 4); - memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2, - &phb->conf_mem, 1); if (memory_region_is_mapped(&phb->data_mem)) { memory_region_del_subregion(&s->ISD_mem, 
&phb->data_mem); @@ -1208,6 +1196,12 @@ static void gt64120_realize(DeviceState *dev, Error **errp) PCI_DEVFN(18, 0), TYPE_PCI_BUS); pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "gt64120_pci"); + memory_region_init_io(&phb->conf_mem, OBJECT(phb), + &pci_host_conf_le_ops, + s, "pci-conf-idx", 4); + memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2, + &phb->conf_mem, 1); + /* * The whole address space decoded by the GT-64120A doesn't generate diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index 552927622f..d4de2e7aab 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -156,6 +156,7 @@ static void xlx_spi_do_reset(XilinxSPI *s) txfifo_reset(s); s->regs[R_SPISSR] = ~0; + s->regs[R_SPICR] = R_SPICR_MTI; xlx_spi_update_irq(s); xlx_spi_update_cs(s); } diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 2e759d0619..17cda5ec13 100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -35,6 +35,8 @@ static void xen_init_pv(MachineState *machine) DriveInfo *dinfo; int i; + setup_xen_backend_ops(); + /* Initialize backend core & drivers */ xen_be_init(); diff --git a/include/block/block-io.h b/include/block/block-io.h index 5da99d4d60..dbc034b728 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -89,7 +89,9 @@ int64_t co_wrapper bdrv_get_allocated_file_size(BlockDriverState *bs); BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts, BlockDriverState *in_bs, Error **errp); -void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); + +void coroutine_fn GRAPH_RDLOCK +bdrv_co_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); int coroutine_fn GRAPH_RDLOCK bdrv_co_delete_file(BlockDriverState *bs, Error **errp); diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 548be9c8ea..090922e4a8 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -152,6 +152,15 @@ static inline void tswap64s(uint64_t *s) */ extern uintptr_t guest_base; extern bool have_guest_base; + +/* + * If non-zero, the guest virtual address space is a contiguous subset + * of the host virtual address space, i.e. '-R reserved_va' is in effect + * either from the command-line or by default. The value is the last + * byte of the guest address space e.g. UINT32_MAX. + * + * If zero, the host and guest virtual address spaces are intermingled. + */ extern unsigned long reserved_va; /* @@ -171,7 +180,7 @@ extern unsigned long reserved_va; #define GUEST_ADDR_MAX_ \ ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \ UINT32_MAX : ~0ul) -#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_) +#define GUEST_ADDR_MAX (reserved_va ? 
: GUEST_ADDR_MAX_) #else @@ -276,8 +285,8 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong, int walk_memory_regions(void *, walk_memory_regions_fn); int page_get_flags(target_ulong address); -void page_set_flags(target_ulong start, target_ulong end, int flags); -void page_reset_target_data(target_ulong start, target_ulong end); +void page_set_flags(target_ulong start, target_ulong last, int flags); +void page_reset_target_data(target_ulong start, target_ulong last); int page_check_range(target_ulong start, target_ulong len, int flags); /** diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index ad9eb6067b..ecded1f112 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -678,7 +678,7 @@ void tb_invalidate_phys_addr(target_ulong addr); void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); #endif void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last); void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); /* GETPC is the true target of the return instruction that we'll execute. */ diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h index 86d8363bb0..7198953702 100644 --- a/include/hw/block/flash.h +++ b/include/hw/block/flash.h @@ -53,22 +53,22 @@ void nand_setio(DeviceState *dev, uint32_t value); uint32_t nand_getio(DeviceState *dev); uint32_t nand_getbuswidth(DeviceState *dev); -#define NAND_MFR_TOSHIBA 0x98 -#define NAND_MFR_SAMSUNG 0xec -#define NAND_MFR_FUJITSU 0x04 -#define NAND_MFR_NATIONAL 0x8f -#define NAND_MFR_RENESAS 0x07 -#define NAND_MFR_STMICRO 0x20 -#define NAND_MFR_HYNIX 0xad -#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c /* onenand.c */ void *onenand_raw_otp(DeviceState *onenand_device); /* ecc.c */ typedef struct { - uint8_t cp; /* Column parity */ - uint16_t lp[2]; /* Line parity */ + uint8_t cp; /* Column parity */ + uint16_t lp[2]; /* Line parity */ uint16_t count; } ECCState; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 821e937020..397fd3ac68 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -949,7 +949,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) return false; } -#if !defined(CONFIG_TCG) || defined(CONFIG_USER_ONLY) +#if defined(CONFIG_USER_ONLY) static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint) { @@ -970,17 +970,6 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { } - -static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs atr, int fl, uintptr_t ra) -{ -} - -static inline int cpu_watchpoint_address_matches(CPUState *cpu, - vaddr addr, vaddr len) -{ - return 0; -} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -988,32 +977,6 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); - -/** - * cpu_check_watchpoint: - * 
@cpu: cpu context - * @addr: guest virtual address - * @len: access length - * @attrs: memory access attributes - * @flags: watchpoint access type - * @ra: unwind return address - * - * Check for a watchpoint hit in [addr, addr+len) of the type - * specified by @flags. Exit via exception with a hit. - */ -void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs attrs, int flags, uintptr_t ra); - -/** - * cpu_watchpoint_address_matches: - * @cpu: cpu context - * @addr: guest virtual address - * @len: access length - * - * Return the watchpoint flags that apply to [addr, addr+len). - * If no watchpoint is registered for the range, the result is 0. - */ -int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); #endif /** diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 20e3c0ffbb..0ae08df47e 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -175,4 +175,47 @@ struct TCGCPUOps { }; +#if defined(CONFIG_USER_ONLY) + +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs atr, int fl, uintptr_t ra) +{ +} + +static inline int cpu_watchpoint_address_matches(CPUState *cpu, + vaddr addr, vaddr len) +{ + return 0; +} + +#else + +/** + * cpu_check_watchpoint: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * @attrs: memory access attributes + * @flags: watchpoint access type + * @ra: unwind return address + * + * Check for a watchpoint hit in [addr, addr+len) of the type + * specified by @flags. Exit via exception with a hit. + */ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); + +/** + * cpu_watchpoint_address_matches: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * + * Return the watchpoint flags that apply to [addr, addr+len). + * If no watchpoint is registered for the range, the result is 0. 
+ */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); + +#endif + #endif /* TCG_CPU_OPS_H */ diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h index d9f1f77dd5..2bfa7533d6 100644 --- a/include/hw/ide/internal.h +++ b/include/hw/ide/internal.h @@ -38,32 +38,32 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* Bits of HD_STATUS */ -#define ERR_STAT 0x01 -#define INDEX_STAT 0x02 -#define ECC_STAT 0x04 /* Corrected error */ -#define DRQ_STAT 0x08 -#define SEEK_STAT 0x10 -#define SRV_STAT 0x10 -#define WRERR_STAT 0x20 -#define READY_STAT 0x40 -#define BUSY_STAT 0x80 +#define ERR_STAT 0x01 +#define INDEX_STAT 0x02 +#define ECC_STAT 0x04 /* Corrected error */ +#define DRQ_STAT 0x08 +#define SEEK_STAT 0x10 +#define SRV_STAT 0x10 +#define WRERR_STAT 0x20 +#define READY_STAT 0x40 +#define BUSY_STAT 0x80 /* Bits for HD_ERROR */ -#define MARK_ERR 0x01 /* Bad address mark */ -#define TRK0_ERR 0x02 /* couldn't find track 0 */ -#define ABRT_ERR 0x04 /* Command aborted */ -#define MCR_ERR 0x08 /* media change request */ -#define ID_ERR 0x10 /* ID field not found */ -#define MC_ERR 0x20 /* media changed */ -#define ECC_ERR 0x40 /* Uncorrectable ECC error */ -#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ -#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ +#define MARK_ERR 0x01 /* Bad address mark */ +#define TRK0_ERR 0x02 /* couldn't find track 0 */ +#define ABRT_ERR 0x04 /* Command aborted */ +#define MCR_ERR 0x08 /* media change request */ +#define ID_ERR 0x10 /* ID field not found */ +#define MC_ERR 0x20 /* media changed */ +#define ECC_ERR 0x40 /* Uncorrectable ECC error */ +#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ +#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ /* Bits of HD_NSECTOR */ -#define CD 0x01 -#define IO 0x02 -#define REL 0x04 -#define TAG_MASK 0xf8 +#define CD 0x01 +#define IO 0x02 +#define REL 0x04 +#define TAG_MASK 0xf8 /* Bits of Device Control register */ #define IDE_CTRL_HOB 0x80 @@ -71,50 +71,50 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define IDE_CTRL_DISABLE_IRQ 0x02 /* ACS-2 T13/2015-D Table B.2 Command codes */ -#define WIN_NOP 0x00 +#define WIN_NOP 0x00 /* reserved 0x01..0x02 */ -#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ +#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ /* reserved 0x04..0x05 */ #define WIN_DSM 0x06 /* reserved 0x07 */ -#define WIN_DEVICE_RESET 0x08 +#define WIN_DEVICE_RESET 0x08 /* reserved 0x09..0x0a */ /* REQUEST SENSE DATA EXT 0x0B */ /* reserved 0x0C..0x0F */ #define WIN_RECAL 0x10 /* obsolete since ATA4 */ /* obsolete since ATA3, retired in ATA4 0x11..0x1F */ -#define WIN_READ 0x20 /* 28-Bit */ +#define WIN_READ 0x20 /* 28-Bit */ #define WIN_READ_ONCE 0x21 /* 28-Bit w/o retries, obsolete since ATA5 */ /* obsolete since ATA4 0x22..0x23 */ -#define WIN_READ_EXT 0x24 /* 48-Bit */ -#define WIN_READDMA_EXT 0x25 /* 48-Bit */ +#define WIN_READ_EXT 0x24 /* 48-Bit */ +#define WIN_READDMA_EXT 0x25 /* 48-Bit */ #define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit, obsolete since ACS2 */ -#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ +#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ /* reserved 0x28 */ -#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ +#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ /* READ STREAM DMA EXT 0x2A */ /* READ STREAM EXT 0x2B */ /* reserved 0x2C..0x2E */ /* READ LOG EXT 0x2F */ -#define WIN_WRITE 0x30 /* 28-Bit */ +#define WIN_WRITE 0x30 /* 28-Bit */ #define WIN_WRITE_ONCE 0x31 
/* 28-Bit w/o retries, obsolete since ATA5 */ /* obsolete since ATA4 0x32..0x33 */ -#define WIN_WRITE_EXT 0x34 /* 48-Bit */ -#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ -#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ +#define WIN_WRITE_EXT 0x34 /* 48-Bit */ +#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ +#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ #define WIN_SET_MAX_EXT 0x37 /* 48-Bit, obsolete since ACS2 */ -#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ -#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ -#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ +#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ +#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ +#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ /* WRITE STREAM DMA EXT 0x3A */ /* WRITE STREAM EXT 0x3B */ #define WIN_WRITE_VERIFY 0x3C /* 28-Bit, obsolete since ATA4 */ /* WRITE DMA FUA EXT 0x3D */ /* obsolete since ACS2 0x3E */ /* WRITE LOG EXT 0x3F */ -#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ +#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ #define WIN_VERIFY_ONCE 0x41 /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ +#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ /* reserved 0x43..0x44 */ /* WRITE UNCORRECTABLE EXT 0x45 */ /* reserved 0x46 */ @@ -136,11 +136,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define WIN_SEEK 0x70 /* obsolete since ATA7 */ /* reserved 0x71-0x7F */ /* vendor specific 0x80-0x86 */ -#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ +#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ /* vendor specific 0x88-0x8F */ -#define WIN_DIAGNOSE 0x90 +#define WIN_DIAGNOSE 0x90 #define WIN_SPECIFY 0x91 /* set drive geometry translation, obsolete since ATA6 */ -#define WIN_DOWNLOAD_MICROCODE 0x92 +#define WIN_DOWNLOAD_MICROCODE 0x92 /* DOWNLOAD MICROCODE DMA 0x93 */ #define WIN_STANDBYNOW2 0x94 /* retired in ATA4 */ #define WIN_IDLEIMMEDIATE2 0x95 /* force drive to become "ready", retired in ATA4 */ @@ -150,31 +150,31 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) #define WIN_SLEEPNOW2 0x99 /* retired in ATA4 */ /* vendor specific 0x9A */ /* reserved 0x9B..0x9F */ -#define WIN_PACKETCMD 0xA0 /* Send a packet command. */ -#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ +#define WIN_PACKETCMD 0xA0 /* Send a packet command. 
*/ +#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ #define WIN_QUEUED_SERVICE 0xA2 /* obsolete since ACS2 */ /* reserved 0xA3..0xAF */ -#define WIN_SMART 0xB0 /* self-monitoring and reporting */ +#define WIN_SMART 0xB0 /* self-monitoring and reporting */ /* Device Configuration Overlay 0xB1 */ /* reserved 0xB2..0xB3 */ /* Sanitize Device 0xB4 */ /* reserved 0xB5 */ /* NV Cache 0xB6 */ /* reserved for CFA 0xB7..0xBB */ -#define CFA_ACCESS_METADATA_STORAGE 0xB8 +#define CFA_ACCESS_METADATA_STORAGE 0xB8 /* reserved 0xBC..0xBF */ -#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */ +#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */ /* vendor specific 0xC1..0xC3 */ -#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ -#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ -#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ +#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ +#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ +#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ #define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers, obsolete since ACS2 */ -#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ +#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ #define WIN_READDMA_ONCE 0xC9 /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ +#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ #define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - w/o retries, obsolete since ATA5 */ -#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */ -#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ +#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */ +#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ /* WRITE MULTIPLE FUA EXT 0xCE */ /* reserved 0xCF..0xDO */ /* CHECK MEDIA CARD TYPE 0xD1 */ @@ -184,33 +184,33 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* obsolete since ATA3, retired in ATA4 0xDB..0xDD */ #define WIN_DOORLOCK 0xDE /* lock door on removable drives, obsolete since ATA8 */ #define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives, obsolete since ATA8 */ -#define WIN_STANDBYNOW1 0xE0 -#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ -#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ -#define WIN_SETIDLE1 0xE3 -#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ -#define WIN_CHECKPOWERMODE1 0xE5 -#define WIN_SLEEPNOW1 0xE6 -#define WIN_FLUSH_CACHE 0xE7 -#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ +#define WIN_STANDBYNOW1 0xE0 +#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ +#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ +#define WIN_SETIDLE1 0xE3 +#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ +#define WIN_CHECKPOWERMODE1 0xE5 +#define WIN_SLEEPNOW1 0xE6 +#define WIN_FLUSH_CACHE 0xE7 +#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ /* READ BUFFER DMA 0xE9 */ -#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ +#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ /* WRITE BUFFER DMA 0xEB */ -#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */ +#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */ #define WIN_MEDIAEJECT 0xED /* obsolete since ATA8 */ /* obsolete since ATA4 0xEE */ -#define WIN_SETFEATURES 0xEF /* set 
special drive features */ +#define WIN_SETFEATURES 0xEF /* set special drive features */ #define IBM_SENSE_CONDITION 0xF0 /* measure disk temperature, vendor specific */ -#define WIN_SECURITY_SET_PASS 0xF1 -#define WIN_SECURITY_UNLOCK 0xF2 -#define WIN_SECURITY_ERASE_PREPARE 0xF3 -#define WIN_SECURITY_ERASE_UNIT 0xF4 -#define WIN_SECURITY_FREEZE_LOCK 0xF5 +#define WIN_SECURITY_SET_PASS 0xF1 +#define WIN_SECURITY_UNLOCK 0xF2 +#define WIN_SECURITY_ERASE_PREPARE 0xF3 +#define WIN_SECURITY_ERASE_UNIT 0xF4 +#define WIN_SECURITY_FREEZE_LOCK 0xF5 #define CFA_WEAR_LEVEL 0xF5 /* microdrives implement as NOP; not specified in T13! */ -#define WIN_SECURITY_DISABLE 0xF6 +#define WIN_SECURITY_DISABLE 0xF6 /* vendor specific 0xF7 */ -#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ -#define WIN_SET_MAX 0xF9 +#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ +#define WIN_SET_MAX 0xF9 /* vendor specific 0xFA..0xFF */ /* set to 1 set disable mult support */ @@ -231,68 +231,68 @@ OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS) /* The generic packet command opcodes for CD/DVD Logical Units, * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ -#define GPCMD_BLANK 0xa1 -#define GPCMD_CLOSE_TRACK 0x5b -#define GPCMD_FLUSH_CACHE 0x35 -#define GPCMD_FORMAT_UNIT 0x04 -#define GPCMD_GET_CONFIGURATION 0x46 +#define GPCMD_BLANK 0xa1 +#define GPCMD_CLOSE_TRACK 0x5b +#define GPCMD_FLUSH_CACHE 0x35 +#define GPCMD_FORMAT_UNIT 0x04 +#define GPCMD_GET_CONFIGURATION 0x46 #define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a -#define GPCMD_GET_PERFORMANCE 0xac -#define GPCMD_INQUIRY 0x12 -#define GPCMD_LOAD_UNLOAD 0xa6 -#define GPCMD_MECHANISM_STATUS 0xbd -#define GPCMD_MODE_SELECT_10 0x55 -#define GPCMD_MODE_SENSE_10 0x5a -#define GPCMD_PAUSE_RESUME 0x4b -#define GPCMD_PLAY_AUDIO_10 0x45 -#define GPCMD_PLAY_AUDIO_MSF 0x47 -#define GPCMD_PLAY_AUDIO_TI 0x48 -#define GPCMD_PLAY_CD 0xbc +#define GPCMD_GET_PERFORMANCE 0xac +#define GPCMD_INQUIRY 0x12 +#define GPCMD_LOAD_UNLOAD 0xa6 +#define GPCMD_MECHANISM_STATUS 0xbd +#define GPCMD_MODE_SELECT_10 0x55 +#define GPCMD_MODE_SENSE_10 0x5a +#define GPCMD_PAUSE_RESUME 0x4b +#define GPCMD_PLAY_AUDIO_10 0x45 +#define GPCMD_PLAY_AUDIO_MSF 0x47 +#define GPCMD_PLAY_AUDIO_TI 0x48 +#define GPCMD_PLAY_CD 0xbc #define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e -#define GPCMD_READ_10 0x28 -#define GPCMD_READ_12 0xa8 -#define GPCMD_READ_CDVD_CAPACITY 0x25 -#define GPCMD_READ_CD 0xbe -#define GPCMD_READ_CD_MSF 0xb9 -#define GPCMD_READ_DISC_INFO 0x51 -#define GPCMD_READ_DVD_STRUCTURE 0xad -#define GPCMD_READ_FORMAT_CAPACITIES 0x23 -#define GPCMD_READ_HEADER 0x44 -#define GPCMD_READ_TRACK_RZONE_INFO 0x52 -#define GPCMD_READ_SUBCHANNEL 0x42 -#define GPCMD_READ_TOC_PMA_ATIP 0x43 -#define GPCMD_REPAIR_RZONE_TRACK 0x58 -#define GPCMD_REPORT_KEY 0xa4 -#define GPCMD_REQUEST_SENSE 0x03 -#define GPCMD_RESERVE_RZONE_TRACK 0x53 -#define GPCMD_SCAN 0xba -#define GPCMD_SEEK 0x2b -#define GPCMD_SEND_DVD_STRUCTURE 0xad -#define GPCMD_SEND_EVENT 0xa2 -#define GPCMD_SEND_KEY 0xa3 -#define GPCMD_SEND_OPC 0x54 -#define GPCMD_SET_READ_AHEAD 0xa7 -#define GPCMD_SET_STREAMING 0xb6 -#define GPCMD_START_STOP_UNIT 0x1b -#define GPCMD_STOP_PLAY_SCAN 0x4e -#define GPCMD_TEST_UNIT_READY 0x00 -#define GPCMD_VERIFY_10 0x2f -#define GPCMD_WRITE_10 0x2a -#define GPCMD_WRITE_AND_VERIFY_10 0x2e +#define GPCMD_READ_10 0x28 +#define GPCMD_READ_12 0xa8 +#define GPCMD_READ_CDVD_CAPACITY 0x25 +#define GPCMD_READ_CD 0xbe +#define GPCMD_READ_CD_MSF 0xb9 +#define GPCMD_READ_DISC_INFO 
0x51 +#define GPCMD_READ_DVD_STRUCTURE 0xad +#define GPCMD_READ_FORMAT_CAPACITIES 0x23 +#define GPCMD_READ_HEADER 0x44 +#define GPCMD_READ_TRACK_RZONE_INFO 0x52 +#define GPCMD_READ_SUBCHANNEL 0x42 +#define GPCMD_READ_TOC_PMA_ATIP 0x43 +#define GPCMD_REPAIR_RZONE_TRACK 0x58 +#define GPCMD_REPORT_KEY 0xa4 +#define GPCMD_REQUEST_SENSE 0x03 +#define GPCMD_RESERVE_RZONE_TRACK 0x53 +#define GPCMD_SCAN 0xba +#define GPCMD_SEEK 0x2b +#define GPCMD_SEND_DVD_STRUCTURE 0xad +#define GPCMD_SEND_EVENT 0xa2 +#define GPCMD_SEND_KEY 0xa3 +#define GPCMD_SEND_OPC 0x54 +#define GPCMD_SET_READ_AHEAD 0xa7 +#define GPCMD_SET_STREAMING 0xb6 +#define GPCMD_START_STOP_UNIT 0x1b +#define GPCMD_STOP_PLAY_SCAN 0x4e +#define GPCMD_TEST_UNIT_READY 0x00 +#define GPCMD_VERIFY_10 0x2f +#define GPCMD_WRITE_10 0x2a +#define GPCMD_WRITE_AND_VERIFY_10 0x2e /* This is listed as optional in ATAPI 2.6, but is (curiously) * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji * Table 377 as an MMC command for SCSi devices though... Most ATAPI * drives support it. */ -#define GPCMD_SET_SPEED 0xbb +#define GPCMD_SET_SPEED 0xbb /* This seems to be a SCSI specific CD-ROM opcode * to play data at track/index */ -#define GPCMD_PLAYAUDIO_TI 0x48 +#define GPCMD_PLAYAUDIO_TI 0x48 /* * From MS Media Status Notification Support Specification. For * older drives only. */ -#define GPCMD_GET_MEDIA_STATUS 0xda -#define GPCMD_MODE_SENSE_6 0x1a +#define GPCMD_GET_MEDIA_STATUS 0xda +#define GPCMD_MODE_SENSE_6 0x1a #define ATAPI_INT_REASON_CD 0x01 /* 0 = data transfer */ #define ATAPI_INT_REASON_IO 0x02 /* 1 = transfer to the host */ diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h new file mode 100644 index 0000000000..69fe74b50d --- /dev/null +++ b/include/qemu/qtree.h @@ -0,0 +1,201 @@ +/* + * GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +/* + * QTree is a partial import of Glib's GTree. The parts excluded correspond + * to API calls either deprecated (e.g. g_tree_traverse) or recently added + * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU. + * + * The reason for this import is to allow us to control the memory allocator + * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's + * slice allocator, which causes problems when forking in user-mode; + * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's + * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead". 
+ * + * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3. + */ + +#ifndef QEMU_QTREE_H +#define QEMU_QTREE_H + +#include "qemu/osdep.h" + +#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR + +typedef struct _QTree QTree; + +typedef struct _QTreeNode QTreeNode; + +typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node, + gpointer user_data); + +/* + * Balanced binary trees + */ +QTree *q_tree_new(GCompareFunc key_compare_func); +QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data); +QTree *q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func); +QTree *q_tree_ref(QTree *tree); +void q_tree_unref(QTree *tree); +void q_tree_destroy(QTree *tree); +void q_tree_insert(QTree *tree, + gpointer key, + gpointer value); +void q_tree_replace(QTree *tree, + gpointer key, + gpointer value); +gboolean q_tree_remove(QTree *tree, + gconstpointer key); +gboolean q_tree_steal(QTree *tree, + gconstpointer key); +gpointer q_tree_lookup(QTree *tree, + gconstpointer key); +gboolean q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value); +void q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data); +gpointer q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data); +gint q_tree_height(QTree *tree); +gint q_tree_nnodes(QTree *tree); + +#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */ + +typedef GTree QTree; +typedef GTreeNode QTreeNode; +typedef GTraverseNodeFunc QTraverseNodeFunc; + +static inline QTree *q_tree_new(GCompareFunc key_compare_func) +{ + return g_tree_new(key_compare_func); +} + +static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data) +{ + return g_tree_new_with_data(key_compare_func, key_compare_data); +} + +static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) +{ + return g_tree_new_full(key_compare_func, key_compare_data, + key_destroy_func, value_destroy_func); +} + +static inline QTree *q_tree_ref(QTree *tree) +{ + return g_tree_ref(tree); +} + +static inline void q_tree_unref(QTree *tree) +{ + g_tree_unref(tree); +} + +static inline void q_tree_destroy(QTree *tree) +{ + g_tree_destroy(tree); +} + +static inline void q_tree_insert(QTree *tree, + gpointer key, + gpointer value) +{ + g_tree_insert(tree, key, value); +} + +static inline void q_tree_replace(QTree *tree, + gpointer key, + gpointer value) +{ + g_tree_replace(tree, key, value); +} + +static inline gboolean q_tree_remove(QTree *tree, + gconstpointer key) +{ + return g_tree_remove(tree, key); +} + +static inline gboolean q_tree_steal(QTree *tree, + gconstpointer key) +{ + return g_tree_steal(tree, key); +} + +static inline gpointer q_tree_lookup(QTree *tree, + gconstpointer key) +{ + return g_tree_lookup(tree, key); +} + +static inline gboolean q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value) +{ + return g_tree_lookup_extended(tree, lookup_key, orig_key, value); +} + +static inline void q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data) +{ + return g_tree_foreach(tree, func, user_data); +} + +static inline gpointer q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + return g_tree_search(tree, search_func, user_data); +} + +static inline 
gint q_tree_height(QTree *tree) +{ + return g_tree_height(tree); +} + +static inline gint q_tree_nnodes(QTree *tree) +{ + return g_tree_nnodes(tree); +} + +#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */ + +#endif /* QEMU_QTREE_H */ diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h index 40ab178719..c672b77247 100644 --- a/include/sysemu/block-backend-io.h +++ b/include/sysemu/block-backend-io.h @@ -70,7 +70,10 @@ void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag); int64_t coroutine_fn blk_co_getlength(BlockBackend *blk); int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk); -void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr); +void coroutine_fn blk_co_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr); +void co_wrapper_mixed blk_get_geometry(BlockBackend *blk, + uint64_t *nb_sectors_ptr); int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk); int64_t co_wrapper_mixed blk_nb_sectors(BlockBackend *blk); diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h index 89ba274cfc..f6383a7cd1 100644 --- a/linux-user/arm/target_cpu.h +++ b/linux-user/arm/target_cpu.h @@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs) * the high addresses. Restrict linux-user to the * cached write-back RAM in the system map. */ - return 0x80000000ul; + return 0x7ffffffful; } else { /* * We need to be able to map the commpage. diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 1dbc1f0f9b..f1370a7a8b 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -208,12 +208,12 @@ static bool init_guest_commpage(void) * has specified -R reserved_va, which would trigger an assert(). */ if (reserved_va != 0 && - TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) { + TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) { error_report("Cannot allocate vsyscall page"); exit(EXIT_FAILURE); } page_set_flags(TARGET_VSYSCALL_PAGE, - TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE, + TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK, PAGE_EXEC | PAGE_VALID); return true; } @@ -444,7 +444,7 @@ static bool init_guest_commpage(void) exit(EXIT_FAILURE); } - page_set_flags(commpage, commpage + qemu_host_page_size, + page_set_flags(commpage, commpage | ~qemu_host_page_mask, PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -1316,7 +1316,7 @@ static bool init_guest_commpage(void) exit(EXIT_FAILURE); } - page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE, + page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, PAGE_READ | PAGE_EXEC | PAGE_VALID); return true; } @@ -1728,7 +1728,7 @@ static bool init_guest_commpage(void) * and implement syscalls. Here, simply mark the page executable. * Special case the entry points during translation (see do_page_zero). 
*/ - page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE, + page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, PAGE_EXEC | PAGE_VALID); return true; } @@ -2209,7 +2209,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) /* Ensure that the bss page(s) are valid */ if ((page_get_flags(last_bss-1) & prot) != prot) { - page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID); + page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1, + prot | PAGE_VALID); } if (host_start < host_map_start) { @@ -2511,7 +2512,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr, if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) { error_report("%s: requires more virtual address space " "than the host can provide (0x%" PRIx64 ")", - image_name, (uint64_t)guest_hiaddr - guest_base); + image_name, (uint64_t)guest_hiaddr + 1 - guest_base); exit(EXIT_FAILURE); } #endif @@ -2529,13 +2530,13 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr, /* Reserve the address space for the binary, or reserved_va. */ test = g2h_untagged(guest_loaddr); - addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0); + addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0); if (test != addr) { pgb_fail_in_use(image_name); } qemu_log_mask(CPU_LOG_PAGE, - "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n", - __func__, addr, guest_hiaddr - guest_loaddr); + "%s: base @ %p for %" PRIu64 " bytes\n", + __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1); } /** @@ -2679,7 +2680,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, if (hiaddr != orig_hiaddr) { error_report("%s: requires virtual address space that the " "host cannot provide (0x%" PRIx64 ")", - image_name, (uint64_t)orig_hiaddr); + image_name, (uint64_t)orig_hiaddr + 1); exit(EXIT_FAILURE); } @@ -2693,7 +2694,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, * arithmetic wraps around. */ if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) { - hiaddr = (uintptr_t) 4 << 30; + hiaddr = UINT32_MAX; } else { offset = -(HI_COMMPAGE & -align); } @@ -2701,7 +2702,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr, loaddr = MIN(loaddr, LO_COMMPAGE & -align); } - addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset); + addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset); if (addr == -1) { /* * If HI_COMMPAGE, there *might* be a non-consecutive allocation @@ -2767,17 +2768,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr, /* Reserve the memory on the host. 
*/ assert(guest_base != 0); test = g2h_untagged(0); - addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0); + addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0); if (addr == MAP_FAILED || addr != test) { error_report("Unable to reserve 0x%lx bytes of virtual address " "space at %p (%s) for use as guest address space (check your " "virtual memory ulimit setting, min_mmap_addr or reserve less " - "using -R option)", reserved_va, test, strerror(errno)); + "using -R option)", reserved_va + 1, test, strerror(errno)); exit(EXIT_FAILURE); } qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n", - __func__, addr, reserved_va); + __func__, addr, reserved_va + 1); } void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, @@ -3020,7 +3021,7 @@ static void load_elf_image(const char *image_name, int image_fd, if (a < loaddr) { loaddr = a; } - a = eppnt->p_vaddr + eppnt->p_memsz; + a = eppnt->p_vaddr + eppnt->p_memsz - 1; if (a > hiaddr) { hiaddr = a; } @@ -3111,7 +3112,7 @@ static void load_elf_image(const char *image_name, int image_fd, * In both cases, we will overwrite pages in this range with mappings * from the executable. */ - load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE, + load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0), -1, 0); diff --git a/linux-user/flatload.c b/linux-user/flatload.c index e99570ca18..5efec2630e 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -448,7 +448,7 @@ static int load_flat_file(struct linux_binprm * bprm, * Allocate the address space. */ probe_guest_base(bprm->filename, 0, - text_len + data_len + extra + indx_len); + text_len + data_len + extra + indx_len - 1); /* * there are a couple of cases here, the separate code/data diff --git a/linux-user/main.c b/linux-user/main.c index 4b18461969..fe03293516 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -109,11 +109,9 @@ static const char *last_log_filename; # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS # if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \ (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) -/* There are a number of places where we assign reserved_va to a variable - of type abi_ulong and expect it to fit. Avoid the last page. */ -# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK) +# define MAX_RESERVED_VA(CPU) 0xfffffffful # else -# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS) +# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) # endif # else # define MAX_RESERVED_VA(CPU) 0 @@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg) { char *p; int shift = 0; - reserved_va = strtoul(arg, &p, 0); + unsigned long val; + + val = strtoul(arg, &p, 0); switch (*p) { case 'k': case 'K': @@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg) break; } if (shift) { - unsigned long unshifted = reserved_va; + unsigned long unshifted = val; p++; - reserved_va <<= shift; - if (reserved_va >> shift != unshifted) { + val <<= shift; + if (val >> shift != unshifted) { fprintf(stderr, "Reserved virtual address too big\n"); exit(EXIT_FAILURE); } @@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg) fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p); exit(EXIT_FAILURE); } + /* The representation is size - 1, with 0 remaining "default". */ + reserved_va = val ? 
val - 1 : 0; } static void handle_arg_singlestep(const char *arg) @@ -793,16 +795,19 @@ int main(int argc, char **argv, char **envp) */ max_reserved_va = MAX_RESERVED_VA(cpu); if (reserved_va != 0) { + if ((reserved_va + 1) % qemu_host_page_size) { + char *s = size_to_str(qemu_host_page_size); + fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s); + g_free(s); + exit(EXIT_FAILURE); + } if (max_reserved_va && reserved_va > max_reserved_va) { fprintf(stderr, "Reserved virtual address too big\n"); exit(EXIT_FAILURE); } } else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) { - /* - * reserved_va must be aligned with the host page size - * as it is used with mmap() - */ - reserved_va = max_reserved_va & qemu_host_page_mask; + /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */ + reserved_va = max_reserved_va; } { diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h index a98c9bd6ad..b965e86b2b 100644 --- a/linux-user/mips/target_elf.h +++ b/linux-user/mips/target_elf.h @@ -15,6 +15,9 @@ static inline const char *cpu_get_model(uint32_t eflags) if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) { return "R5900"; } + if (eflags & EF_MIPS_NAN2008) { + return "P5600"; + } return "24Kf"; } #endif diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 28135c9e6a..0aa8ae7356 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -181,7 +181,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } } - page_set_flags(start, start + len, page_flags); + page_set_flags(start, start + len - 1, page_flags); ret = 0; error: @@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, end_addr = start + size; if (start > reserved_va - size) { /* Start at the top of the address space. */ - end_addr = ((reserved_va - size) & -align) + size; + end_addr = ((reserved_va + 1 - size) & -align) + size; looped = true; } @@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, return (abi_ulong)-1; } /* Re-start at the top of the address space. 
*/ - addr = end_addr = ((reserved_va - size) & -align) + size; + addr = end_addr = ((reserved_va + 1 - size) & -align) + size; looped = true; } else { prot = page_get_flags(addr); @@ -640,15 +640,15 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, } page_flags |= PAGE_RESET; if (passthrough_start == passthrough_end) { - page_set_flags(start, start + len, page_flags); + page_set_flags(start, start + len - 1, page_flags); } else { if (start < passthrough_start) { - page_set_flags(start, passthrough_start, page_flags); + page_set_flags(start, passthrough_start - 1, page_flags); } - page_set_flags(passthrough_start, passthrough_end, + page_set_flags(passthrough_start, passthrough_end - 1, page_flags | PAGE_PASSTHROUGH); if (passthrough_end < start + len) { - page_set_flags(passthrough_end, start + len, page_flags); + page_set_flags(passthrough_end, start + len - 1, page_flags); } } the_end: @@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len) } if (ret == 0) { - page_set_flags(start, start + len, 0); + page_set_flags(start, start + len - 1, 0); } mmap_unlock(); return ret; @@ -849,8 +849,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, } else { new_addr = h2g(host_addr); prot = page_get_flags(old_addr); - page_set_flags(old_addr, old_addr + old_size, 0); - page_set_flags(new_addr, new_addr + new_size, + page_set_flags(old_addr, old_addr + old_size - 1, 0); + page_set_flags(new_addr, new_addr + new_size - 1, prot | PAGE_VALID | PAGE_RESET); } mmap_unlock(); @@ -946,7 +946,7 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice) if (can_passthrough_madvise(start, end)) { ret = get_errno(madvise(g2h_untagged(start), len, advice)); if ((advice == MADV_DONTNEED) && (ret == 0)) { - page_reset_target_data(start, start + len); + page_reset_target_data(start, start + len - 1); } } } diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 27871641f4..69f740ff98 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -4595,7 +4595,7 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env, } raddr=h2g((unsigned long)host_raddr); - page_set_flags(raddr, raddr + shm_info.shm_segsz, + page_set_flags(raddr, raddr + shm_info.shm_segsz - 1, PAGE_VALID | PAGE_RESET | PAGE_READ | (shmflg & SHM_RDONLY ? 
0 : PAGE_WRITE)); @@ -4625,7 +4625,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr) for (i = 0; i < N_SHM_REGIONS; ++i) { if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { shm_regions[i].in_use = false; - page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); + page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0); break; } } diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 614a1cbc8e..cc37054cb5 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -61,7 +61,7 @@ #if (defined(TARGET_I386) && defined(TARGET_ABI32)) \ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \ - || defined(TARGET_SPARC) \ + || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \ || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS) /* 16 bit uid wrappers emulation */ #define USE_UID16 diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h index 9333db4f51..c63ef45fc7 100644 --- a/linux-user/user-internals.h +++ b/linux-user/user-internals.h @@ -76,19 +76,19 @@ void fork_end(int child); /** * probe_guest_base: * @image_name: the executable being loaded - * @loaddr: the lowest fixed address in the executable - * @hiaddr: the highest fixed address in the executable + * @loaddr: the lowest fixed address within the executable + * @hiaddr: the highest fixed address within the executable * * Creates the initial guest address space in the host memory space. * - * If @loaddr == 0, then no address in the executable is fixed, - * i.e. it is fully relocatable. In that case @hiaddr is the size - * of the executable. + * If @loaddr == 0, then no address in the executable is fixed, i.e. + * it is fully relocatable. In that case @hiaddr is the size of the + * executable minus one. * * This function will not return if a valid value for guest_base * cannot be chosen. On return, the executable loader can expect * - * target_mmap(loaddr, hiaddr - loaddr, ...) + * target_mmap(loaddr, hiaddr - loaddr + 1, ...) * * to succeed. */ diff --git a/meson.build b/meson.build index 29f8644d6d..c44d05a13f 100644 --- a/meson.build +++ b/meson.build @@ -508,6 +508,10 @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(), }) # override glib dep with the configure results (for subprojects) meson.override_dependency('glib-2.0', glib) +# pass down whether Glib has the slice allocator +if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR') + config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true) +endif gio = not_found gdbus_codegen = not_found diff --git a/nbd/server.c b/nbd/server.c index a4750e4188..3d8d0d81df 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -2667,6 +2667,8 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } + qio_channel_set_cork(client->ioc, true); + if (ret < 0) { /* It wasn't -EIO, so, according to nbd_co_receive_request() * semantics, we should return the error to the client. 
*/ @@ -2692,6 +2694,7 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } + qio_channel_set_cork(client->ioc, false); done: nbd_request_put(req); nbd_client_put(client); @@ -2755,6 +2758,7 @@ void nbd_client_new(QIOChannelSocket *sioc, } client->tlsauthz = g_strdup(tlsauthz); client->sioc = sioc; + qio_channel_set_delay(QIO_CHANNEL(sioc), false); object_ref(OBJECT(client->sioc)); client->ioc = QIO_CHANNEL(sioc); object_ref(OBJECT(client->ioc)); diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index 68662a6dfc..ff93b08a9e 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -527,7 +527,7 @@ namespace _com_util /* Stop QGA VSS provider service using Winsvc API */ STDAPI StopService(void) { - HRESULT hr; + HRESULT hr = S_OK; SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); SC_HANDLE service = NULL; diff --git a/scripts/coverage/compare_gcov_json.py b/scripts/coverage/compare_gcov_json.py new file mode 100755 index 0000000000..1b92dc2c8c --- /dev/null +++ b/scripts/coverage/compare_gcov_json.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# +# Compare output of two gcovr JSON reports and report differences. To +# generate the required output first: +# - create two build dirs with --enable-gcov +# - run set of tests in each +# - run make coverage-html in each +# - run gcovr --json --exclude-unreachable-branches \ +# --print-summary -o coverage.json --root ../../ . *.p +# +# Author: Alex Bennée <alex.bennee@linaro.org> +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +import argparse +import json +import sys +from pathlib import Path + +def create_parser(): + parser = argparse.ArgumentParser( + prog='compare_gcov_json', + description='analyse the differences in coverage between two runs') + + parser.add_argument('-a', type=Path, default=None, + help=('First file to check')) + + parser.add_argument('-b', type=Path, default=None, + help=('Second file to check')) + + parser.add_argument('--verbose', action='store_true', default=False, + help=('A minimal verbosity level that prints the ' + 'overall result of the check/wait')) + return parser + + +# See https://gcovr.com/en/stable/output/json.html#json-format-reference +def load_json(json_file_path: Path, verbose = False) -> dict[str, set[int]]: + + with open(json_file_path) as f: + data = json.load(f) + + root_dir = json_file_path.absolute().parent + covered_lines = dict() + + for filecov in data["files"]: + file_path = Path(filecov["file"]) + + # account for generated files - map into src tree + resolved_path = Path(file_path).absolute() + if resolved_path.is_relative_to(root_dir): + file_path = resolved_path.relative_to(root_dir) + # print(f"remapped {resolved_path} to {file_path}") + + lines = filecov["lines"] + + executed_lines = set( + linecov["line_number"] + for linecov in filecov["lines"] + if linecov["count"] != 0 and not linecov["gcovr/noncode"] + ) + + # if this file has any coverage add it to the system + if len(executed_lines) > 0: + if verbose: + print(f"file {file_path} {len(executed_lines)}/{len(lines)}") + covered_lines[str(file_path)] = executed_lines + + return covered_lines + +def find_missing_files(first, second): + """ + Return a list of files not covered in the second set + """ + missing_files = [] + for f in sorted(first): + file_a = first[f] + try: + file_b = second[f] + except KeyError: + missing_files.append(f) + + return missing_files + +def main(): + """ + Script entry point + """ + parser = create_parser() + args = parser.parse_args() + + if 
not args.a or not args.b: + print("We need two files to compare") + sys.exit(1) + + first_coverage = load_json(args.a, args.verbose) + second_coverage = load_json(args.b, args.verbose) + + first_missing = find_missing_files(first_coverage, + second_coverage) + + second_missing = find_missing_files(second_coverage, + first_coverage) + + a_name = args.a.parent.name + b_name = args.b.parent.name + + print(f"{b_name} missing coverage in {len(first_missing)} files") + for f in first_missing: + print(f" {f}") + + print(f"{a_name} missing coverage in {len(second_missing)} files") + for f in second_missing: + print(f" {f}") + + +if __name__ == '__main__': + main() diff --git a/softmmu/memory.c b/softmmu/memory.c index 5305aca7ca..b1a6cae6f5 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -1900,7 +1900,6 @@ int memory_region_register_iommu_notifier(MemoryRegion *mr, iommu_mr = IOMMU_MEMORY_REGION(mr); assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); assert(n->start <= n->end); - assert(n->end <= memory_region_size(mr)); assert(n->iommu_idx >= 0 && n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); @@ -1924,6 +1923,7 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) { + MemoryRegion *mr = MEMORY_REGION(iommu_mr); IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); hwaddr addr, granularity; IOMMUTLBEntry iotlb; @@ -1936,7 +1936,7 @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) granularity = memory_region_iommu_get_min_page_size(iommu_mr); - for (addr = n->start; addr < n->end; addr += granularity) { + for (addr = 0; addr < memory_region_size(mr); addr += granularity) { iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); if (iotlb.perm != IOMMU_NONE) { n->notify(n, &iotlb); diff --git a/softmmu/meson.build b/softmmu/meson.build index 0180577517..1a7c7ac089 100644 --- a/softmmu/meson.build +++ b/softmmu/meson.build @@ -5,11 +5,11 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files( 'physmem.c', 'qtest.c', 'dirtylimit.c', + 'watchpoint.c', )]) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( 'icount.c', - 'watchpoint.c', )]) softmmu_ss.add(files( diff --git a/softmmu/physmem.c b/softmmu/physmem.c index e35061bba4..0e0182d9f2 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -2527,7 +2527,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, } if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { assert(tcg_enabled()); - tb_invalidate_phys_range(addr, addr + length); + tb_invalidate_phys_range(addr, addr + length - 1); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); } cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); diff --git a/softmmu/watchpoint.c b/softmmu/watchpoint.c index ad58736787..5350163385 100644 --- a/softmmu/watchpoint.c +++ b/softmmu/watchpoint.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" +#include "qemu/error-report.h" #include "exec/exec-all.h" #include "exec/translate-all.h" #include "sysemu/tcg.h" @@ -103,6 +104,8 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) } } +#ifdef CONFIG_TCG + /* * Return true if this watchpoint address matches the specified * access (ie the address range covered by the watchpoint overlaps @@ -219,3 +222,5 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, } } } + +#endif /* CONFIG_TCG */ diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 
3bd86cee97..13fbe9b0d7 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -21,6 +21,7 @@ #include "cpu.h" #include "exec/gdbstub.h" #include "gdbstub/helpers.h" +#include "sysemu/tcg.h" #include "internals.h" #include "cpregs.h" @@ -553,7 +554,7 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 2, "arm-vfp-sysregs.xml", 0); } } - if (cpu_isar_feature(aa32_mve, cpu)) { + if (cpu_isar_feature(aa32_mve, cpu) && tcg_enabled()) { gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg, 1, "arm-m-profile-mve.xml", 0); } @@ -561,7 +562,7 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), "system-registers.xml", 0); - if (arm_feature(env, ARM_FEATURE_M)) { + if (arm_feature(env, ARM_FEATURE_M) && tcg_enabled()) { gdb_register_coprocessor(cs, arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg, arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs), diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index ec1e07f139..c1f7e8c934 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -230,8 +230,11 @@ int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg) { bool is_data = !(reg & 1); bool is_high = reg & 2; - uint64_t mask = pauth_ptr_mask(env, -is_high, is_data); - return gdb_get_reg64(buf, mask); + ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); + ARMVAParameters param; + + param = aa64_va_parameters(env, -is_high, mmu_idx, is_data); + return gdb_get_reg64(buf, pauth_ptr_mask(param)); } default: return 0; diff --git a/target/arm/internals.h b/target/arm/internals.h index 673519a24a..c2c70d5918 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1391,13 +1391,18 @@ bool arm_generate_debug_exceptions(CPUARMState *env); /** * pauth_ptr_mask: - * @env: cpu context - * @ptr: selects between TTBR0 and TTBR1 - * @data: selects between TBI and TBID + * @param: parameters defining the MMU setup * - * Return a mask of the bits of @ptr that contain the authentication code. + * Return a mask of the address bits that contain the authentication code, + * given the MMU config defined by @param. 
*/ -uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data); +static inline uint64_t pauth_ptr_mask(ARMVAParameters param) +{ + int bot_pac_bit = 64 - param.tsz; + int top_pac_bit = 64 - 8 * param.tbi; + + return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit); +} /* Add the cpreg definitions for debug related system registers */ void define_debug_regs(ARMCPU *cpu); diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index fee3c7eb96..a4f3f92bc0 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -25,6 +25,7 @@ #include "exec/ram_addr.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" +#include "hw/core/tcg-cpu-ops.h" #include "qapi/error.h" #include "qemu/guest-random.h" diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c index 20f347332d..de067fa716 100644 --- a/target/arm/tcg/pauth_helper.c +++ b/target/arm/tcg/pauth_helper.c @@ -339,17 +339,9 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, return pac | ext | ptr; } -static uint64_t pauth_ptr_mask_internal(ARMVAParameters param) -{ - int bot_pac_bit = 64 - param.tsz; - int top_pac_bit = 64 - 8 * param.tbi; - - return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit); -} - static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) { - uint64_t mask = pauth_ptr_mask_internal(param); + uint64_t mask = pauth_ptr_mask(param); /* Note that bit 55 is used whether or not the regime has 2 ranges. */ if (extract64(ptr, 55, 1)) { @@ -359,14 +351,6 @@ static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) } } -uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data) -{ - ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); - ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); - - return pauth_ptr_mask_internal(param); -} - static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data, int keynumber) { diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 9a8951afa4..ccf5e5beca 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -27,6 +27,7 @@ #include "tcg/tcg.h" #include "vec_internal.h" #include "sve_ldst_internal.h" +#include "hw/core/tcg-cpu-ops.h" /* Return a value for NZCV as per the ARM PredTest pseudofunction. diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index 2cb9368b1b..3c8401e908 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -4623,6 +4623,12 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label); gen_exception_insn(s, 0, EXCP_UDEF, syndrome); + /* + * gen_exception_insn() will set is_jmp to DISAS_NORETURN, + * but since we're conditionally branching over it, we want + * to assume continue-to-next-instruction. + */ + s->base.is_jmp = DISAS_NEXT; set_disas_label(s, over); } } diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c index f443b5822f..21d86077f4 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/translate.c @@ -177,7 +177,7 @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) CPULoongArchState *env = cs->env_ptr; DisasContext *ctx = container_of(dcbase, DisasContext, base); - ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next); + ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); if (!decode(ctx, ctx->opcode)) { qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. 
" diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 287659c74d..199328f4b6 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -2622,7 +2622,7 @@ void helper_scv(CPUPPCState *env, uint32_t lev) } } -void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) +void helper_pminsn(CPUPPCState *env, uint32_t insn) { CPUState *cs; diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 5d4361d35b..825252d728 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -11,6 +11,7 @@ #define S390X_INTERNAL_H #include "cpu.h" +#include "fpu/softfloat.h" #ifndef CONFIG_USER_ONLY typedef struct LowCore { @@ -299,7 +300,7 @@ uint32_t set_cc_nz_f128(float128 v); uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); -int float_comp_to_cc(CPUS390XState *env, int float_compare); +int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare); #define DCMASK_ZERO 0x0c00 #define DCMASK_NORMAL 0x0300 diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index b93dbd3dad..8b58b8d88d 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -26,6 +26,7 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "hw/core/tcg-cpu-ops.h" #include "qemu/int128.h" #include "qemu/atomic128.h" #include "trace.h" diff --git a/tcg/region.c b/tcg/region.c index 88d6bb273f..bef4c4756f 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -28,6 +28,7 @@ #include "qemu/mprotect.h" #include "qemu/memalign.h" #include "qemu/cacheinfo.h" +#include "qemu/qtree.h" #include "qapi/error.h" #include "exec/exec-all.h" #include "tcg/tcg.h" @@ -36,7 +37,7 @@ struct tcg_region_tree { QemuMutex lock; - GTree *tree; + QTree *tree; /* padding to avoid false sharing is computed at run-time */ }; @@ -163,7 +164,7 @@ static void tcg_region_trees_init(void) struct tcg_region_tree *rt = region_trees + i * tree_size; qemu_mutex_init(&rt->lock); - rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy); + rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy); } } @@ -202,7 +203,7 @@ void tcg_tb_insert(TranslationBlock *tb) g_assert(rt != NULL); qemu_mutex_lock(&rt->lock); - g_tree_insert(rt->tree, &tb->tc, tb); + q_tree_insert(rt->tree, &tb->tc, tb); qemu_mutex_unlock(&rt->lock); } @@ -212,7 +213,7 @@ void tcg_tb_remove(TranslationBlock *tb) g_assert(rt != NULL); qemu_mutex_lock(&rt->lock); - g_tree_remove(rt->tree, &tb->tc); + q_tree_remove(rt->tree, &tb->tc); qemu_mutex_unlock(&rt->lock); } @@ -232,7 +233,7 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr) } qemu_mutex_lock(&rt->lock); - tb = g_tree_lookup(rt->tree, &s); + tb = q_tree_lookup(rt->tree, &s); qemu_mutex_unlock(&rt->lock); return tb; } @@ -267,7 +268,7 @@ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data) for (i = 0; i < region.n; i++) { struct tcg_region_tree *rt = region_trees + i * tree_size; - g_tree_foreach(rt->tree, func, user_data); + q_tree_foreach(rt->tree, func, user_data); } tcg_region_tree_unlock_all(); } @@ -281,7 +282,7 @@ size_t tcg_nb_tbs(void) for (i = 0; i < region.n; i++) { struct tcg_region_tree *rt = region_trees + i * tree_size; - nb_tbs += g_tree_nnodes(rt->tree); + nb_tbs += q_tree_nnodes(rt->tree); } tcg_region_tree_unlock_all(); return nb_tbs; @@ -296,8 +297,8 @@ static void tcg_region_tree_reset_all(void) struct tcg_region_tree 
*rt = region_trees + i * tree_size; /* Increment the refcount first so that destroy acts as a reset */ - g_tree_ref(rt->tree); - g_tree_destroy(rt->tree); + q_tree_ref(rt->tree); + q_tree_destroy(rt->tree); } tcg_region_tree_unlock_all(); } diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index ccc4144f7c..694f2b9dd4 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -1445,12 +1445,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which) { ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which)); - /* Direct branch will be patched by tb_target_set_jmp_target. */ + /* Load link and indirect branch. */ set_jmp_insn_offset(s, which); - tcg_out32(s, CALL); - /* delay slot */ - tcg_debug_assert(check_fit_ptr(off, 13)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL); + /* delay slot */ + tcg_out_nop(s); set_jmp_reset_offset(s, which); /* @@ -1469,28 +1469,6 @@ static void tcg_out_goto_tb(TCGContext *s, int which) void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { - uintptr_t addr = tb->jmp_target_addr[n]; - intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2; - tcg_insn_unit insn; - - br_disp >>= 2; - if (check_fit_ptr(br_disp, 19)) { - /* ba,pt %icc, addr */ - insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A) - | BPCC_ICC | BPCC_PT, 0, 19, br_disp); - } else if (check_fit_ptr(br_disp, 22)) { - /* ba addr */ - insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A), - 0, 22, br_disp); - } else { - /* The code_gen_buffer can't be larger than 2GB. */ - tcg_debug_assert(check_fit_ptr(br_disp, 30)); - /* call addr */ - insn = deposit32(CALL, 0, 30, br_disp); - } - - qatomic_set((uint32_t *)jmp_rw, insn); - flush_idcache_range(jmp_rx, jmp_rw, 4); } static void tcg_out_op(TCGContext *s, TCGOpcode opc, diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py new file mode 100644 index 0000000000..5391283113 --- /dev/null +++ b/tests/avocado/kvm_xen_guest.py @@ -0,0 +1,171 @@ +# KVM Xen guest functional tests +# +# Copyright © 2021 Red Hat, Inc. +# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Author: +# David Woodhouse <dwmw2@infradead.org> +# Alex Bennée <alex.bennee@linaro.org> +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu.machine import machine + +from avocado_qemu import LinuxSSHMixIn +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern + +class KVMXenGuest(QemuSystemTest, LinuxSSHMixIn): + """ + :avocado: tags=arch:x86_64 + :avocado: tags=machine:q35 + :avocado: tags=accel:kvm + :avocado: tags=kvm_xen_guest + """ + + KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0' + + kernel_path = None + kernel_params = None + + # Fetch assets from the kvm-xen-guest subdir of my shared test + # images directory on fileserver.linaro.org where you can find + # build instructions for how they where assembled. + def get_asset(self, name, sha1): + base_url = ('https://fileserver.linaro.org/s/' + 'kE4nCFLdQcoBF9t/download?' 
+ 'path=%2Fkvm-xen-guest&files=' ) + url = base_url + name + # use explicit name rather than failing to neatly parse the + # URL into a unique one + return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) + + def common_vm_setup(self): + # We also catch lack of KVM_XEN support if we fail to launch + self.require_accelerator("kvm") + + self.vm.set_console() + + self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split") + self.vm.add_args("-smp", "2") + + self.kernel_path = self.get_asset("bzImage", + "367962983d0d32109998a70b45dcee4672d0b045") + self.rootfs = self.get_asset("rootfs.ext4", + "f1478401ea4b3fa2ea196396be44315bab2bb5e4") + + def run_and_check(self): + self.vm.add_args('-kernel', self.kernel_path, + '-append', self.kernel_params, + '-drive', f"file={self.rootfs},if=none,format=raw,id=drv0", + '-device', 'xen-disk,drive=drv0,vdev=xvda', + '-device', 'virtio-net-pci,netdev=unet', + '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') + + try: + self.vm.launch() + except machine.VMLaunchFailure as e: + if "Xen HVM guest support not present" in e.output: + self.cancel("KVM Xen support is not present " + "(need v5.12+ kernel with CONFIG_KVM_XEN)") + elif "Property 'kvm-accel.xen-version' not found" in e.output: + self.cancel("QEMU not built with CONFIG_XEN_EMU support") + else: + raise e + + self.log.info('VM launched, waiting for sshd') + console_pattern = 'Starting dropbear sshd: OK' + wait_for_console_pattern(self, console_pattern, 'Oops') + self.log.info('sshd ready') + self.ssh_connect('root', '', False) + + self.ssh_command('cat /proc/cmdline') + self.ssh_command('dmesg | grep -e "Grant table initialized"') + + def test_kvm_xen_guest(self): + """ + :avocado: tags=kvm_xen_guest + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + self.ssh_command('grep xen-pirq.*msi /proc/interrupts') + + def test_kvm_xen_guest_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi') + self.run_and_check() + self.ssh_command('grep xen-pirq.* /proc/interrupts') + + def test_kvm_xen_guest_noapic_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_noapic_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks noapic pci=nomsi') + self.run_and_check() + self.ssh_command('grep xen-pirq /proc/interrupts') + + def test_kvm_xen_guest_vapic(self): + """ + :avocado: tags=kvm_xen_guest_vapic + """ + + self.common_vm_setup() + self.vm.add_args('-cpu', 'host,+xen-vapic') + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + self.ssh_command('grep xen-pirq /proc/interrupts') + self.ssh_command('grep PCI-MSI /proc/interrupts') + + def test_kvm_xen_guest_novector(self): + """ + :avocado: tags=kvm_xen_guest_novector + """ + + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') + + def test_kvm_xen_guest_novector_nomsi(self): + """ + :avocado: tags=kvm_xen_guest_novector_nomsi + """ + + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi' + + ' xen_no_vector_callback') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') + + def 
test_kvm_xen_guest_novector_noapic(self): + """ + :avocado: tags=kvm_xen_guest_novector_noapic + """ + + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback noapic') + self.run_and_check() + self.ssh_command('grep xen-platform-pci /proc/interrupts') diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py index c3fb67f5dc..d343376faa 100644 --- a/tests/avocado/tuxrun_baselines.py +++ b/tests/avocado/tuxrun_baselines.py @@ -270,7 +270,6 @@ class TuxRunBaselineTest(QemuSystemTest): """ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0") - @skip("QEMU currently broken") # regression against stable QEMU def test_mips64(self): """ :avocado: tags=arch:mips64 diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 7477a1f401..4e6b469066 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -9,6 +9,10 @@ xbzrle_bench = executable('xbzrle-bench', dependencies: [qemuutil,migration]) endif +qtree_bench = executable('qtree-bench', + sources: 'qtree-bench.c', + dependencies: [qemuutil]) + executable('atomic_add-bench', sources: files('atomic_add-bench.c'), dependencies: [qemuutil], diff --git a/tests/bench/qtree-bench.c b/tests/bench/qtree-bench.c new file mode 100644 index 0000000000..f3d7edc76d --- /dev/null +++ b/tests/bench/qtree-bench.c @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "qemu/osdep.h" +#include "qemu/qtree.h" +#include "qemu/timer.h" + +enum tree_op { + OP_LOOKUP, + OP_INSERT, + OP_REMOVE, + OP_REMOVE_ALL, + OP_TRAVERSE, +}; + +struct benchmark { + const char * const name; + enum tree_op op; + bool fill_on_init; +}; + +enum impl_type { + IMPL_GTREE, + IMPL_QTREE, +}; + +struct tree_implementation { + const char * const name; + enum impl_type type; +}; + +static const struct benchmark benchmarks[] = { + { + .name = "Lookup", + .op = OP_LOOKUP, + .fill_on_init = true, + }, + { + .name = "Insert", + .op = OP_INSERT, + .fill_on_init = false, + }, + { + .name = "Remove", + .op = OP_REMOVE, + .fill_on_init = true, + }, + { + .name = "RemoveAll", + .op = OP_REMOVE_ALL, + .fill_on_init = true, + }, + { + .name = "Traverse", + .op = OP_TRAVERSE, + .fill_on_init = true, + }, +}; + +static const struct tree_implementation impls[] = { + { + .name = "GTree", + .type = IMPL_GTREE, + }, + { + .name = "QTree", + .type = IMPL_QTREE, + }, +}; + +static int compare_func(const void *ap, const void *bp) +{ + const size_t *a = ap; + const size_t *b = bp; + + return *a - *b; +} + +static void init_empty_tree_and_keys(enum impl_type impl, + void **ret_tree, size_t **ret_keys, + size_t n_elems) +{ + size_t *keys = g_malloc_n(n_elems, sizeof(*keys)); + for (size_t i = 0; i < n_elems; i++) { + keys[i] = i; + } + + void *tree; + switch (impl) { + case IMPL_GTREE: + tree = g_tree_new(compare_func); + break; + case IMPL_QTREE: + tree = q_tree_new(compare_func); + break; + default: + g_assert_not_reached(); + } + + *ret_tree = tree; + *ret_keys = keys; +} + +static gboolean traverse_func(gpointer key, gpointer value, gpointer data) +{ + return FALSE; +} + +static inline void remove_all(void *tree, enum impl_type impl) +{ + switch (impl) { + case IMPL_GTREE: + g_tree_destroy(tree); + break; + case IMPL_QTREE: + q_tree_destroy(tree); + break; + default: + g_assert_not_reached(); + } +} + +static int64_t run_benchmark(const struct benchmark *bench, + enum impl_type impl, + size_t n_elems) +{ + void *tree; + size_t *keys; + + 
init_empty_tree_and_keys(impl, &tree, &keys, n_elems); + if (bench->fill_on_init) { + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_insert(tree, &keys[i], &keys[i]); + break; + case IMPL_QTREE: + q_tree_insert(tree, &keys[i], &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + } + + int64_t start_ns = get_clock(); + switch (bench->op) { + case OP_LOOKUP: + for (size_t i = 0; i < n_elems; i++) { + void *value; + switch (impl) { + case IMPL_GTREE: + value = g_tree_lookup(tree, &keys[i]); + break; + case IMPL_QTREE: + value = q_tree_lookup(tree, &keys[i]); + break; + default: + g_assert_not_reached(); + } + (void)value; + } + break; + case OP_INSERT: + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_insert(tree, &keys[i], &keys[i]); + break; + case IMPL_QTREE: + q_tree_insert(tree, &keys[i], &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + break; + case OP_REMOVE: + for (size_t i = 0; i < n_elems; i++) { + switch (impl) { + case IMPL_GTREE: + g_tree_remove(tree, &keys[i]); + break; + case IMPL_QTREE: + q_tree_remove(tree, &keys[i]); + break; + default: + g_assert_not_reached(); + } + } + break; + case OP_REMOVE_ALL: + remove_all(tree, impl); + break; + case OP_TRAVERSE: + switch (impl) { + case IMPL_GTREE: + g_tree_foreach(tree, traverse_func, NULL); + break; + case IMPL_QTREE: + q_tree_foreach(tree, traverse_func, NULL); + break; + default: + g_assert_not_reached(); + } + break; + default: + g_assert_not_reached(); + } + int64_t ns = get_clock() - start_ns; + + if (bench->op != OP_REMOVE_ALL) { + remove_all(tree, impl); + } + g_free(keys); + + return ns; +} + +int main(int argc, char *argv[]) +{ + size_t sizes[] = { + 32, + 1024, + 1024 * 4, + 1024 * 128, + 1024 * 1024, + }; + + double res[ARRAY_SIZE(benchmarks)][ARRAY_SIZE(impls)][ARRAY_SIZE(sizes)]; + for (int i = 0; i < ARRAY_SIZE(sizes); i++) { + size_t size = sizes[i]; + for (int j = 0; j < ARRAY_SIZE(impls); j++) { + const struct tree_implementation *impl = &impls[j]; + for (int k = 0; k < ARRAY_SIZE(benchmarks); k++) { + const struct benchmark *bench = &benchmarks[k]; + + /* warm-up run */ + run_benchmark(bench, impl->type, size); + + int64_t total_ns = 0; + int64_t n_runs = 0; + while (total_ns < 2e8 || n_runs < 5) { + total_ns += run_benchmark(bench, impl->type, size); + n_runs++; + } + double ns_per_run = (double)total_ns / n_runs; + + /* Throughput, in Mops/s */ + res[k][j][i] = size / ns_per_run * 1e3; + } + } + } + + printf("# Results' breakdown: Tree, Op and #Elements. 
Units: Mops/s\n"); + printf("%5s %10s ", "Tree", "Op"); + for (int i = 0; i < ARRAY_SIZE(sizes); i++) { + printf("%7zu ", sizes[i]); + } + printf("\n"); + char separator[97]; + for (int i = 0; i < ARRAY_SIZE(separator) - 1; i++) { + separator[i] = '-'; + } + separator[ARRAY_SIZE(separator) - 1] = '\0'; + printf("%s\n", separator); + for (int i = 0; i < ARRAY_SIZE(benchmarks); i++) { + for (int j = 0; j < ARRAY_SIZE(impls); j++) { + printf("%5s %10s ", impls[j].name, benchmarks[i].name); + for (int k = 0; k < ARRAY_SIZE(sizes); k++) { + printf("%7.2f ", res[i][j][k]); + if (j == 0) { + printf(" "); + } else { + if (res[i][0][k] != 0) { + double speedup = res[i][j][k] / res[i][0][k]; + printf("(%4.2fx) ", speedup); + } else { + printf("( ) "); + } + } + } + printf("\n"); + } + } + printf("%s\n", separator); + return 0; +} diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker index 5308ccb8fe..b99d99f943 100644 --- a/tests/docker/dockerfiles/debian-hexagon-cross.docker +++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker @@ -27,7 +27,7 @@ RUN apt-get update && \ ENV TOOLCHAIN_INSTALL /opt -ENV TOOLCHAIN_RELEASE 15.0.3 +ENV TOOLCHAIN_RELEASE 16.0.0 ENV TOOLCHAIN_BASENAME "clang+llvm-${TOOLCHAIN_RELEASE}-cross-hexagon-unknown-linux-musl" ENV TOOLCHAIN_URL https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v${TOOLCHAIN_RELEASE}/${TOOLCHAIN_BASENAME}.tar.xz diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build index a162f683ef..9735071a29 100644 --- a/tests/qemu-iotests/meson.build +++ b/tests/qemu-iotests/meson.build @@ -47,19 +47,20 @@ foreach format, speed: qemu_iotests_formats endif rc = run_command( - [qemu_iotests_check_cmd] + args + ['-n'], + [python, qemu_iotests_check_cmd] + args + ['-n'], check: true, ) foreach item: rc.stdout().strip().split() - args = ['-tap', '-' + format, item, + args = [qemu_iotests_check_cmd, + '-tap', '-' + format, item, '--source-dir', meson.current_source_dir(), '--build-dir', meson.current_build_dir()] # Some individual tests take as long as 45 seconds # Bump the timeout to 3 minutes for some headroom # on slow machines to minimize spurious failures test('io-' + format + '-' + item, - qemu_iotests_check_cmd, + python, args: args, depends: qemu_iotests_binaries, env: qemu_iotests_env, diff --git a/tests/unit/meson.build b/tests/unit/meson.build index fa63cfe6ff..3bc78d8660 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -36,6 +36,7 @@ tests = { 'test-rcu-slist': [], 'test-qdist': [], 'test-qht': [], + 'test-qtree': [], 'test-bitops': [], 'test-bitcnt': [], 'test-qgraph': ['../qtest/libqos/qgraph.c'], diff --git a/tests/unit/test-qtree.c b/tests/unit/test-qtree.c new file mode 100644 index 0000000000..4d836d22c7 --- /dev/null +++ b/tests/unit/test-qtree.c @@ -0,0 +1,333 @@ +/* + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * Tests for QTree. + * Original source: glib + * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c + * LGPL license. 
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + */ + +#include "qemu/osdep.h" +#include "qemu/qtree.h" + +static gint my_compare(gconstpointer a, gconstpointer b) +{ + const char *cha = a; + const char *chb = b; + + return *cha - *chb; +} + +static gint my_compare_with_data(gconstpointer a, + gconstpointer b, + gpointer user_data) +{ + const char *cha = a; + const char *chb = b; + + /* just check that we got the right data */ + g_assert(GPOINTER_TO_INT(user_data) == 123); + + return *cha - *chb; +} + +static gint my_search(gconstpointer a, gconstpointer b) +{ + return my_compare(b, a); +} + +static gpointer destroyed_key; +static gpointer destroyed_value; +static guint destroyed_key_count; +static guint destroyed_value_count; + +static void my_key_destroy(gpointer key) +{ + destroyed_key = key; + destroyed_key_count++; +} + +static void my_value_destroy(gpointer value) +{ + destroyed_value = value; + destroyed_value_count++; +} + +static gint my_traverse(gpointer key, gpointer value, gpointer data) +{ + char *ch = key; + + g_assert((*ch) > 0); + + if (*ch == 'd') { + return TRUE; + } + + return FALSE; +} + +char chars[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + +char chars2[] = + "0123456789" + "abcdefghijklmnopqrstuvwxyz"; + +static gint check_order(gpointer key, gpointer value, gpointer data) +{ + char **p = data; + char *ch = key; + + g_assert(**p == *ch); + + (*p)++; + + return FALSE; +} + +static void test_tree_search(void) +{ + gint i; + QTree *tree; + gboolean removed; + gchar c; + gchar *p, *d; + + tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123)); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + q_tree_foreach(tree, my_traverse, NULL); + + g_assert(q_tree_nnodes(tree) == strlen(chars)); + g_assert(q_tree_height(tree) == 6); + + p = chars; + q_tree_foreach(tree, check_order, &p); + + for (i = 0; i < 26; i++) { + removed = q_tree_remove(tree, &chars[i + 10]); + g_assert(removed); + } + + c = '\0'; + removed = q_tree_remove(tree, &c); + g_assert(!removed); + + q_tree_foreach(tree, my_traverse, NULL); + + g_assert(q_tree_nnodes(tree) == strlen(chars2)); + g_assert(q_tree_height(tree) == 6); + + p = chars2; + q_tree_foreach(tree, check_order, &p); + + for (i = 25; i >= 0; i--) { + q_tree_insert(tree, &chars[i + 10], &chars[i + 10]); + } + + p = chars; + q_tree_foreach(tree, check_order, &p); + + c = '0'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p)); + g_assert(c == *d && c == *p); + + c = 'A'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = 'a'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = 'z'; + p = q_tree_lookup(tree, &c); + g_assert(p && *p == c); + + c = '!'; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '='; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '|'; + p = q_tree_lookup(tree, &c); + g_assert(p == NULL); + + c = '0'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'A'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'a'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = 'z'; + p = q_tree_search(tree, my_search, &c); + g_assert(p && *p == c); + + c = '!'; + p = q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + c = '='; + p = q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + c = '|'; + p 
= q_tree_search(tree, my_search, &c); + g_assert(p == NULL); + + q_tree_destroy(tree); +} + +static void test_tree_remove(void) +{ + QTree *tree; + char c, d; + gint i; + gboolean removed; + + tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL, + my_key_destroy, + my_value_destroy); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + c = '0'; + q_tree_insert(tree, &c, &c); + g_assert(destroyed_key == &c); + g_assert(destroyed_value == &chars[0]); + destroyed_key = NULL; + destroyed_value = NULL; + + d = '1'; + q_tree_replace(tree, &d, &d); + g_assert(destroyed_key == &chars[1]); + g_assert(destroyed_value == &chars[1]); + destroyed_key = NULL; + destroyed_value = NULL; + + c = '2'; + removed = q_tree_remove(tree, &c); + g_assert(removed); + g_assert(destroyed_key == &chars[2]); + g_assert(destroyed_value == &chars[2]); + destroyed_key = NULL; + destroyed_value = NULL; + + c = '3'; + removed = q_tree_steal(tree, &c); + g_assert(removed); + g_assert(destroyed_key == NULL); + g_assert(destroyed_value == NULL); + + const gchar *remove = "omkjigfedba"; + for (i = 0; remove[i]; i++) { + removed = q_tree_remove(tree, &remove[i]); + g_assert(removed); + } + + q_tree_destroy(tree); +} + +static void test_tree_destroy(void) +{ + QTree *tree; + gint i; + + tree = q_tree_new(my_compare); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + + g_assert(q_tree_nnodes(tree) == strlen(chars)); + + g_test_message("nnodes: %d", q_tree_nnodes(tree)); + q_tree_ref(tree); + q_tree_destroy(tree); + + g_test_message("nnodes: %d", q_tree_nnodes(tree)); + g_assert(q_tree_nnodes(tree) == 0); + + q_tree_unref(tree); +} + +static void test_tree_insert(void) +{ + QTree *tree; + gchar *p; + gint i; + gchar *scrambled; + + tree = q_tree_new(my_compare); + + for (i = 0; chars[i]; i++) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + q_tree_unref(tree); + tree = q_tree_new(my_compare); + + for (i = strlen(chars) - 1; i >= 0; i--) { + q_tree_insert(tree, &chars[i], &chars[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + q_tree_unref(tree); + tree = q_tree_new(my_compare); + + scrambled = g_strdup(chars); + + for (i = 0; i < 30; i++) { + gchar tmp; + gint a, b; + + a = g_random_int_range(0, strlen(scrambled)); + b = g_random_int_range(0, strlen(scrambled)); + tmp = scrambled[a]; + scrambled[a] = scrambled[b]; + scrambled[b] = tmp; + } + + for (i = 0; scrambled[i]; i++) { + q_tree_insert(tree, &scrambled[i], &scrambled[i]); + } + p = chars; + q_tree_foreach(tree, check_order, &p); + + g_free(scrambled); + q_tree_unref(tree); +} + +int main(int argc, char *argv[]) +{ + g_test_init(&argc, &argv, NULL); + + g_test_add_func("/qtree/search", test_tree_search); + g_test_add_func("/qtree/remove", test_tree_remove); + g_test_add_func("/qtree/destroy", test_tree_destroy); + g_test_add_func("/qtree/insert", test_tree_insert); + + return g_test_run(); +} diff --git a/tests/vm/netbsd b/tests/vm/netbsd index aa54338dfa..0b9536ca17 100755 --- a/tests/vm/netbsd +++ b/tests/vm/netbsd @@ -30,7 +30,6 @@ class NetBSDVM(basevm.BaseVM): "git-base", "pkgconf", "xz", - "python37", "ninja-build", # gnu tools @@ -66,7 +65,7 @@ class NetBSDVM(basevm.BaseVM): mkdir src build; cd src; tar -xf /dev/rld1a; cd ../build - ../src/configure --python=python3.7 --disable-opengl {configure_opts}; + ../src/configure --disable-opengl {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ poweroff = 
"/sbin/poweroff" diff --git a/ui/spice-core.c b/ui/spice-core.c index 67cfd3ca9c..52a59386d7 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -149,7 +149,7 @@ static void watch_remove(SpiceWatch *watch) qemu_set_fd_handler(watch->fd, NULL, NULL, NULL); #ifdef WIN32 /* SOCKET is owned by spice */ - qemu_close_to_socket(watch->fd); + qemu_close_socket_osfhandle(watch->fd); #endif g_free(watch); } diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index e11a8a022e..1683aa1105 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -127,6 +127,8 @@ static bool fdmon_epoll_try_enable(AioContext *ctx) bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) { + bool ok; + if (ctx->epollfd < 0) { return false; } @@ -136,14 +138,23 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) return false; } - if (npfd >= EPOLL_ENABLE_THRESHOLD) { - if (fdmon_epoll_try_enable(ctx)) { - return true; - } else { - fdmon_epoll_disable(ctx); - } + if (npfd < EPOLL_ENABLE_THRESHOLD) { + return false; + } + + /* The list must not change while we add fds to epoll */ + if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) { + return false; + } + + ok = fdmon_epoll_try_enable(ctx); + + qemu_lockcnt_inc_and_unlock(&ctx->list_lock); + + if (!ok) { + fdmon_epoll_disable(ctx); } - return false; + return ok; } void fdmon_epoll_setup(AioContext *ctx) diff --git a/util/meson.build b/util/meson.build index 26c73e586b..3c2cfc6ede 100644 --- a/util/meson.build +++ b/util/meson.build @@ -26,6 +26,7 @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c')) util_ss.add(when: 'CONFIG_WIN32', if_true: winmm) util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch) +util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c')) util_ss.add(files('envlist.c', 'path.c', 'module.c')) util_ss.add(files('host-utils.c')) util_ss.add(files('bitmap.c', 'bitops.c')) diff --git a/util/qtree.c b/util/qtree.c new file mode 100644 index 0000000000..31f0b46182 --- /dev/null +++ b/util/qtree.c @@ -0,0 +1,1390 @@ +/* + * GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. 
+ */ + +/* + * MT safe + */ + +#include "qemu/osdep.h" +#include "qemu/qtree.h" + +/** + * SECTION:trees-binary + * @title: Balanced Binary Trees + * @short_description: a sorted collection of key/value pairs optimized + * for searching and traversing in order + * + * The #QTree structure and its associated functions provide a sorted + * collection of key/value pairs optimized for searching and traversing + * in order. This means that most of the operations (access, search, + * insertion, deletion, ...) on #QTree are O(log(n)) in average and O(n) + * in worst case for time complexity. But, note that maintaining a + * balanced sorted #QTree of n elements is done in time O(n log(n)). + * + * To create a new #QTree use q_tree_new(). + * + * To insert a key/value pair into a #QTree use q_tree_insert() + * (O(n log(n))). + * + * To remove a key/value pair use q_tree_remove() (O(n log(n))). + * + * To look up the value corresponding to a given key, use + * q_tree_lookup() and q_tree_lookup_extended(). + * + * To find out the number of nodes in a #QTree, use q_tree_nnodes(). To + * get the height of a #QTree, use q_tree_height(). + * + * To traverse a #QTree, calling a function for each node visited in + * the traversal, use q_tree_foreach(). + * + * To destroy a #QTree, use q_tree_destroy(). + **/ + +#define MAX_GTREE_HEIGHT 40 + +/** + * QTree: + * + * The QTree struct is an opaque data structure representing a + * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be + * accessed only by using the following functions. + */ +struct _QTree { + QTreeNode *root; + GCompareDataFunc key_compare; + GDestroyNotify key_destroy_func; + GDestroyNotify value_destroy_func; + gpointer key_compare_data; + guint nnodes; + gint ref_count; +}; + +struct _QTreeNode { + gpointer key; /* key for this node */ + gpointer value; /* value stored at this node */ + QTreeNode *left; /* left subtree */ + QTreeNode *right; /* right subtree */ + gint8 balance; /* height (right) - height (left) */ + guint8 left_child; + guint8 right_child; +}; + + +static QTreeNode *q_tree_node_new(gpointer key, + gpointer value); +static QTreeNode *q_tree_insert_internal(QTree *tree, + gpointer key, + gpointer value, + gboolean replace); +static gboolean q_tree_remove_internal(QTree *tree, + gconstpointer key, + gboolean steal); +static QTreeNode *q_tree_node_balance(QTreeNode *node); +static QTreeNode *q_tree_find_node(QTree *tree, + gconstpointer key); +static QTreeNode *q_tree_node_search(QTreeNode *node, + GCompareFunc search_func, + gconstpointer data); +static QTreeNode *q_tree_node_rotate_left(QTreeNode *node); +static QTreeNode *q_tree_node_rotate_right(QTreeNode *node); +#ifdef Q_TREE_DEBUG +static void q_tree_node_check(QTreeNode *node); +#endif + +static QTreeNode* +q_tree_node_new(gpointer key, + gpointer value) +{ + QTreeNode *node = g_new(QTreeNode, 1); + + node->balance = 0; + node->left = NULL; + node->right = NULL; + node->left_child = FALSE; + node->right_child = FALSE; + node->key = key; + node->value = value; + + return node; +} + +/** + * q_tree_new: + * @key_compare_func: the function used to order the nodes in the #QTree. + * It should return values similar to the standard strcmp() function - + * 0 if the two arguments are equal, a negative value if the first argument + * comes before the second, or a positive value if the first argument comes + * after the second. + * + * Creates a new #QTree. 
+ * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new(GCompareFunc key_compare_func) +{ + g_return_val_if_fail(key_compare_func != NULL, NULL); + + return q_tree_new_full((GCompareDataFunc) key_compare_func, NULL, + NULL, NULL); +} + +/** + * q_tree_new_with_data: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * + * Creates a new #QTree with a comparison function that accepts user data. + * See q_tree_new() for more details. + * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new_with_data(GCompareDataFunc key_compare_func, + gpointer key_compare_data) +{ + g_return_val_if_fail(key_compare_func != NULL, NULL); + + return q_tree_new_full(key_compare_func, key_compare_data, + NULL, NULL); +} + +/** + * q_tree_new_full: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * @key_destroy_func: a function to free the memory allocated for the key + * used when removing the entry from the #QTree or %NULL if you don't + * want to supply such a function + * @value_destroy_func: a function to free the memory allocated for the + * value used when removing the entry from the #QTree or %NULL if you + * don't want to supply such a function + * + * Creates a new #QTree like q_tree_new() and allows to specify functions + * to free the memory allocated for the key and value that get called when + * removing the entry from the #QTree. + * + * Returns: a newly allocated #QTree + */ +QTree * +q_tree_new_full(GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) +{ + QTree *tree; + + g_return_val_if_fail(key_compare_func != NULL, NULL); + + tree = g_new(QTree, 1); + tree->root = NULL; + tree->key_compare = key_compare_func; + tree->key_destroy_func = key_destroy_func; + tree->value_destroy_func = value_destroy_func; + tree->key_compare_data = key_compare_data; + tree->nnodes = 0; + tree->ref_count = 1; + + return tree; +} + +/** + * q_tree_node_first: + * @tree: a #QTree + * + * Returns the first in-order node of the tree, or %NULL + * for an empty tree. + * + * Returns: (nullable) (transfer none): the first node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_node_first(QTree *tree) +{ + QTreeNode *tmp; + + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + return NULL; + } + + tmp = tree->root; + + while (tmp->left_child) { + tmp = tmp->left; + } + + return tmp; +} + +/** + * q_tree_node_previous + * @node: a #QTree node + * + * Returns the previous in-order node of the tree, or %NULL + * if the passed node was already the first one. + * + * Returns: (nullable) (transfer none): the previous node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_node_previous(QTreeNode *node) +{ + QTreeNode *tmp; + + g_return_val_if_fail(node != NULL, NULL); + + tmp = node->left; + + if (node->left_child) { + while (tmp->right_child) { + tmp = tmp->right; + } + } + + return tmp; +} + +/** + * q_tree_node_next + * @node: a #QTree node + * + * Returns the next in-order node of the tree, or %NULL + * if the passed node was already the last one. + * + * Returns: (nullable) (transfer none): the next node in the tree + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. 
+ */ +static QTreeNode * +q_tree_node_next(QTreeNode *node) +{ + QTreeNode *tmp; + + g_return_val_if_fail(node != NULL, NULL); + + tmp = node->right; + + if (node->right_child) { + while (tmp->left_child) { + tmp = tmp->left; + } + } + + return tmp; +} + +/** + * q_tree_remove_all: + * @tree: a #QTree + * + * Removes all nodes from a #QTree and destroys their keys and values, + * then resets the #QTree’s root to %NULL. + * + * Since: 2.70 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static void QEMU_DISABLE_CFI +q_tree_remove_all(QTree *tree) +{ + QTreeNode *node; + QTreeNode *next; + + g_return_if_fail(tree != NULL); + + node = q_tree_node_first(tree); + + while (node) { + next = q_tree_node_next(node); + + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + g_free(node); + +#ifdef Q_TREE_DEBUG + g_assert(tree->nnodes > 0); + tree->nnodes--; +#endif + + node = next; + } + +#ifdef Q_TREE_DEBUG + g_assert(tree->nnodes == 0); +#endif + + tree->root = NULL; +#ifndef Q_TREE_DEBUG + tree->nnodes = 0; +#endif +} + +/** + * q_tree_ref: + * @tree: a #QTree + * + * Increments the reference count of @tree by one. + * + * It is safe to call this function from any thread. + * + * Returns: the passed in #QTree + * + * Since: 2.22 + */ +QTree * +q_tree_ref(QTree *tree) +{ + g_return_val_if_fail(tree != NULL, NULL); + + g_atomic_int_inc(&tree->ref_count); + + return tree; +} + +/** + * q_tree_unref: + * @tree: a #QTree + * + * Decrements the reference count of @tree by one. + * If the reference count drops to 0, all keys and values will + * be destroyed (if destroy functions were specified) and all + * memory allocated by @tree will be released. + * + * It is safe to call this function from any thread. + * + * Since: 2.22 + */ +void +q_tree_unref(QTree *tree) +{ + g_return_if_fail(tree != NULL); + + if (g_atomic_int_dec_and_test(&tree->ref_count)) { + q_tree_remove_all(tree); + g_free(tree); + } +} + +/** + * q_tree_destroy: + * @tree: a #QTree + * + * Removes all keys and values from the #QTree and decreases its + * reference count by one. If keys and/or values are dynamically + * allocated, you should either free them first or create the #QTree + * using q_tree_new_full(). In the latter case the destroy functions + * you supplied will be called on all keys and values before destroying + * the #QTree. + */ +void +q_tree_destroy(QTree *tree) +{ + g_return_if_fail(tree != NULL); + + q_tree_remove_all(tree); + q_tree_unref(tree); +} + +/** + * q_tree_insert_node: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a key/value pair into a #QTree. + * + * If the given key already exists in the #QTree its corresponding value + * is set to the new value. If you supplied a @value_destroy_func when + * creating the #QTree, the old value is freed using that function. If + * you supplied a @key_destroy_func when creating the #QTree, the passed + * key is freed using that function. + * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. + * The cost of maintaining a balanced tree while inserting new key/value + * result in a O(n log(n)) operation where most of the other operations + * are O(log(n)). + * + * Returns: (transfer none): the inserted (or set) node. + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. 
+ */ +static QTreeNode * +q_tree_insert_node(QTree *tree, + gpointer key, + gpointer value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, NULL); + + node = q_tree_insert_internal(tree, key, value, FALSE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return node; +} + +/** + * q_tree_insert: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a key/value pair into a #QTree. + * + * Inserts a new key and value into a #QTree as q_tree_insert_node() does, + * only this function does not return the inserted or set node. + */ +void +q_tree_insert(QTree *tree, + gpointer key, + gpointer value) +{ + q_tree_insert_node(tree, key, value); +} + +/** + * q_tree_replace_node: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a new key and value into a #QTree similar to q_tree_insert_node(). + * The difference is that if the key already exists in the #QTree, it gets + * replaced by the new key. If you supplied a @value_destroy_func when + * creating the #QTree, the old value is freed using that function. If you + * supplied a @key_destroy_func when creating the #QTree, the old key is + * freed using that function. + * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. + * + * Returns: (transfer none): the inserted (or set) node. + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_replace_node(QTree *tree, + gpointer key, + gpointer value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, NULL); + + node = q_tree_insert_internal(tree, key, value, TRUE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return node; +} + +/** + * q_tree_replace: + * @tree: a #QTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a new key and value into a #QTree as q_tree_replace_node() does, + * only this function does not return the inserted or set node. 
+ */ +void +q_tree_replace(QTree *tree, + gpointer key, + gpointer value) +{ + q_tree_replace_node(tree, key, value); +} + +/* internal insert routine */ +static QTreeNode * QEMU_DISABLE_CFI +q_tree_insert_internal(QTree *tree, + gpointer key, + gpointer value, + gboolean replace) +{ + QTreeNode *node, *retnode; + QTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + tree->root = q_tree_node_new(key, value); + tree->nnodes++; + return tree->root; + } + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) { + int cmp = tree->key_compare(key, node->key, tree->key_compare_data); + + if (cmp == 0) { + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + + node->value = value; + + if (replace) { + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + + node->key = key; + } else { + /* free the passed key */ + if (tree->key_destroy_func) { + tree->key_destroy_func(key); + } + } + + return node; + } else if (cmp < 0) { + if (node->left_child) { + path[idx++] = node; + node = node->left; + } else { + QTreeNode *child = q_tree_node_new(key, value); + + child->left = node->left; + child->right = node; + node->left = child; + node->left_child = TRUE; + node->balance -= 1; + + tree->nnodes++; + + retnode = child; + break; + } + } else { + if (node->right_child) { + path[idx++] = node; + node = node->right; + } else { + QTreeNode *child = q_tree_node_new(key, value); + + child->right = node->right; + child->left = node; + node->right = child; + node->right_child = TRUE; + node->balance += 1; + + tree->nnodes++; + + retnode = child; + break; + } + } + } + + /* + * Restore balance. This is the goodness of a non-recursive + * implementation, when we are done with balancing we 'break' + * the loop and we are done. + */ + while (1) { + QTreeNode *bparent = path[--idx]; + gboolean left_node = (bparent && node == bparent->left); + g_assert(!bparent || bparent->left == node || bparent->right == node); + + if (node->balance < -1 || node->balance > 1) { + node = q_tree_node_balance(node); + if (bparent == NULL) { + tree->root = node; + } else if (left_node) { + bparent->left = node; + } else { + bparent->right = node; + } + } + + if (node->balance == 0 || bparent == NULL) { + break; + } + + if (left_node) { + bparent->balance -= 1; + } else { + bparent->balance += 1; + } + + node = bparent; + } + + return retnode; +} + +/** + * q_tree_remove: + * @tree: a #QTree + * @key: the key to remove + * + * Removes a key/value pair from a #QTree. + * + * If the #QTree was created using q_tree_new_full(), the key and value + * are freed using the supplied destroy functions, otherwise you have to + * make sure that any dynamically allocated values are freed yourself. + * If the key does not exist in the #QTree, the function does nothing. + * + * The cost of maintaining a balanced tree while removing a key/value + * result in a O(n log(n)) operation where most of the other operations + * are O(log(n)). 
+ * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean +q_tree_remove(QTree *tree, + gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail(tree != NULL, FALSE); + + removed = q_tree_remove_internal(tree, key, FALSE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return removed; +} + +/** + * q_tree_steal: + * @tree: a #QTree + * @key: the key to remove + * + * Removes a key and its associated value from a #QTree without calling + * the key and value destroy functions. + * + * If the key does not exist in the #QTree, the function does nothing. + * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean +q_tree_steal(QTree *tree, + gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail(tree != NULL, FALSE); + + removed = q_tree_remove_internal(tree, key, TRUE); + +#ifdef Q_TREE_DEBUG + q_tree_node_check(tree->root); +#endif + + return removed; +} + +/* internal remove routine */ +static gboolean QEMU_DISABLE_CFI +q_tree_remove_internal(QTree *tree, + gconstpointer key, + gboolean steal) +{ + QTreeNode *node, *parent, *balance; + QTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + gboolean left_node; + + g_return_val_if_fail(tree != NULL, FALSE); + + if (!tree->root) { + return FALSE; + } + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) { + int cmp = tree->key_compare(key, node->key, tree->key_compare_data); + + if (cmp == 0) { + break; + } else if (cmp < 0) { + if (!node->left_child) { + return FALSE; + } + + path[idx++] = node; + node = node->left; + } else { + if (!node->right_child) { + return FALSE; + } + + path[idx++] = node; + node = node->right; + } + } + + /* + * The following code is almost equal to q_tree_remove_node, + * except that we do not have to call q_tree_node_parent. + */ + balance = parent = path[--idx]; + g_assert(!parent || parent->left == node || parent->right == node); + left_node = (parent && node == parent->left); + + if (!node->left_child) { + if (!node->right_child) { + if (!parent) { + tree->root = NULL; + } else if (left_node) { + parent->left_child = FALSE; + parent->left = node->left; + parent->balance += 1; + } else { + parent->right_child = FALSE; + parent->right = node->right; + parent->balance -= 1; + } + } else { + /* node has a right child */ + QTreeNode *tmp = q_tree_node_next(node); + tmp->left = node->left; + + if (!parent) { + tree->root = node->right; + } else if (left_node) { + parent->left = node->right; + parent->balance += 1; + } else { + parent->right = node->right; + parent->balance -= 1; + } + } + } else { + /* node has a left child */ + if (!node->right_child) { + QTreeNode *tmp = q_tree_node_previous(node); + tmp->right = node->right; + + if (parent == NULL) { + tree->root = node->left; + } else if (left_node) { + parent->left = node->left; + parent->balance += 1; + } else { + parent->right = node->left; + parent->balance -= 1; + } + } else { + /* node has a both children (pant, pant!) 
*/ + QTreeNode *prev = node->left; + QTreeNode *next = node->right; + QTreeNode *nextp = node; + int old_idx = idx + 1; + idx++; + + /* path[idx] == parent */ + /* find the immediately next node (and its parent) */ + while (next->left_child) { + path[++idx] = nextp = next; + next = next->left; + } + + path[old_idx] = next; + balance = path[idx]; + + /* remove 'next' from the tree */ + if (nextp != node) { + if (next->right_child) { + nextp->left = next->right; + } else { + nextp->left_child = FALSE; + } + nextp->balance += 1; + + next->right_child = TRUE; + next->right = node->right; + } else { + node->balance -= 1; + } + + /* set the prev to point to the right place */ + while (prev->right_child) { + prev = prev->right; + } + prev->right = next; + + /* prepare 'next' to replace 'node' */ + next->left_child = TRUE; + next->left = node->left; + next->balance = node->balance; + + if (!parent) { + tree->root = next; + } else if (left_node) { + parent->left = next; + } else { + parent->right = next; + } + } + } + + /* restore balance */ + if (balance) { + while (1) { + QTreeNode *bparent = path[--idx]; + g_assert(!bparent || + bparent->left == balance || + bparent->right == balance); + left_node = (bparent && balance == bparent->left); + + if (balance->balance < -1 || balance->balance > 1) { + balance = q_tree_node_balance(balance); + if (!bparent) { + tree->root = balance; + } else if (left_node) { + bparent->left = balance; + } else { + bparent->right = balance; + } + } + + if (balance->balance != 0 || !bparent) { + break; + } + + if (left_node) { + bparent->balance += 1; + } else { + bparent->balance -= 1; + } + + balance = bparent; + } + } + + if (!steal) { + if (tree->key_destroy_func) { + tree->key_destroy_func(node->key); + } + if (tree->value_destroy_func) { + tree->value_destroy_func(node->value); + } + } + + g_free(node); + + tree->nnodes--; + + return TRUE; +} + +/** + * q_tree_lookup_node: + * @tree: a #QTree + * @key: the key to look up + * + * Gets the tree node corresponding to the given key. Since a #QTree is + * automatically balanced as key/value pairs are added, key lookup + * is O(log n) (where n is the number of key/value pairs in the tree). + * + * Returns: (nullable) (transfer none): the tree node corresponding to + * the key, or %NULL if the key was not found + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_lookup_node(QTree *tree, + gconstpointer key) +{ + g_return_val_if_fail(tree != NULL, NULL); + + return q_tree_find_node(tree, key); +} + +/** + * q_tree_lookup: + * @tree: a #QTree + * @key: the key to look up + * + * Gets the value corresponding to the given key. Since a #QTree is + * automatically balanced as key/value pairs are added, key lookup + * is O(log n) (where n is the number of key/value pairs in the tree). + * + * Returns: the value corresponding to the key, or %NULL + * if the key was not found + */ +gpointer +q_tree_lookup(QTree *tree, + gconstpointer key) +{ + QTreeNode *node; + + node = q_tree_lookup_node(tree, key); + + return node ? node->value : NULL; +} + +/** + * q_tree_lookup_extended: + * @tree: a #QTree + * @lookup_key: the key to look up + * @orig_key: (out) (optional) (nullable): returns the original key + * @value: (out) (optional) (nullable): returns the value associated with + * the key + * + * Looks up a key in the #QTree, returning the original key and the + * associated value. 
This is useful if you need to free the memory + * allocated for the original key, for example before calling + * q_tree_remove(). + * + * Returns: %TRUE if the key was found in the #QTree + */ +gboolean +q_tree_lookup_extended(QTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value) +{ + QTreeNode *node; + + g_return_val_if_fail(tree != NULL, FALSE); + + node = q_tree_find_node(tree, lookup_key); + + if (node) { + if (orig_key) { + *orig_key = node->key; + } + if (value) { + *value = node->value; + } + return TRUE; + } else { + return FALSE; + } +} + +/** + * q_tree_foreach: + * @tree: a #QTree + * @func: the function to call for each node visited. + * If this function returns %TRUE, the traversal is stopped. + * @user_data: user data to pass to the function + * + * Calls the given function for each of the key/value pairs in the #QTree. + * The function is passed the key and value of each pair, and the given + * @data parameter. The tree is traversed in sorted order. + * + * The tree may not be modified while iterating over it (you can't + * add/remove items). To remove all items matching a predicate, you need + * to add each item to a list in your #GTraverseFunc as you walk over + * the tree, then walk the list and remove each item. + */ +void +q_tree_foreach(QTree *tree, + GTraverseFunc func, + gpointer user_data) +{ + QTreeNode *node; + + g_return_if_fail(tree != NULL); + + if (!tree->root) { + return; + } + + node = q_tree_node_first(tree); + + while (node) { + if ((*func)(node->key, node->value, user_data)) { + break; + } + + node = q_tree_node_next(node); + } +} + +/** + * q_tree_search_node: + * @tree: a #QTree + * @search_func: a function used to search the #QTree + * @user_data: the data passed as the second argument to @search_func + * + * Searches a #QTree using @search_func. + * + * The @search_func is called with a pointer to the key of a key/value + * pair in the tree, and the passed in @user_data. If @search_func returns + * 0 for a key/value pair, then the corresponding node is returned as + * the result of q_tree_search(). If @search_func returns -1, searching + * will proceed among the key/value pairs that have a smaller key; if + * @search_func returns 1, searching will proceed among the key/value + * pairs that have a larger key. + * + * Returns: (nullable) (transfer none): the node corresponding to the + * found key, or %NULL if the key was not found + * + * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API. + */ +static QTreeNode * +q_tree_search_node(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + g_return_val_if_fail(tree != NULL, NULL); + + if (!tree->root) { + return NULL; + } + + return q_tree_node_search(tree->root, search_func, user_data); +} + +/** + * q_tree_search: + * @tree: a #QTree + * @search_func: a function used to search the #QTree + * @user_data: the data passed as the second argument to @search_func + * + * Searches a #QTree using @search_func. + * + * The @search_func is called with a pointer to the key of a key/value + * pair in the tree, and the passed in @user_data. If @search_func returns + * 0 for a key/value pair, then the corresponding value is returned as + * the result of q_tree_search(). If @search_func returns -1, searching + * will proceed among the key/value pairs that have a smaller key; if + * @search_func returns 1, searching will proceed among the key/value + * pairs that have a larger key. 
+ * + * Returns: the value corresponding to the found key, or %NULL + * if the key was not found + */ +gpointer +q_tree_search(QTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + QTreeNode *node; + + node = q_tree_search_node(tree, search_func, user_data); + + return node ? node->value : NULL; +} + +/** + * q_tree_height: + * @tree: a #QTree + * + * Gets the height of a #QTree. + * + * If the #QTree contains no nodes, the height is 0. + * If the #QTree contains only one root node the height is 1. + * If the root node has children the height is 2, etc. + * + * Returns: the height of @tree + */ +gint +q_tree_height(QTree *tree) +{ + QTreeNode *node; + gint height; + + g_return_val_if_fail(tree != NULL, 0); + + if (!tree->root) { + return 0; + } + + height = 0; + node = tree->root; + + while (1) { + height += 1 + MAX(node->balance, 0); + + if (!node->left_child) { + return height; + } + + node = node->left; + } +} + +/** + * q_tree_nnodes: + * @tree: a #QTree + * + * Gets the number of nodes in a #QTree. + * + * Returns: the number of nodes in @tree + */ +gint +q_tree_nnodes(QTree *tree) +{ + g_return_val_if_fail(tree != NULL, 0); + + return tree->nnodes; +} + +static QTreeNode * +q_tree_node_balance(QTreeNode *node) +{ + if (node->balance < -1) { + if (node->left->balance > 0) { + node->left = q_tree_node_rotate_left(node->left); + } + node = q_tree_node_rotate_right(node); + } else if (node->balance > 1) { + if (node->right->balance < 0) { + node->right = q_tree_node_rotate_right(node->right); + } + node = q_tree_node_rotate_left(node); + } + + return node; +} + +static QTreeNode * QEMU_DISABLE_CFI +q_tree_find_node(QTree *tree, + gconstpointer key) +{ + QTreeNode *node; + gint cmp; + + node = tree->root; + if (!node) { + return NULL; + } + + while (1) { + cmp = tree->key_compare(key, node->key, tree->key_compare_data); + if (cmp == 0) { + return node; + } else if (cmp < 0) { + if (!node->left_child) { + return NULL; + } + + node = node->left; + } else { + if (!node->right_child) { + return NULL; + } + + node = node->right; + } + } +} + +static QTreeNode * +q_tree_node_search(QTreeNode *node, + GCompareFunc search_func, + gconstpointer data) +{ + gint dir; + + if (!node) { + return NULL; + } + + while (1) { + dir = (*search_func)(node->key, data); + if (dir == 0) { + return node; + } else if (dir < 0) { + if (!node->left_child) { + return NULL; + } + + node = node->left; + } else { + if (!node->right_child) { + return NULL; + } + + node = node->right; + } + } +} + +static QTreeNode * +q_tree_node_rotate_left(QTreeNode *node) +{ + QTreeNode *right; + gint a_bal; + gint b_bal; + + right = node->right; + + if (right->left_child) { + node->right = right->left; + } else { + node->right_child = FALSE; + right->left_child = TRUE; + } + right->left = node; + + a_bal = node->balance; + b_bal = right->balance; + + if (b_bal <= 0) { + if (a_bal >= 1) { + right->balance = b_bal - 1; + } else { + right->balance = a_bal + b_bal - 2; + } + node->balance = a_bal - 1; + } else { + if (a_bal <= b_bal) { + right->balance = a_bal - 2; + } else { + right->balance = b_bal - 1; + } + node->balance = a_bal - b_bal - 1; + } + + return right; +} + +static QTreeNode * +q_tree_node_rotate_right(QTreeNode *node) +{ + QTreeNode *left; + gint a_bal; + gint b_bal; + + left = node->left; + + if (left->right_child) { + node->left = left->right; + } else { + node->left_child = FALSE; + left->right_child = TRUE; + } + left->right = node; + + a_bal = node->balance; + b_bal = left->balance; + + if (b_bal <= 
0) { + if (b_bal > a_bal) { + left->balance = b_bal + 1; + } else { + left->balance = a_bal + 2; + } + node->balance = a_bal - b_bal + 1; + } else { + if (a_bal <= -1) { + left->balance = b_bal + 1; + } else { + left->balance = a_bal + b_bal + 2; + } + node->balance = a_bal + 1; + } + + return left; +} + +#ifdef Q_TREE_DEBUG +static gint +q_tree_node_height(QTreeNode *node) +{ + gint left_height; + gint right_height; + + if (node) { + left_height = 0; + right_height = 0; + + if (node->left_child) { + left_height = q_tree_node_height(node->left); + } + + if (node->right_child) { + right_height = q_tree_node_height(node->right); + } + + return MAX(left_height, right_height) + 1; + } + + return 0; +} + +static void q_tree_node_check(QTreeNode *node) +{ + gint left_height; + gint right_height; + gint balance; + QTreeNode *tmp; + + if (node) { + if (node->left_child) { + tmp = q_tree_node_previous(node); + g_assert(tmp->right == node); + } + + if (node->right_child) { + tmp = q_tree_node_next(node); + g_assert(tmp->left == node); + } + + left_height = 0; + right_height = 0; + + if (node->left_child) { + left_height = q_tree_node_height(node->left); + } + if (node->right_child) { + right_height = q_tree_node_height(node->right); + } + + balance = right_height - left_height; + g_assert(balance == node->balance); + + if (node->left_child) { + q_tree_node_check(node->left); + } + if (node->right_child) { + q_tree_node_check(node->right); + } + } +} +#endif diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index 40f36ea214..5b6216069c 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -346,10 +346,9 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc, aio_context_release(server->ctx); } +/* server->ctx acquired by caller */ void vhost_user_server_stop(VuServer *server) { - aio_context_acquire(server->ctx); - qemu_bh_delete(server->restart_listener_bh); server->restart_listener_bh = NULL; @@ -366,8 +365,6 @@ void vhost_user_server_stop(VuServer *server) AIO_WAIT_WHILE(server->ctx, server->co_trip); } - aio_context_release(server->ctx); - if (server->listener) { qio_net_listener_disconnect(server->listener); object_unref(OBJECT(server->listener)); |
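
The pauth_ptr_mask() hunks above reduce the helper to a static inline over ARMVAParameters, so the gdbstub no longer depends on a TCG-only symbol. A standalone sketch of the mask arithmetic follows, using hypothetical tsz/tbi values and a plain-C stand-in for QEMU's MAKE_64BIT_MASK() rather than the real headers:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* stand-in for QEMU's MAKE_64BIT_MASK(shift, length); assumes 0 < length < 64 */
static uint64_t make_64bit_mask(int shift, int length)
{
    return ((UINT64_C(1) << length) - 1) << shift;
}

/* same arithmetic as the new static inline pauth_ptr_mask() */
static uint64_t ptr_mask(int tsz, int tbi)
{
    int bot_pac_bit = 64 - tsz;      /* first bit above the virtual address */
    int top_pac_bit = 64 - 8 * tbi;  /* 56 when TBI is in effect, else 64 */

    return make_64bit_mask(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

int main(void)
{
    /* hypothetical 48-bit VA regime (tsz = 16), chosen only for illustration */
    printf("%016" PRIx64 "\n", ptr_mask(16, 1)); /* 00ff000000000000: PAC in bits [48,55] */
    printf("%016" PRIx64 "\n", ptr_mask(16, 0)); /* ffff000000000000: PAC in bits [48,63] */
    return 0;
}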
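
The new QTree API mirrors GTree call for call, as the tcg/region.c conversion and tests/unit/test-qtree.c above show. A minimal usage sketch modelled on that test; it assumes a QEMU build tree where qemu/osdep.h and qemu/qtree.h are available, and the int keys are illustrative only:

#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* qsort()-style comparison over int keys */
static gint cmp_int(gconstpointer a, gconstpointer b)
{
    return *(const int *)a - *(const int *)b;
}

int main(void)
{
    static int keys[] = { 3, 1, 2 };
    QTree *tree = q_tree_new(cmp_int);

    for (size_t i = 0; i < ARRAY_SIZE(keys); i++) {
        /* key and value both point at keys[i]; no destroy functions supplied */
        q_tree_insert(tree, &keys[i], &keys[i]);
    }

    int needle = 2;
    int *found = q_tree_lookup(tree, &needle);   /* O(log n) lookup */
    g_assert(found && *found == 2);
    g_assert(q_tree_nnodes(tree) == 3);

    q_tree_remove(tree, &needle);                /* without destroy funcs, nothing is freed */
    q_tree_destroy(tree);                        /* removes all nodes and drops the reference */
    return 0;
}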