diff options
| author | Richard Henderson <richard.henderson@linaro.org> | 2025-04-23 12:37:28 -0700 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2025-04-30 12:45:05 -0700 |
| commit | 4af02681ff77bf105b11ee1a5ca289ca29b64a54 (patch) | |
| tree | bab0b965475195ff544568feae20788063270cd0 | |
| parent | e4ad80ceac03cc47d8351172f0e4625bb40e2b78 (diff) | |
| download | focaccia-qemu-4af02681ff77bf105b11ee1a5ca289ca29b64a54.tar.gz focaccia-qemu-4af02681ff77bf105b11ee1a5ca289ca29b64a54.zip | |
accel/tcg: Merge tb_invalidate_phys_range{__locked}
Merge tb_invalidate_phys_page_fast__locked into its only caller,
tb_invalidate_phys_range_fast.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
| -rw-r--r-- | accel/tcg/tb-maint.c | 36 |
1 file changed, 11 insertions, 25 deletions
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index 927e9c8ede..c893ea3073 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -1204,37 +1204,23 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) } /* - * Call with all @pages in the range [@start, @start + len[ locked. - */ -static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages, - tb_page_addr_t start, - unsigned len, uintptr_t ra) -{ - PageDesc *p; - - p = page_find(start >> TARGET_PAGE_BITS); - if (!p) { - return; - } - - assert_page_locked(p); - tb_invalidate_phys_page_range__locked(NULL, pages, p, start, start + len - 1, ra); -} - -/* * len must be <= 8 and start must be a multiple of len. * Called via softmmu_template.h when code areas are written to with * iothread mutex not held. */ -void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, - unsigned size, - uintptr_t retaddr) +void tb_invalidate_phys_range_fast(ram_addr_t start, + unsigned len, uintptr_t ra) { - struct page_collection *pages; + PageDesc *p = page_find(start >> TARGET_PAGE_BITS); - pages = page_collection_lock(ram_addr, ram_addr + size - 1); - tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); - page_collection_unlock(pages); + if (p) { + ram_addr_t last = start + len - 1; + struct page_collection *pages = page_collection_lock(start, last); + + tb_invalidate_phys_page_range__locked(NULL, pages, p, + start, last, ra); + page_collection_unlock(pages); + } } #endif /* CONFIG_USER_ONLY */ |