author     Paolo Bonzini <pbonzini@redhat.com>  2018-11-28 17:29:45 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2019-01-11 13:57:24 +0100
commit     1f7af8042903a5eeb2f707efff34175162f75664 (patch)
tree       7b84f453086f1bf4196091fef03cc0daa1010795 /memory.c
parent     909bf76302fc0d9329c4995dac9f8c30b68da864 (diff)
memory: avoid unnecessary coalesced_io_del operations
Store whether the FlatRange has had any coalesced I/O ranges applied,
and, if not, avoid calling coalesced_io_del.  This is useful in
preparation for the next patch, which will call coalesced_io_del when
rendering memory regions.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
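
For illustration only (this is not part of the patch): a minimal,
self-contained C sketch of the pattern the commit describes, i.e.
remember whether any coalesced ranges were ever applied to a flat
range and skip the removal call entirely when none were.  The type
and function names (ToyFlatRange, toy_coalesced_io_add/del) are
simplified stand-ins, not QEMU's actual FlatRange machinery.

/*
 * Sketch of the "track whether teardown is needed" pattern.
 * Names are hypothetical stand-ins for the QEMU internals.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    const char *name;
    bool has_coalesced_range;   /* set only when ranges are applied */
} ToyFlatRange;

static void toy_coalesced_io_del(ToyFlatRange *fr)
{
    if (!fr->has_coalesced_range) {
        return;                 /* nothing was added, nothing to remove */
    }
    printf("removing coalesced ranges for %s\n", fr->name);
    fr->has_coalesced_range = false;
}

static void toy_coalesced_io_add(ToyFlatRange *fr, int nr_ranges)
{
    if (nr_ranges == 0) {
        return;                 /* leave the flag clear */
    }
    fr->has_coalesced_range = true;
    printf("adding %d coalesced ranges for %s\n", nr_ranges, fr->name);
}

int main(void)
{
    ToyFlatRange plain     = { "plain",     false };
    ToyFlatRange coalesced = { "coalesced", false };

    toy_coalesced_io_add(&plain, 0);     /* flag stays false */
    toy_coalesced_io_add(&coalesced, 2); /* flag becomes true */

    toy_coalesced_io_del(&plain);        /* returns early, no removal work */
    toy_coalesced_io_del(&coalesced);    /* performs the removal */
    return 0;
}
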
Diffstat
-rw-r--r--  memory.c | 11
1 file changed, 11 insertions, 0 deletions
diff --git a/memory.c b/memory.c
index 119b6e46d5..072769aa06 100644
--- a/memory.c
+++ b/memory.c
@@ -217,6 +217,7 @@ struct FlatRange {
     bool romd_mode;
     bool readonly;
     bool nonvolatile;
+    bool has_coalesced_range;
 };
 
 #define FOR_EACH_FLAT_RANGE(var, view)          \
@@ -650,6 +651,7 @@ static void render_memory_region(FlatView *view,
     fr.romd_mode = mr->romd_mode;
     fr.readonly = readonly;
     fr.nonvolatile = nonvolatile;
+    fr.has_coalesced_range = false;
 
     /* Render the region itself into any gaps left by the current view. */
     for (i = 0; i < view->nr && int128_nz(remain); ++i) {
@@ -852,6 +854,10 @@ static void address_space_update_ioeventfds(AddressSpace *as)
 
 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
 {
+    if (!fr->has_coalesced_range) {
+        return;
+    }
+
     MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                   int128_get64(fr->addr.start),
                                   int128_get64(fr->addr.size));
@@ -863,6 +869,11 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
     CoalescedMemoryRange *cmr;
     AddrRange tmp;
 
+    if (QTAILQ_EMPTY(&mr->coalesced)) {
+        return;
+    }
+
+    fr->has_coalesced_range = true;
     QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
         tmp = addrrange_shift(cmr->addr,
                               int128_sub(fr->addr.start,