author     Peter Maydell <peter.maydell@linaro.org>  2019-05-16 10:24:08 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2019-05-16 10:24:08 +0100
commit     c1497fba36465d0259d4d04f2bf09ea59ed42680 (patch)
tree       80b616d06242e232762600f80b2d7745ea5015e2 /migration/ram.c
parent     e329ad2ab72c43b56df88b34954c2c7d839bb373 (diff)
parent     9d3250d5ba8c4c5389530b861686e22e77fddcc7 (diff)
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20190514b' into staging
Migration pull 2019-05-14

Small fixes/cleanups
One HMP/monitor fix

# gpg: Signature made Tue 14 May 2019 19:03:53 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20190514b:
  monitor: Call mon_get_cpu() only once at hmp_gva2gpa()
  migration/ram.c: fix typos in comments
  migration: Fix use-after-free during process exit
  migration/savevm: wrap into qemu_loadvm_state_header()
  migration/savevm: load_header before load_setup
  migration/savevm: remove duplicate check of migration_is_blocked
  migration: update comments of migration bitmap
  migration/ram.c: start of migration_bitmap_sync_range is always 0
  qemu-option.hx: Update missed parameter for colo-compare
  migration/colo.h: Remove obsolete codes
  migration/colo.c: Remove redundant input parameter
  migration: savevm: fix error code with migration blockers
  vmstate: check subsection_found is enough
  migration: remove not used field xfer_limit
  migration: not necessary to check ops again
  migration: comment VMSTATE_UNUSED*() properly

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 1ca9ba77b6..4c60869226 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -917,7 +917,7 @@ struct {
  *    - to make easier to know what to free at the end of migration
  *
  * This way we always know who is the owner of each "pages" struct,
- * and we don't need any loocking.  It belongs to the migration thread
+ * and we don't need any locking.  It belongs to the migration thread
  * or to the channel thread.  Switching is safe because the migration
  * thread is using the channel mutex when changing it, and the channel
  * have to had finish with its own, otherwise pending_job can't be
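
The comment touched by this hunk describes a hand-off scheme: a "pages" struct is owned either by the migration thread or by a channel thread, and ownership only changes while that channel's mutex is held, so no further locking is needed. A minimal sketch of that pattern follows; the type and field names (PagesBuf, Channel, pending_job) are illustrative stand-ins, not QEMU's actual multifd structures.

    /* Illustrative sketch of the ownership hand-off described above.
     * All names here are hypothetical; QEMU's real multifd code differs. */
    #include <pthread.h>
    #include <stdbool.h>

    typedef struct PagesBuf PagesBuf;   /* stand-in for a "pages" struct */

    typedef struct {
        pthread_mutex_t mutex;          /* the channel mutex */
        bool pending_job;               /* true while the channel owns a filled buffer */
        PagesBuf *pages;                /* buffer currently assigned to the channel */
    } Channel;

    /* Migration thread: swap a filled buffer in, take the drained one back.
     * Ownership only changes while the channel mutex is held. */
    static PagesBuf *hand_off(Channel *ch, PagesBuf *filled)
    {
        PagesBuf *drained;

        pthread_mutex_lock(&ch->mutex);
        drained = ch->pages;            /* channel has finished with this one */
        ch->pages = filled;             /* channel now owns the filled buffer */
        ch->pending_job = true;
        pthread_mutex_unlock(&ch->mutex);

        return drained;                 /* migration thread owns it again */
    }
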
@@ -1630,9 +1630,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
 /**
  * migration_bitmap_find_dirty: find the next dirty page from start
  *
- * Called with rcu_read_lock() to protect migration_bitmap
- *
- * Returns the byte offset within memory region of the start of a dirty page
+ * Returns the page offset within memory region of the start of a dirty page
  *
  * @rs: current RAM state
  * @rb: RAMBlock where to search for dirty pages
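
As context for the corrected comment: the returned value is a page offset, i.e. the byte offset within the RAMBlock shifted down by the page-size bits. A tiny sketch of that relationship follows; the shift of 12 (4 KiB pages) is only an example value, not QEMU's definition.

    /* Example of the byte-offset / page-offset relationship referred to above.
     * The 4 KiB page size (shift of 12) is an illustrative assumption. */
    #define EXAMPLE_PAGE_BITS 12

    static unsigned long byte_to_page_offset(unsigned long byte_offset)
    {
        return byte_offset >> EXAMPLE_PAGE_BITS;  /* index into the dirty bitmap */
    }
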
@@ -1681,10 +1679,10 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 }
 
 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
-                                        ram_addr_t start, ram_addr_t length)
+                                        ram_addr_t length)
 {
     rs->migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
+        cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
                                               &rs->num_dirty_pages_period);
 }
 
@@ -1773,7 +1771,7 @@ static void migration_bitmap_sync(RAMState *rs)
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        migration_bitmap_sync_range(rs, block, 0, block->used_length);
+        migration_bitmap_sync_range(rs, block, block->used_length);
     }
     ram_counters.remaining = ram_bytes_remaining();
     rcu_read_unlock();
@@ -2146,7 +2144,7 @@ retry:
  * find_dirty_block: find the next dirty page and update any state
  * associated with the search process.
  *
- * Returns if a page is found
+ * Returns true if a page is found
  *
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
@@ -2242,7 +2240,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
  *
  * Skips pages that are already sent (!dirty)
  *
- * Returns if a queued page is found
+ * Returns true if a queued page is found
  *
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
@@ -2681,7 +2679,7 @@ static void ram_save_cleanup(void *opaque)
     RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
-     * no writing race against this migration_bitmap
+     * no writing race against the migration bitmap
      */
     memory_global_dirty_log_stop();
 
@@ -3449,7 +3447,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
-           qemu_get_clock_ns() is a bit expensive, so we only check each some
+           qemu_clock_get_ns() is a bit expensive, so we only check each some
            iterations
         */
         if ((i & 63) == 0) {
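
The corrected comment points at a throttling pattern: reading the clock is comparatively expensive, so the loop samples it only once every 64 iterations via (i & 63) == 0. A self-contained sketch of that pattern follows; the work placeholder and the time budget are made up for illustration.

    /* Sketch of checking an expensive clock only every 64th iteration.
     * The work placeholder and the time budget are hypothetical. */
    #include <stdint.h>
    #include <time.h>

    static int64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    void bounded_loop(int64_t budget_ns)
    {
        int64_t start = now_ns();

        for (uint64_t i = 0; ; i++) {
            /* ... one unit of work would go here ... */

            /* The clock read is the expensive part, so only check the
             * elapsed time once per 64 iterations. */
            if ((i & 63) == 0 && now_ns() - start > budget_ns) {
                break;
            }
        }
    }
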
@@ -4196,7 +4194,7 @@ static void colo_flush_ram_cache(void)
     memory_global_dirty_log_sync();
     rcu_read_lock();
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
+        migration_bitmap_sync_range(ram_state, block, block->used_length);
     }
     rcu_read_unlock();