| author | Peter Maydell <peter.maydell@linaro.org> | 2017-02-13 18:49:26 +0000 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2017-02-13 18:49:26 +0000 |
| commit | ec7a9bd5bb2c46c60cc0ec9b9d9f2ce404226ec0 | |
| tree | ae717012129be5c9c27827e1eab623ff467ddb1c /migration/ram.c | |
| parent | 305e6c8a2ff7a6e3f4942b50e853230f18eeb5a9 | |
| parent | 982b78c5e37864c06fd7b5f156d80bf02628a855 | |
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20170213a' into staging
Migration

Amit:
  migration: remove myself as maintainer
  MAINTAINERS: update my email address
Ashijeet:
  migrate: Introduce zero RAM checks to skip RAM migration
Pavel:
  Postcopy release RAM
Halil:
  consolidate VMStateField.start
Hailiang:
  COLO: fix setting checkpoint-delay not working properly
  COLO: Shutdown related socket fd while do failover
  COLO: Don't process failover request while loading VM's state
Me:
  migration: Add VMSTATE_UNUSED_VARRAY_UINT32
  migration: Add VMSTATE_WITH_TMP
  tests/migration: Add test for VMSTATE_WITH_TMP
  virtio-net VMState conversion and new VMSTATE macros
# gpg: Signature made Mon 13 Feb 2017 17:36:39 GMT
# gpg: using RSA key 0x0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7
* remotes/dgilbert/tags/pull-migration-20170213a:
virtio/migration: Migrate virtio-net to VMState
tests/migration: Add test for VMSTATE_WITH_TMP
migration: Add VMSTATE_WITH_TMP
migration: Add VMSTATE_UNUSED_VARRAY_UINT32
COLO: Don't process failover request while loading VM's state
COLO: Shutdown related socket fd while do failover
COLO: fix setting checkpoint-delay not working properly
migration: consolidate VMStateField.start
migrate: Introduce zero RAM checks to skip RAM migration
migration: discard non-dirty ram pages after the start of postcopy
add 'release-ram' migrate capability
migration: add MigrationState arg for ram_save_/compressed_/page()
MAINTAINERS: update my email address
migration: remove myself as maintainer
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
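
The VMSTATE_WITH_TMP macro added in this pull lets a device serialise through a temporary structure when the on-the-wire layout does not map one-to-one onto the runtime state. The following is a rough, hypothetical sketch of such a use, not code from the pull: DemoState, DemoTmp, and the field layout are invented for illustration, and it assumes the macro takes the parent state type, the temporary type, and the temporary's VMStateDescription, with the temporary's first member pointing back at the parent. See tests/test-vmstate.c in this series for the authoritative usage.

```c
#include "qemu/osdep.h"
#include "migration/vmstate.h"

/* Hypothetical runtime state: keeps two raw fields. */
typedef struct DemoState {
    uint64_t base;
    uint64_t limit;
} DemoState;

/* Temporary struct that exists only while (de)serialising.  Its first
 * member is assumed to point back at the real state so that conversion
 * hooks on the temporary description can reach it. */
typedef struct DemoTmp {
    DemoState *parent;
    uint64_t span;              /* derived on save, e.g. limit - base */
} DemoTmp;

/* Description of the temporary struct; the pre_save/post_load hooks
 * that would fill 'span' from 'parent' (and back) are omitted here. */
static const VMStateDescription vmstate_demo_tmp = {
    .name = "demo/tmp",
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(span, DemoTmp),
        VMSTATE_END_OF_LIST()
    }
};

/* The device's own description sends 'base' directly and routes the
 * derived value through the temporary struct. */
static const VMStateDescription vmstate_demo = {
    .name = "demo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, DemoState),
        VMSTATE_WITH_TMP(DemoState, DemoTmp, vmstate_demo_tmp),
        VMSTATE_END_OF_LIST()
    }
};
```

The point of the indirection is that values needed only by the stream format can be computed at save/load time instead of being kept as shadow fields in the device state itself.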
Diffstat (limited to 'migration/ram.c')
| -rw-r--r-- | migration/ram.c | 78 |
1 file changed, 63 insertions, 15 deletions
```diff
diff --git a/migration/ram.c b/migration/ram.c
index ef8fadfe69..f289fcddd5 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -705,6 +705,16 @@ static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
     return pages;
 }
 
+static void ram_release_pages(MigrationState *ms, const char *block_name,
+                              uint64_t offset, int pages)
+{
+    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
+        return;
+    }
+
+    ram_discard_range(NULL, block_name, offset, pages << TARGET_PAGE_BITS);
+}
+
 /**
  * ram_save_page: Send the given page to the stream
  *
@@ -713,13 +723,14 @@ static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
  * >=0 - Number of pages written - this might legally be 0
  *       if xbzrle noticed the page was the same.
  *
+ * @ms: The current migration state.
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
+static int ram_save_page(MigrationState *ms, QEMUFile *f, PageSearchStatus *pss,
                          bool last_stage, uint64_t *bytes_transferred)
 {
     int pages = -1;
@@ -764,9 +775,9 @@ static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
              * page would be stale
              */
             xbzrle_cache_zero_page(current_addr);
+            ram_release_pages(ms, block->idstr, pss->offset, pages);
         } else if (!ram_bulk_stage &&
-                   !migration_in_postcopy(migrate_get_current()) &&
-                   migrate_use_xbzrle()) {
+                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
             pages = save_xbzrle_page(f, &p, current_addr, block,
                                      offset, last_stage, bytes_transferred);
             if (!last_stage) {
@@ -783,7 +794,9 @@ static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
         *bytes_transferred += save_page_header(f, block,
                                                offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
-            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
+                                  migrate_release_ram() &
+                                  migration_in_postcopy(ms));
         } else {
             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
         }
@@ -813,6 +826,8 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
         error_report("compressed data failed!");
     } else {
         bytes_sent += blen;
+        ram_release_pages(migrate_get_current(), block->idstr,
+                          offset & TARGET_PAGE_MASK, 1);
     }
 
     return bytes_sent;
@@ -893,14 +908,15 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
  *
  * Returns: Number of pages written.
  *
+ * @ms: The current migration state.
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
-                                    bool last_stage,
+static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f,
+                                    PageSearchStatus *pss, bool last_stage,
                                     uint64_t *bytes_transferred)
 {
     int pages = -1;
@@ -951,12 +967,17 @@ static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
                     error_report("compressed data failed!");
                 }
             }
+            if (pages > 0) {
+                ram_release_pages(ms, block->idstr, pss->offset, pages);
+            }
         } else {
             offset |= RAM_SAVE_FLAG_CONTINUE;
             pages = save_zero_page(f, block, offset, p, bytes_transferred);
             if (pages == -1) {
                 pages = compress_page_with_multi_thread(f, block, offset,
                                                         bytes_transferred);
+            } else {
+                ram_release_pages(ms, block->idstr, pss->offset, pages);
             }
         }
     }
@@ -1231,11 +1252,11 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
         unsigned long *unsentmap;
         if (compression_switch && migrate_use_compression()) {
-            res = ram_save_compressed_page(f, pss,
+            res = ram_save_compressed_page(ms, f, pss,
                                            last_stage,
                                            bytes_transferred);
         } else {
-            res = ram_save_page(f, pss, last_stage,
+            res = ram_save_page(ms, f, pss, last_stage,
                                 bytes_transferred);
         }
 
@@ -1325,6 +1346,11 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
     ram_addr_t dirty_ram_abs; /* Address of the start of the dirty
                                  page in ram_addr_t space */
 
+    /* No dirty page as there is zero RAM */
+    if (!ram_bytes_total()) {
+        return pages;
+    }
+
     pss.block = last_seen_block;
     pss.offset = last_offset;
     pss.complete_round = false;
@@ -1516,6 +1542,25 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 
 /* **** functions for postcopy ***** */
 
+void ram_postcopy_migrated_memory_release(MigrationState *ms)
+{
+    struct RAMBlock *block;
+    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long first = block->offset >> TARGET_PAGE_BITS;
+        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
+        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+
+        while (run_start < range) {
+            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
+            ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
+                              (run_end - run_start) << TARGET_PAGE_BITS);
+            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
+        }
+    }
+}
+
 /*
  * Callback from postcopy_each_ram_send_discard for each RAMBlock
  * Note: At this point the 'unsentmap' is the processed bitmap combined
@@ -1912,14 +1957,17 @@ static int ram_save_init_globals(void)
     bytes_transferred = 0;
     reset_ram_globals();
 
-    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
-    migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
-    bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
-
-    if (migrate_postcopy_ram()) {
-        migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
+    /* Skip setting bitmap if there is no RAM */
+    if (ram_bytes_total()) {
+        ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+        migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
+        bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
+
+        if (migrate_postcopy_ram()) {
+            migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
+            bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
+        }
     }
 
     /*
```
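
The ram_postcopy_migrated_memory_release() function added above walks the migration bitmap looking for runs of already-sent pages (clear bits) and hands each run to ram_discard_range(). The snippet below is a self-contained toy model of that run-scanning idea, not QEMU code: find_next() and discard_range() are stand-ins written for this sketch (QEMU uses its own find_next_bit/find_next_zero_bit helpers) so the example compiles and runs on its own.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12            /* assume 4 KiB pages for the sketch */

/* Return the lowest index >= start (and < size) whose bit equals 'want',
 * or 'size' if there is none.  Stand-in for QEMU's find_next_*bit(). */
static unsigned long find_next(const unsigned long *map, unsigned long size,
                               unsigned long start, int want)
{
    for (unsigned long i = start; i < size; i++) {
        unsigned long word = map[i / (8 * sizeof(unsigned long))];
        int bit = (word >> (i % (8 * sizeof(unsigned long)))) & 1;
        if (bit == want) {
            return i;
        }
    }
    return size;
}

/* Stand-in for ram_discard_range(): just report what would be dropped. */
static void discard_range(const char *name, uint64_t start, uint64_t length)
{
    printf("%s: discard 0x%" PRIx64 " len 0x%" PRIx64 "\n",
           name, start, length);
}

int main(void)
{
    /* One block of 16 pages; a set bit means "still dirty, keep it",
     * a clear bit means "already migrated, safe to release".
     * 0x0F31 == 0b0000111100110001, LSB is page 0. */
    unsigned long bitmap[1] = { 0x0F31UL };
    unsigned long first = 0, range = 16;

    unsigned long run_start = find_next(bitmap, range, first, 0);
    while (run_start < range) {
        unsigned long run_end = find_next(bitmap, range, run_start + 1, 1);
        discard_range("demo-block",
                      (uint64_t)run_start << PAGE_SHIFT,
                      (uint64_t)(run_end - run_start) << PAGE_SHIFT);
        run_start = find_next(bitmap, range, run_end + 1, 0);
    }
    return 0;
}
```

In the patch above the same idea appears per page in ram_release_pages(), which only discards when the new release-ram capability is enabled and the source is already in the postcopy phase.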