Diffstat (limited to 'migration/ram.c')
-rw-r--r--   migration/ram.c | 81
1 file changed, 79 insertions(+), 2 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 9aac89638a..5eef2efc78 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -53,6 +53,8 @@
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 #include "system/cpu-throttle.h"
+#include "system/physmem.h"
+#include "system/ramblock.h"
 #include "savevm.h"
 #include "qemu/iov.h"
 #include "multifd.h"
@@ -936,10 +938,85 @@ bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
 }
 
 /* Called with RCU critical section */
+static uint64_t physical_memory_sync_dirty_bitmap(RAMBlock *rb,
+                                                  ram_addr_t start,
+                                                  ram_addr_t length)
+{
+    ram_addr_t addr;
+    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
+    uint64_t num_dirty = 0;
+    unsigned long *dest = rb->bmap;
+
+    /* start address and length is aligned at the start of a word? */
+    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
+        (start + rb->offset) &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long * const *src;
+        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+                                        DIRTY_MEMORY_BLOCK_SIZE);
+        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+        src = qatomic_rcu_read(
+                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
+
+        for (k = page; k < page + nr; k++) {
+            if (src[idx][offset]) {
+                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
+                unsigned long new_dirty;
+                new_dirty = ~dest[k];
+                dest[k] |= bits;
+                new_dirty &= bits;
+                num_dirty += ctpopl(new_dirty);
+            }
+
+            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                offset = 0;
+                idx++;
+            }
+        }
+        if (num_dirty) {
+            physical_memory_dirty_bits_cleared(start, length);
+        }
+
+        if (rb->clear_bmap) {
+            /*
+             * Postpone the dirty bitmap clear to the point before we
+             * really send the pages, also we will split the clear
+             * dirty procedure into smaller chunks.
+             */
+            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+                           length >> TARGET_PAGE_BITS);
+        } else {
+            /* Slow path - still do that in a huge chunk */
+            memory_region_clear_dirty_bitmap(rb->mr, start, length);
+        }
+    } else {
+        ram_addr_t offset = rb->offset;
+
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            if (physical_memory_test_and_clear_dirty(
+                        start + addr + offset,
+                        TARGET_PAGE_SIZE,
+                        DIRTY_MEMORY_MIGRATION)) {
+                long k = (start + addr) >> TARGET_PAGE_BITS;
+                if (!test_and_set_bit(k, dest)) {
+                    num_dirty++;
+                }
+            }
+        }
+    }
+
+    return num_dirty;
+}
+
+/* Called with RCU critical section */
 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
 {
     uint64_t new_dirty_pages =
-        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
+        physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
 
     rs->migration_dirty_pages += new_dirty_pages;
     rs->num_dirty_pages_period += new_dirty_pages;
@@ -4370,7 +4447,7 @@ static bool ram_has_postcopy(void *opaque)
 {
     RAMBlock *rb;
     RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
-        if (ramblock_is_pmem(rb)) {
+        if (ram_block_is_pmem(rb)) {
             info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
                         "is not supported now!", rb->idstr, rb->host);
             return false;
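
Two pieces of the new physical_memory_sync_dirty_bitmap() are worth seeing in isolation. First, the fast path only triggers when the region starts exactly on a dirty-bitmap word boundary and spans a whole number of bitmap words. Below is a minimal standalone sketch of that eligibility test; it is not QEMU code, and the 4 KiB TARGET_PAGE_BITS value and the sync_fast_path_ok() name are assumptions for illustration.

/* Standalone sketch of the fast-path eligibility test above; compiles
 * with any C11 compiler. Not QEMU code: page size is assumed. */
#include <stdbool.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                    /* assumption: 4 KiB pages */
#define BITS_PER_LONG    (8 * (int)sizeof(unsigned long))
#define BIT_WORD(nr)     ((nr) / BITS_PER_LONG)

static bool sync_fast_path_ok(unsigned long start, unsigned long rb_offset,
                              unsigned long length)
{
    unsigned long word = BIT_WORD((start + rb_offset) >> TARGET_PAGE_BITS);

    /* The region must begin exactly on a bitmap-word boundary ...        */
    return ((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == start + rb_offset
           /* ... and cover a whole number of bitmap words of pages.      */
           && !(length &
                (((unsigned long)BITS_PER_LONG << TARGET_PAGE_BITS) - 1));
}

int main(void)
{
    printf("%d\n", sync_fast_path_ok(0, 0, 256 * 1024));    /* prints 1 */
    printf("%d\n", sync_fast_path_ok(4096, 0, 256 * 1024)); /* prints 0 */
    printf("%d\n", sync_fast_path_ok(0, 0, 128 * 1024));    /* prints 0 */
    return 0;
}

With 64-bit longs and 4 KiB pages, one bitmap word covers 64 pages, so eligibility effectively means start + rb->offset is 256 KiB aligned and length is a 256 KiB multiple; anything else falls through to the page-at-a-time slow path.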
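
Second, the heart of the fast-path loop is the fetch-and-clear merge: atomically swap the source word to zero, OR the fetched bits into the destination bitmap, and count only the bits that were not already set. Here is a self-contained sketch, using C11 atomics and the GCC/Clang builtin __builtin_popcountl() as stand-ins for qatomic_xchg() and ctpopl(); merge_dirty_word() is a hypothetical name.

/* Self-contained sketch of the merge-and-count idiom; not QEMU code. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Merge one source word into dest[k]; return how many bits are new. */
static unsigned merge_dirty_word(unsigned long *dest, size_t k,
                                 _Atomic unsigned long *src_word)
{
    /* Fetch-and-clear, mirroring qatomic_xchg(&src[idx][offset], 0). */
    unsigned long bits = atomic_exchange(src_word, 0UL);
    unsigned long new_dirty = ~dest[k] & bits;   /* not already in dest */

    dest[k] |= bits;
    return (unsigned)__builtin_popcountl(new_dirty); /* ctpopl() stand-in */
}

int main(void)
{
    unsigned long dest[1] = { 0x0f };       /* pages 0-3 already tracked */
    _Atomic unsigned long src = 0x3c;       /* pages 2-5 dirty at source */

    unsigned n = merge_dirty_word(dest, 0, &src);

    /* Pages 4 and 5 are the only new ones: prints "new=2 dest=0x3f src=0" */
    printf("new=%u dest=%#lx src=%lu\n", n, dest[0],
           (unsigned long)atomic_load(&src));
    return 0;
}

The atomic exchange is what lets the migration thread race safely with vCPUs that keep dirtying memory: any bit set after the swap lands in the freshly zeroed source word and is collected on the next sync round rather than being lost.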