Diffstat (limited to 'migration/ram.c')
-rw-r--r--   migration/ram.c   150
1 file changed, 44 insertions(+), 106 deletions(-)
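The recurring pattern in this diff: qemu_fflush() now reports the stream's error state itself, so the old "flush, then poll qemu_file_get_error(), then return 0" sequence collapses into returning the flush result directly. A minimal sketch of the presumed new contract follows; qemu_fflush() lives in migration/qemu-file.c, outside this diff, so the body shown is an assumption inferred from the call sites:

    /*
     * Presumed shape of the reworked qemu_fflush() (assumption, not
     * part of this diff): write out buffered data, then return the
     * QEMUFile error state, 0 on success or a negative errno value
     * on failure, so callers can propagate it directly.
     */
    int qemu_fflush(QEMUFile *f)
    {
        /* ... flush any buffered data, recording I/O errors ... */
        return qemu_file_get_error(f);
    }

Every "return qemu_fflush(f);" and "ret = qemu_fflush(f);" hunk below relies on this contract.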
diff --git a/migration/ram.c b/migration/ram.c
index 92769902bb..34724e8fe8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -305,17 +305,15 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
 
     qemu_put_be64(file, size);
     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
+    g_free(le_bitmap);
     /*
      * Mark as an end, in case the middle part is screwed up due to
      * some "mysterious" reason.
      */
     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
-    qemu_fflush(file);
-
-    g_free(le_bitmap);
-
-    if (qemu_file_get_error(file)) {
-        return qemu_file_get_error(file);
+    int ret = qemu_fflush(file);
+    if (ret) {
+        return ret;
     }
 
     return size + sizeof(size);
@@ -369,13 +367,6 @@ struct RAMState {
     bool xbzrle_started;
     /* Are we on the last stage of migration */
     bool last_stage;
-    /* compression statistics since the beginning of the period */
-    /* amount of count that no free thread to compress data */
-    uint64_t compress_thread_busy_prev;
-    /* amount bytes after compression */
-    uint64_t compressed_size_prev;
-    /* amount of compressed pages */
-    uint64_t compress_pages_prev;
 
     /* total handled target pages at the beginning of period */
     uint64_t target_page_count_prev;
@@ -455,7 +446,6 @@ void ram_transferred_add(uint64_t bytes)
     } else {
         stat64_add(&mig_stats.downtime_bytes, bytes);
     }
-    stat64_add(&mig_stats.transferred, bytes);
 }
 
 struct MigrationOps {
@@ -564,7 +554,7 @@ void mig_throttle_counter_reset(void)
 
     rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     rs->num_dirty_pages_period = 0;
-    rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
+    rs->bytes_xfer_prev = migration_transferred_bytes();
 }
 
 /**
@@ -939,13 +929,12 @@ uint64_t ram_get_total_transferred_pages(void)
 {
     return stat64_get(&mig_stats.normal_pages) +
         stat64_get(&mig_stats.zero_pages) +
-        ram_compressed_pages() + xbzrle_counters.pages;
+        compress_ram_pages() + xbzrle_counters.pages;
 }
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
 {
     uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
-    double compressed_size;
 
     /* calculate period counters */
     stat64_set(&mig_stats.dirty_pages_rate,
@@ -973,26 +962,7 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
         rs->xbzrle_pages_prev = xbzrle_counters.pages;
         rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
     }
-
-    if (migrate_compress()) {
-        compression_counters.busy_rate = (double)(compression_counters.busy -
-            rs->compress_thread_busy_prev) / page_count;
-        rs->compress_thread_busy_prev = compression_counters.busy;
-
-        compressed_size = compression_counters.compressed_size -
-                          rs->compressed_size_prev;
-        if (compressed_size) {
-            double uncompressed_size = (compression_counters.pages -
-                                        rs->compress_pages_prev) * TARGET_PAGE_SIZE;
-
-            /* Compression-Ratio = Uncompressed-size / Compressed-size */
-            compression_counters.compression_rate =
-                uncompressed_size / compressed_size;
-
-            rs->compress_pages_prev = compression_counters.pages;
-            rs->compressed_size_prev = compression_counters.compressed_size;
-        }
-    }
+    compress_update_rates(page_count);
 }
 
 /*
@@ -1030,7 +1000,7 @@ static void migration_trigger_throttle(RAMState *rs)
 {
     uint64_t threshold = migrate_throttle_trigger_threshold();
     uint64_t bytes_xfer_period =
-        stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev;
+        migration_transferred_bytes() - rs->bytes_xfer_prev;
     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
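migration_trigger_throttle() sizes its threshold as a percentage of the bytes transferred during the sync period, and (later in the function, outside this hunk) throttles the guest when the bytes dirtied in the same period exceed it. A worked example with illustrative numbers, using MiB from qemu/units.h:

    uint64_t threshold             = 50;          /* percent, illustrative */
    uint64_t bytes_xfer_period     = 200 * MiB;   /* sent this period */
    uint64_t bytes_dirty_period    = 120 * MiB;   /* dirtied this period */
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /* 120 MiB > 100 MiB: dirtying outpaces transfer, throttle the guest */
    if (bytes_dirty_period > bytes_dirty_threshold) {
        /* ... */
    }

The change itself only swaps the byte-counter source from stat64_get(&mig_stats.transferred) to the migration_transferred_bytes() helper, matching mig_throttle_counter_reset() above.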
@@ -1100,7 +1070,7 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
-        rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
+        rs->bytes_xfer_prev = migration_transferred_bytes();
     }
     if (migrate_events()) {
         uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
@@ -1291,9 +1261,7 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
     return 1;
 }
 
-static bool save_page_use_compression(RAMState *rs);
-
-static int send_queued_data(CompressParam *param)
+int compress_send_queued_data(CompressParam *param)
 {
     PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
     MigrationState *ms = migrate_get_current();
@@ -1329,15 +1297,6 @@ static int send_queued_data(CompressParam *param)
     return len;
 }
 
-static void ram_flush_compressed_data(RAMState *rs)
-{
-    if (!save_page_use_compression(rs)) {
-        return;
-    }
-
-    flush_compressed_data(send_queued_data);
-}
-
 #define PAGE_ALL_CLEAN 0
 #define PAGE_TRY_AGAIN 1
 #define PAGE_DIRTY_FOUND 2
@@ -1393,7 +1352,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
              * Also If xbzrle is on, stop using the data compression at this
              * point. In theory, xbzrle can do better than compression.
              */
-            ram_flush_compressed_data(rs);
+            compress_flush_data();
 
             /* Hit the end of the list */
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
@@ -2042,24 +2001,6 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     return 0;
 }
 
-static bool save_page_use_compression(RAMState *rs)
-{
-    if (!migrate_compress()) {
-        return false;
-    }
-
-    /*
-     * If xbzrle is enabled (e.g., after first round of migration), stop
-     * using the data compression. In theory, xbzrle can do better than
-     * compression.
-     */
-    if (rs->xbzrle_started) {
-        return false;
-    }
-
-    return true;
-}
-
 /*
  * try to compress the page before posting it out, return true if the page
  * has been properly handled by compression, otherwise needs other
@@ -2068,7 +2009,7 @@
 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                                ram_addr_t offset)
 {
-    if (!save_page_use_compression(rs)) {
+    if (!migrate_compress()) {
         return false;
     }
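The compression helpers referenced above replace static ram.c code and now live elsewhere (presumably migration/ram-compress.c and its header; their definitions are not part of this diff). Judging purely from the call sites, their rough shapes are as follows; this is a reconstruction, not a set of authoritative declarations:

    /* Flush partially filled compression buffers out to the stream. */
    void compress_flush_data(void);

    /* Hand one page to the compress threads; returns true when the page
     * was queued (the busy accounting that save_compress_page() used to
     * do is presumably absorbed here). */
    bool compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
                                         int (*send_queued_data)(CompressParam *));

    /* Period-counter update absorbing the busy-rate and
     * compression-ratio math deleted from migration_update_rates(). */
    void compress_update_rates(uint64_t page_count);

    /* Compressed-page counter, replacing ram_compressed_pages(). */
    uint64_t compress_ram_pages(void);

Note that save_compress_page() consequently shrinks to a plain migrate_compress() check; the separate xbzrle_started test from the deleted save_page_use_compression() no longer appears on this path.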
@@ -2083,17 +2024,12 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
      * much CPU resource.
      */
     if (pss->block != pss->last_sent_block) {
-        ram_flush_compressed_data(rs);
+        compress_flush_data();
         return false;
     }
 
-    if (compress_page_with_multi_thread(pss->block, offset,
-                                        send_queued_data) > 0) {
-        return true;
-    }
-
-    compression_counters.busy++;
-    return false;
+    return compress_page_with_multi_thread(pss->block, offset,
+                                           compress_send_queued_data);
 }
 
 /**
@@ -3034,11 +2970,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
     if (ret < 0) {
         qemu_file_set_error(f, ret);
+        return ret;
     }
 
     ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
     if (ret < 0) {
         qemu_file_set_error(f, ret);
+        return ret;
     }
 
     migration_ops = g_malloc0(sizeof(MigrationOps));
@@ -3056,9 +2994,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-    qemu_fflush(f);
-
-    return 0;
+    return qemu_fflush(f);
 }
 
 /**
@@ -3104,6 +3040,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
     if (ret < 0) {
         qemu_file_set_error(f, ret);
+        goto out;
     }
 
     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -3135,7 +3072,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             * page is sent in one chunk.
             */
            if (migrate_postcopy_ram()) {
-               ram_flush_compressed_data(rs);
+               compress_flush_data();
            }
 
            /*
@@ -3177,10 +3114,8 @@ out:
         }
 
         qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-        qemu_fflush(f);
         ram_transferred_add(8);
-
-        ret = qemu_file_get_error(f);
+        ret = qemu_fflush(f);
     }
     if (ret < 0) {
         return ret;
@@ -3215,6 +3150,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
         if (ret < 0) {
             qemu_file_set_error(f, ret);
+            return ret;
         }
 
         /* try transferring iterative blocks of memory */
@@ -3230,24 +3166,21 @@
                 break;
             }
             if (pages < 0) {
-                ret = pages;
-                break;
+                qemu_mutex_unlock(&rs->bitmap_mutex);
+                return pages;
             }
         }
         qemu_mutex_unlock(&rs->bitmap_mutex);
 
-        ram_flush_compressed_data(rs);
+        compress_flush_data();
 
-        int ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
+        ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
         if (ret < 0) {
             qemu_file_set_error(f, ret);
+            return ret;
         }
     }
 
-    if (ret < 0) {
-        return ret;
-    }
-
     ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
     if (ret < 0) {
         return ret;
@@ -3257,9 +3190,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
     }
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-    qemu_fflush(f);
-
-    return 0;
+    return qemu_fflush(f);
 }
 
 static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
@@ -3446,7 +3377,7 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
 }
 
 /**
- * ram_handle_compressed: handle the zero page case
+ * ram_handle_zero: handle the zero page case
  *
  * If a page (or a whole RDMA chunk) has been
  * determined to be zero, then zap it.
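The next hunk completes the rename: ram_handle_zero() drops the fill-byte parameter and keeps the check-before-write idiom, calling memset() only when the page is not already zero, so clean host pages (never touched, or shared copy-on-write) are not dirtied needlessly. A self-contained sketch of the same idiom, with a naive stand-in for QEMU's optimized buffer_is_zero():

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Naive stand-in for QEMU's vectorized buffer_is_zero(). */
    static bool buffer_is_zero(const void *buf, size_t len)
    {
        const unsigned char *p = buf;

        for (size_t i = 0; i < len; i++) {
            if (p[i]) {
                return false;
            }
        }
        return true;
    }

    static void handle_zero(void *host, size_t size)
    {
        if (!buffer_is_zero(host, size)) {
            memset(host, 0, size);  /* write only when actually needed */
        }
    }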
@@ -3455,10 +3386,10 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
  * @ch: what the page is filled from.  We only support zero
  * @size: size of the zero page
  */
-void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
+void ram_handle_zero(void *host, uint64_t size)
 {
-    if (ch != 0 || !buffer_is_zero(host, size)) {
-        memset(host, ch, size);
+    if (!buffer_is_zero(host, size)) {
+        memset(host, 0, size);
     }
 }
 
@@ -3715,16 +3646,18 @@ int ram_load_postcopy(QEMUFile *f, int channel)
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
         case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
+            if (ch != 0) {
+                error_report("Found a zero page with value %d", ch);
+                ret = -EINVAL;
+                break;
+            }
             /*
              * Can skip to set page_buffer when
              * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
              */
-            if (ch || !matches_target_page_size) {
+            if (!matches_target_page_size) {
                 memset(page_buffer, ch, TARGET_PAGE_SIZE);
             }
-            if (ch) {
-                tmp_page->all_zero = false;
-            }
             break;
 
         case RAM_SAVE_FLAG_PAGE:
@@ -4030,7 +3963,12 @@ static int ram_load_precopy(QEMUFile *f)
 
         case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
-            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
+            if (ch != 0) {
+                error_report("Found a zero page with value %d", ch);
+                ret = -EINVAL;
+                break;
+            }
+            ram_handle_zero(host, TARGET_PAGE_SIZE);
             break;
 
         case RAM_SAVE_FLAG_PAGE:
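Both load paths now validate the RAM_SAVE_FLAG_ZERO fill byte the same way: zero is the only supported fill value, so a non-zero byte indicates a corrupt (or hostile) stream and fails the load with -EINVAL. Sketched as one hypothetical wrapper (load_zero_page() is not in the tree; qemu_get_byte(), error_report() and ram_handle_zero() are the real APIs used above):

    /* Hypothetical consolidation of the duplicated validation above. */
    static int load_zero_page(QEMUFile *f, void *host, uint64_t size)
    {
        int ch = qemu_get_byte(f);

        if (ch != 0) {
            error_report("Found a zero page with value %d", ch);
            return -EINVAL;
        }
        ram_handle_zero(host, size);
        return 0;
    }

Compared with the old ram_handle_compressed(), which silently accepted any fill byte, a malformed stream now fails fast instead of filling guest memory with an unexpected pattern.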