Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c | 108
1 file changed, 48 insertions(+), 60 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 79d881f735..01356f60a4 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -57,6 +57,7 @@
 #include "qemu/iov.h"
 #include "multifd.h"
 #include "sysemu/runstate.h"
+#include "options.h"
 
 #include "hw/boards.h" /* for machine_dump_guest_core() */
@@ -155,14 +156,14 @@ static struct {
 
 static void XBZRLE_cache_lock(void)
 {
-    if (migrate_use_xbzrle()) {
+    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
     }
 }
 
 static void XBZRLE_cache_unlock(void)
 {
-    if (migrate_use_xbzrle()) {
+    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
     }
 }
@@ -458,30 +459,18 @@ uint64_t ram_bytes_remaining(void)
                        0;
 }
 
-/*
- * NOTE: not all stats in ram_counters are used in reality. See comments
- * for struct MigrationAtomicStats. The ultimate result of ram migration
- * counters will be a merged version with both ram_counters and the atomic
- * fields in ram_atomic_counters.
- */
-MigrationStats ram_counters;
-MigrationAtomicStats ram_atomic_counters;
+RAMStats ram_counters;
 
 void ram_transferred_add(uint64_t bytes)
 {
     if (runstate_is_running()) {
-        ram_counters.precopy_bytes += bytes;
+        stat64_add(&ram_counters.precopy_bytes, bytes);
     } else if (migration_in_postcopy()) {
-        stat64_add(&ram_atomic_counters.postcopy_bytes, bytes);
+        stat64_add(&ram_counters.postcopy_bytes, bytes);
     } else {
-        ram_counters.downtime_bytes += bytes;
+        stat64_add(&ram_counters.downtime_bytes, bytes);
     }
-    stat64_add(&ram_atomic_counters.transferred, bytes);
-}
-
-void dirty_sync_missed_zero_copy(void)
-{
-    ram_counters.dirty_sync_missed_zero_copy++;
+    stat64_add(&ram_counters.transferred, bytes);
 }
 
 struct MigrationOps {
@@ -597,7 +586,7 @@ static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression() || !comp_param) {
+    if (!migrate_compress() || !comp_param) {
         return;
     }
@@ -636,7 +625,7 @@ static int compress_threads_save_setup(void)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         return 0;
     }
     thread_count = migrate_compress_threads();
@@ -722,11 +711,10 @@ static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
 static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                     uint64_t bytes_dirty_threshold)
 {
-    MigrationState *s = migrate_get_current();
-    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
-    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
-    bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
-    int pct_max = s->parameters.max_cpu_throttle;
+    uint64_t pct_initial = migrate_cpu_throttle_initial();
+    uint64_t pct_increment = migrate_cpu_throttle_increment();
+    bool pct_tailslow = migrate_cpu_throttle_tailslow();
+    int pct_max = migrate_max_cpu_throttle();
     uint64_t throttle_now = cpu_throttle_get_percentage();
     uint64_t cpu_now, cpu_ideal, throttle_inc;
@@ -756,7 +744,7 @@ void mig_throttle_counter_reset(void)
 
     rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     rs->num_dirty_pages_period = 0;
-    rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred);
+    rs->bytes_xfer_prev = stat64_get(&ram_counters.transferred);
 }
 
 /**
@@ -776,7 +764,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
-                 ram_counters.dirty_sync_count);
+                 stat64_get(&ram_counters.dirty_sync_count));
 }
 
 #define ENCODING_FLAG_XBZRLE 0x1
@@ -802,13 +790,13 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
     QEMUFile *file = pss->pss_channel;
+    uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);
 
-    if (!cache_is_cached(XBZRLE.cache, current_addr,
-                         ram_counters.dirty_sync_count)) {
+    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
         xbzrle_counters.cache_miss++;
         if (!rs->last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             ram_counters.dirty_sync_count) == -1) {
+                             generation) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -1130,8 +1118,8 @@ uint64_t ram_pagesize_summary(void)
 
 uint64_t ram_get_total_transferred_pages(void)
 {
-    return stat64_get(&ram_atomic_counters.normal) +
-        stat64_get(&ram_atomic_counters.duplicate) +
+    return stat64_get(&ram_counters.normal_pages) +
+        stat64_get(&ram_counters.zero_pages) +
         compression_counters.pages + xbzrle_counters.pages;
 }
@@ -1148,7 +1136,7 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
         return;
     }
 
-    if (migrate_use_xbzrle()) {
+    if (migrate_xbzrle()) {
         double encoded_size, unencoded_size;
 
         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
@@ -1166,7 +1154,7 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
         rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
     }
 
-    if (migrate_use_compression()) {
+    if (migrate_compress()) {
         compression_counters.busy_rate = (double)(compression_counters.busy -
             rs->compress_thread_busy_prev) / page_count;
         rs->compress_thread_busy_prev = compression_counters.busy;
@@ -1189,10 +1177,9 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
 
 static void migration_trigger_throttle(RAMState *rs)
 {
-    MigrationState *s = migrate_get_current();
-    uint64_t threshold = s->parameters.throttle_trigger_threshold;
+    uint64_t threshold = migrate_throttle_trigger_threshold();
     uint64_t bytes_xfer_period =
-        stat64_get(&ram_atomic_counters.transferred) - rs->bytes_xfer_prev;
+        stat64_get(&ram_counters.transferred) - rs->bytes_xfer_prev;
     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
@@ -1221,7 +1208,7 @@ static void migration_bitmap_sync(RAMState *rs)
     RAMBlock *block;
     int64_t end_time;
 
-    ram_counters.dirty_sync_count++;
+    stat64_add(&ram_counters.dirty_sync_count, 1);
 
     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -1255,10 +1242,11 @@ static void migration_bitmap_sync(RAMState *rs)
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
-        rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred);
+        rs->bytes_xfer_prev = stat64_get(&ram_counters.transferred);
     }
-    if (migrate_use_events()) {
-        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
+    if (migrate_events()) {
+        uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);
+        qapi_event_send_migration_pass(generation);
     }
 }
@@ -1331,7 +1319,7 @@ static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block,
     int len = save_zero_page_to_file(pss, f, block, offset);
 
     if (len) {
-        stat64_add(&ram_atomic_counters.duplicate, 1);
+        stat64_add(&ram_counters.zero_pages, 1);
         ram_transferred_add(len);
         return 1;
     }
@@ -1368,9 +1356,9 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
     }
 
     if (bytes_xmit > 0) {
-        stat64_add(&ram_atomic_counters.normal, 1);
+        stat64_add(&ram_counters.normal_pages, 1);
     } else if (bytes_xmit == 0) {
-        stat64_add(&ram_atomic_counters.duplicate, 1);
+        stat64_add(&ram_counters.zero_pages, 1);
     }
 
     return true;
@@ -1402,7 +1390,7 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
         qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
     }
     ram_transferred_add(TARGET_PAGE_SIZE);
-    stat64_add(&ram_atomic_counters.normal, 1);
+    stat64_add(&ram_counters.normal_pages, 1);
 
     return 1;
 }
@@ -1458,7 +1446,7 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
     if (multifd_queue_page(file, block, offset) < 0) {
         return -1;
     }
-    stat64_add(&ram_atomic_counters.normal, 1);
+    stat64_add(&ram_counters.normal_pages, 1);
 
     return 1;
 }
@@ -1497,7 +1485,7 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
     ram_transferred_add(bytes_xmit);
 
     if (param->zero_page) {
-        stat64_add(&ram_atomic_counters.duplicate, 1);
+        stat64_add(&ram_counters.zero_pages, 1);
         return;
     }
@@ -1636,7 +1624,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
             /* Flag that we've looped */
             pss->complete_round = true;
             /* After the first round, enable XBZRLE. */
-            if (migrate_use_xbzrle()) {
+            if (migrate_xbzrle()) {
                 rs->xbzrle_enabled = true;
             }
         }
@@ -2180,7 +2168,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     RAMBlock *ramblock;
     RAMState *rs = ram_state;
 
-    ram_counters.postcopy_requests++;
+    stat64_add(&ram_counters.postcopy_requests, 1);
 
     RCU_READ_LOCK_GUARD();
     if (!rbname) {
@@ -2280,7 +2268,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
 
 static bool save_page_use_compression(RAMState *rs)
 {
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         return false;
     }
@@ -2372,7 +2360,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
      * if host page size == guest page size the dest guest during run may
      * still see partially copied pages which is data corruption.
      */
-    if (migrate_use_multifd() && !migration_in_postcopy()) {
+    if (migrate_multifd() && !migration_in_postcopy()) {
         return ram_save_multifd_page(pss->pss_channel, block, offset);
     }
@@ -2632,9 +2620,9 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
     uint64_t pages = size / TARGET_PAGE_SIZE;
 
     if (zero) {
-        stat64_add(&ram_atomic_counters.duplicate, pages);
+        stat64_add(&ram_counters.zero_pages, pages);
     } else {
-        stat64_add(&ram_atomic_counters.normal, pages);
+        stat64_add(&ram_counters.normal_pages, pages);
         ram_transferred_add(size);
         qemu_file_credit_transfer(f, size);
     }
@@ -2989,7 +2977,7 @@ static int xbzrle_init(void)
 {
     Error *local_err = NULL;
 
-    if (!migrate_use_xbzrle()) {
+    if (!migrate_xbzrle()) {
         return 0;
     }
@@ -3293,7 +3281,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     migration_ops = g_malloc0(sizeof(MigrationOps));
     migration_ops->ram_save_target_page = ram_save_target_page_legacy;
 
-    ret = multifd_send_sync_main(f);
+    ret = multifd_send_sync_main(f);
     if (ret < 0) {
         return ret;
     }
@@ -3744,7 +3732,7 @@ static int wait_for_decompress_done(void)
 {
     int idx, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         return 0;
     }
@@ -3763,7 +3751,7 @@ static void compress_threads_load_cleanup(void)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         return;
     }
     thread_count = migrate_decompress_threads();
@@ -3804,7 +3792,7 @@ static int compress_threads_load_setup(QEMUFile *f)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         return 0;
     }
@@ -4270,7 +4258,7 @@ static int ram_load_precopy(QEMUFile *f)
     int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
     /* ADVISE is earlier, it shows the source has the postcopy capability on */
     bool postcopy_advised = migration_incoming_postcopy_advised();
-    if (!migrate_use_compression()) {
+    if (!migrate_compress()) {
         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
     }
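Note: the counter side of this diff completes the merge of MigrationStats ram_counters and MigrationAtomicStats ram_atomic_counters into a single RAMStats whose fields are read and written only through stat64_get()/stat64_add(). That removes the plain ++/+= updates, which race once multifd sender threads and the migration thread account pages concurrently. Below is a minimal standalone sketch of the same pattern using C11 atomics; QEMU's real Stat64 lives in include/qemu/stats64.h (and also copes with hosts lacking 64-bit atomics), so every name and field here is illustrative, not QEMU's actual definition.

/* Toy stand-in for QEMU's Stat64: a relaxed atomic 64-bit counter. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    atomic_uint_least64_t value;
} Stat64;

static inline void stat64_add(Stat64 *s, uint64_t v)
{
    /* Relaxed ordering suffices: these are statistics, not synchronization. */
    atomic_fetch_add_explicit(&s->value, v, memory_order_relaxed);
}

static inline uint64_t stat64_get(const Stat64 *s)
{
    return atomic_load_explicit(&s->value, memory_order_relaxed);
}

/* Mirrors the shape (not the exact field list) of the merged RAMStats:
 * every cross-thread counter is a Stat64, so increments never race and
 * readers never see torn values, even on 32-bit hosts. */
typedef struct {
    Stat64 transferred;
    Stat64 normal_pages;
    Stat64 zero_pages;
    Stat64 dirty_sync_count;
} RAMStatsSketch;

int main(void)
{
    RAMStatsSketch stats = {0};

    stat64_add(&stats.normal_pages, 1);     /* one full page sent */
    stat64_add(&stats.transferred, 4096);   /* its payload in bytes */
    stat64_add(&stats.dirty_sync_count, 1); /* one bitmap sync pass */

    printf("pages=%" PRIu64 " bytes=%" PRIu64 "\n",
           (uint64_t)stat64_get(&stats.normal_pages),
           (uint64_t)stat64_get(&stats.transferred));
    return 0;
}

Relaxed ordering is the natural choice for such counters: consumers only need an eventually consistent snapshot of the statistics, not a synchronization point.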
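The rest of the diff is the mechanical half: capability checks move from migrate_use_xbzrle()/migrate_use_compression()/migrate_use_multifd()/migrate_use_events() to the shorter migrate_xbzrle()/migrate_compress()/migrate_multifd()/migrate_events(), and direct reads of migrate_get_current()->parameters.* are replaced by per-parameter helpers such as migrate_cpu_throttle_initial(), declared in the newly included options.h. A hedged sketch of that accessor shape, with stand-in types rather than QEMU's MigrationState:

/* Stand-ins: in QEMU the state lives in MigrationState and the helpers
 * are implemented in migration/options.c; nothing below is QEMU's code. */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    bool xbzrle;
    uint64_t cpu_throttle_initial;
} ParamsSketch;

static ParamsSketch current_params = {
    .xbzrle = false,
    .cpu_throttle_initial = 20,
};

/* One helper per setting: ram.c asks a question instead of dereferencing
 * another subsystem's struct, so the storage layout can change freely. */
static bool migrate_xbzrle(void)
{
    return current_params.xbzrle;
}

static uint64_t migrate_cpu_throttle_initial(void)
{
    return current_params.cpu_throttle_initial;
}

int main(void)
{
    return migrate_xbzrle() ? (int)migrate_cpu_throttle_initial() : 0;
}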