Diffstat (limited to 'migration')
| -rw-r--r-- | migration/migration.c | 409 |
| -rw-r--r-- | migration/migration.h | 6 |
| -rw-r--r-- | migration/page_cache.c | 8 |
| -rw-r--r-- | migration/page_cache.h | 2 |
| -rw-r--r-- | migration/qemu-file.c | 2 |
| -rw-r--r-- | migration/ram.c | 307 |
| -rw-r--r-- | migration/ram.h | 8 |
| -rw-r--r-- | migration/savevm.c | 341 |
| -rw-r--r-- | migration/savevm.h | 3 |
| -rw-r--r-- | migration/trace-events | 2 |
10 files changed, 1000 insertions, 88 deletions
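The core of this series builds on the Linux userfaultfd write-protect API (kernel v5.7+): guest RAM is write-protected while the snapshot thread walks it, and vCPU writes surface as read(2)-able fault events instead of dirty-bitmap entries. As orientation before the diff itself, here is a minimal, self-contained sketch of that kernel interface, which the uffd_create_fd()/uffd_register_memory()/uffd_change_protection() wrappers appearing below are assumed to follow; wp_protect_range() is a hypothetical name used for illustration, not code from this commit:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Write-protect [addr, addr+len) and return the userfaultfd, or -1. */
static int wp_protect_range(void *addr, size_t len)
{
    int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (uffd < 0) {
        return -1;
    }

    /* Negotiate the write-protect feature with the kernel */
    struct uffdio_api api = {
        .api = UFFD_API,
        .features = UFFD_FEATURE_PAGEFAULT_FLAG_WP,
    };
    if (ioctl(uffd, UFFDIO_API, &api) < 0) {
        goto fail;
    }

    /*
     * Register the range in write-protect mode and verify that the
     * kernel actually offers UFFDIO_WRITEPROTECT for this memory.
     */
    struct uffdio_register reg = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode = UFFDIO_REGISTER_MODE_WP,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0 ||
        !(reg.ioctls & (1ULL << _UFFDIO_WRITEPROTECT))) {
        goto fail;
    }

    /* Arm write protection; writes now fault until resolved */
    struct uffdio_writeprotect wp = {
        .range = { .start = (uintptr_t)addr, .len = len },
        .mode = UFFDIO_WRITEPROTECT_MODE_WP,
    };
    if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) < 0) {
        goto fail;
    }
    return uffd;

fail:
    close(uffd);
    return -1;
}

Un-protecting a range after its pages have been saved is the same UFFDIO_WRITEPROTECT ioctl with .mode = 0, which is what the series' uffd_change_protection(..., false, false) calls appear to map to. With that flow in mind, the diff follows.
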
diff --git a/migration/migration.c b/migration/migration.c index 1986cb8573..a5ddf43559 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -58,6 +58,7 @@ #include "qemu/queue.h" #include "multifd.h" #include "qemu/yank.h" +#include "sysemu/cpus.h" #ifdef CONFIG_VFIO #include "hw/vfio/vfio-common.h" @@ -134,6 +135,38 @@ enum mig_rp_message_type { MIG_RP_MSG_MAX }; +/* Migration capabilities set */ +struct MigrateCapsSet { + int size; /* Capability set size */ + MigrationCapability caps[]; /* Variadic array of capabilities */ +}; +typedef struct MigrateCapsSet MigrateCapsSet; + +/* Define and initialize MigrateCapsSet */ +#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...) \ + MigrateCapsSet _name = { \ + .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \ + .caps = { __VA_ARGS__ } \ + } + +/* Background-snapshot compatibility check list */ +static const +INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, + MIGRATION_CAPABILITY_POSTCOPY_RAM, + MIGRATION_CAPABILITY_DIRTY_BITMAPS, + MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME, + MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE, + MIGRATION_CAPABILITY_RETURN_PATH, + MIGRATION_CAPABILITY_MULTIFD, + MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER, + MIGRATION_CAPABILITY_AUTO_CONVERGE, + MIGRATION_CAPABILITY_RELEASE_RAM, + MIGRATION_CAPABILITY_RDMA_PIN_ALL, + MIGRATION_CAPABILITY_COMPRESS, + MIGRATION_CAPABILITY_XBZRLE, + MIGRATION_CAPABILITY_X_COLO, + MIGRATION_CAPABILITY_VALIDATE_UUID); + /* When we add fault tolerance, we could have several migrations at once. For now we don't need to add dynamic creation of migration */ @@ -141,6 +174,8 @@ enum mig_rp_message_type { static MigrationState *current_migration; static MigrationIncomingState *current_incoming; +static GSList *migration_blockers; + static bool migration_object_check(MigrationState *ms, Error **errp); static int migration_maybe_pause(MigrationState *s, int *current_active_state, @@ -1041,6 +1076,27 @@ static void fill_source_migration_info(MigrationInfo *info) { MigrationState *s = migrate_get_current(); + info->blocked = migration_is_blocked(NULL); + info->has_blocked_reasons = info->blocked; + info->blocked_reasons = NULL; + if (info->blocked) { + GSList *cur_blocker = migration_blockers; + + /* + * There are two types of reasons a migration might be blocked; + * a) devices marked in VMState as non-migratable, and + * b) Explicit migration blockers + * We need to add both of them here. + */ + qemu_savevm_non_migratable_list(&info->blocked_reasons); + + while (cur_blocker) { + QAPI_LIST_PREPEND(info->blocked_reasons, + g_strdup(error_get_pretty(cur_blocker->data))); + cur_blocker = g_slist_next(cur_blocker); + } + } + switch (s->state) { case MIGRATION_STATUS_NONE: /* no migration has happened ever */ @@ -1089,6 +1145,31 @@ static void fill_source_migration_info(MigrationInfo *info) info->status = s->state; } +typedef enum WriteTrackingSupport { + WT_SUPPORT_UNKNOWN = 0, + WT_SUPPORT_ABSENT, + WT_SUPPORT_AVAILABLE, + WT_SUPPORT_COMPATIBLE +} WriteTrackingSupport; + +static +WriteTrackingSupport migrate_query_write_tracking(void) +{ + /* Check if kernel supports required UFFD features */ + if (!ram_write_tracking_available()) { + return WT_SUPPORT_ABSENT; + } + /* + * Check if current memory configuration is + * compatible with required UFFD features. 
+ */ + if (!ram_write_tracking_compatible()) { + return WT_SUPPORT_AVAILABLE; + } + + return WT_SUPPORT_COMPATIBLE; +} + /** * @migration_caps_check - check capability validity * @@ -1150,6 +1231,39 @@ static bool migrate_caps_check(bool *cap_list, } } + if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) { + WriteTrackingSupport wt_support; + int idx; + /* + * Check if 'background-snapshot' capability is supported by + * host kernel and compatible with guest memory configuration. + */ + wt_support = migrate_query_write_tracking(); + if (wt_support < WT_SUPPORT_AVAILABLE) { + error_setg(errp, "Background-snapshot is not supported by host kernel"); + return false; + } + if (wt_support < WT_SUPPORT_COMPATIBLE) { + error_setg(errp, "Background-snapshot is not compatible " + "with guest memory configuration"); + return false; + } + + /* + * Check if there are any migration capabilities + * incompatible with 'background-snapshot'. + */ + for (idx = 0; idx < check_caps_background_snapshot.size; idx++) { + int incomp_cap = check_caps_background_snapshot.caps[idx]; + if (cap_list[incomp_cap]) { + error_setg(errp, + "Background-snapshot is not compatible with %s", + MigrationCapability_str(incomp_cap)); + return false; + } + } + } + return true; } @@ -1226,21 +1340,21 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) if (params->has_compress_level && (params->compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", - "is invalid, it should be in the range of 0 to 9"); + "a value between 0 and 9"); return false; } if (params->has_compress_threads && (params->compress_threads < 1)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_threads", - "is invalid, it should be in the range of 1 to 255"); + "a value between 1 and 255"); return false; } if (params->has_decompress_threads && (params->decompress_threads < 1)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "decompress_threads", - "is invalid, it should be in the range of 1 to 255"); + "a value between 1 and 255"); return false; } @@ -1293,21 +1407,21 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) if (params->has_multifd_channels && (params->multifd_channels < 1)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_channels", - "is invalid, it should be in the range of 1 to 255"); + "a value between 1 and 255"); return false; } if (params->has_multifd_zlib_level && (params->multifd_zlib_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level", - "is invalid, it should be in the range of 0 to 9"); + "a value between 0 and 9"); return false; } if (params->has_multifd_zstd_level && (params->multifd_zstd_level > 20)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level", - "is invalid, it should be in the range of 0 to 20"); + "a value between 0 and 20"); return false; } @@ -1316,8 +1430,7 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) !is_power_of_2(params->xbzrle_cache_size))) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "xbzrle_cache_size", - "is invalid, it should be bigger than target page size" - " and a power of 2"); + "a power of two no less than the target page size"); return false; } @@ -1334,21 +1447,21 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) params->announce_initial > 100000) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "announce_initial", - "is invalid, it must be less than 100000 ms"); + "a value between 0 and 
100000"); return false; } if (params->has_announce_max && params->announce_max > 100000) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "announce_max", - "is invalid, it must be less than 100000 ms"); + "a value between 0 and 100000"); return false; } if (params->has_announce_rounds && params->announce_rounds > 1000) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "announce_rounds", - "is invalid, it must be in the range of 0 to 1000"); + "a value between 0 and 1000"); return false; } if (params->has_announce_step && @@ -1356,7 +1469,7 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) params->announce_step > 10000)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "announce_step", - "is invalid, it must be in the range of 1 to 10000 ms"); + "a value between 0 and 10000"); return false; } @@ -1909,6 +2022,7 @@ void migrate_init(MigrationState *s) * locks. */ s->cleanup_bh = 0; + s->vm_start_bh = 0; s->to_dst_file = NULL; s->state = MIGRATION_STATUS_NONE; s->rp_state.from_dst_file = NULL; @@ -1934,8 +2048,6 @@ void migrate_init(MigrationState *s) s->threshold_size = 0; } -static GSList *migration_blockers; - int migrate_add_blocker(Error *reason, Error **errp) { if (only_migratable) { @@ -2216,7 +2328,7 @@ void qmp_migrate_set_cache_size(int64_t value, Error **errp) qmp_migrate_set_parameters(&p, errp); } -int64_t qmp_query_migrate_cache_size(Error **errp) +uint64_t qmp_query_migrate_cache_size(Error **errp) { return migrate_xbzrle_cache_size(); } @@ -2446,7 +2558,7 @@ int migrate_use_xbzrle(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; } -int64_t migrate_xbzrle_cache_size(void) +uint64_t migrate_xbzrle_cache_size(void) { MigrationState *s; @@ -2491,6 +2603,15 @@ bool migrate_use_block_incremental(void) return s->parameters.block_incremental; } +bool migrate_background_snapshot(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; +} + /* migration thread support */ /* * Something bad happened to the RP stream, mark an error @@ -3117,6 +3238,50 @@ fail: MIGRATION_STATUS_FAILED); } +/** + * bg_migration_completion: Used by bg_migration_thread when after all the + * RAM has been saved. The caller 'breaks' the loop when this returns. + * + * @s: Current migration state + */ +static void bg_migration_completion(MigrationState *s) +{ + int current_active_state = s->state; + + /* + * Stop tracking RAM writes - un-protect memory, un-register UFFD + * memory ranges, flush kernel wait queues and wake up threads + * waiting for write fault to be resolved. + */ + ram_write_tracking_stop(); + + if (s->state == MIGRATION_STATUS_ACTIVE) { + /* + * By this moment we have RAM content saved into the migration stream. + * The next step is to flush the non-RAM content (device state) + * right after the ram content. The device state has been stored into + * the temporary buffer before RAM saving started. 
+ */ + qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); + qemu_fflush(s->to_dst_file); + } else if (s->state == MIGRATION_STATUS_CANCELLING) { + goto fail; + } + + if (qemu_file_get_error(s->to_dst_file)) { + trace_migration_completion_file_err(); + goto fail; + } + + migrate_set_state(&s->state, current_active_state, + MIGRATION_STATUS_COMPLETED); + return; + +fail: + migrate_set_state(&s->state, current_active_state, + MIGRATION_STATUS_FAILED); +} + bool migrate_colo_enabled(void) { MigrationState *s = migrate_get_current(); @@ -3457,6 +3622,47 @@ static void migration_iteration_finish(MigrationState *s) qemu_mutex_unlock_iothread(); } +static void bg_migration_iteration_finish(MigrationState *s) +{ + qemu_mutex_lock_iothread(); + switch (s->state) { + case MIGRATION_STATUS_COMPLETED: + migration_calculate_complete(s); + break; + + case MIGRATION_STATUS_ACTIVE: + case MIGRATION_STATUS_FAILED: + case MIGRATION_STATUS_CANCELLED: + case MIGRATION_STATUS_CANCELLING: + break; + + default: + /* Should not reach here, but if so, forgive the VM. */ + error_report("%s: Unknown ending state %d", __func__, s->state); + break; + } + + migrate_fd_cleanup_schedule(s); + qemu_mutex_unlock_iothread(); +} + +/* + * Return true if continue to the next iteration directly, false + * otherwise. + */ +static MigIterateState bg_migration_iteration_run(MigrationState *s) +{ + int res; + + res = qemu_savevm_state_iterate(s->to_dst_file, false); + if (res > 0) { + bg_migration_completion(s); + return MIG_ITERATE_BREAK; + } + + return MIG_ITERATE_RESUME; +} + void migration_make_urgent_request(void) { qemu_sem_post(&migrate_get_current()->rate_limit_sem); @@ -3604,6 +3810,165 @@ static void *migration_thread(void *opaque) return NULL; } +static void bg_migration_vm_start_bh(void *opaque) +{ + MigrationState *s = opaque; + + qemu_bh_delete(s->vm_start_bh); + s->vm_start_bh = NULL; + + vm_start(); + s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start; +} + +/** + * Background snapshot thread, based on live migration code. + * This is an alternative implementation of live migration mechanism + * introduced specifically to support background snapshots. + * + * It takes advantage of userfault_fd write protection mechanism introduced + * in v5.7 kernel. Compared to existing dirty page logging migration much + * lesser stream traffic is produced resulting in smaller snapshot images, + * simply cause of no page duplicates can get into the stream. + * + * Another key point is that generated vmstate stream reflects machine state + * 'frozen' at the beginning of snapshot creation compared to dirty page logging + * mechanism, which effectively results in that saved snapshot is the state of VM + * at the end of the process. + */ +static void *bg_migration_thread(void *opaque) +{ + MigrationState *s = opaque; + int64_t setup_start; + MigThrError thr_error; + QEMUFile *fb; + bool early_fail = true; + + rcu_register_thread(); + object_ref(OBJECT(s)); + + qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); + + setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); + /* + * We want to save vmstate for the moment when migration has been + * initiated but also we want to save RAM content while VM is running. + * The RAM content should appear first in the vmstate. 
So, we first + * stash the non-RAM part of the vmstate to the temporary buffer, + * then write RAM part of the vmstate to the migration stream + * with vCPUs running and, finally, write stashed non-RAM part of + * the vmstate from the buffer to the migration stream. + */ + s->bioc = qio_channel_buffer_new(128 * 1024); + qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer"); + fb = qemu_fopen_channel_output(QIO_CHANNEL(s->bioc)); + object_unref(OBJECT(s->bioc)); + + update_iteration_initial_status(s); + + qemu_savevm_state_header(s->to_dst_file); + qemu_savevm_state_setup(s->to_dst_file); + + if (qemu_savevm_state_guest_unplug_pending()) { + migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_WAIT_UNPLUG); + + while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && + qemu_savevm_state_guest_unplug_pending()) { + qemu_sem_timedwait(&s->wait_unplug_sem, 250); + } + + migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, + MIGRATION_STATUS_ACTIVE); + } else { + migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_ACTIVE); + } + s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; + + trace_migration_thread_setup_complete(); + s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + qemu_mutex_lock_iothread(); + + /* + * If VM is currently in suspended state, then, to make a valid runstate + * transition in vm_stop_force_state() we need to wakeup it up. + */ + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); + s->vm_was_running = runstate_is_running(); + + if (global_state_store()) { + goto fail; + } + /* Forcibly stop VM before saving state of vCPUs and devices */ + if (vm_stop_force_state(RUN_STATE_PAUSED)) { + goto fail; + } + /* + * Put vCPUs in sync with shadow context structures, then + * save their state to channel-buffer along with devices. + */ + cpu_synchronize_all_states(); + if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) { + goto fail; + } + /* Now initialize UFFD context and start tracking RAM writes */ + if (ram_write_tracking_start()) { + goto fail; + } + early_fail = false; + + /* + * Start VM from BH handler to avoid write-fault lock here. + * UFFD-WP protection for the whole RAM is already enabled so + * calling VM state change notifiers from vm_start() would initiate + * writes to virtio VQs memory which is in write-protected region. + */ + s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s); + qemu_bh_schedule(s->vm_start_bh); + + qemu_mutex_unlock_iothread(); + + while (migration_is_active(s)) { + MigIterateState iter_state = bg_migration_iteration_run(s); + if (iter_state == MIG_ITERATE_SKIP) { + continue; + } else if (iter_state == MIG_ITERATE_BREAK) { + break; + } + + /* + * Try to detect any kind of failures, and see whether we + * should stop the migration now. 
+ */ + thr_error = migration_detect_error(s); + if (thr_error == MIG_THR_ERR_FATAL) { + /* Stop migration */ + break; + } + + migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + } + + trace_migration_thread_after_loop(); + +fail: + if (early_fail) { + migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, + MIGRATION_STATUS_FAILED); + qemu_mutex_unlock_iothread(); + } + + bg_migration_iteration_finish(s); + + qemu_fclose(fb); + object_unref(OBJECT(s)); + rcu_unregister_thread(); + + return NULL; +} + void migrate_fd_connect(MigrationState *s, Error *error_in) { Error *local_err = NULL; @@ -3667,8 +4032,14 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) migrate_fd_cleanup(s); return; } - qemu_thread_create(&s->thread, "live_migration", migration_thread, s, - QEMU_THREAD_JOINABLE); + + if (migrate_background_snapshot()) { + qemu_thread_create(&s->thread, "bg_snapshot", + bg_migration_thread, s, QEMU_THREAD_JOINABLE); + } else { + qemu_thread_create(&s->thread, "live_migration", + migration_thread, s, QEMU_THREAD_JOINABLE); + } s->migration_thread_running = true; } @@ -3784,6 +4155,8 @@ static Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), + DEFINE_PROP_MIG_CAP("x-background-snapshot", + MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), DEFINE_PROP_END_OF_LIST(), }; diff --git a/migration/migration.h b/migration/migration.h index d096b77f74..db6708326b 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -20,6 +20,7 @@ #include "qemu/thread.h" #include "qemu/coroutine_int.h" #include "io/channel.h" +#include "io/channel-buffer.h" #include "net/announce.h" #include "qom/object.h" @@ -147,8 +148,10 @@ struct MigrationState { /*< public >*/ QemuThread thread; + QEMUBH *vm_start_bh; QEMUBH *cleanup_bh; QEMUFile *to_dst_file; + QIOChannelBuffer *bioc; /* * Protects to_dst_file pointer. 
We need to make sure we won't * yield or hang during the critical section, since this lock will @@ -324,7 +327,7 @@ int migrate_multifd_zlib_level(void); int migrate_multifd_zstd_level(void); int migrate_use_xbzrle(void); -int64_t migrate_xbzrle_cache_size(void); +uint64_t migrate_xbzrle_cache_size(void); bool migrate_colo_enabled(void); bool migrate_use_block(void); @@ -341,6 +344,7 @@ int migrate_compress_wait_thread(void); int migrate_decompress_threads(void); bool migrate_use_events(void); bool migrate_postcopy_blocktime(void); +bool migrate_background_snapshot(void); /* Sending on the return path - generic and then for each message type */ void migrate_send_rp_shut(MigrationIncomingState *mis, diff --git a/migration/page_cache.c b/migration/page_cache.c index 098b436223..6d4f7a9bbc 100644 --- a/migration/page_cache.c +++ b/migration/page_cache.c @@ -38,7 +38,7 @@ struct PageCache { size_t num_items; }; -PageCache *cache_init(int64_t new_size, size_t page_size, Error **errp) +PageCache *cache_init(uint64_t new_size, size_t page_size, Error **errp) { int64_t i; size_t num_pages = new_size / page_size; @@ -60,8 +60,7 @@ PageCache *cache_init(int64_t new_size, size_t page_size, Error **errp) /* We prefer not to abort if there is no memory */ cache = g_try_malloc(sizeof(*cache)); if (!cache) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", - "Failed to allocate cache"); + error_setg(errp, "Failed to allocate cache"); return NULL; } cache->page_size = page_size; @@ -74,8 +73,7 @@ PageCache *cache_init(int64_t new_size, size_t page_size, Error **errp) cache->page_cache = g_try_malloc((cache->max_num_items) * sizeof(*cache->page_cache)); if (!cache->page_cache) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", - "Failed to allocate page cache"); + error_setg(errp, "Failed to allocate page cache"); g_free(cache); return NULL; } diff --git a/migration/page_cache.h b/migration/page_cache.h index 0cb94498a0..8733b4df6e 100644 --- a/migration/page_cache.h +++ b/migration/page_cache.h @@ -28,7 +28,7 @@ typedef struct PageCache PageCache; * @page_size: cache page size * @errp: set *errp if the check failed, with reason */ -PageCache *cache_init(int64_t cache_size, size_t page_size, Error **errp); +PageCache *cache_init(uint64_t cache_size, size_t page_size, Error **errp); /** * cache_fini: free all cache resources * @cache pointer to the PageCache struct diff --git a/migration/qemu-file.c b/migration/qemu-file.c index be21518c57..d6e03dbc0e 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -595,7 +595,7 @@ size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size) { if (size < IO_BUF_SIZE) { size_t res; - uint8_t *src; + uint8_t *src = NULL; res = qemu_peek_buffer(f, &src, size, 0); diff --git a/migration/ram.c b/migration/ram.c index 7811cde643..72143da0ac 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -56,6 +56,11 @@ #include "savevm.h" #include "qemu/iov.h" #include "multifd.h" +#include "sysemu/runstate.h" + +#if defined(__linux__) +#include "qemu/userfaultfd.h" +#endif /* defined(__linux__) */ /***********************************************************/ /* ram save/restore */ @@ -126,7 +131,7 @@ static void XBZRLE_cache_unlock(void) * @new_size: new cache size * @errp: set *errp if the check failed, with reason */ -int xbzrle_cache_resize(int64_t new_size, Error **errp) +int xbzrle_cache_resize(uint64_t new_size, Error **errp) { PageCache *new_cache; int64_t ret = 0; @@ -298,6 +303,8 @@ struct RAMSrcPageRequest { struct 
RAMState { /* QEMUFile used for this migration */ QEMUFile *f; + /* UFFD file descriptor, used in 'write-tracking' migration */ + int uffdio_fd; /* Last block that we have visited searching for dirty pages */ RAMBlock *last_seen_block; /* Last block from where we have sent data */ @@ -1434,6 +1441,269 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) return block; } +#if defined(__linux__) +/** + * poll_fault_page: try to get next UFFD write fault page and, if pending fault + * is found, return RAM block pointer and page offset + * + * Returns pointer to the RAMBlock containing faulting page, + * NULL if no write faults are pending + * + * @rs: current RAM state + * @offset: page offset from the beginning of the block + */ +static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset) +{ + struct uffd_msg uffd_msg; + void *page_address; + RAMBlock *bs; + int res; + + if (!migrate_background_snapshot()) { + return NULL; + } + + res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); + if (res <= 0) { + return NULL; + } + + page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address; + bs = qemu_ram_block_from_host(page_address, false, offset); + assert(bs && (bs->flags & RAM_UF_WRITEPROTECT) != 0); + return bs; +} + +/** + * ram_save_release_protection: release UFFD write protection after + * a range of pages has been saved + * + * @rs: current RAM state + * @pss: page-search-status structure + * @start_page: index of the first page in the range relative to pss->block + * + * Returns 0 on success, negative value in case of an error +*/ +static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, + unsigned long start_page) +{ + int res = 0; + + /* Check if page is from UFFD-managed region. */ + if (pss->block->flags & RAM_UF_WRITEPROTECT) { + void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); + uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS; + + /* Flush async buffers before un-protect. */ + qemu_fflush(rs->f); + /* Un-protect memory range. 
*/ + res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, + false, false); + } + + return res; +} + +/* ram_write_tracking_available: check if kernel supports required UFFD features + * + * Returns true if supports, false otherwise + */ +bool ram_write_tracking_available(void) +{ + uint64_t uffd_features; + int res; + + res = uffd_query_features(&uffd_features); + return (res == 0 && + (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0); +} + +/* ram_write_tracking_compatible: check if guest configuration is + * compatible with 'write-tracking' + * + * Returns true if compatible, false otherwise + */ +bool ram_write_tracking_compatible(void) +{ + const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT); + int uffd_fd; + RAMBlock *bs; + bool ret = false; + + /* Open UFFD file descriptor */ + uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false); + if (uffd_fd < 0) { + return false; + } + + RCU_READ_LOCK_GUARD(); + + RAMBLOCK_FOREACH_NOT_IGNORED(bs) { + uint64_t uffd_ioctls; + + /* Nothing to do with read-only and MMIO-writable regions */ + if (bs->mr->readonly || bs->mr->rom_device) { + continue; + } + /* Try to register block memory via UFFD-IO to track writes */ + if (uffd_register_memory(uffd_fd, bs->host, bs->max_length, + UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) { + goto out; + } + if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) { + goto out; + } + } + ret = true; + +out: + uffd_close_fd(uffd_fd); + return ret; +} + +/* + * ram_write_tracking_start: start UFFD-WP memory tracking + * + * Returns 0 for success or negative value in case of error + */ +int ram_write_tracking_start(void) +{ + int uffd_fd; + RAMState *rs = ram_state; + RAMBlock *bs; + + /* Open UFFD file descriptor */ + uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true); + if (uffd_fd < 0) { + return uffd_fd; + } + rs->uffdio_fd = uffd_fd; + + RCU_READ_LOCK_GUARD(); + + RAMBLOCK_FOREACH_NOT_IGNORED(bs) { + /* Nothing to do with read-only and MMIO-writable regions */ + if (bs->mr->readonly || bs->mr->rom_device) { + continue; + } + + /* Register block memory with UFFD to track writes */ + if (uffd_register_memory(rs->uffdio_fd, bs->host, + bs->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { + goto fail; + } + /* Apply UFFD write protection to the block memory range */ + if (uffd_change_protection(rs->uffdio_fd, bs->host, + bs->max_length, true, false)) { + goto fail; + } + bs->flags |= RAM_UF_WRITEPROTECT; + memory_region_ref(bs->mr); + + trace_ram_write_tracking_ramblock_start(bs->idstr, bs->page_size, + bs->host, bs->max_length); + } + + return 0; + +fail: + error_report("ram_write_tracking_start() failed: restoring initial memory state"); + + RAMBLOCK_FOREACH_NOT_IGNORED(bs) { + if ((bs->flags & RAM_UF_WRITEPROTECT) == 0) { + continue; + } + /* + * In case some memory block failed to be write-protected + * remove protection and unregister all succeeded RAM blocks + */ + uffd_change_protection(rs->uffdio_fd, bs->host, bs->max_length, false, false); + uffd_unregister_memory(rs->uffdio_fd, bs->host, bs->max_length); + /* Cleanup flags and remove reference */ + bs->flags &= ~RAM_UF_WRITEPROTECT; + memory_region_unref(bs->mr); + } + + uffd_close_fd(uffd_fd); + rs->uffdio_fd = -1; + return -1; +} + +/** + * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection + */ +void ram_write_tracking_stop(void) +{ + RAMState *rs = ram_state; + RAMBlock *bs; + + RCU_READ_LOCK_GUARD(); + + RAMBLOCK_FOREACH_NOT_IGNORED(bs) { + if ((bs->flags & RAM_UF_WRITEPROTECT) == 0) 
{ + continue; + } + /* Remove protection and unregister all affected RAM blocks */ + uffd_change_protection(rs->uffdio_fd, bs->host, bs->max_length, false, false); + uffd_unregister_memory(rs->uffdio_fd, bs->host, bs->max_length); + + trace_ram_write_tracking_ramblock_stop(bs->idstr, bs->page_size, + bs->host, bs->max_length); + + /* Cleanup flags and remove reference */ + bs->flags &= ~RAM_UF_WRITEPROTECT; + memory_region_unref(bs->mr); + } + + /* Finally close UFFD file descriptor */ + uffd_close_fd(rs->uffdio_fd); + rs->uffdio_fd = -1; +} + +#else +/* No target OS support, stubs just fail or ignore */ + +static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset) +{ + (void) rs; + (void) offset; + + return NULL; +} + +static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, + unsigned long start_page) +{ + (void) rs; + (void) pss; + (void) start_page; + + return 0; +} + +bool ram_write_tracking_available(void) +{ + return false; +} + +bool ram_write_tracking_compatible(void) +{ + assert(0); + return false; +} + +int ram_write_tracking_start(void) +{ + assert(0); + return -1; +} + +void ram_write_tracking_stop(void) +{ + assert(0); +} +#endif /* defined(__linux__) */ + /** * get_queued_page: unqueue a page from the postcopy requests * @@ -1473,6 +1743,14 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) } while (block && !dirty); + if (!block) { + /* + * Poll write faults too if background snapshot is enabled; that's + * when we have vcpus got blocked by the write protected pages. + */ + block = poll_fault_page(rs, &offset); + } + if (block) { /* * As soon as we start servicing pages out of order, then we have @@ -1715,6 +1993,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, int tmppages, pages = 0; size_t pagesize_bits = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; + unsigned long start_page = pss->page; + int res; if (ramblock_is_ignored(pss->block)) { error_report("block %s should not be migrated !", pss->block->idstr); @@ -1740,10 +2020,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, } while ((pss->page & (pagesize_bits - 1)) && offset_in_ramblock(pss->block, ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)); - /* The offset we leave with is the last one we looked at */ pss->page--; - return pages; + + res = ram_save_release_protection(rs, pss, start_page); + return (res < 0 ? 
res : pages); } /** @@ -1880,10 +2161,13 @@ static void ram_save_cleanup(void *opaque) RAMState **rsp = opaque; RAMBlock *block; - /* caller have hold iothread lock or is in a bh, so there is - * no writing race against the migration bitmap - */ - memory_global_dirty_log_stop(); + /* We don't use dirty log with background snapshots */ + if (!migrate_background_snapshot()) { + /* caller have hold iothread lock or is in a bh, so there is + * no writing race against the migration bitmap + */ + memory_global_dirty_log_stop(); + } RAMBLOCK_FOREACH_NOT_IGNORED(block) { g_free(block->clear_bmap); @@ -2343,8 +2627,11 @@ static void ram_init_bitmaps(RAMState *rs) WITH_RCU_READ_LOCK_GUARD() { ram_list_init_bitmaps(); - memory_global_dirty_log_start(); - migration_bitmap_sync_precopy(rs); + /* We don't use dirty log with background snapshots */ + if (!migrate_background_snapshot()) { + memory_global_dirty_log_start(); + migration_bitmap_sync_precopy(rs); + } } qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); @@ -3521,7 +3808,7 @@ static int ram_load_precopy(QEMUFile *f) } } /* For postcopy we need to check hugepage sizes match */ - if (postcopy_advised && + if (postcopy_advised && migrate_postcopy_ram() && block->page_size != qemu_host_page_size) { uint64_t remote_page_size = qemu_get_be64(f); if (remote_page_size != block->page_size) { diff --git a/migration/ram.h b/migration/ram.h index 011e85414e..6378bb3ebc 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -47,7 +47,7 @@ bool ramblock_is_ignored(RAMBlock *block); INTERNAL_RAMBLOCK_FOREACH(block) \ if (!qemu_ram_is_migratable(block)) {} else -int xbzrle_cache_resize(int64_t new_size, Error **errp); +int xbzrle_cache_resize(uint64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); @@ -79,4 +79,10 @@ void colo_flush_ram_cache(void); void colo_release_ram_cache(void); void colo_incoming_start_dirty_log(void); +/* Background snapshot */ +bool ram_write_tracking_available(void); +bool ram_write_tracking_compatible(void); +int ram_write_tracking_start(void); +void ram_write_tracking_stop(void); + #endif diff --git a/migration/savevm.c b/migration/savevm.c index 4f3b69ecfc..52e2d72e4b 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -43,6 +43,8 @@ #include "qapi/error.h" #include "qapi/qapi-commands-migration.h" #include "qapi/qmp/json-writer.h" +#include "qapi/clone-visitor.h" +#include "qapi/qapi-builtin-visit.h" #include "qapi/qmp/qerror.h" #include "qemu/error-report.h" #include "sysemu/cpus.h" @@ -315,6 +317,16 @@ static int configuration_pre_save(void *opaque) return 0; } +static int configuration_post_save(void *opaque) +{ + SaveState *state = opaque; + + g_free(state->capabilities); + state->capabilities = NULL; + state->caps_count = 0; + return 0; +} + static int configuration_pre_load(void *opaque) { SaveState *state = opaque; @@ -365,24 +377,36 @@ static int configuration_post_load(void *opaque, int version_id) { SaveState *state = opaque; const char *current_name = MACHINE_GET_CLASS(current_machine)->name; + int ret = 0; if (strncmp(state->name, current_name, state->len) != 0) { error_report("Machine type received is '%.*s' and local is '%s'", (int) state->len, state->name, current_name); - return -EINVAL; + ret = -EINVAL; + goto out; } if (state->target_page_bits != qemu_target_page_bits()) { error_report("Received TARGET_PAGE_BITS is %d but local is %d", state->target_page_bits, qemu_target_page_bits()); - return -EINVAL; + ret = -EINVAL; + goto out; } if 
(!configuration_validate_capabilities(state)) { - return -EINVAL; + ret = -EINVAL; + goto out; } - return 0; +out: + g_free((void *)state->name); + state->name = NULL; + state->len = 0; + g_free(state->capabilities); + state->capabilities = NULL; + state->caps_count = 0; + + return ret; } static int get_capability(QEMUFile *f, void *pv, size_t size, @@ -516,6 +540,7 @@ static const VMStateDescription vmstate_configuration = { .pre_load = configuration_pre_load, .post_load = configuration_post_load, .pre_save = configuration_pre_save, + .post_save = configuration_post_save, .fields = (VMStateField[]) { VMSTATE_UINT32(len, SaveState), VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), @@ -1131,6 +1156,19 @@ bool qemu_savevm_state_blocked(Error **errp) return false; } +void qemu_savevm_non_migratable_list(strList **reasons) +{ + SaveStateEntry *se; + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if (se->vmsd && se->vmsd->unmigratable) { + QAPI_LIST_PREPEND(*reasons, + g_strdup_printf("non-migratable device: %s", + se->idstr)); + } + } +} + void qemu_savevm_state_header(QEMUFile *f) { trace_savevm_state_header(); @@ -1355,7 +1393,6 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) return 0; } -static int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, bool in_postcopy, bool inactivate_disks) @@ -2729,9 +2766,10 @@ int qemu_load_device_state(QEMUFile *f) return 0; } -int save_snapshot(const char *name, Error **errp) +bool save_snapshot(const char *name, bool overwrite, const char *vmstate, + bool has_devices, strList *devices, Error **errp) { - BlockDriverState *bs, *bs1; + BlockDriverState *bs; QEMUSnapshotInfo sn1, *sn = &sn1; int ret = -1, ret2; QEMUFile *f; @@ -2742,35 +2780,43 @@ int save_snapshot(const char *name, Error **errp) AioContext *aio_context; if (migration_is_blocked(errp)) { - return ret; + return false; } if (!replay_can_snapshot()) { error_setg(errp, "Record/replay does not allow making snapshot " "right now. 
Try once more later."); - return ret; + return false; } - if (!bdrv_all_can_snapshot(&bs)) { - error_setg(errp, "Device '%s' is writable but does not support " - "snapshots", bdrv_get_device_or_node_name(bs)); - return ret; + if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { + return false; } /* Delete old snapshots of the same name */ if (name) { - ret = bdrv_all_delete_snapshot(name, &bs1, errp); - if (ret < 0) { - error_prepend(errp, "Error while deleting snapshot on device " - "'%s': ", bdrv_get_device_or_node_name(bs1)); - return ret; + if (overwrite) { + if (bdrv_all_delete_snapshot(name, has_devices, + devices, errp) < 0) { + return false; + } + } else { + ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp); + if (ret2 < 0) { + return false; + } + if (ret2 == 1) { + error_setg(errp, + "Snapshot '%s' already exists in one or more devices", + name); + return false; + } } } - bs = bdrv_all_find_vmstate_bs(); + bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); if (bs == NULL) { - error_setg(errp, "No block device can accept snapshots"); - return ret; + return false; } aio_context = bdrv_get_aio_context(bs); @@ -2779,7 +2825,7 @@ int save_snapshot(const char *name, Error **errp) ret = global_state_store(); if (ret) { error_setg(errp, "Error saving global state"); - return ret; + return false; } vm_stop(RUN_STATE_SAVE_VM); @@ -2833,11 +2879,10 @@ int save_snapshot(const char *name, Error **errp) aio_context_release(aio_context); aio_context = NULL; - ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs); + ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, + has_devices, devices, errp); if (ret < 0) { - error_setg(errp, "Error while creating snapshot on '%s'", - bdrv_get_device_or_node_name(bs)); - bdrv_all_delete_snapshot(sn->name, &bs, NULL); + bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL); goto the_end; } @@ -2853,7 +2898,7 @@ int save_snapshot(const char *name, Error **errp) if (saved_vm_running) { vm_start(); } - return ret; + return ret == 0; } void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, @@ -2938,33 +2983,32 @@ void qmp_xen_load_devices_state(const char *filename, Error **errp) migration_incoming_state_destroy(); } -int load_snapshot(const char *name, Error **errp) +bool load_snapshot(const char *name, const char *vmstate, + bool has_devices, strList *devices, Error **errp) { - BlockDriverState *bs, *bs_vm_state; + BlockDriverState *bs_vm_state; QEMUSnapshotInfo sn; QEMUFile *f; int ret; AioContext *aio_context; MigrationIncomingState *mis = migration_incoming_get_current(); - if (!bdrv_all_can_snapshot(&bs)) { - error_setg(errp, - "Device '%s' is writable but does not support snapshots", - bdrv_get_device_or_node_name(bs)); - return -ENOTSUP; + if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { + return false; } - ret = bdrv_all_find_snapshot(name, &bs); + ret = bdrv_all_has_snapshot(name, has_devices, devices, errp); if (ret < 0) { - error_setg(errp, - "Device '%s' does not have the requested snapshot '%s'", - bdrv_get_device_or_node_name(bs), name); - return ret; + return false; + } + if (ret == 0) { + error_setg(errp, "Snapshot '%s' does not exist in one or more devices", + name); + return false; } - bs_vm_state = bdrv_all_find_vmstate_bs(); + bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); if (!bs_vm_state) { - error_setg(errp, "No block device supports snapshots"); - return -ENOTSUP; + return false; } aio_context = 
bdrv_get_aio_context(bs_vm_state); @@ -2973,11 +3017,11 @@ int load_snapshot(const char *name, Error **errp) ret = bdrv_snapshot_find(bs_vm_state, &sn, name); aio_context_release(aio_context); if (ret < 0) { - return ret; + return false; } else if (sn.vm_state_size == 0) { error_setg(errp, "This is a disk-only snapshot. Revert to it " " offline using qemu-img"); - return -EINVAL; + return false; } /* @@ -2989,10 +3033,8 @@ int load_snapshot(const char *name, Error **errp) /* Flush all IO requests so they don't interfere with the new state. */ bdrv_drain_all_begin(); - ret = bdrv_all_goto_snapshot(name, &bs, errp); + ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp); if (ret < 0) { - error_prepend(errp, "Could not load snapshot '%s' on '%s': ", - name, bdrv_get_device_or_node_name(bs)); goto err_drain; } @@ -3000,7 +3042,6 @@ int load_snapshot(const char *name, Error **errp) f = qemu_fopen_bdrv(bs_vm_state, 0); if (!f) { error_setg(errp, "Could not open VM state file"); - ret = -EINVAL; goto err_drain; } @@ -3020,14 +3061,28 @@ int load_snapshot(const char *name, Error **errp) if (ret < 0) { error_setg(errp, "Error %d while loading VM state", ret); - return ret; + return false; } - return 0; + return true; err_drain: bdrv_drain_all_end(); - return ret; + return false; +} + +bool delete_snapshot(const char *name, bool has_devices, + strList *devices, Error **errp) +{ + if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { + return false; + } + + if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) { + return false; + } + + return true; } void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev) @@ -3057,3 +3112,187 @@ bool vmstate_check_only_migratable(const VMStateDescription *vmsd) return !(vmsd && vmsd->unmigratable); } + +typedef struct SnapshotJob { + Job common; + char *tag; + char *vmstate; + strList *devices; + Coroutine *co; + Error **errp; + bool ret; +} SnapshotJob; + +static void qmp_snapshot_job_free(SnapshotJob *s) +{ + g_free(s->tag); + g_free(s->vmstate); + qapi_free_strList(s->devices); +} + + +static void snapshot_load_job_bh(void *opaque) +{ + Job *job = opaque; + SnapshotJob *s = container_of(job, SnapshotJob, common); + int orig_vm_running; + + job_progress_set_remaining(&s->common, 1); + + orig_vm_running = runstate_is_running(); + vm_stop(RUN_STATE_RESTORE_VM); + + s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp); + if (s->ret && orig_vm_running) { + vm_start(); + } + + job_progress_update(&s->common, 1); + + qmp_snapshot_job_free(s); + aio_co_wake(s->co); +} + +static void snapshot_save_job_bh(void *opaque) +{ + Job *job = opaque; + SnapshotJob *s = container_of(job, SnapshotJob, common); + + job_progress_set_remaining(&s->common, 1); + s->ret = save_snapshot(s->tag, false, s->vmstate, + true, s->devices, s->errp); + job_progress_update(&s->common, 1); + + qmp_snapshot_job_free(s); + aio_co_wake(s->co); +} + +static void snapshot_delete_job_bh(void *opaque) +{ + Job *job = opaque; + SnapshotJob *s = container_of(job, SnapshotJob, common); + + job_progress_set_remaining(&s->common, 1); + s->ret = delete_snapshot(s->tag, true, s->devices, s->errp); + job_progress_update(&s->common, 1); + + qmp_snapshot_job_free(s); + aio_co_wake(s->co); +} + +static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp) +{ + SnapshotJob *s = container_of(job, SnapshotJob, common); + s->errp = errp; + s->co = qemu_coroutine_self(); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + snapshot_save_job_bh, job); + 
qemu_coroutine_yield(); + return s->ret ? 0 : -1; +} + +static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp) +{ + SnapshotJob *s = container_of(job, SnapshotJob, common); + s->errp = errp; + s->co = qemu_coroutine_self(); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + snapshot_load_job_bh, job); + qemu_coroutine_yield(); + return s->ret ? 0 : -1; +} + +static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp) +{ + SnapshotJob *s = container_of(job, SnapshotJob, common); + s->errp = errp; + s->co = qemu_coroutine_self(); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + snapshot_delete_job_bh, job); + qemu_coroutine_yield(); + return s->ret ? 0 : -1; +} + + +static const JobDriver snapshot_load_job_driver = { + .instance_size = sizeof(SnapshotJob), + .job_type = JOB_TYPE_SNAPSHOT_LOAD, + .run = snapshot_load_job_run, +}; + +static const JobDriver snapshot_save_job_driver = { + .instance_size = sizeof(SnapshotJob), + .job_type = JOB_TYPE_SNAPSHOT_SAVE, + .run = snapshot_save_job_run, +}; + +static const JobDriver snapshot_delete_job_driver = { + .instance_size = sizeof(SnapshotJob), + .job_type = JOB_TYPE_SNAPSHOT_DELETE, + .run = snapshot_delete_job_run, +}; + + +void qmp_snapshot_save(const char *job_id, + const char *tag, + const char *vmstate, + strList *devices, + Error **errp) +{ + SnapshotJob *s; + + s = job_create(job_id, &snapshot_save_job_driver, NULL, + qemu_get_aio_context(), JOB_MANUAL_DISMISS, + NULL, NULL, errp); + if (!s) { + return; + } + + s->tag = g_strdup(tag); + s->vmstate = g_strdup(vmstate); + s->devices = QAPI_CLONE(strList, devices); + + job_start(&s->common); +} + +void qmp_snapshot_load(const char *job_id, + const char *tag, + const char *vmstate, + strList *devices, + Error **errp) +{ + SnapshotJob *s; + + s = job_create(job_id, &snapshot_load_job_driver, NULL, + qemu_get_aio_context(), JOB_MANUAL_DISMISS, + NULL, NULL, errp); + if (!s) { + return; + } + + s->tag = g_strdup(tag); + s->vmstate = g_strdup(vmstate); + s->devices = QAPI_CLONE(strList, devices); + + job_start(&s->common); +} + +void qmp_snapshot_delete(const char *job_id, + const char *tag, + strList *devices, + Error **errp) +{ + SnapshotJob *s; + + s = job_create(job_id, &snapshot_delete_job_driver, NULL, + qemu_get_aio_context(), JOB_MANUAL_DISMISS, + NULL, NULL, errp); + if (!s) { + return; + } + + s->tag = g_strdup(tag); + s->devices = QAPI_CLONE(strList, devices); + + job_start(&s->common); +} diff --git a/migration/savevm.h b/migration/savevm.h index ba64a7e271..6461342cb4 100644 --- a/migration/savevm.h +++ b/migration/savevm.h @@ -30,6 +30,7 @@ #define QEMU_VM_SECTION_FOOTER 0x7e bool qemu_savevm_state_blocked(Error **errp); +void qemu_savevm_non_migratable_list(strList **reasons); void qemu_savevm_state_setup(QEMUFile *f); bool qemu_savevm_state_guest_unplug_pending(void); int qemu_savevm_state_resume_prepare(MigrationState *s); @@ -64,5 +65,7 @@ int qemu_loadvm_state(QEMUFile *f); void qemu_loadvm_state_cleanup(void); int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis); int qemu_load_device_state(QEMUFile *f); +int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, + bool in_postcopy, bool inactivate_disks); #endif diff --git a/migration/trace-events b/migration/trace-events index 75de5004ac..668c562fed 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -111,6 +111,8 @@ save_xbzrle_page_skipping(void) "" save_xbzrle_page_overflow(void) "" ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: 
%" PRIu64 " milliseconds, %d iterations" ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64 +ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" +ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" # multifd.c multifd_new_send_channel_async(uint8_t id) "channel %d" |