Diffstat (limited to 'migration')
| -rw-r--r-- | migration/exec.c | 74 |
| -rw-r--r-- | migration/exec.h | 8 |
| -rw-r--r-- | migration/file.c | 24 |
| -rw-r--r-- | migration/file.h | 10 |
| -rw-r--r-- | migration/migration-hmp-cmds.c | 34 |
| -rw-r--r-- | migration/migration.c | 550 |
| -rw-r--r-- | migration/migration.h | 14 |
| -rw-r--r-- | migration/options.c | 21 |
| -rw-r--r-- | migration/options.h | 1 |
| -rw-r--r-- | migration/qemu-file.c | 2 |
| -rw-r--r-- | migration/qemu-file.h | 1 |
| -rw-r--r-- | migration/ram.c | 61 |
| -rw-r--r-- | migration/ram.h | 5 |
| -rw-r--r-- | migration/rdma.c | 33 |
| -rw-r--r-- | migration/rdma.h | 6 |
| -rw-r--r-- | migration/savevm.c | 95 |
| -rw-r--r-- | migration/socket.c | 39 |
| -rw-r--r-- | migration/socket.h | 7 |
| -rw-r--r-- | migration/trace-events | 8 |
19 files changed, 698 insertions, 295 deletions
diff --git a/migration/exec.c b/migration/exec.c index 2bf882bbe1..47d2f3b8fb 100644 --- a/migration/exec.c +++ b/migration/exec.c @@ -27,7 +27,6 @@ #include "qemu/cutils.h" #ifdef WIN32 -const char *exec_get_cmd_path(void); const char *exec_get_cmd_path(void) { g_autofree char *detected_path = g_new(char, MAX_PATH); @@ -40,20 +39,51 @@ const char *exec_get_cmd_path(void) } #endif -void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp) +/* provides the length of strList */ +static int +str_list_length(strList *list) +{ + int len = 0; + strList *elem; + + for (elem = list; elem != NULL; elem = elem->next) { + len++; + } + + return len; +} + +static void +init_exec_array(strList *command, char **argv, Error **errp) +{ + int i = 0; + strList *lst; + + for (lst = command; lst; lst = lst->next) { + argv[i++] = lst->value; + } + + argv[i] = NULL; + return; +} + +void exec_start_outgoing_migration(MigrationState *s, strList *command, + Error **errp) { QIOChannel *ioc; -#ifdef WIN32 - const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; -#else - const char *argv[] = { "/bin/sh", "-c", command, NULL }; -#endif + int length = str_list_length(command); + g_auto(GStrv) argv = (char **) g_new0(const char *, length + 1); - trace_migration_exec_outgoing(command); - ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, - O_RDWR, - errp)); + init_exec_array(command, argv, errp); + g_autofree char *new_command = g_strjoinv(" ", (char **)argv); + + trace_migration_exec_outgoing(new_command); + ioc = QIO_CHANNEL( + qio_channel_command_new_spawn( + (const char * const *) g_steal_pointer(&argv), + O_RDWR, + errp)); if (!ioc) { return; } @@ -72,20 +102,22 @@ static gboolean exec_accept_incoming_migration(QIOChannel *ioc, return G_SOURCE_REMOVE; } -void exec_start_incoming_migration(const char *command, Error **errp) +void exec_start_incoming_migration(strList *command, Error **errp) { QIOChannel *ioc; -#ifdef WIN32 - const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; -#else - const char *argv[] = { "/bin/sh", "-c", command, NULL }; -#endif + int length = str_list_length(command); + g_auto(GStrv) argv = (char **) g_new0(const char *, length + 1); + + init_exec_array(command, argv, errp); + g_autofree char *new_command = g_strjoinv(" ", (char **)argv); - trace_migration_exec_incoming(command); - ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, - O_RDWR, - errp)); + trace_migration_exec_incoming(new_command); + ioc = QIO_CHANNEL( + qio_channel_command_new_spawn( + (const char * const *) g_steal_pointer(&argv), + O_RDWR, + errp)); if (!ioc) { return; } diff --git a/migration/exec.h b/migration/exec.h index b210ffde7a..3107f205e3 100644 --- a/migration/exec.h +++ b/migration/exec.h @@ -19,8 +19,12 @@ #ifndef QEMU_MIGRATION_EXEC_H #define QEMU_MIGRATION_EXEC_H -void exec_start_incoming_migration(const char *host_port, Error **errp); -void exec_start_outgoing_migration(MigrationState *s, const char *host_port, +#ifdef WIN32 +const char *exec_get_cmd_path(void); +#endif +void exec_start_incoming_migration(strList *host_port, Error **errp); + +void exec_start_outgoing_migration(MigrationState *s, strList *host_port, Error **errp); #endif diff --git a/migration/file.c b/migration/file.c index cf5b1bf365..5d4975f43e 100644 --- a/migration/file.c +++ b/migration/file.c @@ -19,7 +19,7 @@ /* Remove the offset option from @filespec and return it in @offsetp. 
*/ -static int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp) +int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp) { char *option = strstr(filespec, OFFSET_OPTION); int ret; @@ -36,20 +36,16 @@ static int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp) return 0; } -void file_start_outgoing_migration(MigrationState *s, const char *filespec, - Error **errp) +void file_start_outgoing_migration(MigrationState *s, + FileMigrationArgs *file_args, Error **errp) { - g_autofree char *filename = g_strdup(filespec); g_autoptr(QIOChannelFile) fioc = NULL; - uint64_t offset = 0; + g_autofree char *filename = g_strdup(file_args->filename); + uint64_t offset = file_args->offset; QIOChannel *ioc; trace_migration_file_outgoing(filename); - if (file_parse_offset(filename, &offset, errp)) { - return; - } - fioc = qio_channel_file_new_path(filename, O_CREAT | O_WRONLY | O_TRUNC, 0600, errp); if (!fioc) { @@ -73,19 +69,15 @@ static gboolean file_accept_incoming_migration(QIOChannel *ioc, return G_SOURCE_REMOVE; } -void file_start_incoming_migration(const char *filespec, Error **errp) +void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp) { - g_autofree char *filename = g_strdup(filespec); + g_autofree char *filename = g_strdup(file_args->filename); QIOChannelFile *fioc = NULL; - uint64_t offset = 0; + uint64_t offset = file_args->offset; QIOChannel *ioc; trace_migration_file_incoming(filename); - if (file_parse_offset(filename, &offset, errp)) { - return; - } - fioc = qio_channel_file_new_path(filename, O_RDONLY, 0, errp); if (!fioc) { return; diff --git a/migration/file.h b/migration/file.h index 90fa4849e0..37d6a08bfc 100644 --- a/migration/file.h +++ b/migration/file.h @@ -7,8 +7,12 @@ #ifndef QEMU_MIGRATION_FILE_H #define QEMU_MIGRATION_FILE_H -void file_start_incoming_migration(const char *filename, Error **errp); -void file_start_outgoing_migration(MigrationState *s, const char *filename, - Error **errp); +#include "qapi/qapi-types-migration.h" + +void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp); + +void file_start_outgoing_migration(MigrationState *s, + FileMigrationArgs *file_args, Error **errp); +int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp); #endif diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index dfe98da355..86ae832176 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -387,6 +387,11 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: %" PRIu64 " MB/s\n", MigrationParameter_str(MIGRATION_PARAMETER_VCPU_DIRTY_LIMIT), params->vcpu_dirty_limit); + + assert(params->has_mode); + monitor_printf(mon, "%s: %s\n", + MigrationParameter_str(MIGRATION_PARAMETER_MODE), + qapi_enum_lookup(&MigMode_lookup, params->mode)); } qapi_free_MigrationParameters(params); @@ -446,9 +451,18 @@ void hmp_migrate_incoming(Monitor *mon, const QDict *qdict) { Error *err = NULL; const char *uri = qdict_get_str(qdict, "uri"); + MigrationChannelList *caps = NULL; + g_autoptr(MigrationChannel) channel = NULL; + + if (!migrate_uri_parse(uri, &channel, &err)) { + goto end; + } + QAPI_LIST_PREPEND(caps, g_steal_pointer(&channel)); - qmp_migrate_incoming(uri, &err); + qmp_migrate_incoming(NULL, true, caps, &err); + qapi_free_MigrationChannelList(caps); +end: hmp_handle_error(mon, err); } @@ -661,6 +675,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_vcpu_dirty_limit 
= true; visit_type_size(v, param, &p->vcpu_dirty_limit, &err); break; + case MIGRATION_PARAMETER_MODE: + p->has_mode = true; + visit_type_MigMode(v, param, &p->mode, &err); + break; default: assert(0); } @@ -744,6 +762,8 @@ void hmp_migrate(Monitor *mon, const QDict *qdict) bool resume = qdict_get_try_bool(qdict, "resume", false); const char *uri = qdict_get_str(qdict, "uri"); Error *err = NULL; + MigrationChannelList *caps = NULL; + g_autoptr(MigrationChannel) channel = NULL; if (inc) { warn_report("option '-i' is deprecated;" @@ -755,12 +775,20 @@ void hmp_migrate(Monitor *mon, const QDict *qdict) " use blockdev-mirror with NBD instead"); } - qmp_migrate(uri, !!blk, blk, !!inc, inc, - false, false, true, resume, &err); + if (!migrate_uri_parse(uri, &channel, &err)) { + hmp_handle_error(mon, err); + return; + } + QAPI_LIST_PREPEND(caps, g_steal_pointer(&channel)); + + qmp_migrate(NULL, true, caps, !!blk, blk, !!inc, inc, + false, false, true, resume, &err); if (hmp_handle_error(mon, err)) { return; } + qapi_free_MigrationChannelList(caps); + if (!detach) { HMPMigrationStatus *status; diff --git a/migration/migration.c b/migration/migration.c index 6abcbefd9c..28a34c9068 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -66,6 +66,7 @@ #include "sysemu/qtest.h" #include "options.h" #include "sysemu/dirtylimit.h" +#include "qemu/sockets.h" static NotifierList migration_state_notifiers = NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); @@ -92,31 +93,55 @@ enum mig_rp_message_type { static MigrationState *current_migration; static MigrationIncomingState *current_incoming; -static GSList *migration_blockers; +static GSList *migration_blockers[MIG_MODE__MAX]; static bool migration_object_check(MigrationState *ms, Error **errp); static int migration_maybe_pause(MigrationState *s, int *current_active_state, int new_state); static void migrate_fd_cancel(MigrationState *s); -static int close_return_path_on_source(MigrationState *s); +static bool close_return_path_on_source(MigrationState *s); + +static void migration_downtime_start(MigrationState *s) +{ + trace_vmstate_downtime_checkpoint("src-downtime-start"); + s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); +} + +static void migration_downtime_end(MigrationState *s) +{ + int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + /* + * If downtime already set, should mean that postcopy already set it, + * then that should be the real downtime already. + */ + if (!s->downtime) { + s->downtime = now - s->downtime_start; + } + + trace_vmstate_downtime_checkpoint("src-downtime-end"); +} static bool migration_needs_multiple_sockets(void) { return migrate_multifd() || migrate_postcopy_preempt(); } -static bool uri_supports_multi_channels(const char *uri) +static bool transport_supports_multi_channels(SocketAddress *saddr) { - return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) || - strstart(uri, "vsock:", NULL); + return saddr->type == SOCKET_ADDRESS_TYPE_INET || + saddr->type == SOCKET_ADDRESS_TYPE_UNIX || + saddr->type == SOCKET_ADDRESS_TYPE_VSOCK; } static bool -migration_channels_and_uri_compatible(const char *uri, Error **errp) +migration_channels_and_transport_compatible(MigrationAddress *addr, + Error **errp) { if (migration_needs_multiple_sockets() && - !uri_supports_multi_channels(uri)) { + (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) && + !transport_supports_multi_channels(&addr->u.socket)) { error_setg(errp, "Migration requires multi-channel URIs (e.g. 
tcp)"); return false; } @@ -131,6 +156,15 @@ static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) return (a > b) - (a < b); } +int migration_stop_vm(RunState state) +{ + int ret = vm_stop_force_state(state); + + trace_vmstate_downtime_checkpoint("src-vm-stopped"); + + return ret; +} + void migration_object_init(void) { /* This can only be called once. */ @@ -423,25 +457,114 @@ void migrate_add_address(SocketAddress *address) QAPI_CLONE(SocketAddress, address)); } -static void qemu_start_incoming_migration(const char *uri, Error **errp) +bool migrate_uri_parse(const char *uri, MigrationChannel **channel, + Error **errp) { - const char *p = NULL; + g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1); + g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1); + SocketAddress *saddr = NULL; + InetSocketAddress *isock = &addr->u.rdma; + strList **tail = &addr->u.exec.args; + + if (strstart(uri, "exec:", NULL)) { + addr->transport = MIGRATION_ADDRESS_TYPE_EXEC; +#ifdef WIN32 + QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path())); + QAPI_LIST_APPEND(tail, g_strdup("/c")); +#else + QAPI_LIST_APPEND(tail, g_strdup("/bin/sh")); + QAPI_LIST_APPEND(tail, g_strdup("-c")); +#endif + QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:"))); + } else if (strstart(uri, "rdma:", NULL)) { + if (inet_parse(isock, uri + strlen("rdma:"), errp)) { + qapi_free_InetSocketAddress(isock); + return false; + } + addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; + } else if (strstart(uri, "tcp:", NULL) || + strstart(uri, "unix:", NULL) || + strstart(uri, "vsock:", NULL) || + strstart(uri, "fd:", NULL)) { + addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET; + saddr = socket_parse(uri, errp); + if (!saddr) { + return false; + } + addr->u.socket.type = saddr->type; + addr->u.socket.u = saddr->u; + } else if (strstart(uri, "file:", NULL)) { + addr->transport = MIGRATION_ADDRESS_TYPE_FILE; + addr->u.file.filename = g_strdup(uri + strlen("file:")); + if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset, + errp)) { + return false; + } + } else { + error_setg(errp, "unknown migration protocol: %s", uri); + return false; + } + + val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN; + val->addr = g_steal_pointer(&addr); + *channel = g_steal_pointer(&val); + return true; +} + +static void qemu_start_incoming_migration(const char *uri, bool has_channels, + MigrationChannelList *channels, + Error **errp) +{ + MigrationChannel *channel = NULL; + MigrationAddress *addr = NULL; MigrationIncomingState *mis = migration_incoming_get_current(); - /* URI is not suitable for migration? */ - if (!migration_channels_and_uri_compatible(uri, errp)) { + /* + * Having preliminary checks for uri and channel + */ + if (uri && has_channels) { + error_setg(errp, "'uri' and 'channels' arguments are mutually " + "exclusive; exactly one of the two should be present in " + "'migrate-incoming' qmp command "); + return; + } else if (channels) { + /* To verify that Migrate channel list has only item */ + if (channels->next) { + error_setg(errp, "Channel list has more than one entries"); + return; + } + channel = channels->value; + } else if (uri) { + /* caller uses the old URI syntax */ + if (!migrate_uri_parse(uri, &channel, errp)) { + return; + } + } else { + error_setg(errp, "neither 'uri' or 'channels' argument are " + "specified in 'migrate-incoming' qmp command "); + return; + } + addr = channel->addr; + + /* transport mechanism not suitable for migration? 
*/ + if (!migration_channels_and_transport_compatible(addr, errp)) { return; } migrate_set_state(&mis->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); - if (strstart(uri, "tcp:", &p) || - strstart(uri, "unix:", NULL) || - strstart(uri, "vsock:", NULL)) { - socket_start_incoming_migration(p ? p : uri, errp); + if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { + SocketAddress *saddr = &addr->u.socket; + if (saddr->type == SOCKET_ADDRESS_TYPE_INET || + saddr->type == SOCKET_ADDRESS_TYPE_UNIX || + saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { + socket_start_incoming_migration(saddr, errp); + } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { + fd_start_incoming_migration(saddr->u.fd.str, errp); + } #ifdef CONFIG_RDMA - } else if (strstart(uri, "rdma:", &p)) { + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { if (migrate_compress()) { error_setg(errp, "RDMA and compression can't be used together"); return; @@ -454,14 +577,12 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp) error_setg(errp, "RDMA and multifd can't be used together"); return; } - rdma_start_incoming_migration(p, errp); + rdma_start_incoming_migration(&addr->u.rdma, errp); #endif - } else if (strstart(uri, "exec:", &p)) { - exec_start_incoming_migration(p, errp); - } else if (strstart(uri, "fd:", &p)) { - fd_start_incoming_migration(p, errp); - } else if (strstart(uri, "file:", &p)) { - file_start_incoming_migration(p, errp); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { + exec_start_incoming_migration(addr->u.exec.args, errp); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { + file_start_incoming_migration(&addr->u.file, errp); } else { error_setg(errp, "unknown migration protocol: %s", uri); } @@ -472,6 +593,8 @@ static void process_incoming_migration_bh(void *opaque) Error *local_err = NULL; MigrationIncomingState *mis = opaque; + trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter"); + /* If capability late_block_activate is set: * Only fire up the block code now if we're going to restart the * VM, else 'cont' will do it. 
@@ -497,6 +620,8 @@ static void process_incoming_migration_bh(void *opaque) */ qemu_announce_self(&mis->announce_timer, migrate_announce_params()); + trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced"); + multifd_load_shutdown(); dirty_bitmap_mig_before_vm_start(); @@ -514,6 +639,7 @@ static void process_incoming_migration_bh(void *opaque) } else { runstate_set(global_state_get_runstate()); } + trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started"); /* * This must happen after any state changes since as soon as an external * observer sees this event they might start to prod at the VM assuming @@ -548,6 +674,8 @@ process_incoming_migration_co(void *opaque) ret = qemu_loadvm_state(mis->from_src_file); mis->loadvm_co = NULL; + trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed"); + ps = postcopy_state_get(); trace_process_incoming_migration_co_end(ret, ps); if (ps != POSTCOPY_INCOMING_NONE) { @@ -1006,7 +1134,7 @@ static void fill_source_migration_info(MigrationInfo *info) { MigrationState *s = migrate_get_current(); int state = qatomic_read(&s->state); - GSList *cur_blocker = migration_blockers; + GSList *cur_blocker = migration_blockers[migrate_mode()]; info->blocked_reasons = NULL; @@ -1356,6 +1484,17 @@ bool migration_in_postcopy(void) } } +bool migration_postcopy_is_alive(int state) +{ + switch (state) { + case MIGRATION_STATUS_POSTCOPY_ACTIVE: + case MIGRATION_STATUS_POSTCOPY_RECOVER: + return true; + default: + return false; + } +} + bool migration_in_postcopy_after_devices(MigrationState *s) { return migration_in_postcopy() && s->postcopy_after_devices; @@ -1438,7 +1577,6 @@ int migrate_init(MigrationState *s, Error **errp) s->to_dst_file = NULL; s->state = MIGRATION_STATUS_NONE; s->rp_state.from_dst_file = NULL; - s->rp_state.error = false; s->mbps = 0.0; s->pages_per_second = 0.0; s->downtime = 0; @@ -1470,44 +1608,112 @@ int migrate_init(MigrationState *s, Error **errp) return 0; } -int migrate_add_blocker_internal(Error **reasonp, Error **errp) +static bool is_busy(Error **reasonp, Error **errp) { + ERRP_GUARD(); + /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. 
*/ if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { error_propagate_prepend(errp, *reasonp, "disallowing migration blocker " "(migration/snapshot in progress) for: "); *reasonp = NULL; - return -EBUSY; + return true; } - - migration_blockers = g_slist_prepend(migration_blockers, *reasonp); - return 0; + return false; } -int migrate_add_blocker(Error **reasonp, Error **errp) +static bool is_only_migratable(Error **reasonp, Error **errp, int modes) { - if (only_migratable) { + ERRP_GUARD(); + + if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) { error_propagate_prepend(errp, *reasonp, "disallowing migration blocker " "(--only-migratable) for: "); *reasonp = NULL; + return true; + } + return false; +} + +static int get_modes(MigMode mode, va_list ap) +{ + int modes = 0; + + while (mode != -1 && mode != MIG_MODE_ALL) { + assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX); + modes |= BIT(mode); + mode = va_arg(ap, MigMode); + } + if (mode == MIG_MODE_ALL) { + modes = BIT(MIG_MODE__MAX) - 1; + } + return modes; +} + +static int add_blockers(Error **reasonp, Error **errp, int modes) +{ + for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { + if (modes & BIT(mode)) { + migration_blockers[mode] = g_slist_prepend(migration_blockers[mode], + *reasonp); + } + } + return 0; +} + +int migrate_add_blocker(Error **reasonp, Error **errp) +{ + return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL); +} + +int migrate_add_blocker_normal(Error **reasonp, Error **errp) +{ + return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1); +} + +int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...) +{ + int modes; + va_list ap; + + va_start(ap, mode); + modes = get_modes(mode, ap); + va_end(ap); + + if (is_only_migratable(reasonp, errp, modes)) { return -EACCES; + } else if (is_busy(reasonp, errp)) { + return -EBUSY; } + return add_blockers(reasonp, errp, modes); +} - return migrate_add_blocker_internal(reasonp, errp); +int migrate_add_blocker_internal(Error **reasonp, Error **errp) +{ + int modes = BIT(MIG_MODE__MAX) - 1; + + if (is_busy(reasonp, errp)) { + return -EBUSY; + } + return add_blockers(reasonp, errp, modes); } void migrate_del_blocker(Error **reasonp) { if (*reasonp) { - migration_blockers = g_slist_remove(migration_blockers, *reasonp); + for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { + migration_blockers[mode] = g_slist_remove(migration_blockers[mode], + *reasonp); + } error_free(*reasonp); *reasonp = NULL; } } -void qmp_migrate_incoming(const char *uri, Error **errp) +void qmp_migrate_incoming(const char *uri, bool has_channels, + MigrationChannelList *channels, Error **errp) { Error *local_err = NULL; static bool once = true; @@ -1525,7 +1731,7 @@ void qmp_migrate_incoming(const char *uri, Error **errp) return; } - qemu_start_incoming_migration(uri, &local_err); + qemu_start_incoming_migration(uri, has_channels, channels, &local_err); if (local_err) { yank_unregister_instance(MIGRATION_YANK_INSTANCE); @@ -1561,7 +1767,7 @@ void qmp_migrate_recover(const char *uri, Error **errp) * only re-setup the migration stream and poke existing migration * to continue using that newly established channel. 
*/ - qemu_start_incoming_migration(uri, errp); + qemu_start_incoming_migration(uri, false, NULL, errp); } void qmp_migrate_pause(Error **errp) @@ -1570,8 +1776,15 @@ void qmp_migrate_pause(Error **errp) MigrationIncomingState *mis = migration_incoming_get_current(); int ret = 0; - if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { + if (migration_postcopy_is_alive(ms->state)) { /* Source side, during postcopy */ + Error *error = NULL; + + /* Tell the core migration that we're pausing */ + error_setg(&error, "Postcopy migration is paused by the user"); + migrate_set_error(ms, error); + error_free(error); + qemu_mutex_lock(&ms->qemu_file_lock); if (ms->to_dst_file) { ret = qemu_file_shutdown(ms->to_dst_file); @@ -1580,10 +1793,17 @@ void qmp_migrate_pause(Error **errp) if (ret) { error_setg(errp, "Failed to pause source migration"); } + + /* + * Kick the migration thread out of any waiting windows (on behalf + * of the rp thread). + */ + migration_rp_kick(ms); + return; } - if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { + if (migration_postcopy_is_alive(mis->state)) { ret = qemu_file_shutdown(mis->from_src_file); if (ret) { error_setg(errp, "Failed to pause destination migration"); @@ -1592,17 +1812,19 @@ void qmp_migrate_pause(Error **errp) } error_setg(errp, "migrate-pause is currently only supported " - "during postcopy-active state"); + "during postcopy-active or postcopy-recover state"); } bool migration_is_blocked(Error **errp) { + GSList *blockers = migration_blockers[migrate_mode()]; + if (qemu_savevm_state_blocked(errp)) { return true; } - if (migration_blockers) { - error_propagate(errp, error_copy(migration_blockers->data)); + if (blockers) { + error_propagate(errp, error_copy(blockers->data)); return true; } @@ -1702,17 +1924,46 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, return true; } -void qmp_migrate(const char *uri, bool has_blk, bool blk, +void qmp_migrate(const char *uri, bool has_channels, + MigrationChannelList *channels, bool has_blk, bool blk, bool has_inc, bool inc, bool has_detach, bool detach, bool has_resume, bool resume, Error **errp) { bool resume_requested; Error *local_err = NULL; MigrationState *s = migrate_get_current(); - const char *p = NULL; + MigrationChannel *channel = NULL; + MigrationAddress *addr = NULL; - /* URI is not suitable for migration? */ - if (!migration_channels_and_uri_compatible(uri, errp)) { + /* + * Having preliminary checks for uri and channel + */ + if (uri && has_channels) { + error_setg(errp, "'uri' and 'channels' arguments are mutually " + "exclusive; exactly one of the two should be present in " + "'migrate' qmp command "); + return; + } else if (channels) { + /* To verify that Migrate channel list has only item */ + if (channels->next) { + error_setg(errp, "Channel list has more than one entries"); + return; + } + channel = channels->value; + } else if (uri) { + /* caller uses the old URI syntax */ + if (!migrate_uri_parse(uri, &channel, errp)) { + return; + } + } else { + error_setg(errp, "neither 'uri' or 'channels' argument are " + "specified in 'migrate' qmp command "); + return; + } + addr = channel->addr; + + /* transport mechanism not suitable for migration? */ + if (!migration_channels_and_transport_compatible(addr, errp)) { return; } @@ -1729,20 +1980,23 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, } } - if (strstart(uri, "tcp:", &p) || - strstart(uri, "unix:", NULL) || - strstart(uri, "vsock:", NULL)) { - socket_start_outgoing_migration(s, p ? 
p : uri, &local_err); + if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { + SocketAddress *saddr = &addr->u.socket; + if (saddr->type == SOCKET_ADDRESS_TYPE_INET || + saddr->type == SOCKET_ADDRESS_TYPE_UNIX || + saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) { + socket_start_outgoing_migration(s, saddr, &local_err); + } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) { + fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err); + } #ifdef CONFIG_RDMA - } else if (strstart(uri, "rdma:", &p)) { - rdma_start_outgoing_migration(s, p, &local_err); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { + rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err); #endif - } else if (strstart(uri, "exec:", &p)) { - exec_start_outgoing_migration(s, p, &local_err); - } else if (strstart(uri, "fd:", &p)) { - fd_start_outgoing_migration(s, p, &local_err); - } else if (strstart(uri, "file:", &p)) { - file_start_outgoing_migration(s, p, &local_err); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { + exec_start_outgoing_migration(s, addr->u.exec.args, &local_err); + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { + file_start_outgoing_migration(s, &addr->u.file, &local_err); } else { error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol"); @@ -1777,19 +2031,21 @@ void qmp_migrate_continue(MigrationStatus state, Error **errp) qemu_sem_post(&s->pause_sem); } -/* migration thread support */ -/* - * Something bad happened to the RP stream, mark an error - * The caller shall print or trace something to indicate why - */ -static void mark_source_rp_bad(MigrationState *s) +int migration_rp_wait(MigrationState *s) { - s->rp_state.error = true; -} + /* If migration has failure already, ignore the wait */ + if (migrate_has_error(s)) { + return -1; + } -void migration_rp_wait(MigrationState *s) -{ qemu_sem_wait(&s->rp_state.rp_sem); + + /* After wait, double check that there's no failure */ + if (migrate_has_error(s)) { + return -1; + } + + return 0; } void migration_rp_kick(MigrationState *s) @@ -1817,8 +2073,9 @@ static struct rp_cmd_args { * We're allowed to send more than requested (e.g. to round to our page size) * and we don't need to send pages that have already been sent. 
*/ -static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, - ram_addr_t start, size_t len) +static void +migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, + ram_addr_t start, size_t len, Error **errp) { long our_host_ps = qemu_real_host_page_size(); @@ -1830,38 +2087,37 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, */ if (!QEMU_IS_ALIGNED(start, our_host_ps) || !QEMU_IS_ALIGNED(len, our_host_ps)) { - error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT - " len: %zd", __func__, start, len); - mark_source_rp_bad(ms); + error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:" + RAM_ADDR_FMT " len: %zd", start, len); return; } - if (ram_save_queue_pages(rbname, start, len)) { - mark_source_rp_bad(ms); - } + ram_save_queue_pages(rbname, start, len, errp); } -static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name) +static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name, + Error **errp) { RAMBlock *block = qemu_ram_block_by_name(block_name); if (!block) { - error_report("%s: invalid block name '%s'", __func__, block_name); - return -EINVAL; + error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'", + block_name); + return false; } /* Fetch the received bitmap and refresh the dirty bitmap */ - return ram_dirty_bitmap_reload(s, block); + return ram_dirty_bitmap_reload(s, block, errp); } -static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value) +static bool migrate_handle_rp_resume_ack(MigrationState *s, + uint32_t value, Error **errp) { trace_source_return_path_thread_resume_ack(value); if (value != MIGRATION_RESUME_ACK_VALUE) { - error_report("%s: illegal resume_ack value %"PRIu32, - __func__, value); - return -1; + error_setg(errp, "illegal resume_ack value %"PRIu32, value); + return false; } /* Now both sides are active. 
*/ @@ -1871,7 +2127,7 @@ static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value) /* Notify send thread that time to continue send pages */ migration_rp_kick(s); - return 0; + return true; } /* @@ -1919,48 +2175,46 @@ static void *source_return_path_thread(void *opaque) uint32_t tmp32, sibling_error; ram_addr_t start = 0; /* =0 to silence warning */ size_t len = 0, expected_len; + Error *err = NULL; int res; trace_source_return_path_thread_entry(); rcu_register_thread(); - while (!ms->rp_state.error && !qemu_file_get_error(rp) && - migration_is_setup_or_active(ms->state)) { + while (migration_is_setup_or_active(ms->state)) { trace_source_return_path_thread_loop_top(); + header_type = qemu_get_be16(rp); header_len = qemu_get_be16(rp); if (qemu_file_get_error(rp)) { - mark_source_rp_bad(ms); + qemu_file_get_error_obj(rp, &err); goto out; } if (header_type >= MIG_RP_MSG_MAX || header_type == MIG_RP_MSG_INVALID) { - error_report("RP: Received invalid message 0x%04x length 0x%04x", - header_type, header_len); - mark_source_rp_bad(ms); + error_setg(&err, "Received invalid message 0x%04x length 0x%04x", + header_type, header_len); goto out; } if ((rp_cmd_args[header_type].len != -1 && header_len != rp_cmd_args[header_type].len) || header_len > sizeof(buf)) { - error_report("RP: Received '%s' message (0x%04x) with" - "incorrect length %d expecting %zu", - rp_cmd_args[header_type].name, header_type, header_len, - (size_t)rp_cmd_args[header_type].len); - mark_source_rp_bad(ms); + error_setg(&err, "Received '%s' message (0x%04x) with" + "incorrect length %d expecting %zu", + rp_cmd_args[header_type].name, header_type, header_len, + (size_t)rp_cmd_args[header_type].len); goto out; } /* We know we've got a valid header by this point */ res = qemu_get_buffer(rp, buf, header_len); if (res != header_len) { - error_report("RP: Failed reading data for message 0x%04x" - " read %d expected %d", - header_type, res, header_len); - mark_source_rp_bad(ms); + error_setg(&err, "Failed reading data for message 0x%04x" + " read %d expected %d", + header_type, res, header_len); goto out; } @@ -1970,8 +2224,7 @@ static void *source_return_path_thread(void *opaque) sibling_error = ldl_be_p(buf); trace_source_return_path_thread_shut(sibling_error); if (sibling_error) { - error_report("RP: Sibling indicated error %d", sibling_error); - mark_source_rp_bad(ms); + error_setg(&err, "Sibling indicated error %d", sibling_error); } /* * We'll let the main thread deal with closing the RP @@ -1989,7 +2242,10 @@ static void *source_return_path_thread(void *opaque) case MIG_RP_MSG_REQ_PAGES: start = ldq_be_p(buf); len = ldl_be_p(buf + 8); - migrate_handle_rp_req_pages(ms, NULL, start, len); + migrate_handle_rp_req_pages(ms, NULL, start, len, &err); + if (err) { + goto out; + } break; case MIG_RP_MSG_REQ_PAGES_ID: @@ -2004,32 +2260,32 @@ static void *source_return_path_thread(void *opaque) expected_len += tmp32; } if (header_len != expected_len) { - error_report("RP: Req_Page_id with length %d expecting %zd", - header_len, expected_len); - mark_source_rp_bad(ms); + error_setg(&err, "Req_Page_id with length %d expecting %zd", + header_len, expected_len); + goto out; + } + migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len, + &err); + if (err) { goto out; } - migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); break; case MIG_RP_MSG_RECV_BITMAP: if (header_len < 1) { - error_report("%s: missing block name", __func__); - mark_source_rp_bad(ms); + error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing 
block name"); goto out; } /* Format: len (1B) + idstr (<255B). This ends the idstr. */ buf[buf[0] + 1] = '\0'; - if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) { - mark_source_rp_bad(ms); + if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) { goto out; } break; case MIG_RP_MSG_RESUME_ACK: tmp32 = ldl_be_p(buf); - if (migrate_handle_rp_resume_ack(ms, tmp32)) { - mark_source_rp_bad(ms); + if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) { goto out; } break; @@ -2045,13 +2301,29 @@ static void *source_return_path_thread(void *opaque) } out: - if (qemu_file_get_error(rp)) { + if (err) { + migrate_set_error(ms, err); + error_free(err); trace_source_return_path_thread_bad_end(); - mark_source_rp_bad(ms); + } + + if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { + /* + * this will be extremely unlikely: that we got yet another network + * issue during recovering of the 1st network failure.. during this + * period the main migration thread can be waiting on rp_sem for + * this thread to sync with the other side. + * + * When this happens, explicitly kick the migration thread out of + * RECOVER stage and back to PAUSED, so the admin can try + * everything again. + */ + migration_rp_kick(ms); } trace_source_return_path_thread_end(); rcu_unregister_thread(); + return NULL; } @@ -2073,12 +2345,11 @@ static int open_return_path_on_source(MigrationState *ms) return 0; } -static int close_return_path_on_source(MigrationState *ms) +/* Return true if error detected, or false otherwise */ +static bool close_return_path_on_source(MigrationState *ms) { - int ret; - if (!ms->rp_state.rp_thread_created) { - return 0; + return false; } trace_migration_return_path_end_before(); @@ -2096,18 +2367,13 @@ static int close_return_path_on_source(MigrationState *ms) } } - trace_await_return_path_close_on_source_joining(); qemu_thread_join(&ms->rp_state.rp_thread); ms->rp_state.rp_thread_created = false; - trace_await_return_path_close_on_source_close(); - - ret = ms->rp_state.error; - ms->rp_state.error = false; - migration_release_dst_files(ms); + trace_migration_return_path_end_after(); - trace_migration_return_path_end_after(ret); - return ret; + /* Return path will persist the error in MigrationState when quit */ + return migrate_has_error(ms); } static inline void @@ -2126,7 +2392,6 @@ static int postcopy_start(MigrationState *ms, Error **errp) int ret; QIOChannelBuffer *bioc; QEMUFile *fb; - int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); uint64_t bandwidth = migrate_max_postcopy_bandwidth(); bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; @@ -2148,9 +2413,11 @@ static int postcopy_start(MigrationState *ms, Error **errp) qemu_mutex_lock_iothread(); trace_postcopy_start_set_run(); + migration_downtime_start(ms); + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); global_state_store(); - ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); + ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE); if (ret < 0) { goto fail; } @@ -2250,7 +2517,7 @@ static int postcopy_start(MigrationState *ms, Error **errp) ms->postcopy_after_devices = true; migration_call_notifiers(ms); - ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; + migration_downtime_end(ms); qemu_mutex_unlock_iothread(); @@ -2346,13 +2613,13 @@ static int migration_completion_precopy(MigrationState *s, int ret; qemu_mutex_lock_iothread(); - s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + migration_downtime_start(s); 
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); s->vm_old_state = runstate_get(); global_state_store(); - ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); + ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE); trace_migration_completion_vm_stop(ret); if (ret < 0) { goto out_unlock; @@ -2519,7 +2786,9 @@ static int postcopy_resume_handshake(MigrationState *s) qemu_savevm_send_postcopy_resume(s->to_dst_file); while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { - migration_rp_wait(s); + if (migration_rp_wait(s)) { + return -1; + } } if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { @@ -2703,15 +2972,8 @@ static void migration_calculate_complete(MigrationState *s) int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t transfer_time; + migration_downtime_end(s); s->total_time = end_time - s->start_time; - if (!s->downtime) { - /* - * It's still not set, so we are precopy migration. For - * postcopy, downtime is calculated during postcopy_start(). - */ - s->downtime = end_time - s->downtime_start; - } - transfer_time = s->total_time - s->setup_time; if (transfer_time) { s->mbps = ((double) bytes * 8.0) / transfer_time / 1000; @@ -3130,7 +3392,7 @@ static void bg_migration_vm_start_bh(void *opaque) s->vm_start_bh = NULL; vm_start(); - s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start; + migration_downtime_end(s); } /** @@ -3197,7 +3459,7 @@ static void *bg_migration_thread(void *opaque) s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; trace_migration_thread_setup_complete(); - s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + migration_downtime_start(s); qemu_mutex_lock_iothread(); @@ -3210,7 +3472,7 @@ static void *bg_migration_thread(void *opaque) global_state_store(); /* Forcibly stop VM before saving state of vCPUs and devices */ - if (vm_stop_force_state(RUN_STATE_PAUSED)) { + if (migration_stop_vm(RUN_STATE_PAUSED)) { goto fail; } /* diff --git a/migration/migration.h b/migration/migration.h index ae82004892..cf2c9c88e0 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -308,7 +308,6 @@ struct MigrationState { /* Protected by qemu_file_lock */ QEMUFile *from_dst_file; QemuThread rp_thread; - bool error; /* * We can also check non-zero of rp_thread, but there's no "official" * way to do this, so this bool makes it slightly more elegant. @@ -495,6 +494,7 @@ int migrate_init(MigrationState *s, Error **errp); bool migration_is_blocked(Error **errp); /* True if outgoing migration has entered postcopy phase */ bool migration_in_postcopy(void); +bool migration_postcopy_is_alive(int state); MigrationState *migrate_get_current(void); uint64_t ram_get_total_transferred_pages(void); @@ -520,7 +520,8 @@ bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm, Error **errp); void migrate_add_address(SocketAddress *address); - +bool migrate_uri_parse(const char *uri, MigrationChannel **channel, + Error **errp); int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque); #define qemu_ram_foreach_block \ @@ -535,8 +536,11 @@ void migration_populate_vfio_info(MigrationInfo *info); void migration_reset_vfio_bytes_transferred(void); void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); -/* Migration thread waiting for return path thread. */ -void migration_rp_wait(MigrationState *s); +/* + * Migration thread waiting for return path thread. Return non-zero if an + * error is detected. 
+ */ +int migration_rp_wait(MigrationState *s); /* * Kick the migration thread waiting for return path messages. NOTE: the * name can be slightly confusing (when read as "kick the rp thread"), just @@ -544,4 +548,6 @@ void migration_rp_wait(MigrationState *s); */ void migration_rp_kick(MigrationState *s); +int migration_stop_vm(RunState state); + #endif diff --git a/migration/options.c b/migration/options.c index 9a39826ca5..8d8ec73ad9 100644 --- a/migration/options.c +++ b/migration/options.c @@ -176,6 +176,9 @@ Property migration_properties[] = { DEFINE_PROP_UINT64("vcpu-dirty-limit", MigrationState, parameters.vcpu_dirty_limit, DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT), + DEFINE_PROP_MIG_MODE("mode", MigrationState, + parameters.mode, + MIG_MODE_NORMAL), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -827,6 +830,13 @@ uint64_t migrate_max_postcopy_bandwidth(void) return s->parameters.max_postcopy_bandwidth; } +MigMode migrate_mode(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.mode; +} + int migrate_multifd_channels(void) { MigrationState *s = migrate_get_current(); @@ -999,6 +1009,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) params->x_vcpu_dirty_limit_period = s->parameters.x_vcpu_dirty_limit_period; params->has_vcpu_dirty_limit = true; params->vcpu_dirty_limit = s->parameters.vcpu_dirty_limit; + params->has_mode = true; + params->mode = s->parameters.mode; return params; } @@ -1034,6 +1046,7 @@ void migrate_params_init(MigrationParameters *params) params->has_announce_step = true; params->has_x_vcpu_dirty_limit_period = true; params->has_vcpu_dirty_limit = true; + params->has_mode = true; } /* @@ -1331,6 +1344,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_vcpu_dirty_limit) { dest->vcpu_dirty_limit = params->vcpu_dirty_limit; } + + if (params->has_mode) { + dest->mode = params->mode; + } } static void migrate_params_apply(MigrateSetParameters *params, Error **errp) @@ -1471,6 +1488,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) if (params->has_vcpu_dirty_limit) { s->parameters.vcpu_dirty_limit = params->vcpu_dirty_limit; } + + if (params->has_mode) { + s->parameters.mode = params->mode; + } } void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) diff --git a/migration/options.h b/migration/options.h index 237f2d6b4a..246c160aee 100644 --- a/migration/options.h +++ b/migration/options.h @@ -83,6 +83,7 @@ uint8_t migrate_max_cpu_throttle(void); uint64_t migrate_max_bandwidth(void); uint64_t migrate_avail_switchover_bandwidth(void); uint64_t migrate_max_postcopy_bandwidth(void); +MigMode migrate_mode(void); int migrate_multifd_channels(void); MultiFDCompression migrate_multifd_compression(void); int migrate_multifd_zlib_level(void); diff --git a/migration/qemu-file.c b/migration/qemu-file.c index d64500310d..94231ff295 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -137,7 +137,7 @@ QEMUFile *qemu_file_new_input(QIOChannel *ioc) * * If errp is specified, a verbose error message will be copied over. 
*/ -static int qemu_file_get_error_obj(QEMUFile *f, Error **errp) +int qemu_file_get_error_obj(QEMUFile *f, Error **errp) { if (!f->last_error) { return 0; diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 1774116f79..8aec9fabf7 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -68,6 +68,7 @@ int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset); void qemu_file_skip(QEMUFile *f, int size); int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp); void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err); +int qemu_file_get_error_obj(QEMUFile *f, Error **errp); void qemu_file_set_error(QEMUFile *f, int ret); int qemu_file_shutdown(QEMUFile *f); QEMUFile *qemu_file_get_return_path(QEMUFile *f); diff --git a/migration/ram.c b/migration/ram.c index 34724e8fe8..a0f3b86663 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1898,7 +1898,8 @@ static void migration_page_queue_free(RAMState *rs) * @start: starting address from the start of the RAMBlock * @len: length (in bytes) to send */ -int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) +int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len, + Error **errp) { RAMBlock *ramblock; RAMState *rs = ram_state; @@ -1915,7 +1916,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) * Shouldn't happen, we can't reuse the last RAMBlock if * it's the 1st request. */ - error_report("ram_save_queue_pages no previous block"); + error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block"); return -1; } } else { @@ -1923,16 +1924,17 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) if (!ramblock) { /* We shouldn't be asked for a non-existent RAMBlock */ - error_report("ram_save_queue_pages no block '%s'", rbname); + error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname); return -1; } rs->last_req_rb = ramblock; } trace_ram_save_queue_pages(ramblock->idstr, start, len); if (!offset_in_ramblock(ramblock, start + len - 1)) { - error_report("%s request overrun start=" RAM_ADDR_FMT " len=" - RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, - __func__, start, len, ramblock->used_length); + error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, " + "start=" RAM_ADDR_FMT " len=" + RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, + start, len, ramblock->used_length); return -1; } @@ -1964,9 +1966,9 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) assert(len % page_size == 0); while (len) { if (ram_save_host_page_urgent(pss)) { - error_report("%s: ram_save_host_page_urgent() failed: " - "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, - __func__, ramblock->idstr, start); + error_setg(errp, "ram_save_host_page_urgent() failed: " + "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, + ramblock->idstr, start); ret = -1; break; } @@ -4097,7 +4099,9 @@ static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs) /* Wait until all the ramblocks' dirty bitmap synced */ while (qatomic_read(&rs->postcopy_bmap_sync_requested)) { - migration_rp_wait(s); + if (migration_rp_wait(s)) { + return -1; + } } trace_ram_dirty_bitmap_sync_complete(); @@ -4109,10 +4113,11 @@ static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs) * Read the received bitmap, revert it as the initial dirty bitmap. * This is only used when the postcopy migration is paused but wants * to resume from a middle point. + * + * Returns true if succeeded, false for errors. 
*/ -int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) +bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp) { - int ret = -EINVAL; /* from_dst_file is always valid because we're within rp_thread */ QEMUFile *file = s->rp_state.from_dst_file; g_autofree unsigned long *le_bitmap = NULL; @@ -4124,9 +4129,9 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) trace_ram_dirty_bitmap_reload_begin(block->idstr); if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { - error_report("%s: incorrect state %s", __func__, - MigrationStatus_str(s->state)); - return -EINVAL; + error_setg(errp, "Reload bitmap in incorrect state %s", + MigrationStatus_str(s->state)); + return false; } /* @@ -4142,27 +4147,25 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) /* The size of the bitmap should match with our ramblock */ if (size != local_size) { - error_report("%s: ramblock '%s' bitmap size mismatch " - "(0x%"PRIx64" != 0x%"PRIx64")", __func__, - block->idstr, size, local_size); - return -EINVAL; + error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64 + " != 0x%"PRIx64")", block->idstr, size, local_size); + return false; } size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size); end_mark = qemu_get_be64(file); - ret = qemu_file_get_error(file); - if (ret || size != local_size) { - error_report("%s: read bitmap failed for ramblock '%s': %d" - " (size 0x%"PRIx64", got: 0x%"PRIx64")", - __func__, block->idstr, ret, local_size, size); - return -EIO; + if (qemu_file_get_error(file) || size != local_size) { + error_setg(errp, "read bitmap failed for ramblock '%s': " + "(size 0x%"PRIx64", got: 0x%"PRIx64")", + block->idstr, local_size, size); + return false; } if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) { - error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64, - __func__, block->idstr, end_mark); - return -EINVAL; + error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64, + block->idstr, end_mark); + return false; } /* @@ -4194,7 +4197,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) */ migration_rp_kick(s); - return 0; + return true; } static int ram_resume_prepare(MigrationState *s, void *opaque) diff --git a/migration/ram.h b/migration/ram.h index 9f3ad1ee81..9b937a446b 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -50,7 +50,8 @@ uint64_t ram_bytes_total(void); void mig_throttle_counter_reset(void); uint64_t ram_pagesize_summary(void); -int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); +int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len, + Error **errp); void ram_postcopy_migrated_memory_release(MigrationState *ms); /* For outgoing discard bitmap */ void ram_postcopy_send_discard_bitmap(MigrationState *ms); @@ -70,7 +71,7 @@ void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr); void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr); int64_t ramblock_recv_bitmap_send(QEMUFile *file, const char *block_name); -int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); +bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb, Error **errp); bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start); void postcopy_preempt_shutdown_file(MigrationState *s); void *postcopy_preempt_thread(void *opaque); diff --git a/migration/rdma.c b/migration/rdma.c index 2938db4f64..6a29e53daf 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -289,7 +289,6 @@ typedef struct RDMALocalBlocks { typedef 
struct RDMAContext { char *host; int port; - char *host_port; RDMAWorkRequestData wr_data[RDMA_WRID_MAX]; @@ -2431,9 +2430,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma) rdma->channel = NULL; } g_free(rdma->host); - g_free(rdma->host_port); rdma->host = NULL; - rdma->host_port = NULL; } @@ -2723,28 +2720,16 @@ static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path, rdma_return_path->is_return_path = true; } -static RDMAContext *qemu_rdma_data_init(const char *host_port, Error **errp) +static RDMAContext *qemu_rdma_data_init(InetSocketAddress *saddr, Error **errp) { RDMAContext *rdma = NULL; - InetSocketAddress *addr; rdma = g_new0(RDMAContext, 1); rdma->current_index = -1; rdma->current_chunk = -1; - addr = g_new(InetSocketAddress, 1); - if (!inet_parse(addr, host_port, NULL)) { - rdma->port = atoi(addr->port); - rdma->host = g_strdup(addr->host); - rdma->host_port = g_strdup(host_port); - } else { - error_setg(errp, "RDMA ERROR: bad RDMA migration address '%s'", - host_port); - g_free(rdma); - rdma = NULL; - } - - qapi_free_InetSocketAddress(addr); + rdma->host = g_strdup(saddr->host); + rdma->port = atoi(saddr->port); return rdma; } @@ -3353,6 +3338,7 @@ static int qemu_rdma_accept(RDMAContext *rdma) .private_data_len = sizeof(cap), }; RDMAContext *rdma_return_path = NULL; + g_autoptr(InetSocketAddress) isock = g_new0(InetSocketAddress, 1); struct rdma_cm_event *cm_event; struct ibv_context *verbs; int ret; @@ -3367,13 +3353,16 @@ static int qemu_rdma_accept(RDMAContext *rdma) goto err_rdma_dest_wait; } + isock->host = rdma->host; + isock->port = g_strdup_printf("%d", rdma->port); + /* * initialize the RDMAContext for return path for postcopy after first * connection request reached. */ if ((migrate_postcopy() || migrate_return_path()) && !rdma->is_return_path) { - rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL); + rdma_return_path = qemu_rdma_data_init(isock, NULL); if (rdma_return_path == NULL) { rdma_ack_cm_event(cm_event); goto err_rdma_dest_wait; @@ -4074,7 +4063,8 @@ static void rdma_accept_incoming_migration(void *opaque) } } -void rdma_start_incoming_migration(const char *host_port, Error **errp) +void rdma_start_incoming_migration(InetSocketAddress *host_port, + Error **errp) { MigrationState *s = migrate_get_current(); int ret; @@ -4118,13 +4108,12 @@ cleanup_rdma: err: if (rdma) { g_free(rdma->host); - g_free(rdma->host_port); } g_free(rdma); } void rdma_start_outgoing_migration(void *opaque, - const char *host_port, Error **errp) + InetSocketAddress *host_port, Error **errp) { MigrationState *s = opaque; RDMAContext *rdma_return_path = NULL; diff --git a/migration/rdma.h b/migration/rdma.h index 30b15b4466..a8d27f33b8 100644 --- a/migration/rdma.h +++ b/migration/rdma.h @@ -14,15 +14,17 @@ * */ +#include "qemu/sockets.h" + #ifndef QEMU_MIGRATION_RDMA_H #define QEMU_MIGRATION_RDMA_H #include "exec/memory.h" -void rdma_start_outgoing_migration(void *opaque, const char *host_port, +void rdma_start_outgoing_migration(void *opaque, InetSocketAddress *host_port, Error **errp); -void rdma_start_incoming_migration(const char *host_port, Error **errp); +void rdma_start_incoming_migration(InetSocketAddress *host_port, Error **errp); /* * Constants used by rdma return codes diff --git a/migration/savevm.c b/migration/savevm.c index c7835e9c73..bc98c2ea6f 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -237,6 +237,8 @@ static SaveState savevm_state = { .global_section_id = 0, }; +static SaveStateEntry *find_se(const char *idstr, 
uint32_t instance_id); + static bool should_validate_capability(int capability) { assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX); @@ -716,6 +718,18 @@ static void savevm_state_handler_insert(SaveStateEntry *nse) assert(priority <= MIG_PRI_MAX); + /* + * This should never happen otherwise migration will probably fail + * silently somewhere because we can be wrongly applying one + * object properties upon another one. Bail out ASAP. + */ + if (find_se(nse->idstr, nse->instance_id)) { + error_report("%s: Detected duplicate SaveStateEntry: " + "id=%s, instance_id=0x%"PRIx32, __func__, + nse->idstr, nse->instance_id); + exit(EXIT_FAILURE); + } + for (i = priority - 1; i >= 0; i--) { se = savevm_state.handler_pri_head[i]; if (se != NULL) { @@ -846,6 +860,24 @@ static void vmstate_check(const VMStateDescription *vmsd) } } +/* + * See comment in hw/intc/xics.c:icp_realize() + * + * This function can be removed when + * pre_2_10_vmstate_register_dummy_icp() is removed. + */ +int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id, + const VMStateDescription *vmsd, + void *opaque) +{ + SaveStateEntry *se = find_se(vmsd->name, instance_id); + + if (se) { + savevm_state_handler_remove(se); + } + return vmstate_register(obj, instance_id, vmsd, opaque); +} + int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, const VMStateDescription *vmsd, void *opaque, int alias_id, @@ -1459,6 +1491,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f) static int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) { + int64_t start_ts_each, end_ts_each; SaveStateEntry *se; int ret; @@ -1475,6 +1508,8 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) continue; } } + + start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); trace_savevm_section_start(se->idstr, se->section_id); save_section_header(f, se, QEMU_VM_SECTION_END); @@ -1486,8 +1521,13 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) qemu_file_set_error(f, ret); return -1; } + end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); + trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id, + end_ts_each - start_ts_each); } + trace_vmstate_downtime_checkpoint("src-iterable-saved"); + return 0; } @@ -1496,6 +1536,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, bool inactivate_disks) { MigrationState *ms = migrate_get_current(); + int64_t start_ts_each, end_ts_each; JSONWriter *vmdesc = ms->vmdesc; int vmdesc_len; SaveStateEntry *se; @@ -1507,11 +1548,17 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, continue; } + start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); + ret = vmstate_save(f, se, vmdesc); if (ret) { qemu_file_set_error(f, ret); return ret; } + + end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); + trace_vmstate_downtime_save("non-iterable", se->idstr, se->instance_id, + end_ts_each - start_ts_each); } if (inactivate_disks) { @@ -1547,6 +1594,8 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, json_writer_free(vmdesc); ms->vmdesc = NULL; + trace_vmstate_downtime_checkpoint("src-non-iterable-saved"); + return 0; } @@ -2088,18 +2137,18 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) Error *local_err = NULL; MigrationIncomingState *mis = opaque; - trace_loadvm_postcopy_handle_run_bh("enter"); + trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter"); /* TODO we should move all of this lot into postcopy_ram.c or a shared code 
      * in migration.c
      */
     cpu_synchronize_all_post_init();
 
-    trace_loadvm_postcopy_handle_run_bh("after cpu sync");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cpu-synced");
 
     qemu_announce_self(&mis->announce_timer, migrate_announce_params());
 
-    trace_loadvm_postcopy_handle_run_bh("after announce");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced");
 
     /* Make sure all file formats throw away their mutable metadata.
      * If we get an error here, just don't restart the VM yet. */
@@ -2110,7 +2159,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
         autostart = false;
     }
 
-    trace_loadvm_postcopy_handle_run_bh("after invalidate cache");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
 
     dirty_bitmap_mig_before_vm_start();
 
@@ -2124,7 +2173,7 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
 
     qemu_bh_delete(mis->bh);
 
-    trace_loadvm_postcopy_handle_run_bh("return");
+    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started");
 }
 
 /* After all discards we can start running and asking for pages */
@@ -2505,9 +2554,12 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
 }
 
 static int
-qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
+qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
+                               uint8_t type)
 {
+    bool trace_downtime = (type == QEMU_VM_SECTION_FULL);
     uint32_t instance_id, version_id, section_id;
+    int64_t start_ts, end_ts;
     SaveStateEntry *se;
     char idstr[256];
     int ret;
@@ -2556,12 +2608,23 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
         return -EINVAL;
     }
 
+    if (trace_downtime) {
+        start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+    }
+
     ret = vmstate_load(f, se);
     if (ret < 0) {
         error_report("error while loading state for instance 0x%"PRIx32" of"
                      " device '%s'", instance_id, idstr);
         return ret;
     }
+
+    if (trace_downtime) {
+        end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+        trace_vmstate_downtime_load("non-iterable", se->idstr,
+                                    se->instance_id, end_ts - start_ts);
+    }
+
     if (!check_section_footer(f, se)) {
         return -EINVAL;
     }
@@ -2570,8 +2633,11 @@
 }
 
 static int
-qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
+qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis,
+                             uint8_t type)
 {
+    bool trace_downtime = (type == QEMU_VM_SECTION_END);
+    int64_t start_ts, end_ts;
     uint32_t section_id;
     SaveStateEntry *se;
     int ret;
@@ -2596,12 +2662,23 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
         return -EINVAL;
     }
 
+    if (trace_downtime) {
+        start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+    }
+
     ret = vmstate_load(f, se);
     if (ret < 0) {
         error_report("error while loading state section id %d(%s)",
                      section_id, se->idstr);
         return ret;
     }
+
+    if (trace_downtime) {
+        end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+        trace_vmstate_downtime_load("iterable", se->idstr,
+                                    se->instance_id, end_ts - start_ts);
+    }
+
     if (!check_section_footer(f, se)) {
         return -EINVAL;
     }
@@ -2790,14 +2867,14 @@ retry:
         switch (section_type) {
         case QEMU_VM_SECTION_START:
         case QEMU_VM_SECTION_FULL:
-            ret = qemu_loadvm_section_start_full(f, mis);
+            ret = qemu_loadvm_section_start_full(f, mis, section_type);
            if (ret < 0) {
                goto out;
            }
            break;
        case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
-            ret = qemu_loadvm_section_part_end(f, mis);
+            ret = qemu_loadvm_section_part_end(f, mis, section_type);
            if (ret < 0) {
                goto out;
            }
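
The savevm.c hunks above bracket every per-device save and load with a pair of microsecond timestamps from qemu_clock_get_us() and emit one vmstate_downtime_save/load trace event per device, plus a named vmstate_downtime_checkpoint at the end of each phase, so each device's contribution to downtime can be read straight out of the trace log. A minimal, self-contained sketch of that accounting pattern, not QEMU code: the device table, dummy_save() and clock_us() below are invented for the example, with clock_gettime() standing in for qemu_clock_get_us().

/* Illustrative sketch only, not QEMU code. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>

static int64_t clock_us(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

struct fake_se {
    const char *idstr;
    uint32_t instance_id;
    int (*save)(void);          /* stands in for vmstate_save() */
};

static int dummy_save(void)
{
    return 0;
}

int main(void)
{
    struct fake_se handlers[] = {
        { "ram", 0, dummy_save },
        { "e1000", 0, dummy_save },
    };

    for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
        int64_t start = clock_us();

        if (handlers[i].save()) {
            return 1;
        }

        int64_t end = clock_us();
        /* one line per device, like trace_vmstate_downtime_save() */
        printf("downtime_save idstr=%s instance_id=%" PRIu32
               " downtime=%" PRIi64 "\n",
               handlers[i].idstr, handlers[i].instance_id, end - start);
    }
    /* named phase marker, like trace_vmstate_downtime_checkpoint() */
    printf("checkpoint src-iterable-saved\n");
    return 0;
}
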
diff --git a/migration/socket.c b/migration/socket.c
index 1b6f5baefb..98e3ea1514 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -28,6 +28,8 @@
 #include "trace.h"
 #include "postcopy-ram.h"
 #include "options.h"
+#include "qapi/clone-visitor.h"
+#include "qapi/qapi-visit-sockets.h"
 
 struct SocketOutgoingArgs {
     SocketAddress *saddr;
@@ -108,19 +110,19 @@ out:
     object_unref(OBJECT(sioc));
 }
 
-static void
-socket_start_outgoing_migration_internal(MigrationState *s,
-                                         SocketAddress *saddr,
-                                         Error **errp)
+void socket_start_outgoing_migration(MigrationState *s,
+                                     SocketAddress *saddr,
+                                     Error **errp)
 {
     QIOChannelSocket *sioc = qio_channel_socket_new();
     struct SocketConnectData *data = g_new0(struct SocketConnectData, 1);
+    SocketAddress *addr = QAPI_CLONE(SocketAddress, saddr);
 
     data->s = s;
 
     /* in case previous migration leaked it */
     qapi_free_SocketAddress(outgoing_args.saddr);
-    outgoing_args.saddr = saddr;
+    outgoing_args.saddr = addr;
 
     if (saddr->type == SOCKET_ADDRESS_TYPE_INET) {
         data->hostname = g_strdup(saddr->u.inet.host);
@@ -135,18 +137,6 @@ socket_start_outgoing_migration_internal(MigrationState *s,
                                      NULL);
 }
 
-void socket_start_outgoing_migration(MigrationState *s,
-                                     const char *str,
-                                     Error **errp)
-{
-    Error *err = NULL;
-    SocketAddress *saddr = socket_parse(str, &err);
-    if (!err) {
-        socket_start_outgoing_migration_internal(s, saddr, &err);
-    }
-    error_propagate(errp, err);
-}
-
 static void socket_accept_incoming_migration(QIONetListener *listener,
                                               QIOChannelSocket *cioc,
                                               gpointer opaque)
@@ -172,9 +162,8 @@ socket_incoming_migration_end(void *opaque)
     object_unref(OBJECT(listener));
 }
 
-static void
-socket_start_incoming_migration_internal(SocketAddress *saddr,
-                                         Error **errp)
+void socket_start_incoming_migration(SocketAddress *saddr,
+                                     Error **errp)
 {
     QIONetListener *listener = qio_net_listener_new();
     MigrationIncomingState *mis = migration_incoming_get_current();
@@ -213,13 +202,3 @@ socket_start_incoming_migration_internal(SocketAddress *saddr,
     }
 }
 
-void socket_start_incoming_migration(const char *str, Error **errp)
-{
-    Error *err = NULL;
-    SocketAddress *saddr = socket_parse(str, &err);
-    if (!err) {
-        socket_start_incoming_migration_internal(saddr, &err);
-    }
-    qapi_free_SocketAddress(saddr);
-    error_propagate(errp, err);
-}
diff --git a/migration/socket.h b/migration/socket.h
index dc54df4e6c..5e4c33b8ea 100644
--- a/migration/socket.h
+++ b/migration/socket.h
@@ -19,13 +19,14 @@
 #include "io/channel.h"
 #include "io/task.h"
+#include "qemu/sockets.h"
 
 void socket_send_channel_create(QIOTaskFunc f, void *data);
 QIOChannel *socket_send_channel_create_sync(Error **errp);
 int socket_send_channel_destroy(QIOChannel *send);
 
-void socket_start_incoming_migration(const char *str, Error **errp);
+void socket_start_incoming_migration(SocketAddress *saddr, Error **errp);
 
-void socket_start_outgoing_migration(MigrationState *s, const char *str,
-                                     Error **errp);
+void socket_start_outgoing_migration(MigrationState *s,
+                                     SocketAddress *saddr, Error **errp);
 
 #endif
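
The socket.c change above stops stashing the caller's SocketAddress directly in outgoing_args and keeps a QAPI_CLONE() of it instead, so the stored address has a single, clear owner and the caller stays free to release whatever it passed in. A minimal sketch of that clone-what-you-store ownership rule, not QEMU code: the outgoing_args-like struct, start_outgoing() and the address string are invented, with strdup() standing in for QAPI_CLONE().

/* Illustrative sketch only, not QEMU code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct outgoing_args {
    char *addr;                     /* owned by this module */
};

static struct outgoing_args outgoing;

static void start_outgoing(const char *addr)
{
    free(outgoing.addr);            /* in case a previous run left one behind */
    outgoing.addr = strdup(addr);   /* keep a private copy, not the argument */
}

int main(void)
{
    char *caller_owned = strdup("tcp:127.0.0.1:4444");

    start_outgoing(caller_owned);
    free(caller_owned);             /* safe: the module kept its own clone */

    printf("stored address: %s\n", outgoing.addr);
    free(outgoing.addr);
    return 0;
}
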
diff --git a/migration/trace-events b/migration/trace-events
index fa9486dffe..de4a743c8a 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -17,7 +17,6 @@ loadvm_handle_recv_bitmap(char *s) "%s"
 loadvm_postcopy_handle_advise(void) ""
 loadvm_postcopy_handle_listen(const char *str) "%s"
 loadvm_postcopy_handle_run(void) ""
-loadvm_postcopy_handle_run_bh(const char *str) "%s"
 loadvm_postcopy_handle_resume(void) ""
 loadvm_postcopy_ram_handle_discard(void) ""
 loadvm_postcopy_ram_handle_discard_end(void) ""
@@ -48,6 +47,9 @@ savevm_state_cleanup(void) ""
 savevm_state_complete_precopy(void) ""
 vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
 vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
+vmstate_downtime_save(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
+vmstate_downtime_load(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64
+vmstate_downtime_checkpoint(const char *checkpoint) "%s"
 postcopy_pause_incoming(void) ""
 postcopy_pause_incoming_continued(void) ""
 postcopy_page_req_sync(void *host_addr) "sync page req %p"
@@ -148,8 +150,6 @@ multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p"
 multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostname) "ioc=%p ioctype=%s hostname=%s"
 
 # migration.c
-await_return_path_close_on_source_close(void) ""
-await_return_path_close_on_source_joining(void) ""
 migrate_set_state(const char *new_state) "new state %s"
 migrate_fd_cleanup(void) ""
 migrate_fd_error(const char *error_desc) "error=%s"
@@ -166,7 +166,7 @@ migration_completion_postcopy_end_after_complete(void) ""
 migration_rate_limit_pre(int ms) "%d ms"
 migration_rate_limit_post(int urgent) "urgent: %d"
 migration_return_path_end_before(void) ""
-migration_return_path_end_after(int rp_error) "%d"
+migration_return_path_end_after(void) ""
 migration_thread_after_loop(void) ""
 migration_thread_file_err(void) ""
 migration_thread_setup_complete(void) ""
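
The three trace events added above give per-device downtime figures plus a set of named checkpoints along the switchover path; the checkpoints become most useful once consecutive pairs are diffed into per-stage intervals. A minimal sketch of that post-processing, not part of the patch: the checkpoint names are the ones introduced here, while the timestamps are invented sample values.

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>

struct checkpoint {
    const char *name;
    int64_t ts_us;              /* microseconds, as reported by the tracepoint */
};

int main(void)
{
    struct checkpoint timeline[] = {
        { "dst-postcopy-bh-enter",             1000 },
        { "dst-postcopy-bh-cpu-synced",        1850 },
        { "dst-postcopy-bh-announced",         2100 },
        { "dst-postcopy-bh-cache-invalidated", 2600 },
        { "dst-postcopy-bh-vm-started",        3400 },
    };
    size_t n = sizeof(timeline) / sizeof(timeline[0]);

    for (size_t i = 1; i < n; i++) {
        /* time spent between two consecutive checkpoints */
        printf("%-35s -> %-35s %6" PRIi64 " us\n",
               timeline[i - 1].name, timeline[i].name,
               timeline[i].ts_us - timeline[i - 1].ts_us);
    }
    return 0;
}
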