Diffstat (limited to 'migration/migration.c')
| -rw-r--r-- | migration/migration.c | 136 |
1 file changed, 52 insertions, 84 deletions
diff --git a/migration/migration.c b/migration/migration.c
index df61ca4e93..2d1da917c7 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -135,6 +135,21 @@ static bool migration_needs_multiple_sockets(void)
     return migrate_multifd() || migrate_postcopy_preempt();
 }
 
+static RunState migration_get_target_runstate(void)
+{
+    /*
+     * When the global state is not migrated, it means we don't know the
+     * runstate of the src QEMU.  We don't have much choice but assuming
+     * the VM is running.  NOTE: this is pretty rare case, so far only Xen
+     * uses it.
+     */
+    if (!global_state_received()) {
+        return RUN_STATE_RUNNING;
+    }
+
+    return global_state_get_runstate();
+}
+
 static bool transport_supports_multi_channels(MigrationAddress *addr)
 {
     if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
@@ -723,30 +738,10 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
 
 static void process_incoming_migration_bh(void *opaque)
 {
-    Error *local_err = NULL;
     MigrationIncomingState *mis = opaque;
 
     trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");
 
-    /* If capability late_block_activate is set:
-     * Only fire up the block code now if we're going to restart the
-     * VM, else 'cont' will do it.
-     * This causes file locking to happen; so we don't want it to happen
-     * unless we really are starting the VM.
-     */
-    if (!migrate_late_block_activate() ||
-         (autostart && (!global_state_received() ||
-            runstate_is_live(global_state_get_runstate())))) {
-        /* Make sure all file formats throw away their mutable metadata.
-         * If we get an error here, just don't restart the VM yet. */
-        bdrv_activate_all(&local_err);
-        if (local_err) {
-            error_report_err(local_err);
-            local_err = NULL;
-            autostart = false;
-        }
-    }
-
     /*
      * This must happen after all error conditions are dealt with and
      * we're sure the VM is going to be running on this host.
@@ -759,10 +754,23 @@ static void process_incoming_migration_bh(void *opaque)
 
     dirty_bitmap_mig_before_vm_start();
 
-    if (!global_state_received() ||
-        runstate_is_live(global_state_get_runstate())) {
+    if (runstate_is_live(migration_get_target_runstate())) {
         if (autostart) {
-            vm_start();
+            /*
+             * Block activation is always delayed until VM starts, either
+             * here (which means we need to start the dest VM right now..),
+             * or until qmp_cont() later.
+             *
+             * We used to have cap 'late-block-activate' but now we do this
+             * unconditionally, as it has no harm but only benefit.  E.g.,
+             * it's not part of migration ABI on the time of disk activation.
+             *
+             * Make sure all file formats throw away their mutable
+             * metadata.  If error, don't restart the VM yet.
+             */
+            if (migration_block_activate(NULL)) {
+                vm_start();
+            }
         } else {
             runstate_set(RUN_STATE_PAUSED);
         }
@@ -1547,16 +1555,6 @@ static void migrate_fd_cancel(MigrationState *s)
             }
         }
     }
-    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
-        Error *local_err = NULL;
-
-        bdrv_activate_all(&local_err);
-        if (local_err) {
-            error_report_err(local_err);
-        } else {
-            s->block_inactive = false;
-        }
-    }
 }
 
 void migration_add_notifier_mode(NotifierWithReturn *notify,
@@ -1840,6 +1838,12 @@ void qmp_migrate_incoming(const char *uri, bool has_channels,
             return;
         }
 
+        /*
+         * Newly setup incoming QEMU.  Mark the block active state to reflect
+         * that the src currently owns the disks.
+         */
+        migration_block_active_setup(false);
+
         once = false;
     }
 
@@ -2492,7 +2496,6 @@ static int postcopy_start(MigrationState *ms, Error **errp)
     QIOChannelBuffer *bioc;
     QEMUFile *fb;
     uint64_t bandwidth = migrate_max_postcopy_bandwidth();
-    bool restart_block = false;
     int cur_state = MIGRATION_STATUS_ACTIVE;
 
     if (migrate_postcopy_preempt()) {
@@ -2528,13 +2531,10 @@ static int postcopy_start(MigrationState *ms, Error **errp)
         goto fail;
     }
 
-    ret = bdrv_inactivate_all();
-    if (ret < 0) {
-        error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()",
-                         __func__);
+    if (!migration_block_inactivate()) {
+        error_setg(errp, "%s: Failed in bdrv_inactivate_all()", __func__);
         goto fail;
     }
-    restart_block = true;
 
     /*
      * Cause any non-postcopiable, but iterative devices to
@@ -2604,8 +2604,6 @@ static int postcopy_start(MigrationState *ms, Error **errp)
         goto fail_closefb;
     }
 
-    restart_block = false;
-
     /* Now send that blob */
     if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
         error_setg(errp, "%s: Failed to send packaged data", __func__);
@@ -2650,17 +2648,7 @@ fail_closefb:
 fail:
     migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                       MIGRATION_STATUS_FAILED);
-    if (restart_block) {
-        /* A failure happened early enough that we know the destination hasn't
-         * accessed block devices, so we're safe to recover.
-         */
-        Error *local_err = NULL;
-
-        bdrv_activate_all(&local_err);
-        if (local_err) {
-            error_report_err(local_err);
-        }
-    }
+    migration_block_activate(NULL);
     migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL);
     bql_unlock();
     return -1;
@@ -2729,14 +2717,11 @@ static int migration_completion_precopy(MigrationState *s,
         goto out_unlock;
     }
 
-    /*
-     * Inactivate disks except in COLO, and track that we have done so in order
-     * to remember to reactivate them if migration fails or is cancelled.
-     */
-    s->block_inactive = !migrate_colo();
     migration_rate_set(RATE_LIMIT_DISABLED);
+
+    /* Inactivate disks except in COLO */
     ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
-                                             s->block_inactive);
+                                             !migrate_colo());
 out_unlock:
     bql_unlock();
     return ret;
@@ -2761,31 +2746,6 @@ static void migration_completion_postcopy(MigrationState *s)
     trace_migration_completion_postcopy_end_after_complete();
 }
 
-static void migration_completion_failed(MigrationState *s,
-                                        int current_active_state)
-{
-    if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
-                              s->state == MIGRATION_STATUS_DEVICE)) {
-        /*
-         * If not doing postcopy, vm_start() will be called: let's
-         * regain control on images.
-         */
-        Error *local_err = NULL;
-
-        bql_lock();
-        bdrv_activate_all(&local_err);
-        if (local_err) {
-            error_report_err(local_err);
-        } else {
-            s->block_inactive = false;
-        }
-        bql_unlock();
-    }
-
-    migrate_set_state(&s->state, current_active_state,
-                      MIGRATION_STATUS_FAILED);
-}
-
 /**
  * migration_completion: Used by migration_thread when there's not much left.
  *   The caller 'breaks' the loop when this returns.
@@ -2839,7 +2799,8 @@ fail:
         error_free(local_err);
     }
 
-    migration_completion_failed(s, current_active_state);
+    migrate_set_state(&s->state, current_active_state,
+                      MIGRATION_STATUS_FAILED);
 }
 
 /**
@@ -3269,6 +3230,11 @@ static void migration_iteration_finish(MigrationState *s)
     case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_CANCELLING:
+        /*
+         * Re-activate the block drives if they're inactivated.  Note, COLO
+         * shouldn't use block_active at all, so it should be no-op there.
+         */
+        migration_block_activate(NULL);
         if (runstate_is_live(s->vm_old_state)) {
             if (!runstate_check(RUN_STATE_SHUTDOWN)) {
                 vm_start();
@@ -3842,6 +3808,8 @@ static void migration_instance_init(Object *obj)
     ms->state = MIGRATION_STATUS_NONE;
     ms->mbps = -1;
     ms->pages_per_second = -1;
+    /* Freshly started QEMU owns all the block devices */
+    migration_block_active_setup(true);
     qemu_sem_init(&ms->pause_sem, 0);
     qemu_mutex_init(&ms->error_mutex);
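The helpers this patch calls (migration_block_active_setup(), migration_block_activate(), migration_block_inactivate()) are defined outside migration.c, so their bodies are not part of this diff. The call sites suggest the shape: one flag records which side currently owns the disks, activation and inactivation become no-ops when the state already matches, and that is what allows deleting the restart_block and s->block_inactive bookkeeping above. The sketch below is illustrative only; the flag name, logging and error handling are assumptions, not the upstream implementation.

/*
 * Illustrative sketch only, not the upstream code: track disk ownership
 * with a single flag and wrap bdrv_activate_all() / bdrv_inactivate_all()
 * so that repeated calls are harmless.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "block/block.h"

static bool block_active;

/*
 * Record the initial owner: true for a freshly started QEMU, false for an
 * incoming migration target (the src still owns the disks).
 */
void migration_block_active_setup(bool active)
{
    block_active = active;
}

bool migration_block_activate(Error **errp)
{
    Error *local_err = NULL;

    if (block_active) {
        /* Already own the disks, nothing to do */
        return true;
    }

    bdrv_activate_all(&local_err);
    if (local_err) {
        if (errp) {
            error_propagate(errp, local_err);
        } else {
            /* Callers passing NULL still want the failure logged */
            error_report_err(local_err);
        }
        return false;
    }

    block_active = true;
    return true;
}

bool migration_block_inactivate(void)
{
    int ret;

    if (!block_active) {
        /* Disks already handed over */
        return true;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        error_report("%s: bdrv_inactivate_all() failed: %s",
                     __func__, strerror(-ret));
        return false;
    }

    block_active = false;
    return true;
}

With helpers of this shape, the error, cancel and finish paths in the diff can call migration_block_activate(NULL) unconditionally, without first checking whether the disks were ever inactivated.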