Diffstat (limited to 'block/mirror.c')
| -rw-r--r-- | block/mirror.c | 88 |
1 file changed, 63 insertions, 25 deletions
diff --git a/block/mirror.c b/block/mirror.c
index c839542774..2096fade90 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -479,7 +479,7 @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
     return bytes_handled;
 }
 
-static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
+static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
 {
     BlockDriverState *source = s->mirror_top_bs->backing->bs;
     MirrorOp *pseudo_op;
@@ -678,6 +678,7 @@ static int mirror_exit_common(Job *job)
     s->prepared = true;
 
     aio_context_acquire(qemu_get_aio_context());
+    bdrv_graph_rdlock_main_loop();
 
     mirror_top_bs = s->mirror_top_bs;
     bs_opaque = mirror_top_bs->opaque;
@@ -696,6 +697,8 @@ static int mirror_exit_common(Job *job)
     bdrv_ref(mirror_top_bs);
     bdrv_ref(target_bs);
 
+    bdrv_graph_rdunlock_main_loop();
+
     /*
      * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
      * inserting target_bs at s->to_replace, where we might not be able to get
@@ -709,12 +712,12 @@ static int mirror_exit_common(Job *job)
      * these permissions any more means that we can't allow any new requests on
      * mirror_top_bs from now on, so keep it drained. */
     bdrv_drained_begin(mirror_top_bs);
+    bdrv_drained_begin(target_bs);
     bs_opaque->stop = true;
 
     bdrv_graph_rdlock_main_loop();
     bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                              &error_abort);
-    bdrv_graph_rdunlock_main_loop();
 
     if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
         BlockDriverState *backing = s->is_none_mode ? src : s->base;
@@ -737,6 +740,7 @@ static int mirror_exit_common(Job *job)
             local_err = NULL;
         }
     }
+    bdrv_graph_rdunlock_main_loop();
 
     if (s->to_replace) {
         replace_aio_context = bdrv_get_aio_context(s->to_replace);
@@ -754,15 +758,13 @@ static int mirror_exit_common(Job *job)
         /* The mirror job has no requests in flight any more, but we need to
          * drain potential other users of the BDS before changing the graph. */
         assert(s->in_drain);
-        bdrv_drained_begin(target_bs);
+        bdrv_drained_begin(to_replace);
         /*
          * Cannot use check_to_replace_node() here, because that would
          * check for an op blocker on @to_replace, and we have our own
         * there.
-         *
-         * TODO Pull out the writer lock from bdrv_replace_node() to here
         */
-        bdrv_graph_rdlock_main_loop();
+        bdrv_graph_wrlock(target_bs);
         if (bdrv_recurse_can_replace(src, to_replace)) {
             bdrv_replace_node(to_replace, target_bs, &local_err);
         } else {
@@ -771,8 +773,8 @@ static int mirror_exit_common(Job *job)
                        "would not lead to an abrupt change of visible data",
                        to_replace->node_name, target_bs->node_name);
         }
-        bdrv_graph_rdunlock_main_loop();
-        bdrv_drained_end(target_bs);
+        bdrv_graph_wrunlock();
+        bdrv_drained_end(to_replace);
         if (local_err) {
             error_report_err(local_err);
             ret = -EPERM;
@@ -787,7 +789,6 @@ static int mirror_exit_common(Job *job)
         aio_context_release(replace_aio_context);
     }
     g_free(s->replaces);
-    bdrv_unref(target_bs);
 
     /*
      * Remove the mirror filter driver from the graph. Before this, get rid of
@@ -795,7 +796,12 @@ static int mirror_exit_common(Job *job)
      * valid.
      */
     block_job_remove_all_bdrv(bjob);
+    bdrv_graph_wrlock(mirror_top_bs);
     bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
+    bdrv_graph_wrunlock();
+
+    bdrv_drained_end(target_bs);
+    bdrv_unref(target_bs);
 
     bs_opaque->job = NULL;
 
@@ -833,14 +839,18 @@ static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
     }
 }
 
-static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
+static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
 {
     int64_t offset;
-    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
+    BlockDriverState *bs;
     BlockDriverState *target_bs = blk_bs(s->target);
     int ret;
     int64_t count;
 
+    bdrv_graph_co_rdlock();
+    bs = s->mirror_top_bs->backing->bs;
+    bdrv_graph_co_rdunlock();
+
     if (s->zero_target) {
         if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
             bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
@@ -920,7 +930,7 @@ static int coroutine_fn mirror_flush(MirrorBlockJob *s)
 static int coroutine_fn mirror_run(Job *job, Error **errp)
 {
     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
-    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
+    BlockDriverState *bs;
     MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
     BlockDriverState *target_bs = blk_bs(s->target);
     bool need_drain = true;
@@ -932,6 +942,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
                                  checking for a NULL string */
     int ret = 0;
 
+    bdrv_graph_co_rdlock();
+    bs = bdrv_filter_bs(s->mirror_top_bs);
+    bdrv_graph_co_rdunlock();
+
     if (job_is_cancelled(&s->common.job)) {
         goto immediate_exit;
     }
@@ -992,13 +1006,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
     } else {
         s->target_cluster_size = BDRV_SECTOR_SIZE;
     }
-    bdrv_graph_co_rdunlock();
     if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
         s->granularity < s->target_cluster_size) {
         s->buf_size = MAX(s->buf_size, s->target_cluster_size);
         s->cow_bitmap = bitmap_new(length);
     }
     s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
+    bdrv_graph_co_rdunlock();
 
     s->buf = qemu_try_blockalign(bs, s->buf_size);
     if (s->buf == NULL) {
@@ -1064,7 +1078,9 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
                 mirror_wait_for_free_in_flight_slot(s);
                 continue;
             } else if (cnt != 0) {
+                bdrv_graph_co_rdlock();
                 mirror_iteration(s);
+                bdrv_graph_co_rdunlock();
             }
         }
 
@@ -1634,7 +1650,7 @@ bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
                                     offset, bytes, NULL, 0);
 }
 
-static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
+static void GRAPH_RDLOCK bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
 {
     if (bs->backing == NULL) {
         /* we can be here after failed bdrv_attach_child in
@@ -1744,12 +1760,15 @@ static BlockJob *mirror_start_job(
         buf_size = DEFAULT_MIRROR_BUF_SIZE;
     }
 
+    bdrv_graph_rdlock_main_loop();
     if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
         error_setg(errp, "Can't mirror node into itself");
+        bdrv_graph_rdunlock_main_loop();
         return NULL;
     }
 
     target_is_backing = bdrv_chain_contains(bs, target);
+    bdrv_graph_rdunlock_main_loop();
 
     /* In the case of active commit, add dummy driver to provide consistent
      * reads on the top, while disabling it in the intermediate nodes, and make
@@ -1832,14 +1851,19 @@ static BlockJob *mirror_start_job(
         }
 
         target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
-    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
-        /*
-         * We may want to allow this in the future, but it would
-         * require taking some extra care.
-         */
-        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
-                   "source's backing chain");
-        goto fail;
+    } else {
+        bdrv_graph_rdlock_main_loop();
+        if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
+            /*
+             * We may want to allow this in the future, but it would
+             * require taking some extra care.
+             */
+            error_setg(errp, "Cannot mirror to a filter on top of a node in "
+                       "the source's backing chain");
+            bdrv_graph_rdunlock_main_loop();
+            goto fail;
+        }
+        bdrv_graph_rdunlock_main_loop();
     }
 
     s->target = blk_new(s->common.job.aio_context,
@@ -1860,6 +1884,7 @@ static BlockJob *mirror_start_job(
     blk_set_allow_aio_context_change(s->target, true);
     blk_set_disable_request_queuing(s->target, true);
 
+    bdrv_graph_rdlock_main_loop();
     s->replaces = g_strdup(replaces);
     s->on_source_error = on_source_error;
     s->on_target_error = on_target_error;
@@ -1875,6 +1900,7 @@ static BlockJob *mirror_start_job(
     if (auto_complete) {
         s->should_complete = true;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
                                                NULL, errp);
@@ -1888,11 +1914,13 @@ static BlockJob *mirror_start_job(
      */
     bdrv_disable_dirty_bitmap(s->dirty_bitmap);
 
+    bdrv_graph_wrlock(bs);
     ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                              BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                              BLK_PERM_CONSISTENT_READ,
                              errp);
     if (ret < 0) {
+        bdrv_graph_wrunlock();
         goto fail;
     }
 
@@ -1937,14 +1965,17 @@ static BlockJob *mirror_start_job(
             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                      iter_shared_perms, errp);
             if (ret < 0) {
+                bdrv_graph_wrunlock();
                 goto fail;
             }
         }
 
         if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
+            bdrv_graph_wrunlock();
             goto fail;
         }
     }
+    bdrv_graph_wrunlock();
 
     QTAILQ_INIT(&s->ops_in_flight);
 
@@ -1969,11 +2000,14 @@ fail:
     }
 
     bs_opaque->stop = true;
-    bdrv_graph_rdlock_main_loop();
+    bdrv_drained_begin(bs);
+    bdrv_graph_wrlock(bs);
+    assert(mirror_top_bs->backing->bs == bs);
     bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                              &error_abort);
-    bdrv_graph_rdunlock_main_loop();
-    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
+    bdrv_replace_node(mirror_top_bs, bs, &error_abort);
+    bdrv_graph_wrunlock();
+    bdrv_drained_end(bs);
 
     bdrv_unref(mirror_top_bs);
 
@@ -2002,8 +2036,12 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
                    MirrorSyncMode_str(mode));
         return;
     }
+
+    bdrv_graph_rdlock_main_loop();
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
     base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
+    bdrv_graph_rdunlock_main_loop();
+
     mirror_start_job(job_id, bs, creation_flags, target, replaces, speed,
                      granularity, buf_size, backing_mode, zero_target,
                      on_source_error, on_target_error, unmap, NULL, NULL,
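
The hunks above all follow the same graph-locking pattern: reads of block-graph edges (such as following ->backing->bs or bdrv_filter_bs()) are wrapped in the reader lock, coroutine code uses the bdrv_graph_co_rdlock()/bdrv_graph_co_rdunlock() pair, and graph-changing calls such as bdrv_replace_node() move under the writer lock. The sketch below is illustrative only and is not part of the change; the bdrv_graph_* and bdrv_replace_node() calls and their signatures are taken from the diff itself, while the example_* helpers are hypothetical and assume the usual QEMU block-layer declarations are in scope.

/*
 * Illustrative sketch of the locking pattern applied by the diff above.
 * The example_* helpers are hypothetical; only the bdrv_graph_* and
 * bdrv_replace_node() calls are taken from the patch.
 */

/* Reader side (main-loop context): graph edges such as ->backing->bs are
 * only followed while the reader lock is held. */
static BlockDriverState *example_get_source(MirrorBlockJob *s)
{
    BlockDriverState *bs;

    bdrv_graph_rdlock_main_loop();
    bs = s->mirror_top_bs->backing->bs;
    bdrv_graph_rdunlock_main_loop();

    return bs;
}

/* Writer side: graph-changing calls such as bdrv_replace_node() are wrapped
 * in the writer lock, where the old code took only the reader lock. */
static void example_replace(BlockDriverState *from, BlockDriverState *to)
{
    bdrv_graph_wrlock(to);
    bdrv_replace_node(from, to, &error_abort);
    bdrv_graph_wrunlock();
}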