Diffstat (limited to 'block')
-rw-r--r--   block/backup.c          19
-rw-r--r--   block/bochs.c            4
-rw-r--r--   block/cloop.c            4
-rw-r--r--   block/commit.c           1
-rw-r--r--   block/create.c           6
-rw-r--r--   block/curl.c           133
-rw-r--r--   block/dmg.c              4
-rw-r--r--   block/file-posix.c      83
-rw-r--r--   block/io.c               8
-rw-r--r--   block/mirror.c          28
-rw-r--r--   block/nfs.c              9
-rw-r--r--   block/qcow2-cluster.c   48
-rw-r--r--   block/qcow2-threads.c   63
-rw-r--r--   block/qcow2.c           15
-rw-r--r--   block/qcow2.h            8
-rw-r--r--   block/stream.c           1
-rw-r--r--   block/vpc.c              3
-rw-r--r--   block/vvfat.c            8
18 files changed, 172 insertions, 273 deletions
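A large share of the churn in the diff below is mechanical: open-coded checks of the form `(offset & (BDRV_SECTOR_SIZE - 1)) == 0` are replaced with `QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)`. The two tests are equivalent whenever the alignment is a power of two, which BDRV_SECTOR_SIZE (512) is. The sketch below only illustrates that equivalence; `IS_ALIGNED` and `SECTOR_SIZE` are local stand-ins for this example, not QEMU's actual macros (the real QEMU_IS_ALIGNED lives in include/qemu/osdep.h and may be defined differently).

```c
#include <assert.h>
#include <stdint.h>

/* Stand-in for QEMU's BDRV_SECTOR_SIZE (512, a power of two). */
#define SECTOR_SIZE 512u

/* Hypothetical local equivalent of QEMU_IS_ALIGNED, assumed here to be a
 * simple modulo test; the upstream macro may be written differently. */
#define IS_ALIGNED(n, m) (((n) % (m)) == 0)

int main(void)
{
    for (uint64_t off = 0; off < 4096; off++) {
        /* For a power-of-two size, the modulo test and the old
         * open-coded mask test accept exactly the same offsets. */
        int old_style = ((off & (SECTOR_SIZE - 1)) == 0);
        int new_style = IS_ALIGNED(off, SECTOR_SIZE);
        assert(old_style == new_style);
    }
    return 0;
}
```

Compiled and run, this should exit silently; any offset where the two predicates disagreed would trip the assert.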
diff --git a/block/backup.c b/block/backup.c
index 03637aeb11..763f0d7ff6 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -425,21 +425,6 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
     bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
 }
 
-static void backup_drain(BlockJob *job)
-{
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
-
-    /* Need to keep a reference in case blk_drain triggers execution
-     * of backup_complete...
-     */
-    if (s->target) {
-        BlockBackend *target = s->target;
-        blk_ref(target);
-        blk_drain(target);
-        blk_unref(target);
-    }
-}
-
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                             bool read, int error)
 {
@@ -588,13 +573,11 @@ static const BlockJobDriver backup_job_driver = {
         .job_type = JOB_TYPE_BACKUP,
         .free = block_job_free,
         .user_resume = block_job_user_resume,
-        .drain = block_job_drain,
         .run = backup_run,
         .commit = backup_commit,
         .abort = backup_abort,
         .clean = backup_clean,
-    },
-    .drain = backup_drain,
+    }
 };
 
 static int64_t backup_calculate_cluster_size(BlockDriverState *target,
diff --git a/block/bochs.c b/block/bochs.c
index 962f18592d..32bb83b268 100644
--- a/block/bochs.c
+++ b/block/bochs.c
@@ -248,8 +248,8 @@ bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     QEMUIOVector local_qiov;
     int ret;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
 
     qemu_iovec_init(&local_qiov, qiov->niov);
     qemu_co_mutex_lock(&s->lock);
diff --git a/block/cloop.c b/block/cloop.c
index 384c9735bb..4de94876d4 100644
--- a/block/cloop.c
+++ b/block/cloop.c
@@ -253,8 +253,8 @@ cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int nb_sectors = bytes >> BDRV_SECTOR_BITS;
     int ret, i;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
 
     qemu_co_mutex_lock(&s->lock);
 
diff --git a/block/commit.c b/block/commit.c
index 408ae15389..bc8454463d 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -216,7 +216,6 @@ static const BlockJobDriver commit_job_driver = {
         .job_type = JOB_TYPE_COMMIT,
         .free = block_job_free,
         .user_resume = block_job_user_resume,
-        .drain = block_job_drain,
         .run = commit_run,
         .prepare = commit_prepare,
         .abort = commit_abort,
diff --git a/block/create.c b/block/create.c
index 1bd00ed5f8..89812669df 100644
--- a/block/create.c
+++ b/block/create.c
@@ -64,9 +64,13 @@ void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
     const char *fmt = BlockdevDriver_str(options->driver);
     BlockDriver *drv = bdrv_find_format(fmt);
 
+    if (!drv) {
+        error_setg(errp, "Block driver '%s' not found or not supported", fmt);
+        return;
+    }
+
     /* If the driver is in the schema, we know that it exists. But it may not
      * be whitelisted. */
-    assert(drv);
     if (bdrv_uses_whitelist() && !bdrv_is_whitelisted(drv, false)) {
         error_setg(errp, "Driver is not whitelisted");
         return;
     }
diff --git a/block/curl.c b/block/curl.c
index d4c8e94f3e..f86299378e 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -80,6 +80,7 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,
 #define CURL_BLOCK_OPT_TIMEOUT_DEFAULT 5
 
 struct BDRVCURLState;
+struct CURLState;
 
 static bool libcurl_initialized;
 
@@ -97,6 +98,7 @@ typedef struct CURLAIOCB {
 
 typedef struct CURLSocket {
     int fd;
+    struct CURLState *state;
     QLIST_ENTRY(CURLSocket) next;
 } CURLSocket;
 
@@ -137,7 +139,6 @@ typedef struct BDRVCURLState {
 
 static void curl_clean_state(CURLState *s);
 static void curl_multi_do(void *arg);
-static void curl_multi_read(void *arg);
 
 #ifdef NEED_CURL_TIMER_CALLBACK
 /* Called from curl_multi_do_locked, with s->mutex held. */
@@ -170,33 +171,29 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
 
     QLIST_FOREACH(socket, &state->sockets, next) {
         if (socket->fd == fd) {
-            if (action == CURL_POLL_REMOVE) {
-                QLIST_REMOVE(socket, next);
-                g_free(socket);
-            }
             break;
         }
     }
     if (!socket) {
         socket = g_new0(CURLSocket, 1);
         socket->fd = fd;
+        socket->state = state;
         QLIST_INSERT_HEAD(&state->sockets, socket, next);
     }
-    socket = NULL;
 
     trace_curl_sock_cb(action, (int)fd);
     switch (action) {
         case CURL_POLL_IN:
             aio_set_fd_handler(s->aio_context, fd, false,
-                               curl_multi_read, NULL, NULL, state);
+                               curl_multi_do, NULL, NULL, socket);
             break;
         case CURL_POLL_OUT:
             aio_set_fd_handler(s->aio_context, fd, false,
-                               NULL, curl_multi_do, NULL, state);
+                               NULL, curl_multi_do, NULL, socket);
            break;
         case CURL_POLL_INOUT:
             aio_set_fd_handler(s->aio_context, fd, false,
-                               curl_multi_read, curl_multi_do, NULL, state);
+                               curl_multi_do, curl_multi_do, NULL, socket);
             break;
         case CURL_POLL_REMOVE:
             aio_set_fd_handler(s->aio_context, fd, false,
@@ -204,6 +201,11 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
             break;
     }
 
+    if (action == CURL_POLL_REMOVE) {
+        QLIST_REMOVE(socket, next);
+        g_free(socket);
+    }
+
     return 0;
 }
 
@@ -227,7 +229,6 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
 {
     CURLState *s = ((CURLState*)opaque);
     size_t realsize = size * nmemb;
-    int i;
 
     trace_curl_read_cb(realsize);
 
@@ -243,32 +244,6 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
     memcpy(s->orig_buf + s->buf_off, ptr, realsize);
     s->buf_off += realsize;
 
-    for(i=0; i<CURL_NUM_ACB; i++) {
-        CURLAIOCB *acb = s->acb[i];
-
-        if (!acb)
-            continue;
-
-        if ((s->buf_off >= acb->end)) {
-            size_t request_length = acb->bytes;
-
-            qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
-                                acb->end - acb->start);
-
-            if (acb->end - acb->start < request_length) {
-                size_t offset = acb->end - acb->start;
-                qemu_iovec_memset(acb->qiov, offset, 0,
-                                  request_length - offset);
-            }
-
-            acb->ret = 0;
-            s->acb[i] = NULL;
-            qemu_mutex_unlock(&s->s->mutex);
-            aio_co_wake(acb->co);
-            qemu_mutex_lock(&s->s->mutex);
-        }
-    }
-
 read_end:
     /* curl will error out if we do not return this value */
     return size * nmemb;
@@ -349,13 +324,14 @@ static void curl_multi_check_completion(BDRVCURLState *s)
             break;
 
         if (msg->msg == CURLMSG_DONE) {
+            int i;
             CURLState *state = NULL;
+            bool error = msg->data.result != CURLE_OK;
+
             curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE,
                               (char **)&state);
 
-            /* ACBs for successful messages get completed in curl_read_cb */
-            if (msg->data.result != CURLE_OK) {
-                int i;
+            if (error) {
                 static int errcount = 100;
 
                 /* Don't lose the original error message from curl, since
@@ -367,20 +343,35 @@ static void curl_multi_check_completion(BDRVCURLState *s)
                         error_report("curl: further errors suppressed");
                     }
                 }
+            }
 
-                for (i = 0; i < CURL_NUM_ACB; i++) {
-                    CURLAIOCB *acb = state->acb[i];
+            for (i = 0; i < CURL_NUM_ACB; i++) {
+                CURLAIOCB *acb = state->acb[i];
 
-                    if (acb == NULL) {
-                        continue;
-                    }
+                if (acb == NULL) {
+                    continue;
+                }
+
+                if (!error) {
+                    /* Assert that we have read all data */
+                    assert(state->buf_off >= acb->end);
+
+                    qemu_iovec_from_buf(acb->qiov, 0,
+                                        state->orig_buf + acb->start,
+                                        acb->end - acb->start);
 
-                    acb->ret = -EIO;
-                    state->acb[i] = NULL;
-                    qemu_mutex_unlock(&s->mutex);
-                    aio_co_wake(acb->co);
-                    qemu_mutex_lock(&s->mutex);
+                    if (acb->end - acb->start < acb->bytes) {
+                        size_t offset = acb->end - acb->start;
+                        qemu_iovec_memset(acb->qiov, offset, 0,
+                                          acb->bytes - offset);
+                    }
                 }
+
+                acb->ret = error ? -EIO : 0;
+                state->acb[i] = NULL;
+                qemu_mutex_unlock(&s->mutex);
+                aio_co_wake(acb->co);
+                qemu_mutex_lock(&s->mutex);
             }
 
             curl_clean_state(state);
@@ -390,42 +381,30 @@
 }
 
 /* Called with s->mutex held. */
-static void curl_multi_do_locked(CURLState *s)
+static void curl_multi_do_locked(CURLSocket *socket)
 {
-    CURLSocket *socket, *next_socket;
+    BDRVCURLState *s = socket->state->s;
     int running;
     int r;
 
-    if (!s->s->multi) {
+    if (!s->multi) {
         return;
     }
 
-    /* Need to use _SAFE because curl_multi_socket_action() may trigger
-     * curl_sock_cb() which might modify this list */
-    QLIST_FOREACH_SAFE(socket, &s->sockets, next, next_socket) {
-        do {
-            r = curl_multi_socket_action(s->s->multi, socket->fd, 0, &running);
-        } while (r == CURLM_CALL_MULTI_PERFORM);
-    }
+    do {
+        r = curl_multi_socket_action(s->multi, socket->fd, 0, &running);
+    } while (r == CURLM_CALL_MULTI_PERFORM);
 }
 
 static void curl_multi_do(void *arg)
 {
-    CURLState *s = (CURLState *)arg;
+    CURLSocket *socket = arg;
+    BDRVCURLState *s = socket->state->s;
 
-    qemu_mutex_lock(&s->s->mutex);
-    curl_multi_do_locked(s);
-    qemu_mutex_unlock(&s->s->mutex);
-}
-
-static void curl_multi_read(void *arg)
-{
-    CURLState *s = (CURLState *)arg;
-
-    qemu_mutex_lock(&s->s->mutex);
-    curl_multi_do_locked(s);
-    curl_multi_check_completion(s->s);
-    qemu_mutex_unlock(&s->s->mutex);
+    qemu_mutex_lock(&s->mutex);
+    curl_multi_do_locked(socket);
+    curl_multi_check_completion(s);
+    qemu_mutex_unlock(&s->mutex);
 }
 
 static void curl_multi_timeout_do(void *arg)
@@ -903,7 +882,13 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
     trace_curl_setup_preadv(acb->bytes, start, state->range);
     curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range);
 
-    curl_multi_add_handle(s->multi, state->curl);
+    if (curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) {
+        state->acb[0] = NULL;
+        acb->ret = -EIO;
+
+        curl_clean_state(state);
+        goto out;
+    }
 
     /* Tell curl it needs to kick things off */
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
diff --git a/block/dmg.c b/block/dmg.c
index 45f6b28f17..4a045f2b3e 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -697,8 +697,8 @@ dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int nb_sectors = bytes >> BDRV_SECTOR_BITS;
     int ret, i;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
 
     qemu_co_mutex_lock(&s->lock);
 
diff --git a/block/file-posix.c b/block/file-posix.c
index 87c5a4ccbd..f12c06de2d 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -1459,59 +1459,6 @@ out:
     }
 }
 
-#ifdef CONFIG_XFS
-static int xfs_write_zeroes(BDRVRawState *s, int64_t offset, uint64_t bytes)
-{
-    int64_t len;
-    struct xfs_flock64 fl;
-    int err;
-
-    len = lseek(s->fd, 0, SEEK_END);
-    if (len < 0) {
-        return -errno;
-    }
-
-    if (offset + bytes > len) {
-        /* XFS_IOC_ZERO_RANGE does not increase the file length */
-        if (ftruncate(s->fd, offset + bytes) < 0) {
-            return -errno;
-        }
-    }
-
-    memset(&fl, 0, sizeof(fl));
-    fl.l_whence = SEEK_SET;
-    fl.l_start = offset;
-    fl.l_len = bytes;
-
-    if (xfsctl(NULL, s->fd, XFS_IOC_ZERO_RANGE, &fl) < 0) {
-        err = errno;
-        trace_file_xfs_write_zeroes(strerror(errno));
-        return -err;
-    }
-
-    return 0;
-}
-
-static int xfs_discard(BDRVRawState *s, int64_t offset, uint64_t bytes)
-{
-    struct xfs_flock64 fl;
-    int err;
-
-    memset(&fl, 0, sizeof(fl));
-    fl.l_whence = SEEK_SET;
-    fl.l_start = offset;
-    fl.l_len = bytes;
-
-    if (xfsctl(NULL, s->fd, XFS_IOC_UNRESVSP64, &fl) < 0) {
-        err = errno;
-        trace_file_xfs_discard(strerror(errno));
-        return -err;
-    }
-
-    return 0;
-}
-#endif
-
 static int translate_err(int err)
 {
     if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
@@ -1555,22 +1502,20 @@ static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
         } while (errno == EINTR);
 
         ret = translate_err(-errno);
+        if (ret == -ENOTSUP) {
+            s->has_write_zeroes = false;
+        }
     }
 #endif
 
-    if (ret == -ENOTSUP) {
-        s->has_write_zeroes = false;
-    }
-
     return ret;
 }
 
 static int handle_aiocb_write_zeroes(void *opaque)
 {
     RawPosixAIOData *aiocb = opaque;
-#if defined(CONFIG_FALLOCATE) || defined(CONFIG_XFS)
-    BDRVRawState *s = aiocb->bs->opaque;
-#endif
 #ifdef CONFIG_FALLOCATE
+    BDRVRawState *s = aiocb->bs->opaque;
     int64_t len;
 #endif
@@ -1578,12 +1523,6 @@ static int handle_aiocb_write_zeroes(void *opaque)
         return handle_aiocb_write_zeroes_block(aiocb);
     }
 
-#ifdef CONFIG_XFS
-    if (s->is_xfs) {
-        return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes);
-    }
-#endif
-
 #ifdef CONFIG_FALLOCATE_ZERO_RANGE
     if (s->has_write_zeroes) {
         int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
@@ -1653,14 +1592,6 @@ static int handle_aiocb_write_zeroes_unmap(void *opaque)
     }
 #endif
 
-#ifdef CONFIG_XFS
-    if (s->is_xfs) {
-        /* xfs_discard() guarantees that the discarded area reads as all-zero
-         * afterwards, so we can use it here. */
-        return xfs_discard(s, aiocb->aio_offset, aiocb->aio_nbytes);
-    }
-#endif
-
     /* If we couldn't manage to unmap while guaranteed that the area reads as
      * all-zero afterwards, just write zeroes without unmapping */
     ret = handle_aiocb_write_zeroes(aiocb);
@@ -1737,12 +1668,6 @@ static int handle_aiocb_discard(void *opaque)
         ret = -errno;
 #endif
     } else {
-#ifdef CONFIG_XFS
-        if (s->is_xfs) {
-            return xfs_discard(s, aiocb->aio_offset, aiocb->aio_nbytes);
-        }
-#endif
-
 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
         ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            aiocb->aio_offset, aiocb->aio_nbytes);
diff --git a/block/io.c b/block/io.c
index 16a598fd08..f8c3596131 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1097,8 +1097,8 @@ static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
     sector_num = offset >> BDRV_SECTOR_BITS;
     nb_sectors = bytes >> BDRV_SECTOR_BITS;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
     assert(drv->bdrv_co_readv);
 
@@ -1171,8 +1171,8 @@ static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
     sector_num = offset >> BDRV_SECTOR_BITS;
     nb_sectors = bytes >> BDRV_SECTOR_BITS;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
     assert(drv->bdrv_co_writev);
 
diff --git a/block/mirror.c b/block/mirror.c
index 853e2c7510..fe984efb90 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -646,14 +646,11 @@ static int mirror_exit_common(Job *job)
     bdrv_ref(mirror_top_bs);
     bdrv_ref(target_bs);
 
-    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
+    /*
+     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
      * inserting target_bs at s->to_replace, where we might not be able to get
      * these permissions.
-     *
-     * Note that blk_unref() alone doesn't necessarily drop permissions because
-     * we might be running nested inside mirror_drain(), which takes an extra
-     * reference, so use an explicit blk_set_perm() first. */
-    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
+     */
     blk_unref(s->target);
     s->target = NULL;
 
@@ -1149,28 +1146,12 @@ static bool mirror_drained_poll(BlockJob *job)
     return !!s->in_flight;
 }
 
-static void mirror_drain(BlockJob *job)
-{
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
-
-    /* Need to keep a reference in case blk_drain triggers execution
-     * of mirror_complete...
-     */
-    if (s->target) {
-        BlockBackend *target = s->target;
-        blk_ref(target);
-        blk_drain(target);
-        blk_unref(target);
-    }
-}
-
 static const BlockJobDriver mirror_job_driver = {
     .job_driver = {
         .instance_size = sizeof(MirrorBlockJob),
         .job_type = JOB_TYPE_MIRROR,
         .free = block_job_free,
         .user_resume = block_job_user_resume,
-        .drain = block_job_drain,
         .run = mirror_run,
         .prepare = mirror_prepare,
         .abort = mirror_abort,
@@ -1178,7 +1159,6 @@ static const BlockJobDriver mirror_job_driver = {
         .complete = mirror_complete,
     },
     .drained_poll = mirror_drained_poll,
-    .drain = mirror_drain,
 };
 
 static const BlockJobDriver commit_active_job_driver = {
@@ -1187,7 +1167,6 @@
         .job_type = JOB_TYPE_COMMIT,
         .free = block_job_free,
         .user_resume = block_job_user_resume,
-        .drain = block_job_drain,
         .run = mirror_run,
         .prepare = mirror_prepare,
         .abort = mirror_abort,
@@ -1195,7 +1174,6 @@
         .complete = mirror_complete,
     },
     .drained_poll = mirror_drained_poll,
-    .drain = mirror_drain,
 };
 
 static void coroutine_fn
diff --git a/block/nfs.c b/block/nfs.c
index 0ec50953e4..f39acfdb28 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -390,12 +390,17 @@ static void nfs_attach_aio_context(BlockDriverState *bs,
 static void nfs_client_close(NFSClient *client)
 {
     if (client->context) {
+        qemu_mutex_lock(&client->mutex);
+        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
+                           false, NULL, NULL, NULL, NULL);
+        qemu_mutex_unlock(&client->mutex);
         if (client->fh) {
             nfs_close(client->context, client->fh);
             client->fh = NULL;
         }
-        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           false, NULL, NULL, NULL, NULL);
+#ifdef LIBNFS_FEATURE_UMOUNT
+        nfs_umount(client->context);
+#endif
         nfs_destroy_context(client->context);
         client->context = NULL;
     }
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index f09cc992af..8d5fa1539c 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -462,27 +462,6 @@ static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
     return 0;
 }
 
-static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
-                                                uint64_t src_cluster_offset,
-                                                uint64_t cluster_offset,
-                                                unsigned offset_in_cluster,
-                                                uint8_t *buffer,
-                                                unsigned bytes)
-{
-    if (bytes && bs->encrypted) {
-        BDRVQcow2State *s = bs->opaque;
-        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
-        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
-        assert(s->crypto);
-        if (qcow2_co_encrypt(bs, cluster_offset,
-                             src_cluster_offset + offset_in_cluster,
-                             buffer, bytes) < 0) {
-            return false;
-        }
-    }
-    return true;
-}
-
 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                              uint64_t cluster_offset,
                                              unsigned offset_in_cluster,
@@ -890,12 +869,19 @@ static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
 
     /* Encrypt the data if necessary before writing it */
     if (bs->encrypted) {
-        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
-                                    start->offset, start_buffer,
-                                    start->nb_bytes) ||
-            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
-                                    end->offset, end_buffer, end->nb_bytes)) {
-            ret = -EIO;
+        ret = qcow2_co_encrypt(bs,
+                               m->alloc_offset + start->offset,
+                               m->offset + start->offset,
+                               start_buffer, start->nb_bytes);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        ret = qcow2_co_encrypt(bs,
+                               m->alloc_offset + end->offset,
+                               m->offset + end->offset,
+                               end_buffer, end->nb_bytes);
+        if (ret < 0) {
             goto fail;
         }
     }
@@ -1351,13 +1337,7 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
     }
 
     entry = be64_to_cpu(l2_slice[l2_index]);
-
-    /* For the moment, overwrite compressed clusters one by one */
-    if (entry & QCOW_OFLAG_COMPRESSED) {
-        nb_clusters = 1;
-    } else {
-        nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
-    }
+    nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
 
     /* This function is only called when there were no non-COW clusters, so if
      * we can't find any unallocated or COW clusters either, something is
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
index 3b1e63fe41..8f5a0d1ebe 100644
--- a/block/qcow2-threads.c
+++ b/block/qcow2-threads.c
@@ -234,35 +234,70 @@ static int qcow2_encdec_pool_func(void *opaque)
 }
 
 static int coroutine_fn
-qcow2_co_encdec(BlockDriverState *bs, uint64_t file_cluster_offset,
-                uint64_t offset, void *buf, size_t len, Qcow2EncDecFunc func)
+qcow2_co_encdec(BlockDriverState *bs, uint64_t host_offset,
+                uint64_t guest_offset, void *buf, size_t len,
+                Qcow2EncDecFunc func)
 {
     BDRVQcow2State *s = bs->opaque;
     Qcow2EncDecData arg = {
         .block = s->crypto,
-        .offset = s->crypt_physical_offset ?
-                      file_cluster_offset + offset_into_cluster(s, offset) :
-                      offset,
+        .offset = s->crypt_physical_offset ? host_offset : guest_offset,
         .buf = buf,
         .len = len,
         .func = func,
     };
 
-    return qcow2_co_process(bs, qcow2_encdec_pool_func, &arg);
+    assert(QEMU_IS_ALIGNED(guest_offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(host_offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(len, BDRV_SECTOR_SIZE));
+    assert(s->crypto);
+
+    return len == 0 ? 0 : qcow2_co_process(bs, qcow2_encdec_pool_func, &arg);
 }
 
+/*
+ * qcow2_co_encrypt()
+ *
+ * Encrypts one or more contiguous aligned sectors
+ *
+ * @host_offset - underlying storage offset of the first sector of the
+ * data to be encrypted
+ *
+ * @guest_offset - guest (virtual) offset of the first sector of the
+ * data to be encrypted
+ *
+ * @buf - buffer with the data to encrypt, that after encryption
+ * will be written to the underlying storage device at
+ * @host_offset
+ *
+ * @len - length of the buffer (must be a BDRV_SECTOR_SIZE multiple)
+ *
+ * Depending on the encryption method, @host_offset and/or @guest_offset
+ * may be used for generating the initialization vector for
+ * encryption.
+ *
+ * Note that while the whole range must be aligned on sectors, it
+ * does not have to be aligned on clusters and can also cross cluster
+ * boundaries
+ */
 int coroutine_fn
-qcow2_co_encrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
-                 uint64_t offset, void *buf, size_t len)
+qcow2_co_encrypt(BlockDriverState *bs, uint64_t host_offset,
+                 uint64_t guest_offset, void *buf, size_t len)
 {
-    return qcow2_co_encdec(bs, file_cluster_offset, offset, buf, len,
-                           qcrypto_block_encrypt);
+    return qcow2_co_encdec(bs, host_offset, guest_offset, buf, len,
+                           qcrypto_block_encrypt);
 }
 
+/*
+ * qcow2_co_decrypt()
+ *
+ * Decrypts one or more contiguous aligned sectors
+ * Similar to qcow2_co_encrypt
+ */
 int coroutine_fn
-qcow2_co_decrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
-                 uint64_t offset, void *buf, size_t len)
+qcow2_co_decrypt(BlockDriverState *bs, uint64_t host_offset,
+                 uint64_t guest_offset, void *buf, size_t len)
 {
-    return qcow2_co_encdec(bs, file_cluster_offset, offset, buf, len,
-                           qcrypto_block_decrypt);
+    return qcow2_co_encdec(bs, host_offset, guest_offset, buf, len,
+                           qcrypto_block_decrypt);
 }
diff --git a/block/qcow2.c b/block/qcow2.c
index 0882ff6e92..4d16393e61 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -828,7 +828,11 @@ static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
     bool l2_cache_entry_size_set;
     int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
     uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
-    uint64_t max_l2_cache = virtual_disk_size / (s->cluster_size / 8);
+    uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
+    /* An L2 table is always one cluster in size so the max cache size
+     * should be a multiple of the cluster size. */
+    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t),
+                                     s->cluster_size);
 
     combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
     l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
@@ -2063,9 +2067,10 @@ static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
                     goto fail;
                 }
 
-                assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-                assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
-                if (qcow2_co_decrypt(bs, cluster_offset, offset,
+                assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+                assert(QEMU_IS_ALIGNED(cur_bytes, BDRV_SECTOR_SIZE));
+                if (qcow2_co_decrypt(bs, cluster_offset + offset_in_cluster,
+                                     offset,
                                      cluster_data, cur_bytes) < 0) {
                     ret = -EIO;
                     goto fail;
@@ -2284,7 +2289,7 @@ static coroutine_fn int qcow2_co_pwritev_part(
             qemu_iovec_to_buf(qiov, qiov_offset + bytes_done,
                               cluster_data, cur_bytes);
 
-            if (qcow2_co_encrypt(bs, cluster_offset, offset,
+            if (qcow2_co_encrypt(bs, cluster_offset + offset_in_cluster, offset,
                                  cluster_data, cur_bytes) < 0) {
                 ret = -EIO;
                 goto out_unlocked;
diff --git a/block/qcow2.h b/block/qcow2.h
index 998bcdaef1..a488d761ff 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -758,10 +758,10 @@ ssize_t coroutine_fn qcow2_co_decompress(BlockDriverState *bs, void *dest,
                                          size_t dest_size, const void *src,
                                          size_t src_size);
 int coroutine_fn
-qcow2_co_encrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
-                 uint64_t offset, void *buf, size_t len);
+qcow2_co_encrypt(BlockDriverState *bs, uint64_t host_offset,
+                 uint64_t guest_offset, void *buf, size_t len);
 int coroutine_fn
-qcow2_co_decrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
-                 uint64_t offset, void *buf, size_t len);
+qcow2_co_decrypt(BlockDriverState *bs, uint64_t host_offset,
+                 uint64_t guest_offset, void *buf, size_t len);
 
 #endif
diff --git a/block/stream.c b/block/stream.c
index 0d3a6ac7c3..5562ccbf57 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -212,7 +212,6 @@ static const BlockJobDriver stream_job_driver = {
         .abort = stream_abort,
         .clean = stream_clean,
         .user_resume = block_job_user_resume,
-        .drain = block_job_drain,
     },
 };
 
diff --git a/block/vpc.c b/block/vpc.c
index b25aab0425..5cd3890780 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -885,6 +885,7 @@ static int create_dynamic_disk(BlockBackend *blk, uint8_t *buf,
         goto fail;
     }
 
+    ret = 0;
 fail:
     return ret;
 }
@@ -908,7 +909,7 @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf,
         return ret;
     }
 
-    return ret;
+    return 0;
 }
 
 static int calculate_rounded_image_size(BlockdevCreateOptionsVpc *vpc_opts,
diff --git a/block/vvfat.c b/block/vvfat.c
index f6c28805dd..019b8f1341 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1547,8 +1547,8 @@ vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int nb_sectors = bytes >> BDRV_SECTOR_BITS;
     void *buf;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
 
     buf = g_try_malloc(bytes);
     if (bytes && buf == NULL) {
@@ -3082,8 +3082,8 @@ vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int nb_sectors = bytes >> BDRV_SECTOR_BITS;
     void *buf;
 
-    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
 
     buf = g_try_malloc(bytes);
     if (bytes && buf == NULL) {
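For the read_cache_sizes() hunk in block/qcow2.c above, a quick worked example may help: with a 100 GiB image and the default 64 KiB cluster size, the old and the new formula both come out to 12.5 MiB, but the new one is guaranteed to be a whole multiple of the cluster size and rounds up when the image size is not an exact multiple of the cluster size. The figures are illustrative, and the DIV_ROUND_UP/ROUND_UP helpers below are local stand-ins rather than QEMU's own header definitions.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for QEMU's DIV_ROUND_UP/ROUND_UP helpers. */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define ROUND_UP(n, m)      (DIV_ROUND_UP(n, m) * (m))

int main(void)
{
    uint64_t virtual_disk_size = 100ULL << 30;  /* example: 100 GiB image */
    uint64_t cluster_size = 64 * 1024;          /* default 64 KiB clusters */

    /* Old bound: virtual_disk_size / (cluster_size / 8) */
    uint64_t old_max = virtual_disk_size / (cluster_size / 8);

    /* New bound: one 8-byte L2 entry per guest cluster, rounded up to a
     * whole number of clusters, since each L2 table occupies one cluster. */
    uint64_t entries = DIV_ROUND_UP(virtual_disk_size, cluster_size);
    uint64_t new_max = ROUND_UP(entries * sizeof(uint64_t), cluster_size);

    printf("old max_l2_cache = %" PRIu64 " bytes\n", old_max);
    printf("new max_l2_cache = %" PRIu64 " bytes\n", new_max);
    /* Both print 13107200 (12.5 MiB) for this particular geometry. */
    return 0;
}
```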