author     Richard Henderson <richard.henderson@linaro.org>  2024-05-09 10:11:09 +0200
committer  Richard Henderson <richard.henderson@linaro.org>  2024-05-09 10:11:09 +0200
commit     a016dd50052924d0de255e31b9b9f6a188ebc08b (patch)
tree       0a2c31b7ada89cfa1b1959d6593edd61094aa637 /migration
parent     36fa7c686e9eac490002ffc439c4affaa352c17c (diff)
parent     db8cb7b6e73690233bc7c6abbb90979af3a18143 (diff)
download   focaccia-qemu-a016dd50052924d0de255e31b9b9f6a188ebc08b.tar.gz
           focaccia-qemu-a016dd50052924d0de255e31b9b9f6a188ebc08b.zip
Merge tag 'migration-20240508-pull-request' of https://gitlab.com/farosas/qemu into staging
Migration pull request

- Will's WITH_QEMU_LOCK_GUARD cleanup
- Vladimir's new exit-on-error parameter
- Fabiano's removals and deprecations series
  (block migration and non-multifd compression removed)
- Peter's documentation fix for HMP migrate command

v2:
- updated Peter's documentation fix.
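
The exit-on-error change mentioned above gives migrate-incoming an optional
boolean "exit-on-error" argument. It defaults to true, preserving the old
behaviour of exiting the destination QEMU when the incoming migration fails;
passing false keeps the destination running so the failure can be read back
with query-migrate. A minimal caller sketch against the reworked C signature
from this series — the URI and error handling are illustrative, not taken
from the patches:

    /* Sketch only: opting out of exit-on-failure for incoming migration.
     * The argument list follows the qmp_migrate_incoming() change in this
     * series (declared in the QAPI-generated command headers); "tcp:0:4444"
     * is a placeholder URI. */
    Error *err = NULL;

    qmp_migrate_incoming("tcp:0:4444",
                         false, NULL,   /* no explicit channel list */
                         true,          /* has_exit_on_error */
                         false,         /* exit-on-error=false: keep QEMU running on failure */
                         &err);
    if (err) {
        error_report_err(err);
    }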

# -----BEGIN PGP SIGNATURE-----
#
# iQJEBAABCAAuFiEEqhtIsKIjJqWkw2TPx5jcdBvsMZ0FAmY7934QHGZhcm9zYXNA
# c3VzZS5kZQAKCRDHmNx0G+wxnXynEADHjRa7HqwuYPhft3wGgLiFbCyQNFpNrjM9
# prQSiLlYt9gRlE4c9ZavCxR28xtOrK2oFhCnLMXaIEkct6JuylfiwCPwPuxNQP9+
# EZirECf1yKkyt+RV/LfIx3R/prJgoH5XWhpna+WIBFFo2qSorHTAzjb5dKYZDjkB
# EjfN8R9goVH6aCPd4SyiCUUNxuR6/0si9AxfhUgUvUXyLZmE1ztZEoWI02FCYzVj
# kKDdVK2+Z1Rlv88tyY4/E6z4pwYLWx5EiXSFv0NXIpTdyO3dM+jeAHxcN7KmQ1+5
# GvX0n+mFYOzRIbRfAnhSZbkez/nuPcbJ76phzSYDs8f/7YtOpuOFKFw7yuGrl5N5
# ZqXo5MOOGliF2wozTjacsOrUhB+MbSb0iA71T7aAdBC2s4H9+XIWfoN/OZfsBhAW
# r2i1gSytVLQqsip7A0CFF+DqeSse9QHHlH8vfb8NUn1Tp0o2QfsX+/7LHlvl/2eJ
# EP/zmjD6c/8vjB3fTKZr52h2lEO/36xmX+OtZpep3EBvvl1BY1LP4nBNOW1vQM/b
# fzcq+agaikwS5gI2QSOC9HJ3aX6q416+wZEm3rQ8XRGSPDFfLPKM/GPPfWdj6ngb
# +e3EZPrs+3dOeH1kly5xVMGXGUof+VVBmVwdv4C+XNMM8fRZOxoqd0SD8dz/vOC7
# nSGztXUPqw==
# =5T+K
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 09 May 2024 12:06:54 AM CEST
# gpg:                using RSA key AA1B48B0A22326A5A4C364CFC798DC741BEC319D
# gpg:                issuer "farosas@suse.de"
# gpg: Good signature from "Fabiano Rosas <farosas@suse.de>" [unknown]
# gpg:                 aka "Fabiano Almeida Rosas <fabiano.rosas@suse.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: AA1B 48B0 A223 26A5 A4C3  64CF C798 DC74 1BEC 319D

* tag 'migration-20240508-pull-request' of https://gitlab.com/farosas/qemu:
  hmp/migration: Fix "migrate" command's documentation
  migration: Deprecate fd: for file migration
  migration: Remove non-multifd compression
  migration: Remove block migration
  migration: Remove 'blk/-b' option from migrate commands
  migration: Remove 'inc' option from migrate command
  migration: Remove 'skipped' field from MigrationStats
  qapi: introduce exit-on-error parameter for migrate-incoming
  migration: process_incoming_migration_co(): rework error reporting
  migration: process_incoming_migration_co(): fix reporting s->error
  migration: process_incoming_migration_co(): complete cleanup on failure
  migration: move trace-point from migrate_fd_error to migrate_set_error
  migration/ram.c: API Conversion qemu_mutex_lock(), and qemu_mutex_unlock() to WITH_QEMU_LOCK_GUARD macro

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
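
The ram.c lock-guard conversion listed above replaces paired
qemu_mutex_lock()/qemu_mutex_unlock() calls with the scope-based
WITH_QEMU_LOCK_GUARD macro from qemu/lockable.h, which releases the mutex on
every exit path from the guarded block. A minimal sketch of the pattern, using
an illustrative mutex and counter rather than the actual ram.c fields:

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"

    /* Illustrative only; qemu_mutex_init() call omitted for brevity. */
    static QemuMutex example_mutex;
    static unsigned long example_count;

    /* Before: explicit lock/unlock pair. */
    static void bump_count_old(void)
    {
        qemu_mutex_lock(&example_mutex);
        example_count++;
        qemu_mutex_unlock(&example_mutex);
    }

    /* After: the guard unlocks automatically when the block is left,
     * including via early return. */
    static void bump_count_new(void)
    {
        WITH_QEMU_LOCK_GUARD(&example_mutex) {
            example_count++;
        }
    }
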
Diffstat (limited to 'migration')
-rw-r--r--  migration/block.c               1019
-rw-r--r--  migration/block.h                 52
-rw-r--r--  migration/colo.c                   1
-rw-r--r--  migration/fd.c                    12
-rw-r--r--  migration/meson.build              4
-rw-r--r--  migration/migration-hmp-cmds.c    99
-rw-r--r--  migration/migration.c            121
-rw-r--r--  migration/migration.h             14
-rw-r--r--  migration/options.c              229
-rw-r--r--  migration/options.h               13
-rw-r--r--  migration/qemu-file.c             78
-rw-r--r--  migration/qemu-file.h              4
-rw-r--r--  migration/ram-compress.c         564
-rw-r--r--  migration/ram-compress.h          77
-rw-r--r--  migration/ram.c                  181
-rw-r--r--  migration/savevm.c                 5
-rw-r--r--  migration/trace-events             2
17 files changed, 74 insertions, 2401 deletions
diff --git a/migration/block.c b/migration/block.c
deleted file mode 100644
index bae6e94891..0000000000
--- a/migration/block.c
+++ /dev/null
@@ -1,1019 +0,0 @@
-/*
- * QEMU live block migration
- *
- * Copyright IBM, Corp. 2009
- *
- * Authors:
- *  Liran Schour   <lirans@il.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
-#include "qemu/cutils.h"
-#include "qemu/queue.h"
-#include "block.h"
-#include "block/dirty-bitmap.h"
-#include "migration/misc.h"
-#include "migration.h"
-#include "migration-stats.h"
-#include "migration/register.h"
-#include "qemu-file.h"
-#include "migration/vmstate.h"
-#include "sysemu/block-backend.h"
-#include "trace.h"
-#include "options.h"
-
-#define BLK_MIG_BLOCK_SIZE           (1ULL << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)
-
-#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
-#define BLK_MIG_FLAG_EOS                0x02
-#define BLK_MIG_FLAG_PROGRESS           0x04
-#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
-
-#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)
-
-#define MAX_IO_BUFFERS 512
-#define MAX_PARALLEL_IO 16
-
-typedef struct BlkMigDevState {
-    /* Written during setup phase.  Can be read without a lock.  */
-    BlockBackend *blk;
-    char *blk_name;
-    int shared_base;
-    int64_t total_sectors;
-    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
-    Error *blocker;
-
-    /* Only used by migration thread.  Does not need a lock.  */
-    int bulk_completed;
-    int64_t cur_sector;
-    int64_t cur_dirty;
-
-    /* Data in the aio_bitmap is protected by block migration lock.
-     * Allocation and free happen during setup and cleanup respectively.
-     */
-    unsigned long *aio_bitmap;
-
-    /* Protected by block migration lock.  */
-    int64_t completed_sectors;
-
-    /* During migration this is protected by bdrv_dirty_bitmap_lock().
-     * Allocation and free happen during setup and cleanup respectively.
-     */
-    BdrvDirtyBitmap *dirty_bitmap;
-} BlkMigDevState;
-
-typedef struct BlkMigBlock {
-    /* Only used by migration thread.  */
-    uint8_t *buf;
-    BlkMigDevState *bmds;
-    int64_t sector;
-    int nr_sectors;
-    QEMUIOVector qiov;
-    BlockAIOCB *aiocb;
-
-    /* Protected by block migration lock.  */
-    int ret;
-    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
-} BlkMigBlock;
-
-typedef struct BlkMigState {
-    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
-    int64_t total_sector_sum;
-    bool zero_blocks;
-
-    /* Protected by lock.  */
-    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
-    int submitted;
-    int read_done;
-
-    /* Only used by migration thread.  Does not need a lock.  */
-    int transferred;
-    int prev_progress;
-    int bulk_completed;
-
-    /* Lock must be taken _inside_ the BQL.  */
-    QemuMutex lock;
-} BlkMigState;
-
-static BlkMigState block_mig_state;
-
-static void blk_mig_lock(void)
-{
-    qemu_mutex_lock(&block_mig_state.lock);
-}
-
-static void blk_mig_unlock(void)
-{
-    qemu_mutex_unlock(&block_mig_state.lock);
-}
-
-/* Must run outside of the BQL during the bulk phase,
- * or the VM will stall.
- */
-
-static void blk_send(QEMUFile *f, BlkMigBlock * blk)
-{
-    int len;
-    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
-
-    if (block_mig_state.zero_blocks &&
-        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
-        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
-    }
-
-    /* sector number and flags */
-    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
-                     | flags);
-
-    /* device name */
-    len = strlen(blk->bmds->blk_name);
-    qemu_put_byte(f, len);
-    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);
-
-    /* if a block is zero we need to flush here since the network
-     * bandwidth is now a lot higher than the storage device bandwidth.
-     * thus if we queue zero blocks we slow down the migration */
-    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
-        qemu_fflush(f);
-        return;
-    }
-
-    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
-}
-
-int blk_mig_active(void)
-{
-    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
-}
-
-int blk_mig_bulk_active(void)
-{
-    return blk_mig_active() && !block_mig_state.bulk_completed;
-}
-
-uint64_t blk_mig_bytes_transferred(void)
-{
-    BlkMigDevState *bmds;
-    uint64_t sum = 0;
-
-    blk_mig_lock();
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        sum += bmds->completed_sectors;
-    }
-    blk_mig_unlock();
-    return sum << BDRV_SECTOR_BITS;
-}
-
-uint64_t blk_mig_bytes_remaining(void)
-{
-    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
-}
-
-uint64_t blk_mig_bytes_total(void)
-{
-    BlkMigDevState *bmds;
-    uint64_t sum = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        sum += bmds->total_sectors;
-    }
-    return sum << BDRV_SECTOR_BITS;
-}
-
-
-/* Called with migration lock held.  */
-
-static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
-{
-    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    if (sector < bmds->total_sectors) {
-        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
-            (1UL << (chunk % (sizeof(unsigned long) * 8))));
-    } else {
-        return 0;
-    }
-}
-
-/* Called with migration lock held.  */
-
-static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
-                             int nb_sectors, int set)
-{
-    int64_t start, end;
-    unsigned long val, idx, bit;
-
-    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
-    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    for (; start <= end; start++) {
-        idx = start / (sizeof(unsigned long) * 8);
-        bit = start % (sizeof(unsigned long) * 8);
-        val = bmds->aio_bitmap[idx];
-        if (set) {
-            val |= 1UL << bit;
-        } else {
-            val &= ~(1UL << bit);
-        }
-        bmds->aio_bitmap[idx] = val;
-    }
-}
-
-static void alloc_aio_bitmap(BlkMigDevState *bmds)
-{
-    int64_t bitmap_size;
-
-    bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
-    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
-
-    bmds->aio_bitmap = g_malloc0(bitmap_size);
-}
-
-/* Never hold migration lock when yielding to the main loop!  */
-
-static void blk_mig_read_cb(void *opaque, int ret)
-{
-    BlkMigBlock *blk = opaque;
-
-    blk_mig_lock();
-    blk->ret = ret;
-
-    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
-    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);
-
-    block_mig_state.submitted--;
-    block_mig_state.read_done++;
-    assert(block_mig_state.submitted >= 0);
-    blk_mig_unlock();
-}
-
-/* Called with no lock taken.  */
-
-static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
-{
-    int64_t total_sectors = bmds->total_sectors;
-    int64_t cur_sector = bmds->cur_sector;
-    BlockBackend *bb = bmds->blk;
-    BlkMigBlock *blk;
-    int nr_sectors;
-    int64_t count;
-
-    if (bmds->shared_base) {
-        bql_lock();
-        /* Skip unallocated sectors; intentionally treats failure or
-         * partial sector as an allocated sector */
-        while (cur_sector < total_sectors &&
-               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
-                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
-            if (count < BDRV_SECTOR_SIZE) {
-                break;
-            }
-            cur_sector += count >> BDRV_SECTOR_BITS;
-        }
-        bql_unlock();
-    }
-
-    if (cur_sector >= total_sectors) {
-        bmds->cur_sector = bmds->completed_sectors = total_sectors;
-        return 1;
-    }
-
-    bmds->completed_sectors = cur_sector;
-
-    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
-
-    /* we are going to transfer a full block even if it is not allocated */
-    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-        nr_sectors = total_sectors - cur_sector;
-    }
-
-    blk = g_new(BlkMigBlock, 1);
-    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
-    blk->bmds = bmds;
-    blk->sector = cur_sector;
-    blk->nr_sectors = nr_sectors;
-
-    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);
-
-    blk_mig_lock();
-    block_mig_state.submitted++;
-    blk_mig_unlock();
-
-    /*
-     * The migration thread does not have an AioContext. Lock the BQL so that
-     * I/O runs in the main loop AioContext (see
-     * qemu_get_current_aio_context()).
-     */
-    bql_lock();
-    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
-                            nr_sectors * BDRV_SECTOR_SIZE);
-    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
-                                0, blk_mig_read_cb, blk);
-    bql_unlock();
-
-    bmds->cur_sector = cur_sector + nr_sectors;
-    return (bmds->cur_sector >= total_sectors);
-}
-
-/* Called with the BQL taken.  */
-
-static int set_dirty_tracking(void)
-{
-    BlkMigDevState *bmds;
-    int ret;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
-                                                      BLK_MIG_BLOCK_SIZE,
-                                                      NULL, NULL);
-        if (!bmds->dirty_bitmap) {
-            ret = -errno;
-            goto fail;
-        }
-    }
-    return 0;
-
-fail:
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->dirty_bitmap) {
-            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
-        }
-    }
-    return ret;
-}
-
-/* Called with the BQL taken.  */
-
-static void unset_dirty_tracking(void)
-{
-    BlkMigDevState *bmds;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->dirty_bitmap) {
-            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
-        }
-    }
-}
-
-static int init_blk_migration(QEMUFile *f, Error **errp)
-{
-    BlockDriverState *bs;
-    BlkMigDevState *bmds;
-    int64_t sectors;
-    BdrvNextIterator it;
-    int i, num_bs = 0;
-    struct {
-        BlkMigDevState *bmds;
-        BlockDriverState *bs;
-    } *bmds_bs;
-    int ret;
-
-    GRAPH_RDLOCK_GUARD_MAINLOOP();
-
-    block_mig_state.submitted = 0;
-    block_mig_state.read_done = 0;
-    block_mig_state.transferred = 0;
-    block_mig_state.total_sector_sum = 0;
-    block_mig_state.prev_progress = -1;
-    block_mig_state.bulk_completed = 0;
-    block_mig_state.zero_blocks = migrate_zero_blocks();
-
-    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
-        num_bs++;
-    }
-    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));
-
-    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
-        if (bdrv_is_read_only(bs)) {
-            continue;
-        }
-
-        sectors = bdrv_nb_sectors(bs);
-        if (sectors == 0) {
-            continue;
-        }
-        if (sectors < 0) {
-            error_setg(errp, "Error getting length of block device %s",
-                       bdrv_get_device_name(bs));
-            ret = sectors;
-            bdrv_next_cleanup(&it);
-            goto out;
-        }
-
-        bmds = g_new0(BlkMigDevState, 1);
-        bmds->blk = blk_new(qemu_get_aio_context(),
-                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
-        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
-        bmds->bulk_completed = 0;
-        bmds->total_sectors = sectors;
-        bmds->completed_sectors = 0;
-        bmds->shared_base = migrate_block_incremental();
-
-        assert(i < num_bs);
-        bmds_bs[i].bmds = bmds;
-        bmds_bs[i].bs = bs;
-
-        block_mig_state.total_sector_sum += sectors;
-
-        if (bmds->shared_base) {
-            trace_migration_block_init_shared(bdrv_get_device_name(bs));
-        } else {
-            trace_migration_block_init_full(bdrv_get_device_name(bs));
-        }
-
-        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
-    }
-
-    /* Can only insert new BDSes now because doing so while iterating block
-     * devices may end up in a deadlock (iterating the new BDSes, too). */
-    for (i = 0; i < num_bs; i++) {
-        bmds = bmds_bs[i].bmds;
-        bs = bmds_bs[i].bs;
-
-        if (bmds) {
-            ret = blk_insert_bs(bmds->blk, bs, errp);
-            if (ret < 0) {
-                goto out;
-            }
-
-            alloc_aio_bitmap(bmds);
-            error_setg(&bmds->blocker, "block device is in use by migration");
-            bdrv_op_block_all(bs, bmds->blocker);
-        }
-    }
-
-    ret = 0;
-out:
-    g_free(bmds_bs);
-    return ret;
-}
-
-/* Called with no lock taken.  */
-
-static int blk_mig_save_bulked_block(QEMUFile *f)
-{
-    int64_t completed_sector_sum = 0;
-    BlkMigDevState *bmds;
-    int progress;
-    int ret = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->bulk_completed == 0) {
-            if (mig_save_device_bulk(f, bmds) == 1) {
-                /* completed bulk section for this device */
-                bmds->bulk_completed = 1;
-            }
-            completed_sector_sum += bmds->completed_sectors;
-            ret = 1;
-            break;
-        } else {
-            completed_sector_sum += bmds->completed_sectors;
-        }
-    }
-
-    if (block_mig_state.total_sector_sum != 0) {
-        progress = completed_sector_sum * 100 /
-                   block_mig_state.total_sector_sum;
-    } else {
-        progress = 100;
-    }
-    if (progress != block_mig_state.prev_progress) {
-        block_mig_state.prev_progress = progress;
-        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
-                         | BLK_MIG_FLAG_PROGRESS);
-        trace_migration_block_progression(progress);
-    }
-
-    return ret;
-}
-
-static void blk_mig_reset_dirty_cursor(void)
-{
-    BlkMigDevState *bmds;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bmds->cur_dirty = 0;
-    }
-}
-
-/* Called with the BQL taken.  */
-
-static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
-                                 int is_async)
-{
-    BlkMigBlock *blk;
-    int64_t total_sectors = bmds->total_sectors;
-    int64_t sector;
-    int nr_sectors;
-    int ret = -EIO;
-
-    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
-        blk_mig_lock();
-        if (bmds_aio_inflight(bmds, sector)) {
-            blk_mig_unlock();
-            blk_drain(bmds->blk);
-        } else {
-            blk_mig_unlock();
-        }
-        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
-        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
-                                         sector * BDRV_SECTOR_SIZE)) {
-            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-                nr_sectors = total_sectors - sector;
-            } else {
-                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-            }
-            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
-                                           sector * BDRV_SECTOR_SIZE,
-                                           nr_sectors * BDRV_SECTOR_SIZE);
-            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
-
-            blk = g_new(BlkMigBlock, 1);
-            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
-            blk->bmds = bmds;
-            blk->sector = sector;
-            blk->nr_sectors = nr_sectors;
-
-            if (is_async) {
-                qemu_iovec_init_buf(&blk->qiov, blk->buf,
-                                    nr_sectors * BDRV_SECTOR_SIZE);
-
-                blk->aiocb = blk_aio_preadv(bmds->blk,
-                                            sector * BDRV_SECTOR_SIZE,
-                                            &blk->qiov, 0, blk_mig_read_cb,
-                                            blk);
-
-                blk_mig_lock();
-                block_mig_state.submitted++;
-                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
-                blk_mig_unlock();
-            } else {
-                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE,
-                                nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0);
-                if (ret < 0) {
-                    goto error;
-                }
-                blk_send(f, blk);
-
-                g_free(blk->buf);
-                g_free(blk);
-            }
-
-            sector += nr_sectors;
-            bmds->cur_dirty = sector;
-            break;
-        }
-
-        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
-        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
-        bmds->cur_dirty = sector;
-    }
-
-    return (bmds->cur_dirty >= bmds->total_sectors);
-
-error:
-    trace_migration_block_save_device_dirty(sector);
-    g_free(blk->buf);
-    g_free(blk);
-    return ret;
-}
-
-/* Called with the BQL taken.
- *
- * return value:
- * 0: too much data for max_downtime
- * 1: few enough data for max_downtime
-*/
-static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
-{
-    BlkMigDevState *bmds;
-    int ret = 1;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        ret = mig_save_device_dirty(f, bmds, is_async);
-        if (ret <= 0) {
-            break;
-        }
-    }
-
-    return ret;
-}
-
-/* Called with no locks taken.  */
-
-static int flush_blks(QEMUFile *f)
-{
-    BlkMigBlock *blk;
-    int ret = 0;
-
-    trace_migration_block_flush_blks("Enter", block_mig_state.submitted,
-                                     block_mig_state.read_done,
-                                     block_mig_state.transferred);
-
-    blk_mig_lock();
-    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
-        if (migration_rate_exceeded(f)) {
-            break;
-        }
-        if (blk->ret < 0) {
-            ret = blk->ret;
-            break;
-        }
-
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
-        blk_mig_unlock();
-        blk_send(f, blk);
-        blk_mig_lock();
-
-        g_free(blk->buf);
-        g_free(blk);
-
-        block_mig_state.read_done--;
-        block_mig_state.transferred++;
-        assert(block_mig_state.read_done >= 0);
-    }
-    blk_mig_unlock();
-
-    trace_migration_block_flush_blks("Exit", block_mig_state.submitted,
-                                     block_mig_state.read_done,
-                                     block_mig_state.transferred);
-    return ret;
-}
-
-/* Called with the BQL taken.  */
-
-static int64_t get_remaining_dirty(void)
-{
-    BlkMigDevState *bmds;
-    int64_t dirty = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
-        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
-        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
-    }
-
-    return dirty;
-}
-
-
-
-/* Called with the BQL taken.  */
-static void block_migration_cleanup_bmds(void)
-{
-    BlkMigDevState *bmds;
-    BlockDriverState *bs;
-
-    unset_dirty_tracking();
-
-    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
-
-        bs = blk_bs(bmds->blk);
-        if (bs) {
-            bdrv_op_unblock_all(bs, bmds->blocker);
-        }
-        error_free(bmds->blocker);
-        blk_unref(bmds->blk);
-        g_free(bmds->blk_name);
-        g_free(bmds->aio_bitmap);
-        g_free(bmds);
-    }
-}
-
-/* Called with the BQL taken.  */
-static void block_migration_cleanup(void *opaque)
-{
-    BlkMigBlock *blk;
-
-    bdrv_drain_all();
-
-    block_migration_cleanup_bmds();
-
-    blk_mig_lock();
-    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
-        g_free(blk->buf);
-        g_free(blk);
-    }
-    blk_mig_unlock();
-}
-
-static int block_save_setup(QEMUFile *f, void *opaque, Error **errp)
-{
-    int ret;
-
-    trace_migration_block_save("setup", block_mig_state.submitted,
-                               block_mig_state.transferred);
-
-    warn_report("block migration is deprecated;"
-                " use blockdev-mirror with NBD instead");
-
-    ret = init_blk_migration(f, errp);
-    if (ret < 0) {
-        return ret;
-    }
-
-    /* start track dirty blocks */
-    ret = set_dirty_tracking();
-    if (ret) {
-        error_setg_errno(errp, -ret, "Failed to start block dirty tracking");
-        return ret;
-    }
-
-    ret = flush_blks(f);
-    if (ret) {
-        error_setg_errno(errp, -ret, "Flushing block failed");
-        return ret;
-    }
-    blk_mig_reset_dirty_cursor();
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-
-    return ret;
-}
-
-static int block_save_iterate(QEMUFile *f, void *opaque)
-{
-    int ret;
-    uint64_t last_bytes = qemu_file_transferred(f);
-
-    trace_migration_block_save("iterate", block_mig_state.submitted,
-                               block_mig_state.transferred);
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    blk_mig_reset_dirty_cursor();
-
-    /* control the rate of transfer */
-    blk_mig_lock();
-    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
-           migration_rate_get() &&
-           block_mig_state.submitted < MAX_PARALLEL_IO &&
-           (block_mig_state.submitted + block_mig_state.read_done) <
-           MAX_IO_BUFFERS) {
-        blk_mig_unlock();
-        if (block_mig_state.bulk_completed == 0) {
-            /* first finish the bulk phase */
-            if (blk_mig_save_bulked_block(f) == 0) {
-                /* finished saving bulk on all devices */
-                block_mig_state.bulk_completed = 1;
-            }
-            ret = 0;
-        } else {
-            /* Always called with the BQL taken for
-             * simplicity, block_save_complete also calls it.
-             */
-            bql_lock();
-            ret = blk_mig_save_dirty_block(f, 1);
-            bql_unlock();
-        }
-        if (ret < 0) {
-            return ret;
-        }
-        blk_mig_lock();
-        if (ret != 0) {
-            /* no more dirty blocks */
-            break;
-        }
-    }
-    blk_mig_unlock();
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-    uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
-    return (delta_bytes > 0);
-}
-
-/* Called with the BQL taken.  */
-
-static int block_save_complete(QEMUFile *f, void *opaque)
-{
-    int ret;
-
-    trace_migration_block_save("complete", block_mig_state.submitted,
-                               block_mig_state.transferred);
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    blk_mig_reset_dirty_cursor();
-
-    /* we know for sure that save bulk is completed and
-       all async read completed */
-    blk_mig_lock();
-    assert(block_mig_state.submitted == 0);
-    blk_mig_unlock();
-
-    do {
-        ret = blk_mig_save_dirty_block(f, 0);
-        if (ret < 0) {
-            return ret;
-        }
-    } while (ret == 0);
-
-    /* report completion */
-    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
-
-    trace_migration_block_save_complete();
-
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-
-    /* Make sure that our BlockBackends are gone, so that the block driver
-     * nodes can be inactivated. */
-    block_migration_cleanup_bmds();
-
-    return 0;
-}
-
-static void block_state_pending(void *opaque, uint64_t *must_precopy,
-                                uint64_t *can_postcopy)
-{
-    /* Estimate pending number of bytes to send */
-    uint64_t pending;
-
-    bql_lock();
-    pending = get_remaining_dirty();
-    bql_unlock();
-
-    blk_mig_lock();
-    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
-               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
-    blk_mig_unlock();
-
-    /* Report at least one block pending during bulk phase */
-    if (!pending && !block_mig_state.bulk_completed) {
-        pending = BLK_MIG_BLOCK_SIZE;
-    }
-
-    trace_migration_block_state_pending(pending);
-    /* We don't do postcopy */
-    *must_precopy += pending;
-}
-
-static int block_load(QEMUFile *f, void *opaque, int version_id)
-{
-    static int banner_printed;
-    int len, flags;
-    char device_name[256];
-    int64_t addr;
-    BlockBackend *blk, *blk_prev = NULL;
-    Error *local_err = NULL;
-    uint8_t *buf;
-    int64_t total_sectors = 0;
-    int nr_sectors;
-    int ret;
-    BlockDriverInfo bdi;
-    int cluster_size = BLK_MIG_BLOCK_SIZE;
-
-    do {
-        addr = qemu_get_be64(f);
-
-        flags = addr & (BDRV_SECTOR_SIZE - 1);
-        addr >>= BDRV_SECTOR_BITS;
-
-        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
-            /* get device name */
-            len = qemu_get_byte(f);
-            qemu_get_buffer(f, (uint8_t *)device_name, len);
-            device_name[len] = '\0';
-
-            blk = blk_by_name(device_name);
-            if (!blk) {
-                fprintf(stderr, "Error unknown block device %s\n",
-                        device_name);
-                return -EINVAL;
-            }
-
-            if (blk != blk_prev) {
-                blk_prev = blk;
-                total_sectors = blk_nb_sectors(blk);
-                if (total_sectors <= 0) {
-                    error_report("Error getting length of block device %s",
-                                 device_name);
-                    return -EINVAL;
-                }
-
-                blk_activate(blk, &local_err);
-                if (local_err) {
-                    error_report_err(local_err);
-                    return -EINVAL;
-                }
-
-                ret = bdrv_get_info(blk_bs(blk), &bdi);
-                if (ret == 0 && bdi.cluster_size > 0 &&
-                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
-                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
-                    cluster_size = bdi.cluster_size;
-                } else {
-                    cluster_size = BLK_MIG_BLOCK_SIZE;
-                }
-            }
-
-            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-                nr_sectors = total_sectors - addr;
-            } else {
-                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-            }
-
-            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
-                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
-                                        nr_sectors * BDRV_SECTOR_SIZE,
-                                        BDRV_REQ_MAY_UNMAP);
-            } else {
-                int i;
-                int64_t cur_addr;
-                uint8_t *cur_buf;
-
-                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
-                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
-                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
-                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
-                    cur_buf = buf + i * cluster_size;
-
-                    if ((!block_mig_state.zero_blocks ||
-                        cluster_size < BLK_MIG_BLOCK_SIZE) &&
-                        buffer_is_zero(cur_buf, cluster_size)) {
-                        ret = blk_pwrite_zeroes(blk, cur_addr,
-                                                cluster_size,
-                                                BDRV_REQ_MAY_UNMAP);
-                    } else {
-                        ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf,
-                                         0);
-                    }
-                    if (ret < 0) {
-                        break;
-                    }
-                }
-                g_free(buf);
-            }
-
-            if (ret < 0) {
-                return ret;
-            }
-        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
-            if (!banner_printed) {
-                printf("Receiving block device images\n");
-                banner_printed = 1;
-            }
-            printf("Completed %d %%%c", (int)addr,
-                   (addr == 100) ? '\n' : '\r');
-            fflush(stdout);
-        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
-            fprintf(stderr, "Unknown block migration flags: 0x%x\n", flags);
-            return -EINVAL;
-        }
-        ret = qemu_file_get_error(f);
-        if (ret != 0) {
-            return ret;
-        }
-    } while (!(flags & BLK_MIG_FLAG_EOS));
-
-    return 0;
-}
-
-static bool block_is_active(void *opaque)
-{
-    return migrate_block();
-}
-
-static SaveVMHandlers savevm_block_handlers = {
-    .save_setup = block_save_setup,
-    .save_live_iterate = block_save_iterate,
-    .save_live_complete_precopy = block_save_complete,
-    .state_pending_exact = block_state_pending,
-    .state_pending_estimate = block_state_pending,
-    .load_state = block_load,
-    .save_cleanup = block_migration_cleanup,
-    .is_active = block_is_active,
-};
-
-void blk_mig_init(void)
-{
-    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
-    QSIMPLEQ_INIT(&block_mig_state.blk_list);
-    qemu_mutex_init(&block_mig_state.lock);
-
-    register_savevm_live("block", 0, 1, &savevm_block_handlers,
-                         &block_mig_state);
-}
diff --git a/migration/block.h b/migration/block.h
deleted file mode 100644
index 3178609dbd..0000000000
--- a/migration/block.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * QEMU live block migration
- *
- * Copyright IBM, Corp. 2009
- *
- * Authors:
- *  Liran Schour   <lirans@il.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- */
-
-#ifndef MIGRATION_BLOCK_H
-#define MIGRATION_BLOCK_H
-
-#ifdef CONFIG_LIVE_BLOCK_MIGRATION
-int blk_mig_active(void);
-int blk_mig_bulk_active(void);
-uint64_t blk_mig_bytes_transferred(void);
-uint64_t blk_mig_bytes_remaining(void);
-uint64_t blk_mig_bytes_total(void);
-
-#else
-static inline int blk_mig_active(void)
-{
-    return false;
-}
-
-static inline int blk_mig_bulk_active(void)
-{
-    return false;
-}
-
-static inline uint64_t blk_mig_bytes_transferred(void)
-{
-    return 0;
-}
-
-static inline uint64_t blk_mig_bytes_remaining(void)
-{
-    return 0;
-}
-
-static inline uint64_t blk_mig_bytes_total(void)
-{
-    return 0;
-}
-#endif /* CONFIG_LIVE_BLOCK_MIGRATION */
-
-void migrate_set_block_enabled(bool value, Error **errp);
-#endif /* MIGRATION_BLOCK_H */
diff --git a/migration/colo.c b/migration/colo.c
index 5600a43d78..e2b450c132 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -18,7 +18,6 @@
 #include "qemu-file.h"
 #include "savevm.h"
 #include "migration/colo.h"
-#include "block.h"
 #include "io/channel-buffer.h"
 #include "trace.h"
 #include "qemu/error-report.h"
diff --git a/migration/fd.c b/migration/fd.c
index 449adaa2de..aab5189eac 100644
--- a/migration/fd.c
+++ b/migration/fd.c
@@ -20,6 +20,8 @@
 #include "file.h"
 #include "migration.h"
 #include "monitor/monitor.h"
+#include "qemu/error-report.h"
+#include "qemu/sockets.h"
 #include "io/channel-util.h"
 #include "trace.h"
 
@@ -32,6 +34,11 @@ void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **
         return;
     }
 
+    if (!fd_is_socket(fd)) {
+        warn_report("fd: migration to a file is deprecated."
+                    " Use file: instead.");
+    }
+
     trace_migration_fd_outgoing(fd);
     ioc = qio_channel_new_fd(fd, errp);
     if (!ioc) {
@@ -61,6 +68,11 @@ void fd_start_incoming_migration(const char *fdname, Error **errp)
         return;
     }
 
+    if (!fd_is_socket(fd)) {
+        warn_report("fd: migration to a file is deprecated."
+                    " Use file: instead.");
+    }
+
     trace_migration_fd_incoming(fd);
 
     ioc = qio_channel_new_fd(fd, errp);
diff --git a/migration/meson.build b/migration/meson.build
index f76b1ba328..8815f80837 100644
--- a/migration/meson.build
+++ b/migration/meson.build
@@ -23,7 +23,6 @@ system_ss.add(files(
   'multifd.c',
   'multifd-zlib.c',
   'multifd-zero-page.c',
-  'ram-compress.c',
   'options.c',
   'postcopy-ram.c',
   'savevm.c',
@@ -39,9 +38,6 @@ else
 endif
 
 system_ss.add(when: rdma, if_true: files('rdma.c'))
-if get_option('live_block_migration').allowed()
-  system_ss.add(files('block.c'))
-endif
 system_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
 
 specific_ss.add(when: 'CONFIG_SYSTEM_ONLY',
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 7e96ae6ffd..9f0e8029e0 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -46,8 +46,6 @@ static void migration_global_dump(Monitor *mon)
                    ms->send_configuration ? "on" : "off");
     monitor_printf(mon, "send-section-footer: %s\n",
                    ms->send_section_footer ? "on" : "off");
-    monitor_printf(mon, "decompress-error-check: %s\n",
-                   ms->decompress_error_check ? "on" : "off");
     monitor_printf(mon, "clear-bitmap-shift: %u\n",
                    ms->clear_bitmap_shift);
 }
@@ -105,8 +103,6 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->ram->total >> 10);
         monitor_printf(mon, "duplicate: %" PRIu64 " pages\n",
                        info->ram->duplicate);
-        monitor_printf(mon, "skipped: %" PRIu64 " pages\n",
-                       info->ram->skipped);
         monitor_printf(mon, "normal: %" PRIu64 " pages\n",
                        info->ram->normal);
         monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
@@ -147,15 +143,6 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
         }
     }
 
-    if (info->disk) {
-        monitor_printf(mon, "transferred disk: %" PRIu64 " kbytes\n",
-                       info->disk->transferred >> 10);
-        monitor_printf(mon, "remaining disk: %" PRIu64 " kbytes\n",
-                       info->disk->remaining >> 10);
-        monitor_printf(mon, "total disk: %" PRIu64 " kbytes\n",
-                       info->disk->total >> 10);
-    }
-
     if (info->xbzrle_cache) {
         monitor_printf(mon, "cache size: %" PRIu64 " bytes\n",
                        info->xbzrle_cache->cache_size);
@@ -173,19 +160,6 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->xbzrle_cache->overflow);
     }
 
-    if (info->compression) {
-        monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
-                       info->compression->pages);
-        monitor_printf(mon, "compression busy: %" PRIu64 "\n",
-                       info->compression->busy);
-        monitor_printf(mon, "compression busy rate: %0.2f\n",
-                       info->compression->busy_rate);
-        monitor_printf(mon, "compressed size: %" PRIu64 " kbytes\n",
-                       info->compression->compressed_size >> 10);
-        monitor_printf(mon, "compression rate: %0.2f\n",
-                       info->compression->compression_rate);
-    }
-
     if (info->has_cpu_throttle_percentage) {
         monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
                        info->cpu_throttle_percentage);
@@ -274,22 +248,6 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %" PRIu64 " ms\n",
             MigrationParameter_str(MIGRATION_PARAMETER_ANNOUNCE_STEP),
             params->announce_step);
-        assert(params->has_compress_level);
-        monitor_printf(mon, "%s: %u\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_LEVEL),
-            params->compress_level);
-        assert(params->has_compress_threads);
-        monitor_printf(mon, "%s: %u\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_THREADS),
-            params->compress_threads);
-        assert(params->has_compress_wait_thread);
-        monitor_printf(mon, "%s: %s\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_WAIT_THREAD),
-            params->compress_wait_thread ? "on" : "off");
-        assert(params->has_decompress_threads);
-        monitor_printf(mon, "%s: %u\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS),
-            params->decompress_threads);
         assert(params->has_throttle_trigger_threshold);
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD),
@@ -334,10 +292,6 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %u ms\n",
             MigrationParameter_str(MIGRATION_PARAMETER_X_CHECKPOINT_DELAY),
             params->x_checkpoint_delay);
-        assert(params->has_block_incremental);
-        monitor_printf(mon, "%s: %s\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_BLOCK_INCREMENTAL),
-            params->block_incremental ? "on" : "off");
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_MULTIFD_CHANNELS),
             params->multifd_channels);
@@ -466,7 +420,7 @@ void hmp_migrate_incoming(Monitor *mon, const QDict *qdict)
     }
     QAPI_LIST_PREPEND(caps, g_steal_pointer(&channel));
 
-    qmp_migrate_incoming(NULL, true, caps, &err);
+    qmp_migrate_incoming(NULL, true, caps, true, false, &err);
     qapi_free_MigrationChannelList(caps);
 
 end:
@@ -535,22 +489,6 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
     }
 
     switch (val) {
-    case MIGRATION_PARAMETER_COMPRESS_LEVEL:
-        p->has_compress_level = true;
-        visit_type_uint8(v, param, &p->compress_level, &err);
-        break;
-    case MIGRATION_PARAMETER_COMPRESS_THREADS:
-        p->has_compress_threads = true;
-        visit_type_uint8(v, param, &p->compress_threads, &err);
-        break;
-    case MIGRATION_PARAMETER_COMPRESS_WAIT_THREAD:
-        p->has_compress_wait_thread = true;
-        visit_type_bool(v, param, &p->compress_wait_thread, &err);
-        break;
-    case MIGRATION_PARAMETER_DECOMPRESS_THREADS:
-        p->has_decompress_threads = true;
-        visit_type_uint8(v, param, &p->decompress_threads, &err);
-        break;
     case MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD:
         p->has_throttle_trigger_threshold = true;
         visit_type_uint8(v, param, &p->throttle_trigger_threshold, &err);
@@ -618,10 +556,6 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
         p->has_x_checkpoint_delay = true;
         visit_type_uint32(v, param, &p->x_checkpoint_delay, &err);
         break;
-    case MIGRATION_PARAMETER_BLOCK_INCREMENTAL:
-        p->has_block_incremental = true;
-        visit_type_bool(v, param, &p->block_incremental, &err);
-        break;
     case MIGRATION_PARAMETER_MULTIFD_CHANNELS:
         p->has_multifd_channels = true;
         visit_type_uint8(v, param, &p->multifd_channels, &err);
@@ -736,24 +670,8 @@ static void hmp_migrate_status_cb(void *opaque)
     info = qmp_query_migrate(NULL);
     if (!info->has_status || info->status == MIGRATION_STATUS_ACTIVE ||
         info->status == MIGRATION_STATUS_SETUP) {
-        if (info->disk) {
-            int progress;
-
-            if (info->disk->remaining) {
-                progress = info->disk->transferred * 100 / info->disk->total;
-            } else {
-                progress = 100;
-            }
-
-            monitor_printf(status->mon, "Completed %d %%\r", progress);
-            monitor_flush(status->mon);
-        }
-
         timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
     } else {
-        if (migrate_block()) {
-            monitor_printf(status->mon, "\n");
-        }
         if (info->error_desc) {
             error_report("%s", info->error_desc);
         }
@@ -768,32 +686,19 @@ static void hmp_migrate_status_cb(void *opaque)
 void hmp_migrate(Monitor *mon, const QDict *qdict)
 {
     bool detach = qdict_get_try_bool(qdict, "detach", false);
-    bool blk = qdict_get_try_bool(qdict, "blk", false);
-    bool inc = qdict_get_try_bool(qdict, "inc", false);
     bool resume = qdict_get_try_bool(qdict, "resume", false);
     const char *uri = qdict_get_str(qdict, "uri");
     Error *err = NULL;
     g_autoptr(MigrationChannelList) caps = NULL;
     g_autoptr(MigrationChannel) channel = NULL;
 
-    if (inc) {
-        warn_report("option '-i' is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-    }
-
-    if (blk) {
-        warn_report("option '-b' is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-    }
-
     if (!migrate_uri_parse(uri, &channel, &err)) {
         hmp_handle_error(mon, err);
         return;
     }
     QAPI_LIST_PREPEND(caps, g_steal_pointer(&channel));
 
-    qmp_migrate(NULL, true, caps, !!blk, blk, !!inc, inc,
-                 false, false, true, resume, &err);
+    qmp_migrate(NULL, true, caps, false, false, true, resume, &err);
     if (hmp_handle_error(mon, err)) {
         return;
     }
diff --git a/migration/migration.c b/migration/migration.c
index b5af6b5105..e88b24f1e6 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -27,7 +27,6 @@
 #include "sysemu/cpu-throttle.h"
 #include "rdma.h"
 #include "ram.h"
-#include "ram-compress.h"
 #include "migration/global_state.h"
 #include "migration/misc.h"
 #include "migration.h"
@@ -46,7 +45,6 @@
 #include "qapi/qmp/qerror.h"
 #include "qapi/qmp/qnull.h"
 #include "qemu/rcu.h"
-#include "block.h"
 #include "postcopy-ram.h"
 #include "qemu/thread.h"
 #include "trace.h"
@@ -72,6 +70,8 @@
 #define NOTIFIER_ELEM_INIT(array, elem)    \
     [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem])
 
+#define INMIGRATE_DEFAULT_EXIT_ON_ERROR true
+
 static NotifierWithReturnList migration_state_notifiers[] = {
     NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL),
     NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT),
@@ -234,9 +234,10 @@ void migration_object_init(void)
     qemu_cond_init(&current_incoming->page_request_cond);
     current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
 
+    current_incoming->exit_on_error = INMIGRATE_DEFAULT_EXIT_ON_ERROR;
+
     migration_object_check(current_migration, &error_fatal);
 
-    blk_mig_init();
     ram_mig_init();
     dirty_bitmap_mig_init();
 }
@@ -354,7 +355,6 @@ void migration_incoming_state_destroy(void)
     struct MigrationIncomingState *mis = migration_incoming_get_current();
 
     multifd_recv_cleanup();
-    compress_threads_load_cleanup();
 
     if (mis->to_src_file) {
         /* Tell source that we are done */
@@ -647,10 +647,6 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
         }
 #ifdef CONFIG_RDMA
     } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
-        if (migrate_compress()) {
-            error_setg(errp, "RDMA and compression can't be used together");
-            return;
-        }
         if (migrate_xbzrle()) {
             error_setg(errp, "RDMA and XBZRLE can't be used together");
             return;
@@ -735,17 +731,14 @@ static void process_incoming_migration_bh(void *opaque)
 static void coroutine_fn
 process_incoming_migration_co(void *opaque)
 {
+    MigrationState *s = migrate_get_current();
     MigrationIncomingState *mis = migration_incoming_get_current();
     PostcopyState ps;
     int ret;
+    Error *local_err = NULL;
 
     assert(mis->from_src_file);
 
-    if (compress_threads_load_setup(mis->from_src_file)) {
-        error_report("Failed to setup decompress threads");
-        goto fail;
-    }
-
     mis->largest_page_size = qemu_ram_pagesize_largest();
     postcopy_state_set(POSTCOPY_INCOMING_NONE);
     migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
@@ -779,18 +772,12 @@ process_incoming_migration_co(void *opaque)
     }
 
     if (ret < 0) {
-        MigrationState *s = migrate_get_current();
-
-        if (migrate_has_error(s)) {
-            WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
-                error_report_err(s->error);
-            }
-        }
-        error_report("load of migration failed: %s", strerror(-ret));
+        error_setg(&local_err, "load of migration failed: %s", strerror(-ret));
         goto fail;
     }
 
     if (colo_incoming_co() < 0) {
+        error_setg(&local_err, "colo incoming failed");
         goto fail;
     }
 
@@ -799,12 +786,19 @@ process_incoming_migration_co(void *opaque)
 fail:
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_FAILED);
-    qemu_fclose(mis->from_src_file);
+    migrate_set_error(s, local_err);
+    error_free(local_err);
 
-    multifd_recv_cleanup();
-    compress_threads_load_cleanup();
+    migration_incoming_state_destroy();
+
+    if (mis->exit_on_error) {
+        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
+            error_report_err(s->error);
+            s->error = NULL;
+        }
 
-    exit(EXIT_FAILURE);
+        exit(EXIT_FAILURE);
+    }
 }
 
 /**
@@ -1149,8 +1143,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->transferred = migration_transferred_bytes();
     info->ram->total = ram_bytes_total();
     info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
-    /* legacy value.  It is not used anymore */
-    info->ram->skipped = 0;
     info->ram->normal = stat64_get(&mig_stats.normal_pages);
     info->ram->normal_bytes = info->ram->normal * page_size;
     info->ram->mbps = s->mbps;
@@ -1178,8 +1170,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
         info->xbzrle_cache->overflow = xbzrle_counters.overflow;
     }
 
-    populate_compress(info);
-
     if (cpu_throttle_active()) {
         info->has_cpu_throttle_percentage = true;
         info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -1201,16 +1191,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     }
 }
 
-static void populate_disk_info(MigrationInfo *info)
-{
-    if (blk_mig_active()) {
-        info->disk = g_malloc0(sizeof(*info->disk));
-        info->disk->transferred = blk_mig_bytes_transferred();
-        info->disk->remaining = blk_mig_bytes_remaining();
-        info->disk->total = blk_mig_bytes_total();
-    }
-}
-
 static void fill_source_migration_info(MigrationInfo *info)
 {
     MigrationState *s = migrate_get_current();
@@ -1253,7 +1233,6 @@ static void fill_source_migration_info(MigrationInfo *info)
         /* TODO add some postcopy stats */
         populate_time_info(info, s);
         populate_ram_info(info, s);
-        populate_disk_info(info);
         migration_populate_vfio_info(info);
         break;
     case MIGRATION_STATUS_COLO:
@@ -1313,6 +1292,15 @@ static void fill_destination_migration_info(MigrationInfo *info)
         break;
     }
     info->status = mis->state;
+
+    if (!info->error_desc) {
+        MigrationState *s = migrate_get_current();
+        QEMU_LOCK_GUARD(&s->error_mutex);
+
+        if (s->error) {
+            info->error_desc = g_strdup(error_get_pretty(s->error));
+        }
+    }
 }
 
 MigrationInfo *qmp_query_migrate(Error **errp)
@@ -1409,7 +1397,6 @@ static void migrate_fd_cleanup(MigrationState *s)
     type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED :
                                      MIG_EVENT_PRECOPY_DONE;
     migration_call_notifiers(s, type, NULL);
-    block_cleanup_parameters();
     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
 }
 
@@ -1421,6 +1408,9 @@ static void migrate_fd_cleanup_bh(void *opaque)
 void migrate_set_error(MigrationState *s, const Error *error)
 {
     QEMU_LOCK_GUARD(&s->error_mutex);
+
+    trace_migrate_error(error_get_pretty(error));
+
     if (!s->error) {
         s->error = error_copy(error);
     }
@@ -1444,7 +1434,6 @@ static void migrate_error_free(MigrationState *s)
 
 static void migrate_fd_error(MigrationState *s, const Error *error)
 {
-    trace_migrate_fd_error(error_get_pretty(error));
     assert(s->to_dst_file == NULL);
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_FAILED);
@@ -1794,10 +1783,13 @@ void migrate_del_blocker(Error **reasonp)
 }
 
 void qmp_migrate_incoming(const char *uri, bool has_channels,
-                          MigrationChannelList *channels, Error **errp)
+                          MigrationChannelList *channels,
+                          bool has_exit_on_error, bool exit_on_error,
+                          Error **errp)
 {
     Error *local_err = NULL;
     static bool once = true;
+    MigrationIncomingState *mis = migration_incoming_get_current();
 
     if (!once) {
         error_setg(errp, "The incoming migration has already been started");
@@ -1812,6 +1804,9 @@ void qmp_migrate_incoming(const char *uri, bool has_channels,
         return;
     }
 
+    mis->exit_on_error =
+        has_exit_on_error ? exit_on_error : INMIGRATE_DEFAULT_EXIT_ON_ERROR;
+
     qemu_start_incoming_migration(uri, has_channels, channels, &local_err);
 
     if (local_err) {
@@ -1913,19 +1908,8 @@ bool migration_is_blocked(Error **errp)
 }
 
 /* Returns true if continue to migrate, or false if error detected */
-static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
-                            bool resume, Error **errp)
+static bool migrate_prepare(MigrationState *s, bool resume, Error **errp)
 {
-    if (blk_inc) {
-        warn_report("parameter 'inc' is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-    }
-
-    if (blk) {
-        warn_report("parameter 'blk' is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-    }
-
     if (resume) {
         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
             error_setg(errp, "Cannot resume if there is no "
@@ -2010,26 +1994,6 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
         }
     }
 
-    if (blk || blk_inc) {
-        if (migrate_colo()) {
-            error_setg(errp, "No disk migration is required in COLO mode");
-            return false;
-        }
-        if (migrate_block() || migrate_block_incremental()) {
-            error_setg(errp, "Command options are incompatible with "
-                       "current migration capabilities");
-            return false;
-        }
-        if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) {
-            return false;
-        }
-        s->must_remove_block_options = true;
-    }
-
-    if (blk_inc) {
-        migrate_set_block_incremental(true);
-    }
-
     if (migrate_init(s, errp)) {
         return false;
     }
@@ -2038,8 +2002,7 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
 }
 
 void qmp_migrate(const char *uri, bool has_channels,
-                 MigrationChannelList *channels, bool has_blk, bool blk,
-                 bool has_inc, bool inc, bool has_detach, bool detach,
+                 MigrationChannelList *channels, bool has_detach, bool detach,
                  bool has_resume, bool resume, Error **errp)
 {
     bool resume_requested;
@@ -2079,8 +2042,7 @@ void qmp_migrate(const char *uri, bool has_channels,
     }
 
     resume_requested = has_resume && resume;
-    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
-                         resume_requested, errp)) {
+    if (!migrate_prepare(s, resume_requested, errp)) {
         /* Error detected, put into errp */
         return;
     }
@@ -2113,7 +2075,6 @@ void qmp_migrate(const char *uri, bool has_channels,
                    "a valid migration protocol");
         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                           MIGRATION_STATUS_FAILED);
-        block_cleanup_parameters();
     }
 
     if (local_err) {
diff --git a/migration/migration.h b/migration/migration.h
index 6c612c0381..6af01362d4 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -227,6 +227,9 @@ struct MigrationIncomingState {
      * is needed as this field is updated serially.
      */
     unsigned int switchover_ack_pending_num;
+
+    /* Whether to exit on an incoming migration failure */
+    bool exit_on_error;
 };
 
 MigrationIncomingState *migration_incoming_get_current(void);
@@ -376,10 +379,6 @@ struct MigrationState {
     /* mutex to protect errp */
     QemuMutex error_mutex;
 
-    /* Do we have to clean up -b/-i from old migrate parameters */
-    /* This feature is deprecated and will be removed */
-    bool must_remove_block_options;
-
     /*
      * Global switch on whether we need to store the global state
      * during migration.
@@ -394,13 +393,6 @@ struct MigrationState {
     /* Needed by postcopy-pause state */
     QemuSemaphore postcopy_pause_sem;
     /*
-     * Whether we abort the migration if decompression errors are
-     * detected at the destination. It is left at false for qemu
-     * older than 3.0, since only newer qemu sends streams that
-     * do not trigger spurious decompression errors.
-     */
-    bool decompress_error_check;
-    /*
      * This variable only affects behavior when postcopy preempt mode is
      * enabled.
      *
diff --git a/migration/options.c b/migration/options.c
index 239f5ecfb4..5ab5b6d85d 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -40,13 +40,6 @@
  * for sending the last part */
 #define DEFAULT_MIGRATE_SET_DOWNTIME 300
 
-/* Default compression thread count */
-#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
-/* Default decompression thread count, usually decompression is at
- * least 4 times as fast as compression.*/
-#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
-/*0: means nocompress, 1: best speed, ... 9: best compress ratio */
-#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
 /* Define default autoconverge cpu throttle migration parameters */
 #define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
@@ -92,8 +85,6 @@ Property migration_properties[] = {
                      send_configuration, true),
     DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                      send_section_footer, true),
-    DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
-                      decompress_error_check, true),
     DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState,
                       multifd_flush_after_each_section, false),
     DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
@@ -102,17 +93,6 @@ Property migration_properties[] = {
                      preempt_pre_7_2, false),
 
     /* Migration parameters */
-    DEFINE_PROP_UINT8("x-compress-level", MigrationState,
-                      parameters.compress_level,
-                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
-    DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
-                      parameters.compress_threads,
-                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
-    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
-                      parameters.compress_wait_thread, true),
-    DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
-                      parameters.decompress_threads,
-                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
     DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
                       parameters.throttle_trigger_threshold,
                       DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
@@ -188,14 +168,12 @@ Property migration_properties[] = {
     DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
     DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
     DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
-    DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
     DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
     DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
     DEFINE_PROP_MIG_CAP("x-postcopy-preempt",
                         MIGRATION_CAPABILITY_POSTCOPY_PREEMPT),
     DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
     DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
-    DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
     DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
     DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
     DEFINE_PROP_MIG_CAP("x-background-snapshot",
@@ -225,13 +203,6 @@ bool migrate_background_snapshot(void)
     return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
 }
 
-bool migrate_block(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
-}
-
 bool migrate_colo(void)
 {
     MigrationState *s = migrate_get_current();
@@ -239,13 +210,6 @@ bool migrate_colo(void)
     return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
 }
 
-bool migrate_compress(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
-}
-
 bool migrate_dirty_bitmaps(void)
 {
     MigrationState *s = migrate_get_current();
@@ -459,7 +423,6 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
     MIGRATION_CAPABILITY_AUTO_CONVERGE,
     MIGRATION_CAPABILITY_RELEASE_RAM,
     MIGRATION_CAPABILITY_RDMA_PIN_ALL,
-    MIGRATION_CAPABILITY_COMPRESS,
     MIGRATION_CAPABILITY_XBZRLE,
     MIGRATION_CAPABILITY_X_COLO,
     MIGRATION_CAPABILITY_VALIDATE_UUID,
@@ -484,24 +447,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
     ERRP_GUARD();
     MigrationIncomingState *mis = migration_incoming_get_current();
 
-#ifndef CONFIG_LIVE_BLOCK_MIGRATION
-    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
-        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
-                   "block migration");
-        error_append_hint(errp, "Use blockdev-mirror with NBD instead.\n");
-        return false;
-    }
-#endif
-    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
-        warn_report("block migration is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-    }
-
-    if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
-        warn_report("old compression method is deprecated;"
-                    " use multifd compression methods instead");
-    }
-
 #ifndef CONFIG_REPLICATION
     if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
         error_setg(errp, "QEMU compiled without replication module"
@@ -570,7 +515,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
 #ifdef CONFIG_LINUX
     if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
         (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
-         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
          new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
          migrate_multifd_compression() ||
          migrate_tls())) {
@@ -592,17 +536,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
             return false;
         }
 
-        /*
-         * Preempt mode requires urgent pages to be sent in separate
-         * channel, OTOH compression logic will disorder all pages into
-         * different compression channels, which is not compatible with the
-         * preempt assumptions on channel assignments.
-         */
-        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
-            error_setg(errp, "Postcopy preempt not compatible with compress");
-            return false;
-        }
-
         if (migrate_incoming_started()) {
             error_setg(errp,
                        "Postcopy preempt must be set before incoming starts");
@@ -611,10 +544,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
     }
 
     if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
-        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
-            error_setg(errp, "Multifd is not compatible with compress");
-            return false;
-        }
         if (migrate_incoming_started()) {
             error_setg(errp, "Multifd must be set before incoming starts");
             return false;
@@ -649,13 +578,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
         }
     }
 
-    if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
-        if (new_caps[MIGRATION_CAPABILITY_XBZRLE]) {
-            error_setg(errp, "Compression is not compatible with xbzrle");
-            return false;
-        }
-    }
-
     if (new_caps[MIGRATION_CAPABILITY_MAPPED_RAM]) {
         if (new_caps[MIGRATION_CAPABILITY_XBZRLE]) {
             error_setg(errp,
@@ -663,12 +585,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
             return false;
         }
 
-        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
-            error_setg(errp,
-                       "Mapped-ram migration is incompatible with compression");
-            return false;
-        }
-
         if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
             error_setg(errp,
                        "Mapped-ram migration is incompatible with postcopy");
@@ -707,11 +623,6 @@ MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
     int i;
 
     for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
-#ifndef CONFIG_LIVE_BLOCK_MIGRATION
-        if (i == MIGRATION_CAPABILITY_BLOCK) {
-            continue;
-        }
-#endif
         caps = g_malloc0(sizeof(*caps));
         caps->capability = i;
         caps->state = s->capabilities[i];
@@ -763,13 +674,6 @@ bool migrate_has_block_bitmap_mapping(void)
     return s->parameters.has_block_bitmap_mapping;
 }
 
-bool migrate_block_incremental(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->parameters.block_incremental;
-}
-
 uint32_t migrate_checkpoint_delay(void)
 {
     MigrationState *s = migrate_get_current();
@@ -777,27 +681,6 @@ uint32_t migrate_checkpoint_delay(void)
     return s->parameters.x_checkpoint_delay;
 }
 
-int migrate_compress_level(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->parameters.compress_level;
-}
-
-int migrate_compress_threads(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->parameters.compress_threads;
-}
-
-int migrate_compress_wait_thread(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->parameters.compress_wait_thread;
-}
-
 uint8_t migrate_cpu_throttle_increment(void)
 {
     MigrationState *s = migrate_get_current();
@@ -819,13 +702,6 @@ bool migrate_cpu_throttle_tailslow(void)
     return s->parameters.cpu_throttle_tailslow;
 }
 
-int migrate_decompress_threads(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->parameters.decompress_threads;
-}
-
 uint64_t migrate_downtime_limit(void)
 {
     MigrationState *s = migrate_get_current();
@@ -948,29 +824,8 @@ ZeroPageDetection migrate_zero_page_detection(void)
     return s->parameters.zero_page_detection;
 }
 
-/* parameter setters */
-
-void migrate_set_block_incremental(bool value)
-{
-    MigrationState *s = migrate_get_current();
-
-    s->parameters.block_incremental = value;
-}
-
 /* parameters helpers */
 
-void block_cleanup_parameters(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    if (s->must_remove_block_options) {
-        /* setting to false can never fail */
-        migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, false, &error_abort);
-        migrate_set_block_incremental(false);
-        s->must_remove_block_options = false;
-    }
-}
-
 AnnounceParameters *migrate_announce_params(void)
 {
     static AnnounceParameters ap;
@@ -992,14 +847,6 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
 
     /* TODO use QAPI_CLONE() instead of duplicating it inline */
     params = g_malloc0(sizeof(*params));
-    params->has_compress_level = true;
-    params->compress_level = s->parameters.compress_level;
-    params->has_compress_threads = true;
-    params->compress_threads = s->parameters.compress_threads;
-    params->has_compress_wait_thread = true;
-    params->compress_wait_thread = s->parameters.compress_wait_thread;
-    params->has_decompress_threads = true;
-    params->decompress_threads = s->parameters.decompress_threads;
     params->has_throttle_trigger_threshold = true;
     params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
     params->has_cpu_throttle_initial = true;
@@ -1020,8 +867,6 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
     params->downtime_limit = s->parameters.downtime_limit;
     params->has_x_checkpoint_delay = true;
     params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
-    params->has_block_incremental = true;
-    params->block_incremental = s->parameters.block_incremental;
     params->has_multifd_channels = true;
     params->multifd_channels = s->parameters.multifd_channels;
     params->has_multifd_compression = true;
@@ -1070,10 +915,6 @@ void migrate_params_init(MigrationParameters *params)
     params->tls_creds = g_strdup("");
 
     /* Set has_* up only for parameter checks */
-    params->has_compress_level = true;
-    params->has_compress_threads = true;
-    params->has_compress_wait_thread = true;
-    params->has_decompress_threads = true;
     params->has_throttle_trigger_threshold = true;
     params->has_cpu_throttle_initial = true;
     params->has_cpu_throttle_increment = true;
@@ -1081,7 +922,6 @@ void migrate_params_init(MigrationParameters *params)
     params->has_max_bandwidth = true;
     params->has_downtime_limit = true;
     params->has_x_checkpoint_delay = true;
-    params->has_block_incremental = true;
     params->has_multifd_channels = true;
     params->has_multifd_compression = true;
     params->has_multifd_zlib_level = true;
@@ -1107,27 +947,6 @@ bool migrate_params_check(MigrationParameters *params, Error **errp)
 {
     ERRP_GUARD();
 
-    if (params->has_compress_level &&
-        (params->compress_level > 9)) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
-                   "a value between 0 and 9");
-        return false;
-    }
-
-    if (params->has_compress_threads && (params->compress_threads < 1)) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
-                   "compress_threads",
-                   "a value between 1 and 255");
-        return false;
-    }
-
-    if (params->has_decompress_threads && (params->decompress_threads < 1)) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
-                   "decompress_threads",
-                   "a value between 1 and 255");
-        return false;
-    }
-
     if (params->has_throttle_trigger_threshold &&
         (params->throttle_trigger_threshold < 1 ||
          params->throttle_trigger_threshold > 100)) {
@@ -1301,22 +1120,6 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
 
     /* TODO use QAPI_CLONE() instead of duplicating it inline */
 
-    if (params->has_compress_level) {
-        dest->compress_level = params->compress_level;
-    }
-
-    if (params->has_compress_threads) {
-        dest->compress_threads = params->compress_threads;
-    }
-
-    if (params->has_compress_wait_thread) {
-        dest->compress_wait_thread = params->compress_wait_thread;
-    }
-
-    if (params->has_decompress_threads) {
-        dest->decompress_threads = params->decompress_threads;
-    }
-
     if (params->has_throttle_trigger_threshold) {
         dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
     }
@@ -1359,9 +1162,6 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
         dest->x_checkpoint_delay = params->x_checkpoint_delay;
     }
 
-    if (params->has_block_incremental) {
-        dest->block_incremental = params->block_incremental;
-    }
     if (params->has_multifd_channels) {
         dest->multifd_channels = params->multifd_channels;
     }
@@ -1424,30 +1224,6 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
 
     /* TODO use QAPI_CLONE() instead of duplicating it inline */
 
-    if (params->has_compress_level) {
-        warn_report("old compression is deprecated;"
-                    " use multifd compression methods instead");
-        s->parameters.compress_level = params->compress_level;
-    }
-
-    if (params->has_compress_threads) {
-        warn_report("old compression is deprecated;"
-                    " use multifd compression methods instead");
-        s->parameters.compress_threads = params->compress_threads;
-    }
-
-    if (params->has_compress_wait_thread) {
-        warn_report("old compression is deprecated;"
-                    " use multifd compression methods instead");
-        s->parameters.compress_wait_thread = params->compress_wait_thread;
-    }
-
-    if (params->has_decompress_threads) {
-        warn_report("old compression is deprecated;"
-                    " use multifd compression methods instead");
-        s->parameters.decompress_threads = params->decompress_threads;
-    }
-
     if (params->has_throttle_trigger_threshold) {
         s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
     }
@@ -1502,11 +1278,6 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         colo_checkpoint_delay_set();
     }
 
-    if (params->has_block_incremental) {
-        warn_report("block migration is deprecated;"
-                    " use blockdev-mirror with NBD instead");
-        s->parameters.block_incremental = params->block_incremental;
-    }
     if (params->has_multifd_channels) {
         s->parameters.multifd_channels = params->multifd_channels;
     }
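
[Editorial note, not part of the patch] The options.c changes above unwind the usual three-point wiring of a migration capability, which is why the removal touches the property table, an accessor, and migrate_caps_check(). For a hypothetical capability FOO (every name below is a placeholder) that wiring looks roughly like:

    /* 1) property alias in migration_properties[] */
    DEFINE_PROP_MIG_CAP("x-foo", MIGRATION_CAPABILITY_FOO),

    /* 2) accessor used by the rest of the migration code */
    bool migrate_foo(void)
    {
        MigrationState *s = migrate_get_current();

        return s->capabilities[MIGRATION_CAPABILITY_FOO];
    }

    /* 3) compatibility rules in migrate_caps_check() */
    if (new_caps[MIGRATION_CAPABILITY_FOO] &&
        new_caps[MIGRATION_CAPABILITY_XBZRLE]) {
        error_setg(errp, "foo is not compatible with xbzrle");
        return false;
    }
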
diff --git a/migration/options.h b/migration/options.h
index ab8199e207..4b21cc2669 100644
--- a/migration/options.h
+++ b/migration/options.h
@@ -25,9 +25,7 @@ extern Property migration_properties[];
 /* capabilities */
 
 bool migrate_auto_converge(void);
-bool migrate_block(void);
 bool migrate_colo(void);
-bool migrate_compress(void);
 bool migrate_dirty_bitmaps(void);
 bool migrate_events(void);
 bool migrate_mapped_ram(void);
@@ -67,15 +65,10 @@ bool migrate_cap_set(int cap, bool value, Error **errp);
 const BitmapMigrationNodeAliasList *migrate_block_bitmap_mapping(void);
 bool migrate_has_block_bitmap_mapping(void);
 
-bool migrate_block_incremental(void);
 uint32_t migrate_checkpoint_delay(void);
-int migrate_compress_level(void);
-int migrate_compress_threads(void);
-int migrate_compress_wait_thread(void);
 uint8_t migrate_cpu_throttle_increment(void);
 uint8_t migrate_cpu_throttle_initial(void);
 bool migrate_cpu_throttle_tailslow(void);
-int migrate_decompress_threads(void);
 uint64_t migrate_downtime_limit(void);
 uint8_t migrate_max_cpu_throttle(void);
 uint64_t migrate_max_bandwidth(void);
@@ -92,14 +85,8 @@ const char *migrate_tls_hostname(void);
 uint64_t migrate_xbzrle_cache_size(void);
 ZeroPageDetection migrate_zero_page_detection(void);
 
-/* parameters setters */
-
-void migrate_set_block_incremental(bool value);
-
 /* parameters helpers */
 
 bool migrate_params_check(MigrationParameters *params, Error **errp);
 void migrate_params_init(MigrationParameters *params);
-void block_cleanup_parameters(void);
-
 #endif
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index a10882d47f..9ccbbb0099 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -778,84 +778,6 @@ uint64_t qemu_get_be64(QEMUFile *f)
     return v;
 }
 
-/* return the size after compression, or negative value on error */
-static int qemu_compress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
-                              const uint8_t *source, size_t source_len)
-{
-    int err;
-
-    err = deflateReset(stream);
-    if (err != Z_OK) {
-        return -1;
-    }
-
-    stream->avail_in = source_len;
-    stream->next_in = (uint8_t *)source;
-    stream->avail_out = dest_len;
-    stream->next_out = dest;
-
-    err = deflate(stream, Z_FINISH);
-    if (err != Z_STREAM_END) {
-        return -1;
-    }
-
-    return stream->next_out - dest;
-}
-
-/* Compress size bytes of data start at p and store the compressed
- * data to the buffer of f.
- *
- * Since the file is dummy file with empty_ops, return -1 if f has no space to
- * save the compressed data.
- */
-ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
-                                  const uint8_t *p, size_t size)
-{
-    ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t);
-
-    if (blen < compressBound(size)) {
-        return -1;
-    }
-
-    blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t),
-                              blen, p, size);
-    if (blen < 0) {
-        return -1;
-    }
-
-    qemu_put_be32(f, blen);
-    add_buf_to_iovec(f, blen);
-    return blen + sizeof(int32_t);
-}
-
-/* Put the data in the buffer of f_src to the buffer of f_des, and
- * then reset the buf_index of f_src to 0.
- */
-
-int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src)
-{
-    int len = 0;
-
-    if (f_src->buf_index > 0) {
-        len = f_src->buf_index;
-        qemu_put_buffer(f_des, f_src->buf, f_src->buf_index);
-        f_src->buf_index = 0;
-        f_src->iovcnt = 0;
-    }
-    return len;
-}
-
-/*
- * Check if the writable buffer is empty
- */
-
-bool qemu_file_buffer_empty(QEMUFile *file)
-{
-    assert(qemu_file_is_writable(file));
-
-    return !file->iovcnt;
-}
-
 /*
  * Get a string whose length is determined by a single preceding byte
  * A preallocated 256 byte buffer must be passed in.
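
[Editorial note, not part of the patch] The helpers removed above implemented the wire framing of a compressed page: a 4-byte big-endian length followed by the deflate payload, written into a dummy per-thread QEMUFile and later copied into the real stream. Schematically, with both halves taken from elsewhere in this diff:

    /* save side (was qemu_put_compression_data) */
    blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t),
                              blen, p, size);
    qemu_put_be32(f, blen);                    /* length prefix */
    /* blen compressed bytes follow in the stream */

    /* load side (was the RAM_SAVE_FLAG_COMPRESS_PAGE case in ram.c) */
    len = qemu_get_be32(f);
    decompress_data_with_multi_threads(f, host, len);
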
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index 32fd4a34fd..11c2120edd 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -54,10 +54,6 @@ void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
 
 size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
 size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
-ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
-                                  const uint8_t *p, size_t size);
-int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
-bool qemu_file_buffer_empty(QEMUFile *file);
 
 /*
  * Note that you can only peek continuous bytes from where the current pointer
diff --git a/migration/ram-compress.c b/migration/ram-compress.c
deleted file mode 100644
index fa4388f6a6..0000000000
--- a/migration/ram-compress.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2011-2015 Red Hat Inc
- *
- * Authors:
- *  Juan Quintela <quintela@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/cutils.h"
-
-#include "ram-compress.h"
-
-#include "qemu/error-report.h"
-#include "qemu/stats64.h"
-#include "migration.h"
-#include "options.h"
-#include "io/channel-null.h"
-#include "exec/target_page.h"
-#include "exec/ramblock.h"
-#include "ram.h"
-#include "migration-stats.h"
-
-static struct {
-    int64_t pages;
-    int64_t busy;
-    double busy_rate;
-    int64_t compressed_size;
-    double compression_rate;
-    /* compression statistics since the beginning of the period */
-    /* amount of count that no free thread to compress data */
-    uint64_t compress_thread_busy_prev;
-    /* amount bytes after compression */
-    uint64_t compressed_size_prev;
-    /* amount of compressed pages */
-    uint64_t compress_pages_prev;
-} compression_counters;
-
-static CompressParam *comp_param;
-static QemuThread *compress_threads;
-/* comp_done_cond is used to wake up the migration thread when
- * one of the compression threads has finished the compression.
- * comp_done_lock is used to co-work with comp_done_cond.
- */
-static QemuMutex comp_done_lock;
-static QemuCond comp_done_cond;
-
-struct DecompressParam {
-    bool done;
-    bool quit;
-    QemuMutex mutex;
-    QemuCond cond;
-    void *des;
-    uint8_t *compbuf;
-    int len;
-    z_stream stream;
-};
-typedef struct DecompressParam DecompressParam;
-
-static QEMUFile *decomp_file;
-static DecompressParam *decomp_param;
-static QemuThread *decompress_threads;
-static QemuMutex decomp_done_lock;
-static QemuCond decomp_done_cond;
-
-static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
-                                           RAMBlock *block, ram_addr_t offset,
-                                           uint8_t *source_buf);
-
-static void *do_data_compress(void *opaque)
-{
-    CompressParam *param = opaque;
-    RAMBlock *block;
-    ram_addr_t offset;
-    CompressResult result;
-
-    qemu_mutex_lock(&param->mutex);
-    while (!param->quit) {
-        if (param->trigger) {
-            block = param->block;
-            offset = param->offset;
-            param->trigger = false;
-            qemu_mutex_unlock(&param->mutex);
-
-            result = do_compress_ram_page(param->file, &param->stream,
-                                          block, offset, param->originbuf);
-
-            qemu_mutex_lock(&comp_done_lock);
-            param->done = true;
-            param->result = result;
-            qemu_cond_signal(&comp_done_cond);
-            qemu_mutex_unlock(&comp_done_lock);
-
-            qemu_mutex_lock(&param->mutex);
-        } else {
-            qemu_cond_wait(&param->cond, &param->mutex);
-        }
-    }
-    qemu_mutex_unlock(&param->mutex);
-
-    return NULL;
-}
-
-void compress_threads_save_cleanup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_compress() || !comp_param) {
-        return;
-    }
-
-    thread_count = migrate_compress_threads();
-    for (i = 0; i < thread_count; i++) {
-        /*
-         * we use it as a indicator which shows if the thread is
-         * properly init'd or not
-         */
-        if (!comp_param[i].file) {
-            break;
-        }
-
-        qemu_mutex_lock(&comp_param[i].mutex);
-        comp_param[i].quit = true;
-        qemu_cond_signal(&comp_param[i].cond);
-        qemu_mutex_unlock(&comp_param[i].mutex);
-
-        qemu_thread_join(compress_threads + i);
-        qemu_mutex_destroy(&comp_param[i].mutex);
-        qemu_cond_destroy(&comp_param[i].cond);
-        deflateEnd(&comp_param[i].stream);
-        g_free(comp_param[i].originbuf);
-        qemu_fclose(comp_param[i].file);
-        comp_param[i].file = NULL;
-    }
-    qemu_mutex_destroy(&comp_done_lock);
-    qemu_cond_destroy(&comp_done_cond);
-    g_free(compress_threads);
-    g_free(comp_param);
-    compress_threads = NULL;
-    comp_param = NULL;
-}
-
-int compress_threads_save_setup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_compress()) {
-        return 0;
-    }
-    thread_count = migrate_compress_threads();
-    compress_threads = g_new0(QemuThread, thread_count);
-    comp_param = g_new0(CompressParam, thread_count);
-    qemu_cond_init(&comp_done_cond);
-    qemu_mutex_init(&comp_done_lock);
-    for (i = 0; i < thread_count; i++) {
-        comp_param[i].originbuf = g_try_malloc(qemu_target_page_size());
-        if (!comp_param[i].originbuf) {
-            goto exit;
-        }
-
-        if (deflateInit(&comp_param[i].stream,
-                        migrate_compress_level()) != Z_OK) {
-            g_free(comp_param[i].originbuf);
-            goto exit;
-        }
-
-        /* comp_param[i].file is just used as a dummy buffer to save data,
-         * set its ops to empty.
-         */
-        comp_param[i].file = qemu_file_new_output(
-            QIO_CHANNEL(qio_channel_null_new()));
-        comp_param[i].done = true;
-        comp_param[i].quit = false;
-        qemu_mutex_init(&comp_param[i].mutex);
-        qemu_cond_init(&comp_param[i].cond);
-        qemu_thread_create(compress_threads + i, "compress",
-                           do_data_compress, comp_param + i,
-                           QEMU_THREAD_JOINABLE);
-    }
-    return 0;
-
-exit:
-    compress_threads_save_cleanup();
-    return -1;
-}
-
-static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
-                                           RAMBlock *block, ram_addr_t offset,
-                                           uint8_t *source_buf)
-{
-    uint8_t *p = block->host + offset;
-    size_t page_size = qemu_target_page_size();
-    int ret;
-
-    assert(qemu_file_buffer_empty(f));
-
-    if (buffer_is_zero(p, page_size)) {
-        return RES_ZEROPAGE;
-    }
-
-    /*
-     * copy it to a internal buffer to avoid it being modified by VM
-     * so that we can catch up the error during compression and
-     * decompression
-     */
-    memcpy(source_buf, p, page_size);
-    ret = qemu_put_compression_data(f, stream, source_buf, page_size);
-    if (ret < 0) {
-        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
-        error_report("compressed data failed!");
-        qemu_fflush(f);
-        return RES_NONE;
-    }
-    return RES_COMPRESS;
-}
-
-static inline void compress_reset_result(CompressParam *param)
-{
-    param->result = RES_NONE;
-    param->block = NULL;
-    param->offset = 0;
-}
-
-void compress_flush_data(void)
-{
-    int thread_count = migrate_compress_threads();
-
-    if (!migrate_compress()) {
-        return;
-    }
-
-    qemu_mutex_lock(&comp_done_lock);
-    for (int i = 0; i < thread_count; i++) {
-        while (!comp_param[i].done) {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-        }
-    }
-    qemu_mutex_unlock(&comp_done_lock);
-
-    for (int i = 0; i < thread_count; i++) {
-        qemu_mutex_lock(&comp_param[i].mutex);
-        if (!comp_param[i].quit) {
-            CompressParam *param = &comp_param[i];
-            compress_send_queued_data(param);
-            assert(qemu_file_buffer_empty(param->file));
-            compress_reset_result(param);
-        }
-        qemu_mutex_unlock(&comp_param[i].mutex);
-    }
-}
-
-static inline void set_compress_params(CompressParam *param, RAMBlock *block,
-                                       ram_addr_t offset)
-{
-    param->block = block;
-    param->offset = offset;
-    param->trigger = true;
-}
-
-/*
- * Return true when it compress a page
- */
-bool compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
-                                     int (send_queued_data(CompressParam *)))
-{
-    int thread_count;
-    bool wait = migrate_compress_wait_thread();
-
-    thread_count = migrate_compress_threads();
-    qemu_mutex_lock(&comp_done_lock);
-
-    while (true) {
-        for (int i = 0; i < thread_count; i++) {
-            if (comp_param[i].done) {
-                CompressParam *param = &comp_param[i];
-                qemu_mutex_lock(&param->mutex);
-                param->done = false;
-                send_queued_data(param);
-                assert(qemu_file_buffer_empty(param->file));
-                compress_reset_result(param);
-                set_compress_params(param, block, offset);
-
-                qemu_cond_signal(&param->cond);
-                qemu_mutex_unlock(&param->mutex);
-                qemu_mutex_unlock(&comp_done_lock);
-                return true;
-            }
-        }
-        if (!wait) {
-            qemu_mutex_unlock(&comp_done_lock);
-            compression_counters.busy++;
-            return false;
-        }
-        /*
-         * wait for a free thread if the user specifies
-         * 'compress-wait-thread', otherwise we will post the page out
-         * in the main thread as normal page.
-         */
-        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-    }
-}
-
-/* return the size after decompression, or negative value on error */
-static int
-qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
-                     const uint8_t *source, size_t source_len)
-{
-    int err;
-
-    err = inflateReset(stream);
-    if (err != Z_OK) {
-        return -1;
-    }
-
-    stream->avail_in = source_len;
-    stream->next_in = (uint8_t *)source;
-    stream->avail_out = dest_len;
-    stream->next_out = dest;
-
-    err = inflate(stream, Z_NO_FLUSH);
-    if (err != Z_STREAM_END) {
-        return -1;
-    }
-
-    return stream->total_out;
-}
-
-static void *do_data_decompress(void *opaque)
-{
-    DecompressParam *param = opaque;
-    unsigned long pagesize;
-    uint8_t *des;
-    int len, ret;
-
-    qemu_mutex_lock(&param->mutex);
-    while (!param->quit) {
-        if (param->des) {
-            des = param->des;
-            len = param->len;
-            param->des = 0;
-            qemu_mutex_unlock(&param->mutex);
-
-            pagesize = qemu_target_page_size();
-
-            ret = qemu_uncompress_data(&param->stream, des, pagesize,
-                                       param->compbuf, len);
-            if (ret < 0 && migrate_get_current()->decompress_error_check) {
-                error_report("decompress data failed");
-                qemu_file_set_error(decomp_file, ret);
-            }
-
-            qemu_mutex_lock(&decomp_done_lock);
-            param->done = true;
-            qemu_cond_signal(&decomp_done_cond);
-            qemu_mutex_unlock(&decomp_done_lock);
-
-            qemu_mutex_lock(&param->mutex);
-        } else {
-            qemu_cond_wait(&param->cond, &param->mutex);
-        }
-    }
-    qemu_mutex_unlock(&param->mutex);
-
-    return NULL;
-}
-
-int wait_for_decompress_done(void)
-{
-    if (!migrate_compress()) {
-        return 0;
-    }
-
-    int thread_count = migrate_decompress_threads();
-    qemu_mutex_lock(&decomp_done_lock);
-    for (int i = 0; i < thread_count; i++) {
-        while (!decomp_param[i].done) {
-            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
-        }
-    }
-    qemu_mutex_unlock(&decomp_done_lock);
-    return qemu_file_get_error(decomp_file);
-}
-
-void compress_threads_load_cleanup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_compress()) {
-        return;
-    }
-    thread_count = migrate_decompress_threads();
-    for (i = 0; i < thread_count; i++) {
-        /*
-         * we use it as a indicator which shows if the thread is
-         * properly init'd or not
-         */
-        if (!decomp_param[i].compbuf) {
-            break;
-        }
-
-        qemu_mutex_lock(&decomp_param[i].mutex);
-        decomp_param[i].quit = true;
-        qemu_cond_signal(&decomp_param[i].cond);
-        qemu_mutex_unlock(&decomp_param[i].mutex);
-    }
-    for (i = 0; i < thread_count; i++) {
-        if (!decomp_param[i].compbuf) {
-            break;
-        }
-
-        qemu_thread_join(decompress_threads + i);
-        qemu_mutex_destroy(&decomp_param[i].mutex);
-        qemu_cond_destroy(&decomp_param[i].cond);
-        inflateEnd(&decomp_param[i].stream);
-        g_free(decomp_param[i].compbuf);
-        decomp_param[i].compbuf = NULL;
-    }
-    g_free(decompress_threads);
-    g_free(decomp_param);
-    decompress_threads = NULL;
-    decomp_param = NULL;
-    decomp_file = NULL;
-}
-
-int compress_threads_load_setup(QEMUFile *f)
-{
-    int i, thread_count;
-
-    if (!migrate_compress()) {
-        return 0;
-    }
-
-    /*
-     * set compression_counters memory to zero for a new migration
-     */
-    memset(&compression_counters, 0, sizeof(compression_counters));
-
-    thread_count = migrate_decompress_threads();
-    decompress_threads = g_new0(QemuThread, thread_count);
-    decomp_param = g_new0(DecompressParam, thread_count);
-    qemu_mutex_init(&decomp_done_lock);
-    qemu_cond_init(&decomp_done_cond);
-    decomp_file = f;
-    for (i = 0; i < thread_count; i++) {
-        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
-            goto exit;
-        }
-
-        size_t compbuf_size = compressBound(qemu_target_page_size());
-        decomp_param[i].compbuf = g_malloc0(compbuf_size);
-        qemu_mutex_init(&decomp_param[i].mutex);
-        qemu_cond_init(&decomp_param[i].cond);
-        decomp_param[i].done = true;
-        decomp_param[i].quit = false;
-        qemu_thread_create(decompress_threads + i, "decompress",
-                           do_data_decompress, decomp_param + i,
-                           QEMU_THREAD_JOINABLE);
-    }
-    return 0;
-exit:
-    compress_threads_load_cleanup();
-    return -1;
-}
-
-void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len)
-{
-    int thread_count = migrate_decompress_threads();
-    QEMU_LOCK_GUARD(&decomp_done_lock);
-    while (true) {
-        for (int i = 0; i < thread_count; i++) {
-            if (decomp_param[i].done) {
-                decomp_param[i].done = false;
-                qemu_mutex_lock(&decomp_param[i].mutex);
-                qemu_get_buffer(f, decomp_param[i].compbuf, len);
-                decomp_param[i].des = host;
-                decomp_param[i].len = len;
-                qemu_cond_signal(&decomp_param[i].cond);
-                qemu_mutex_unlock(&decomp_param[i].mutex);
-                return;
-            }
-        }
-        qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
-    }
-}
-
-void populate_compress(MigrationInfo *info)
-{
-    if (!migrate_compress()) {
-        return;
-    }
-    info->compression = g_malloc0(sizeof(*info->compression));
-    info->compression->pages = compression_counters.pages;
-    info->compression->busy = compression_counters.busy;
-    info->compression->busy_rate = compression_counters.busy_rate;
-    info->compression->compressed_size = compression_counters.compressed_size;
-    info->compression->compression_rate = compression_counters.compression_rate;
-}
-
-uint64_t compress_ram_pages(void)
-{
-    return compression_counters.pages;
-}
-
-void update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
-{
-    ram_transferred_add(bytes_xmit);
-
-    if (param->result == RES_ZEROPAGE) {
-        stat64_add(&mig_stats.zero_pages, 1);
-        return;
-    }
-
-    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
-    compression_counters.compressed_size += bytes_xmit - 8;
-    compression_counters.pages++;
-}
-
-void compress_update_rates(uint64_t page_count)
-{
-    if (!migrate_compress()) {
-        return;
-    }
-    compression_counters.busy_rate = (double)(compression_counters.busy -
-            compression_counters.compress_thread_busy_prev) / page_count;
-    compression_counters.compress_thread_busy_prev =
-            compression_counters.busy;
-
-    double compressed_size = compression_counters.compressed_size -
-        compression_counters.compressed_size_prev;
-    if (compressed_size) {
-        double uncompressed_size = (compression_counters.pages -
-                                    compression_counters.compress_pages_prev) *
-            qemu_target_page_size();
-
-        /* Compression-Ratio = Uncompressed-size / Compressed-size */
-        compression_counters.compression_rate =
-            uncompressed_size / compressed_size;
-
-        compression_counters.compress_pages_prev =
-            compression_counters.pages;
-        compression_counters.compressed_size_prev =
-            compression_counters.compressed_size;
-    }
-}
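
[Editorial note, not part of the patch] The deleted ram-compress.c was built around a trigger/done handshake between the migration thread and a pool of worker threads, with a per-worker mutex and condition variable (the real code signalled completion on a separate shared lock/condvar). A self-contained distillation of that pattern, using plain pthreads so it stands alone; all names are placeholders:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool trigger;   /* producer posts one unit of work */
        bool done;      /* worker reports completion */
        bool quit;      /* shutdown request */
    } Worker;

    static void *worker_fn(void *opaque)
    {
        Worker *w = opaque;

        pthread_mutex_lock(&w->lock);
        while (!w->quit) {
            if (w->trigger) {
                w->trigger = false;
                pthread_mutex_unlock(&w->lock);
                /* ... compress one page outside the lock ... */
                pthread_mutex_lock(&w->lock);
                w->done = true;               /* producer waits for this */
                pthread_cond_signal(&w->cond);
            } else {
                pthread_cond_wait(&w->cond, &w->lock);
            }
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
    }
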
diff --git a/migration/ram-compress.h b/migration/ram-compress.h
deleted file mode 100644
index 0d89a2f55e..0000000000
--- a/migration/ram-compress.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2011-2015 Red Hat Inc
- *
- * Authors:
- *  Juan Quintela <quintela@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef QEMU_MIGRATION_COMPRESS_H
-#define QEMU_MIGRATION_COMPRESS_H
-
-#include "qemu-file.h"
-#include "qapi/qapi-types-migration.h"
-
-enum CompressResult {
-    RES_NONE = 0,
-    RES_ZEROPAGE = 1,
-    RES_COMPRESS = 2
-};
-typedef enum CompressResult CompressResult;
-
-struct CompressParam {
-    bool done;
-    bool quit;
-    bool trigger;
-    CompressResult result;
-    QEMUFile *file;
-    QemuMutex mutex;
-    QemuCond cond;
-    RAMBlock *block;
-    ram_addr_t offset;
-
-    /* internally used fields */
-    z_stream stream;
-    uint8_t *originbuf;
-};
-typedef struct CompressParam CompressParam;
-
-void compress_threads_save_cleanup(void);
-int compress_threads_save_setup(void);
-
-bool compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
-                                      int (send_queued_data(CompressParam *)));
-
-int wait_for_decompress_done(void);
-void compress_threads_load_cleanup(void);
-int compress_threads_load_setup(QEMUFile *f);
-void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len);
-
-void populate_compress(MigrationInfo *info);
-uint64_t compress_ram_pages(void);
-void update_compress_thread_counts(const CompressParam *param, int bytes_xmit);
-void compress_update_rates(uint64_t page_count);
-int compress_send_queued_data(CompressParam *param);
-void compress_flush_data(void);
-
-#endif
diff --git a/migration/ram.c b/migration/ram.c
index a975c5af16..ceea586b06 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -33,7 +33,6 @@
 #include "qemu/madvise.h"
 #include "qemu/main-loop.h"
 #include "xbzrle.h"
-#include "ram-compress.h"
 #include "ram.h"
 #include "migration.h"
 #include "migration-stats.h"
@@ -53,7 +52,6 @@
 #include "exec/target_page.h"
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
-#include "block.h"
 #include "sysemu/cpu-throttle.h"
 #include "savevm.h"
 #include "qemu/iov.h"
@@ -78,9 +76,10 @@
  * worked for pages that were filled with the same char.  We switched
  * it to only search for the zero value.  And to avoid confusion with
  * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
- */
-/*
- * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now
+ *
+ * RAM_SAVE_FLAG_FULL was obsoleted in 2009.
+ *
+ * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1.
  */
 #define RAM_SAVE_FLAG_FULL     0x01
 #define RAM_SAVE_FLAG_ZERO     0x02
@@ -90,7 +89,6 @@
 #define RAM_SAVE_FLAG_CONTINUE 0x20
 #define RAM_SAVE_FLAG_XBZRLE   0x40
 /* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
-#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 #define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
 /* We can't use any flag that is bigger than 0x200 */
 
@@ -691,8 +689,7 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
     qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
     bytes_xbzrle += encoded_len + 1 + 2;
     /*
-     * Like compressed_size (please see update_compress_thread_counts),
-     * the xbzrle encoded bytes don't count the 8 byte header with
+     * The xbzrle encoded bytes don't count the 8 byte header with
      * RAM_SAVE_FLAG_CONTINUE.
      */
     xbzrle_counters.bytes += bytes_xbzrle - 8;
@@ -950,7 +947,7 @@ uint64_t ram_get_total_transferred_pages(void)
 {
     return stat64_get(&mig_stats.normal_pages) +
         stat64_get(&mig_stats.zero_pages) +
-        compress_ram_pages() + xbzrle_counters.pages;
+        xbzrle_counters.pages;
 }
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
@@ -983,7 +980,6 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
         rs->xbzrle_pages_prev = xbzrle_counters.pages;
         rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
     }
-    compress_update_rates(page_count);
 }
 
 /*
@@ -1025,13 +1021,6 @@ static void migration_trigger_throttle(RAMState *rs)
     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
 
-    /* During block migration the auto-converge logic incorrectly detects
-     * that ram migration makes no progress. Avoid this by disabling the
-     * throttling logic during the bulk phase of block migration. */
-    if (blk_mig_bulk_active()) {
-        return;
-    }
-
     /*
      * The following detection logic can be refined later. For now:
      * Check to see if the ratio between dirtied bytes and the approx.
@@ -1066,14 +1055,14 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
     trace_migration_bitmap_sync_start();
     memory_global_dirty_log_sync(last_stage);
 
-    qemu_mutex_lock(&rs->bitmap_mutex);
-    WITH_RCU_READ_LOCK_GUARD() {
-        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-            ramblock_sync_dirty_bitmap(rs, block);
+    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
+        WITH_RCU_READ_LOCK_GUARD() {
+            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+                ramblock_sync_dirty_bitmap(rs, block);
+            }
+            stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
         }
-        stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
     }
-    qemu_mutex_unlock(&rs->bitmap_mutex);
 
     memory_global_after_dirty_log_sync();
     trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
@@ -1296,41 +1285,6 @@ static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
     return 1;
 }
 
-int compress_send_queued_data(CompressParam *param)
-{
-    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
-    MigrationState *ms = migrate_get_current();
-    QEMUFile *file = ms->to_dst_file;
-    int len = 0;
-
-    RAMBlock *block = param->block;
-    ram_addr_t offset = param->offset;
-
-    if (param->result == RES_NONE) {
-        return 0;
-    }
-
-    assert(block == pss->last_sent_block);
-
-    if (param->result == RES_ZEROPAGE) {
-        assert(qemu_file_buffer_empty(param->file));
-        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
-        qemu_put_byte(file, 0);
-        len += 1;
-        ram_release_page(block->idstr, offset);
-    } else if (param->result == RES_COMPRESS) {
-        assert(!qemu_file_buffer_empty(param->file));
-        len += save_page_header(pss, file, block,
-                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
-        len += qemu_put_qemu_file(file, param->file);
-    } else {
-        abort();
-    }
-
-    update_compress_thread_counts(param, len);
-
-    return len;
-}
 
 #define PAGE_ALL_CLEAN 0
 #define PAGE_TRY_AGAIN 1
@@ -1382,16 +1336,6 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
                     qemu_fflush(f);
                 }
             }
-            /*
-             * If memory migration starts over, we will meet a dirtied page
-             * which may still exists in compression threads's ring, so we
-             * should flush the compressed data to make sure the new page
-             * is not overwritten by the old one in the destination.
-             *
-             * Also If xbzrle is on, stop using the data compression at this
-             * point. In theory, xbzrle can do better than compression.
-             */
-            compress_flush_data();
 
             /* Hit the end of the list */
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
@@ -2042,37 +1986,6 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
     return 0;
 }
 
-/*
- * try to compress the page before posting it out, return true if the page
- * has been properly handled by compression, otherwise needs other
- * paths to handle it
- */
-static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
-                               ram_addr_t offset)
-{
-    if (!migrate_compress()) {
-        return false;
-    }
-
-    /*
-     * When starting the process of a new block, the first page of
-     * the block should be sent out before other pages in the same
-     * block, and all the pages in last block should have been sent
-     * out, keeping this order is important, because the 'cont' flag
-     * is used to avoid resending the block name.
-     *
-     * We post the fist page as normal page as compression will take
-     * much CPU resource.
-     */
-    if (pss->block != pss->last_sent_block) {
-        compress_flush_data();
-        return false;
-    }
-
-    return compress_page_with_multi_thread(pss->block, offset,
-                                           compress_send_queued_data);
-}
-
 /**
  * ram_save_target_page_legacy: save one target page
  *
@@ -2090,10 +2003,6 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
         return res;
     }
 
-    if (save_compress_page(rs, pss, offset)) {
-        return 1;
-    }
-
     if (save_zero_page(rs, pss, offset)) {
         return 1;
     }
@@ -2478,7 +2387,6 @@ static void ram_save_cleanup(void *opaque)
     ram_bitmaps_destroy();
 
     xbzrle_cleanup();
-    compress_threads_save_cleanup();
     ram_state_cleanup(rsp);
     g_free(migration_ops);
     migration_ops = NULL;
@@ -3097,15 +3005,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
     RAMBlock *block;
     int ret, max_hg_page_size;
 
-    if (compress_threads_save_setup()) {
-        error_setg(errp, "%s: failed to start compress threads", __func__);
-        return -1;
-    }
-
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
         if (ram_init_all(rsp, errp) != 0) {
-            compress_threads_save_cleanup();
             return -1;
         }
     }
@@ -3230,13 +3132,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     int64_t t0;
     int done = 0;
 
-    if (blk_mig_bulk_active()) {
-        /* Avoid transferring ram during bulk phase of block migration as
-         * the bulk phase will usually take a long time and transferring
-         * ram updates during that time is pointless. */
-        goto out;
-    }
-
     /*
      * We'll take this lock a little bit long, but it's okay for two reasons.
      * Firstly, the only possible other thread to take it is who calls
@@ -3284,14 +3179,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
                 rs->target_page_count += pages;
 
                 /*
-                 * During postcopy, it is necessary to make sure one whole host
-                 * page is sent in one chunk.
-                 */
-                if (migrate_postcopy_ram()) {
-                    compress_flush_data();
-                }
-
-                /*
                  * we want to check in the 1st loop, just in case it was the 1st
                  * time and we had to sync the dirty bitmap.
                  * qemu_clock_get_ns() is a bit expensive, so we only check each
@@ -3389,8 +3276,6 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         }
         qemu_mutex_unlock(&rs->bitmap_mutex);
 
-        compress_flush_data();
-
         ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
         if (ret < 0) {
             qemu_file_set_error(f, ret);
@@ -3804,7 +3689,6 @@ int ram_load_postcopy(QEMUFile *f, int channel)
         void *place_source = NULL;
         RAMBlock *block = NULL;
         uint8_t ch;
-        int len;
 
         addr = qemu_get_be64(f);
 
@@ -3821,8 +3705,7 @@ int ram_load_postcopy(QEMUFile *f, int channel)
         addr &= TARGET_PAGE_MASK;
 
         trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
-        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
-                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
             block = ram_block_from_stream(mis, f, flags, channel);
             if (!block) {
                 ret = -EINVAL;
@@ -3917,16 +3800,6 @@ int ram_load_postcopy(QEMUFile *f, int channel)
                                          TARGET_PAGE_SIZE);
             }
             break;
-        case RAM_SAVE_FLAG_COMPRESS_PAGE:
-            tmp_page->all_zero = false;
-            len = qemu_get_be32(f);
-            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
-                error_report("Invalid compressed data length: %d", len);
-                ret = -EINVAL;
-                break;
-            }
-            decompress_data_with_multi_threads(f, page_buffer, len);
-            break;
         case RAM_SAVE_FLAG_MULTIFD_FLUSH:
             multifd_recv_sync_main();
             break;
@@ -3944,11 +3817,6 @@ int ram_load_postcopy(QEMUFile *f, int channel)
             break;
         }
 
-        /* Got the whole host page, wait for decompress before placing. */
-        if (place_needed) {
-            ret |= wait_for_decompress_done();
-        }
-
         /* Detect for any possible file errors */
         if (!ret && qemu_file_get_error(f)) {
             ret = qemu_file_get_error(f);
@@ -4253,11 +4121,7 @@ static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
 static int ram_load_precopy(QEMUFile *f)
 {
     MigrationIncomingState *mis = migration_incoming_get_current();
-    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
-
-    if (!migrate_compress()) {
-        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
-    }
+    int flags = 0, ret = 0, invalid_flags = 0, i = 0;
 
     if (migrate_mapped_ram()) {
         invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH |
@@ -4294,16 +4158,12 @@ static int ram_load_precopy(QEMUFile *f)
         if (flags & invalid_flags) {
             error_report("Unexpected RAM flags: %d", flags & invalid_flags);
 
-            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
-                error_report("Received an unexpected compressed page");
-            }
-
             ret = -EINVAL;
             break;
         }
 
         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
-                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
+                     RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                     RAM_CHANNEL_PRECOPY);
 
@@ -4372,16 +4232,6 @@ static int ram_load_precopy(QEMUFile *f)
             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
             break;
 
-        case RAM_SAVE_FLAG_COMPRESS_PAGE:
-            len = qemu_get_be32(f);
-            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
-                error_report("Invalid compressed data length: %d", len);
-                ret = -EINVAL;
-                break;
-            }
-            decompress_data_with_multi_threads(f, host, len);
-            break;
-
         case RAM_SAVE_FLAG_XBZRLE:
             if (load_xbzrle(f, addr, host) < 0) {
                 error_report("Failed to decompress XBZRLE page at "
@@ -4423,7 +4273,6 @@ static int ram_load_precopy(QEMUFile *f)
         }
     }
 
-    ret |= wait_for_decompress_done();
     return ret;
 }
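
[Editorial note, not part of the patch] The migration_bitmap_sync() hunk above is the WITH_QEMU_LOCK_GUARD conversion: instead of pairing qemu_mutex_lock()/qemu_mutex_unlock() by hand, the macro holds the mutex exactly for the braced block and releases it on every exit path. Reduced to its essentials:

    /* before: manual pairing */
    qemu_mutex_lock(&rs->bitmap_mutex);
    /* ... sync dirty bitmaps under the lock ... */
    qemu_mutex_unlock(&rs->bitmap_mutex);

    /* after: the lock is scoped to the block */
    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        /* ... sync dirty bitmaps under the lock ... */
    }
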
 
diff --git a/migration/savevm.c b/migration/savevm.c
index 4509482ec4..6c789bd54b 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1711,11 +1711,6 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
         return -EINVAL;
     }
 
-    if (migrate_block()) {
-        error_setg(errp, "Block migration and snapshots are incompatible");
-        return -EINVAL;
-    }
-
     ret = migrate_init(ms, errp);
     if (ret) {
         return ret;
diff --git a/migration/trace-events b/migration/trace-events
index f0e1cb80c7..d0c44c3853 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -152,7 +152,7 @@ multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostnam
 # migration.c
 migrate_set_state(const char *new_state) "new state %s"
 migrate_fd_cleanup(void) ""
-migrate_fd_error(const char *error_desc) "error=%s"
+migrate_error(const char *error_desc) "error=%s"
 migrate_fd_cancel(void) ""
 migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx"
 migrate_pending_exact(uint64_t size, uint64_t pre, uint64_t post) "exact pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
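
[Editorial note, not part of the patch] A trace-events entry expands into a trace_<name>() helper, so the rename above turns calls to trace_migrate_fd_error() into trace_migrate_error() at whatever point the migration error is recorded. A plausible call shape (the exact call site is outside this excerpt, and error_get_pretty() is assumed as the string source):

    /* generated from the trace-events line above */
    void trace_migrate_error(const char *error_desc);

    /* invoked where the error is stored, e.g. */
    trace_migrate_error(error_get_pretty(error));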