| author | Peter Maydell <peter.maydell@linaro.org> | 2019-08-19 10:55:03 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2019-08-19 10:55:03 +0100 |
| commit | 1f37316238d0d412cbc16482c5c24b11c2c7dcec (patch) | |
| tree | ad24471e680d1074cc2ebcee74473035746216ff /util/hbitmap.c | |
| parent | afd760539308a5524accf964107cdb1d54a059e3 (diff) | |
| parent | a5f8a60b3eafd5563af48546d5d126d448e62ac5 (diff) | |
Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging
Pull request
Rebase notes:
011/36:[0003] [FC] 'block/backup: upgrade copy_bitmap to BdrvDirtyBitmap'
016/36:[----] [-C] 'iotests: Add virtio-scsi device helper'
017/36:[0002] [FC] 'iotests: add test 257 for bitmap-mode backups'
030/36:[0011] [FC] 'block/backup: teach TOP to never copy unallocated regions'
032/36:[0018] [FC] 'iotests/257: test traditional sync modes'
11: A new hbitmap call was added late in 4.1, changed to
bdrv_dirty_bitmap_next_zero.
16: Context-only (self.has_quit is new context in 040)
17: Removed 'auto' to follow upstream trends in iotest fashion
30: Handled explicitly on-list with R-B from Max.
32: Fix capitalization in test, as mentioned on-list.
# gpg: Signature made Sat 17 Aug 2019 00:12:13 BST
# gpg: using RSA key F9B7ABDBBCACDF95BE76CBD07DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>" [full]
# Primary key fingerprint: FAEB 9711 A12C F475 812F 18F2 88A9 064D 1835 61EB
# Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76 CBD0 7DEF 8106 AAFC 390E
* remotes/jnsnow/tags/bitmaps-pull-request: (36 commits)
tests/test-hbitmap: test next_zero and _next_dirty_area after truncate
block/backup: refactor write_flags
block/backup: deal with zero detection
qapi: add dirty-bitmaps to query-named-block-nodes result
iotests/257: test traditional sync modes
block/backup: support bitmap sync modes for non-bitmap backups
block/backup: teach TOP to never copy unallocated regions
block/backup: add backup_is_cluster_allocated
block/backup: centralize copy_bitmap initialization
block/backup: improve sync=bitmap work estimates
iotests/257: test API failures
block/backup: hoist bitmap check into QMP interface
iotests/257: Refactor backup helpers
iotests/257: add EmulatedBitmap class
iotests/257: add Pattern class
iotests: test bitmap moving inside 254
qapi: implement block-dirty-bitmap-remove transaction action
blockdev: reduce aio_context locked sections in bitmap add/remove
block/backup: loosen restriction on readonly bitmaps
iotests: add test 257 for bitmap-mode backups
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'util/hbitmap.c')
| -rw-r--r-- | util/hbitmap.c | 49 |
1 file changed, 45 insertions, 4 deletions
diff --git a/util/hbitmap.c b/util/hbitmap.c
index bcc0acdc6a..fd44c897ab 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -781,12 +781,33 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
 
 bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
 {
-    return (a->size == b->size) && (a->granularity == b->granularity);
+    return (a->orig_size == b->orig_size);
 }
 
 /**
- * Given HBitmaps A and B, let A := A (BITOR) B.
- * Bitmap B will not be modified.
+ * hbitmap_sparse_merge: performs dst = dst | src
+ * works with differing granularities.
+ * best used when src is sparsely populated.
+ */
+static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
+{
+    uint64_t offset = 0;
+    uint64_t count = src->orig_size;
+
+    while (hbitmap_next_dirty_area(src, &offset, &count)) {
+        hbitmap_set(dst, offset, count);
+        offset += count;
+        if (offset >= src->orig_size) {
+            break;
+        }
+        count = src->orig_size - offset;
+    }
+}
+
+/**
+ * Given HBitmaps A and B, let R := A (BITOR) B.
+ * Bitmaps A and B will not be modified,
+ *     except when bitmap R is an alias of A or B.
  *
  * @return true if the merge was successful,
  *         false if it was not attempted.
@@ -801,7 +822,26 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
     }
     assert(hbitmap_can_merge(b, result));
 
-    if (hbitmap_count(b) == 0) {
+    if ((!hbitmap_count(a) && result == b) ||
+        (!hbitmap_count(b) && result == a)) {
+        return true;
+    }
+
+    if (!hbitmap_count(a) && !hbitmap_count(b)) {
+        hbitmap_reset_all(result);
+        return true;
+    }
+
+    if (a->granularity != b->granularity) {
+        if ((a != result) && (b != result)) {
+            hbitmap_reset_all(result);
+        }
+        if (a != result) {
+            hbitmap_sparse_merge(result, a);
+        }
+        if (b != result) {
+            hbitmap_sparse_merge(result, b);
+        }
         return true;
     }
 
@@ -809,6 +849,7 @@
      * It may be possible to improve running times for sparsely populated maps
      * by using hbitmap_iter_next, but this is suboptimal for dense maps.
      */
+    assert(a->size == b->size);
     for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
         for (j = 0; j < a->sizes[i]; j++) {
             result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
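The behavioural change in the diff above is that hbitmap_can_merge() now only requires both bitmaps to describe the same number of bytes (orig_size), not the same granularity; when granularities differ, hbitmap_merge() falls back to hbitmap_sparse_merge(), which walks the dirty areas of the source and replays them into the destination with hbitmap_set(). The following is a minimal sketch (not part of this pull request) of how the new cross-granularity path could be exercised; it assumes compilation inside the QEMU tree where "qemu/osdep.h" and "qemu/hbitmap.h" are available, and the helper name check_cross_granularity_merge is made up for illustration.

```c
/*
 * Hypothetical sketch: merge two bitmaps of differing granularity.
 * Before this series, hbitmap_merge() refused such a merge because
 * hbitmap_can_merge() compared granularities as well as sizes.
 */
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

static void check_cross_granularity_merge(void)
{
    uint64_t size = 1 * 1024 * 1024;             /* both bitmaps cover 1 MiB */
    HBitmap *fine   = hbitmap_alloc(size, 0);    /* 2^0 = 1-byte granularity */
    HBitmap *coarse = hbitmap_alloc(size, 9);    /* 2^9 = 512-byte granularity */
    HBitmap *result = hbitmap_alloc(size, 9);

    hbitmap_set(fine, 100, 10);                  /* small dirty area in 'fine' */
    hbitmap_set(coarse, 4096, 512);              /* one dirty chunk in 'coarse' */

    /* orig_size matches, so the merge is accepted despite the granularity gap */
    g_assert(hbitmap_merge(fine, coarse, result));

    /* anything dirty in either input must be dirty in the result */
    g_assert(hbitmap_get(result, 100));
    g_assert(hbitmap_get(result, 4096));

    hbitmap_free(fine);
    hbitmap_free(coarse);
    hbitmap_free(result);
}
```

Because result here aliases neither input and the granularities differ, the new code first clears result with hbitmap_reset_all() and then sparse-merges both inputs into it, so stale bits in result cannot survive the merge.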