path: root/util/hbitmap.c
author     Peter Maydell <peter.maydell@linaro.org>   2019-01-17 12:48:42 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2019-01-17 12:48:42 +0000
commit     681d61362d3f766a00806b89d6581869041f73cb (patch)
tree       605b3b579713a778c1c3a09c5f3d36c0ebe0b075 /util/hbitmap.c
parent     6f2f34177a25bffd6fd92a05e6e66c8d22d97094 (diff)
parent     19c021e1948a81c4ba19b3ff735432b45b6aebee (diff)
Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging
Pull request

# gpg: Signature made Wed 16 Jan 2019 01:00:25 GMT
# gpg:                using RSA key 7DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>"
# Primary key fingerprint: FAEB 9711 A12C F475 812F  18F2 88A9 064D 1835 61EB
#      Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76  CBD0 7DEF 8106 AAFC 390E

* remotes/jnsnow/tags/bitmaps-pull-request:
  Revert "hbitmap: Add @advance param to hbitmap_iter_next()"
  Revert "test-hbitmap: Add non-advancing iter_next tests"
  Revert "block/dirty-bitmap: Add bdrv_dirty_iter_next_area"
  block/mirror: fix and improve do_sync_target_write
  tests: add tests for hbitmap_next_dirty_area
  dirty-bitmap: add bdrv_dirty_bitmap_next_dirty_area
  tests: add tests for hbitmap_next_zero with specified end parameter
  dirty-bitmap: improve bdrv_dirty_bitmap_next_zero

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
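
This series adds hbitmap_next_dirty_area(), which rewrites *start and *count
in place to describe the next contiguous dirty area within the requested
range. As a rough, hypothetical usage sketch (dump_dirty_areas() and its size
parameter are invented for illustration; only hbitmap_next_dirty_area()
itself comes from this series):

/* Illustrative only: walk every dirty area of an HBitmap using the new
 * hbitmap_next_dirty_area().  On success the function narrows
 * [start, start + count) to the next contiguous dirty area, clipped to the
 * requested range; it returns false once no dirty bits remain.
 */
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

static void dump_dirty_areas(const HBitmap *bitmap, uint64_t size)
{
    uint64_t start = 0;

    while (start < size) {
        uint64_t count = size - start;      /* search the rest of the bitmap */

        if (!hbitmap_next_dirty_area(bitmap, &start, &count)) {
            break;                          /* no dirty bits left */
        }
        printf("dirty area: offset %" PRIu64 ", length %" PRIu64 "\n",
               start, count);
        start += count;                     /* resume after this area */
    }
}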
Diffstat (limited to 'util/hbitmap.c')
-rw-r--r--  util/hbitmap.c  76
1 file changed, 64 insertions, 12 deletions
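
The diff also gives hbitmap_next_zero() an extra count parameter, so the
search is confined to [start, start + count) and returns -1 when that window
contains no zero bit. A minimal, hypothetical sketch of the new semantics
(next_zero_example() is invented; hbitmap_set() and hbitmap_free() are
existing hbitmap API calls, not part of this diff):

/* Illustrative only: the extended hbitmap_next_zero() searches just the
 * range [start, start + count) and returns -1 if every bit there is set,
 * or if the range is empty or out of bounds.
 */
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

static void next_zero_example(void)
{
    HBitmap *hb = hbitmap_alloc(64, 0);   /* 64 bits, granularity 2^0 */

    hbitmap_set(hb, 0, 4);                /* mark bits 0..3 dirty */

    /* Searching the whole bitmap finds the first clear bit, bit 4. */
    assert(hbitmap_next_zero(hb, 0, 64) == 4);

    /* Restricted to [0, 4), every bit is set, so the result is -1. */
    assert(hbitmap_next_zero(hb, 0, 4) == -1);

    hbitmap_free(hb);
}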
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 8d402c59d9..7905212a8b 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -53,6 +53,9 @@
  */
 
 struct HBitmap {
+    /* Size of the bitmap, as requested in hbitmap_alloc. */
+    uint64_t orig_size;
+
     /* Number of total bits in the bottom level.  */
     uint64_t size;
 
@@ -141,7 +144,7 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
     return cur;
 }
 
-int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance)
+int64_t hbitmap_iter_next(HBitmapIter *hbi)
 {
     unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
             hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
@@ -154,12 +157,8 @@ int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance)
         }
     }
 
-    if (advance) {
-        /* The next call will resume work from the next bit.  */
-        hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
-    } else {
-        hbi->cur[HBITMAP_LEVELS - 1] = cur;
-    }
+    /* The next call will resume work from the next bit.  */
+    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
     item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);
 
     return item << hbi->granularity;
@@ -192,16 +191,28 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
     unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
-    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
     unsigned long cur = last_lev[pos];
-    unsigned start_bit_offset =
-            (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    unsigned start_bit_offset;
+    uint64_t end_bit, sz;
     int64_t res;
 
+    if (start >= hb->orig_size || count == 0) {
+        return -1;
+    }
+
+    end_bit = count > hb->orig_size - start ?
+                hb->size :
+                ((start + count - 1) >> hb->granularity) + 1;
+    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;
+
+    /* There may be some zero bits in @cur before @start. We are not interested
+     * in them, let's set them.
+     */
+    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
     cur |= (1UL << start_bit_offset) - 1;
     assert((start >> hb->granularity) < hb->size);
 
@@ -218,7 +229,7 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
     }
 
     res = (pos << BITS_PER_LEVEL) + ctzl(cur);
-    if (res >= hb->size) {
+    if (res >= end_bit) {
         return -1;
     }
 
@@ -231,6 +242,45 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
     return res;
 }
 
+bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
+                             uint64_t *count)
+{
+    HBitmapIter hbi;
+    int64_t firt_dirty_off, area_end;
+    uint32_t granularity = 1UL << hb->granularity;
+    uint64_t end;
+
+    if (*start >= hb->orig_size || *count == 0) {
+        return false;
+    }
+
+    end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;
+
+    hbitmap_iter_init(&hbi, hb, *start);
+    firt_dirty_off = hbitmap_iter_next(&hbi);
+
+    if (firt_dirty_off < 0 || firt_dirty_off >= end) {
+        return false;
+    }
+
+    if (firt_dirty_off + granularity >= end) {
+        area_end = end;
+    } else {
+        area_end = hbitmap_next_zero(hb, firt_dirty_off + granularity,
+                                     end - firt_dirty_off - granularity);
+        if (area_end < 0) {
+            area_end = end;
+        }
+    }
+
+    if (firt_dirty_off > *start) {
+        *start = firt_dirty_off;
+    }
+    *count = area_end - *start;
+
+    return true;
+}
+
 bool hbitmap_empty(const HBitmap *hb)
 {
     return hb->count == 0;
@@ -652,6 +702,8 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity)
     HBitmap *hb = g_new0(struct HBitmap, 1);
     unsigned i;
 
+    hb->orig_size = size;
+
     assert(granularity >= 0 && granularity < 64);
     size = (size + (1ULL << granularity) - 1) >> granularity;
     assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
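
With the reverts in this series, hbitmap_iter_next() no longer takes an
advance flag: it always moves past the bit it returns, so callers can iterate
with a plain loop. A hypothetical sketch (visit_dirty_offsets() is invented
for illustration; the iterator calls themselves appear in the diff above):

/* Illustrative only: hbitmap_iter_next() now always advances, so each dirty
 * offset is visited exactly once.  Returned offsets are already scaled by
 * the bitmap granularity; -1 signals that the iterator is exhausted.
 */
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

static void visit_dirty_offsets(const HBitmap *hb)
{
    HBitmapIter hbi;
    int64_t off;

    hbitmap_iter_init(&hbi, hb, 0);       /* start scanning from offset 0 */

    while ((off = hbitmap_iter_next(&hbi)) >= 0) {
        printf("dirty offset: %" PRId64 "\n", off);
    }
}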