-rw-r--r--  aio-posix.c              |  3
-rw-r--r--  hw/intc/s390_flic_kvm.c  |  2
-rw-r--r--  include/qemu/hbitmap.h   | 13
-rw-r--r--  tests/test-hbitmap.c     | 11
-rw-r--r--  util/hbitmap.c           | 22
5 files changed, 46 insertions, 5 deletions
diff --git a/aio-posix.c b/aio-posix.c
index 9453d83743..a8d7090bd8 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -508,7 +508,8 @@ static bool run_poll_handlers_once(AioContext *ctx)
 
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         if (!node->deleted && node->io_poll &&
-                node->io_poll(node->opaque)) {
+            aio_node_check(ctx, node->is_external) &&
+            node->io_poll(node->opaque)) {
             progress = true;
         }
 
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index da8e4dfab6..e86a84e49a 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -303,7 +303,7 @@ static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
          * migration state */
         error_report("flic: couldn't allocate memory");
         qemu_put_be64(f, FLIC_FAILED);
-        return;
+        return 0;
     }
 
     count = __get_all_irqs(flic, &buf, len);
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index eb464759d5..9239fe515e 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -146,6 +146,19 @@ void hbitmap_reset_all(HBitmap *hb);
 bool hbitmap_get(const HBitmap *hb, uint64_t item);
 
 /**
+ * hbitmap_is_serializable:
+ * @hb: HBitmap which should be (de-)serialized.
+ *
+ * Returns whether the bitmap can actually be (de-)serialized. Other
+ * (de-)serialization functions may only be invoked if this function returns
+ * true.
+ *
+ * Calling (de-)serialization functions does not affect a bitmap's
+ * (de-)serializability.
+ */
+bool hbitmap_is_serializable(const HBitmap *hb);
+
+/**
  * hbitmap_serialization_granularity:
  * @hb: HBitmap to operate on.
  *
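The new header comment above requires hbitmap_is_serializable() to return true before any other (de-)serialization function is invoked. A minimal, hedged sketch of that calling pattern follows; the serialize_whole_bitmap() helper is hypothetical and not part of this patch, and the hbitmap_serialization_size()/hbitmap_serialize_part() signatures are assumed from their use in tests/test-hbitmap.c further down:

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Hypothetical helper: serialize the first nb_items of a bitmap into a
 * freshly allocated buffer.  Returns NULL if the bitmap cannot be
 * serialized (granularity too large). */
static uint8_t *serialize_whole_bitmap(HBitmap *hb, uint64_t nb_items,
                                       uint64_t *out_size)
{
    uint8_t *buf;

    /* Gate every (de-)serialization call on this check, per the new
     * API contract documented above. */
    if (!hbitmap_is_serializable(hb)) {
        return NULL;
    }

    /* Serializing the whole bitmap, as the tests do, so no extra
     * alignment handling is shown here. */
    *out_size = hbitmap_serialization_size(hb, 0, nb_items);
    buf = g_malloc0(*out_size);
    hbitmap_serialize_part(hb, buf, 0, nb_items);
    return buf;
}

A caller would release the buffer with g_free() once the serialized data has been consumed.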
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index 9b7495cc32..23773d2051 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -744,6 +744,8 @@ static void test_hbitmap_serialize_granularity(TestHBitmapData *data,
     int r;
 
     hbitmap_test_init(data, L3 * 2, 3);
+    g_assert(hbitmap_is_serializable(data->hb));
+
     r = hbitmap_serialization_granularity(data->hb);
     g_assert_cmpint(r, ==, 64 << 3);
 }
@@ -768,6 +770,8 @@ static void hbitmap_test_serialize_range(TestHBitmapData *data,
     if (count) {
         hbitmap_set(data->hb, pos, count);
     }
+
+    g_assert(hbitmap_is_serializable(data->hb));
     hbitmap_serialize_part(data->hb, buf, 0, data->size);
 
     /* Serialized buffer is inherently LE, convert it back manually to test */
@@ -788,6 +792,8 @@ static void hbitmap_test_serialize_range(TestHBitmapData *data,
     memset(buf, 0, buf_size);
     hbitmap_serialize_part(data->hb, buf, 0, data->size);
     hbitmap_reset_all(data->hb);
+
+    g_assert(hbitmap_is_serializable(data->hb));
     hbitmap_deserialize_part(data->hb, buf, 0, data->size, true);
 
     for (i = 0; i < data->size; i++) {
@@ -810,6 +816,7 @@ static void test_hbitmap_serialize_basic(TestHBitmapData *data,
     int num_positions = sizeof(positions) / sizeof(positions[0]);
 
     hbitmap_test_init(data, L3, 0);
+    g_assert(hbitmap_is_serializable(data->hb));
     buf_size = hbitmap_serialization_size(data->hb, 0, data->size);
     buf = g_malloc0(buf_size);
 
@@ -841,6 +848,8 @@ static void test_hbitmap_serialize_part(TestHBitmapData *data,
         hbitmap_set(data->hb, positions[i], 1);
     }
 
+    g_assert(hbitmap_is_serializable(data->hb));
+
     for (i = 0; i < data->size; i += buf_size) {
         unsigned long *el = (unsigned long *)buf;
         hbitmap_serialize_part(data->hb, buf, i, buf_size);
@@ -879,6 +888,8 @@ static void test_hbitmap_serialize_zeroes(TestHBitmapData *data,
         hbitmap_set(data->hb, positions[i], L1);
     }
 
+    g_assert(hbitmap_is_serializable(data->hb));
+
     for (i = 0; i < num_positions; i++) {
         hbitmap_deserialize_zeroes(data->hb, positions[i], min_l1, true);
         hbitmap_iter_init(&iter, data->hb, 0);
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 9f691b76bd..35088e19c4 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -387,6 +387,24 @@ void hbitmap_reset_all(HBitmap *hb)
     hb->count = 0;
 }
 
+bool hbitmap_is_serializable(const HBitmap *hb)
+{
+    /* Every serialized chunk must be aligned to 64 bits so that endianness
+     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
+     * We have hbitmap_serialization_granularity() which converts this
+     * alignment requirement from bitmap bits to items covered (e.g. sectors).
+     * That value is:
+     *    64 << hb->granularity
+     * Since this value must not exceed UINT64_MAX, hb->granularity must be
+     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
+     *
+     * In order for hbitmap_serialization_granularity() to always return a
+     * meaningful value, bitmaps that are to be serialized must have a
+     * granularity of less than 58. */
+
+    return hb->granularity < 58;
+}
+
 bool hbitmap_get(const HBitmap *hb, uint64_t item)
 {
     /* Compute position and bit in the last layer.  */
@@ -399,9 +417,7 @@ bool hbitmap_get(const HBitmap *hb, uint64_t item)
 
 uint64_t hbitmap_serialization_granularity(const HBitmap *hb)
 {
-    /* Must hold true so that the shift below is defined
-     * (ld(64) == 6, i.e. 1 << 6 == 64) */
-    assert(hb->granularity < 64 - 6);
+    assert(hbitmap_is_serializable(hb));
 
     /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
      * hosts. */
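To illustrate the bound derived in the comment above: hbitmap_serialization_granularity() returns 64 << hb->granularity, and since 64 == 1 << 6, that value equals 2^(6 + granularity), which only fits into a uint64_t while 6 + granularity < 64, i.e. granularity < 58. A standalone sketch (plain C, independent of QEMU) checking that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 64 << g == 1 << (6 + g); it fits into a uint64_t only while
     * 6 + g < 64, i.e. g < 58, the same bound that
     * hbitmap_is_serializable() enforces. */
    for (unsigned g = 0; g < 58; g++) {
        uint64_t granularity = UINT64_C(64) << g;
        assert(granularity == UINT64_C(1) << (6 + g));
    }
    /* At g == 58, 64 * 2^58 == 2^64 already exceeds UINT64_MAX, so the
     * shifted value would wrap and no longer be meaningful. */
    printf("64 << g is representable for every g < 58\n");
    return 0;
}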