author     Markus Armbruster <armbru@redhat.com>    2020-12-11 18:11:39 +0100
committer  Markus Armbruster <armbru@redhat.com>    2020-12-19 10:38:43 +0100
commit     bce800869b44ce82705205147018bffbbc798834 (patch)
tree       8bbb2029f5cf60bc3019650ab9a30508674e85b8 /hw/rdma/rdma_backend.c
parent     88e25b1e6d8a0e3672ba8d5bae5c1df768c35bc8 (diff)
hw/rdma: Replace QList by GQueue
RdmaProtectedQList provides a thread-safe queue of int64_t on top of a
QList.

rdma_protected_qlist_destroy() calls qlist_destroy_obj() directly.
qlist_destroy_obj() is actually for use by qobject_destroy() only.
The next commit will make that obvious.

The minimal fix would be calling qobject_unref() instead.  But QList
is actually a bad fit here.  It's designed for representing JSON
arrays.  We're better off with a GQueue here.  Replace.
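The new helpers live in hw/rdma/rdma_utils.[ch], which is outside this diff; the hunks below only show the renamed call sites.  As a rough sketch of the shape of the replacement (the RdmaProtectedGQueue type and field names are assumptions, and the bodies are one possible GQueue-based implementation, not necessarily the committed one):

    /*
     * Sketch of a thread-safe int64_t queue built on GLib's GQueue,
     * assuming a wrapper struct with a QemuMutex and a GQueue of
     * heap-allocated values.  Names and layout are illustrative only.
     */
    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    typedef struct RdmaProtectedGQueue {
        QemuMutex lock;
        GQueue *list;
    } RdmaProtectedGQueue;

    void rdma_protected_gqueue_init(RdmaProtectedGQueue *q)
    {
        qemu_mutex_init(&q->lock);
        q->list = g_queue_new();
    }

    void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *q,
                                            int64_t value)
    {
        qemu_mutex_lock(&q->lock);
        /* Box the value on the heap so the queue can hold it as a pointer */
        g_queue_push_tail(q->list, g_memdup(&value, sizeof(value)));
        qemu_mutex_unlock(&q->lock);
    }

    int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *q)
    {
        int64_t *boxed, value;

        qemu_mutex_lock(&q->lock);
        boxed = g_queue_pop_head(q->list);
        qemu_mutex_unlock(&q->lock);

        if (!boxed) {
            return -ENOENT;   /* same "empty" convention as the old QList API */
        }
        value = *boxed;
        g_free(boxed);
        return value;
    }

    void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *q)
    {
        if (q->list) {
            g_queue_free_full(q->list, g_free);
            q->list = NULL;
            qemu_mutex_destroy(&q->lock);
        }
    }

Compared with the QList version, this drops the QObject reference counting entirely: teardown is a plain g_queue_free_full() instead of a misuse of qlist_destroy_obj().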

Cc: Yuval Shaia <yuval.shaia.ml@gmail.com>
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20201211171152.146877-8-armbru@redhat.com>
Diffstat (limited to 'hw/rdma/rdma_backend.c')
-rw-r--r--    hw/rdma/rdma_backend.c    10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 5de010b1fa..6dcdfbbbe2 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -78,7 +78,7 @@ static void clean_recv_mads(RdmaBackendDev *backend_dev)
     unsigned long cqe_ctx_id;
 
     do {
-        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
+        cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->
                                                     recv_mads_list);
         if (cqe_ctx_id != -ENOENT) {
             qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
@@ -597,7 +597,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
     bctx->up_ctx = ctx;
     bctx->sge = *sge;
 
-    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);
+    rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id);
 
     return 0;
 }
@@ -1111,7 +1111,7 @@ static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
 
     trace_mad_message("recv", msg->umad.mad, msg->umad_len);
 
-    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
+    cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list);
     if (cqe_ctx_id == -ENOENT) {
         rdma_warn_report("No more free MADs buffers, waiting for a while");
         sleep(THR_POLL_TO);
@@ -1185,7 +1185,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
         return -EIO;
     }
 
-    rdma_protected_qlist_init(&backend_dev->recv_mads_list);
+    rdma_protected_gqueue_init(&backend_dev->recv_mads_list);
 
     enable_rdmacm_mux_async(backend_dev);
 
@@ -1205,7 +1205,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
 {
     disable_rdmacm_mux_async(backend_dev);
     qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
-    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
+    rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list);
 }
 
 int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,