hw/rdma: Replace QList by GQueue

RdmaProtectedQList provides a thread-safe queue of int64_t on top of a
QList.

rdma_protected_qlist_destroy() calls qlist_destroy_obj() directly.
qlist_destroy_obj() is actually for use by qobject_destroy() only.
The next commit will make that obvious.

The minimal fix would be calling qobject_unref() instead.  But QList
is actually a bad fit here.  It's designed for representing JSON
arrays.  We're better off with a GQueue here.  Replace.

Cc: Yuval Shaia <yuval.shaia.ml@gmail.com>
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20201211171152.146877-8-armbru@redhat.com>
Markus Armbruster, 2020-12-11 18:11:39 +01:00 (stable-6.0)
commit bce800869b, parent 88e25b1e6d
4 changed files with 30 additions and 26 deletions
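
Before the per-file diffs, a minimal standalone sketch of the pattern the new rdma_protected_gqueue_* helpers follow: a mutex-protected GQueue holding heap-copied int64_t values, pushed with g_memdup() and freed on pop (or en masse via g_queue_free_full() on destroy). This is plain GLib with hypothetical pq_* names and a GMutex standing in for QemuMutex; it illustrates the technique and is not QEMU code.

/*
 * Sketch only: build with
 *   gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

typedef struct {
    GMutex lock;
    GQueue *queue;            /* holds heap-allocated int64_t values */
} ProtectedGQueue;

static void pq_init(ProtectedGQueue *pq)
{
    g_mutex_init(&pq->lock);
    pq->queue = g_queue_new();
}

static void pq_append_int64(ProtectedGQueue *pq, int64_t value)
{
    g_mutex_lock(&pq->lock);
    /* copy the value to the heap; the queue stores the pointer
     * (g_memdup2() on GLib >= 2.68) */
    g_queue_push_tail(pq->queue, g_memdup(&value, sizeof(value)));
    g_mutex_unlock(&pq->lock);
}

static int64_t pq_pop_int64(ProtectedGQueue *pq)
{
    int64_t *valp, val;

    g_mutex_lock(&pq->lock);
    valp = g_queue_pop_head(pq->queue);   /* NULL when the queue is empty */
    g_mutex_unlock(&pq->lock);
    if (!valp) {
        return -ENOENT;                   /* mirrors the RDMA helpers */
    }
    val = *valp;
    g_free(valp);
    return val;
}

static void pq_destroy(ProtectedGQueue *pq)
{
    g_queue_free_full(pq->queue, g_free); /* frees any leftover elements too */
    g_mutex_clear(&pq->lock);
    pq->queue = NULL;
}

int main(void)
{
    ProtectedGQueue pq;

    pq_init(&pq);
    pq_append_int64(&pq, 42);
    printf("%" G_GINT64_FORMAT "\n", pq_pop_int64(&pq));  /* 42 */
    printf("%" G_GINT64_FORMAT "\n", pq_pop_int64(&pq));  /* -ENOENT */
    pq_destroy(&pq);
    return 0;
}

The minimal fix mentioned in the commit message would presumably have been to replace the qlist_destroy_obj() call with qobject_unref(list->list); the GQueue conversion avoids the QObject machinery altogether.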

hw/rdma/rdma_backend.c

@@ -78,7 +78,7 @@ static void clean_recv_mads(RdmaBackendDev *backend_dev)
unsigned long cqe_ctx_id;
do {
-cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
+cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->
recv_mads_list);
if (cqe_ctx_id != -ENOENT) {
qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
@@ -597,7 +597,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
bctx->up_ctx = ctx;
bctx->sge = *sge;
-rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);
+rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id);
return 0;
}
@@ -1111,7 +1111,7 @@ static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
trace_mad_message("recv", msg->umad.mad, msg->umad_len);
-cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
+cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list);
if (cqe_ctx_id == -ENOENT) {
rdma_warn_report("No more free MADs buffers, waiting for a while");
sleep(THR_POLL_TO);
@@ -1185,7 +1185,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
return -EIO;
}
-rdma_protected_qlist_init(&backend_dev->recv_mads_list);
+rdma_protected_gqueue_init(&backend_dev->recv_mads_list);
enable_rdmacm_mux_async(backend_dev);
@@ -1205,7 +1205,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
{
disable_rdmacm_mux_async(backend_dev);
qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
-rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
+rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list);
}
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,

hw/rdma/rdma_backend.h

@@ -43,7 +43,7 @@ typedef struct RdmaBackendDev {
struct ibv_context *context;
struct ibv_comp_channel *channel;
uint8_t port_num;
-RdmaProtectedQList recv_mads_list;
+RdmaProtectedGQueue recv_mads_list;
RdmaCmMux rdmacm_mux;
} RdmaBackendDev;

hw/rdma/rdma_utils.c

@@ -14,8 +14,6 @@
*/
#include "qemu/osdep.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "trace.h"
#include "rdma_utils.h"
@@ -54,41 +52,46 @@ void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
}
}
-void rdma_protected_qlist_init(RdmaProtectedQList *list)
+void rdma_protected_gqueue_init(RdmaProtectedGQueue *list)
{
qemu_mutex_init(&list->lock);
-list->list = qlist_new();
+list->list = g_queue_new();
}
-void rdma_protected_qlist_destroy(RdmaProtectedQList *list)
+void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list)
{
if (list->list) {
-qlist_destroy_obj(QOBJECT(list->list));
+g_queue_free_full(list->list, g_free);
qemu_mutex_destroy(&list->lock);
list->list = NULL;
}
}
-void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value)
+void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
+int64_t value)
{
qemu_mutex_lock(&list->lock);
-qlist_append_int(list->list, value);
+g_queue_push_tail(list->list, g_memdup(&value, sizeof(value)));
qemu_mutex_unlock(&list->lock);
}
int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list)
int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list)
{
-QObject *obj;
+int64_t *valp;
+int64_t val;
qemu_mutex_lock(&list->lock);
-obj = qlist_pop(list->list);
+valp = g_queue_pop_head(list->list);
qemu_mutex_unlock(&list->lock);
-if (!obj) {
+if (!valp) {
return -ENOENT;
}
-return qnum_get_uint(qobject_to(QNum, obj));
+val = *valp;
+g_free(valp);
+return val;
}
void rdma_protected_gslist_init(RdmaProtectedGSList *list)

hw/rdma/rdma_utils.h

@@ -28,10 +28,10 @@
#define rdma_info_report(fmt, ...) \
info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
-typedef struct RdmaProtectedQList {
+typedef struct RdmaProtectedGQueue {
QemuMutex lock;
-QList *list;
-} RdmaProtectedQList;
+GQueue *list;
+} RdmaProtectedGQueue;
typedef struct RdmaProtectedGSList {
QemuMutex lock;
@@ -40,10 +40,11 @@ typedef struct RdmaProtectedGSList {
void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
-void rdma_protected_qlist_init(RdmaProtectedQList *list);
-void rdma_protected_qlist_destroy(RdmaProtectedQList *list);
-void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value);
-int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list);
+void rdma_protected_gqueue_init(RdmaProtectedGQueue *list);
+void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list);
+void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
+int64_t value);
+int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list);
void rdma_protected_gslist_init(RdmaProtectedGSList *list);
void rdma_protected_gslist_destroy(RdmaProtectedGSList *list);
void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list,