RE: [PATCH rdma-next 1/1] RDMA/mana_ib: process QP error events

From: Long Li
Date: Wed Jun 05 2024 - 18:43:01 EST


> Subject: [PATCH rdma-next 1/1] RDMA/mana_ib: process QP error events
>
> From: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
>
> Process QP fatal events from the error event queue.
> For that, find the QP using the QPN from the event and then call its event_handler.
> To find the QPs, store created RC QPs in an xarray.
>
> Signed-off-by: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
> Reviewed-by: Wei Hu <weh@xxxxxxxxxxxxx>
> ---
> drivers/infiniband/hw/mana/device.c | 3 ++
> drivers/infiniband/hw/mana/main.c | 37 ++++++++++++++++++-
> drivers/infiniband/hw/mana/mana_ib.h | 4 ++
> drivers/infiniband/hw/mana/qp.c | 11 ++++++
> .../net/ethernet/microsoft/mana/gdma_main.c | 1 +
> include/net/mana/gdma.h | 1 +
> 6 files changed, 55 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
> index 9a7da2e..9eb714e 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -126,6 +126,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> if (ret)
> goto destroy_eqs;
>
> + xa_init_flags(&dev->qp_table_rq, XA_FLAGS_LOCK_IRQ);
> ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
> if (ret) {
> ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
> @@ -143,6 +144,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> return 0;
>
> destroy_rnic:
> + xa_destroy(&dev->qp_table_rq);
> mana_ib_gd_destroy_rnic_adapter(dev);
> destroy_eqs:
> mana_ib_destroy_eqs(dev);
> @@ -158,6 +160,7 @@ static void mana_ib_remove(struct auxiliary_device *adev)
> struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
>
> ib_unregister_device(&dev->ib_dev);
> + xa_destroy(&dev->qp_table_rq);
> mana_ib_gd_destroy_rnic_adapter(dev);
> mana_ib_destroy_eqs(dev);
> mana_gd_deregister_device(dev->gdma_dev);
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 365b4f1..dfcfb88 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -667,6 +667,39 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
> return 0;
> }
>
> +static void
> +mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
> +{
> + struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
> + struct mana_ib_qp *qp;
> + struct ib_event ev;
> + unsigned long flag;
> + u32 qpn;
> +
> + switch (event->type) {
> + case GDMA_EQE_RNIC_QP_FATAL:
> + qpn = event->details[0];
> + xa_lock_irqsave(&mdev->qp_table_rq, flag);
> + qp = xa_load(&mdev->qp_table_rq, qpn);
> + if (qp)
> + refcount_inc(&qp->refcount);


Move this refcount_inc() to after the "if (!qp) break" check.
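
Something like this (just a sketch of what I mean; I am assuming the
refcount_inc() still has to stay under the xarray lock so that the lookup
cannot race with mana_ib_destroy_rc_qp()):

	xa_lock_irqsave(&mdev->qp_table_rq, flag);
	qp = xa_load(&mdev->qp_table_rq, qpn);
	if (!qp) {
		xa_unlock_irqrestore(&mdev->qp_table_rq, flag);
		break;
	}
	refcount_inc(&qp->refcount);
	xa_unlock_irqrestore(&mdev->qp_table_rq, flag);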

> + xa_unlock_irqrestore(&mdev->qp_table_rq, flag);
> + if (!qp)
> + break;
> + if (qp->ibqp.event_handler) {
> + ev.device = qp->ibqp.device;
> + ev.element.qp = &qp->ibqp;
> + ev.event = IB_EVENT_QP_FATAL;
> + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
> + }
> + if (refcount_dec_and_test(&qp->refcount))
> + complete(&qp->free);
> + break;
> + default:
> + break;
> + }
> +}
> +
> int mana_ib_create_eqs(struct mana_ib_dev *mdev)
> {
> struct gdma_context *gc = mdev_to_gc(mdev);
> @@ -676,7 +709,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
> spec.type = GDMA_EQ;
> spec.monitor_avl_buf = false;
> spec.queue_size = EQ_SIZE;
> - spec.eq.callback = NULL;
> + spec.eq.callback = mana_ib_event_handler;
> spec.eq.context = mdev;
> spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
> spec.eq.msix_index = 0;
> @@ -691,7 +724,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
> err = -ENOMEM;
> goto destroy_fatal_eq;
> }
> -
> + spec.eq.callback = NULL;
> for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
> spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
> err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index 60bc548..b732555 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -62,6 +62,7 @@ struct mana_ib_dev {
> mana_handle_t adapter_handle;
> struct gdma_queue *fatal_err_eq;
> struct gdma_queue **eqs;
> + struct xarray qp_table_rq;
> struct mana_ib_adapter_caps adapter_caps;
> };
>
> @@ -124,6 +125,9 @@ struct mana_ib_qp {
>
> /* The port on the IB device, starting with 1 */
> u32 port;
> +
> + refcount_t refcount;
> + struct completion free;
> };
>
> struct mana_ib_ucontext {
> diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
> index 34a9372..3f4fcc9 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -460,6 +460,12 @@ static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
> }
> }
>
> + refcount_set(&qp->refcount, 1);
> + init_completion(&qp->free);
> + err = xa_insert_irq(&mdev->qp_table_rq, qp->ibqp.qp_num, qp, GFP_KERNEL);
> + if (err)
> + goto destroy_qp;
> +
> return 0;
>
> destroy_qp:
> @@ -620,6 +626,11 @@ static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
> container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
> int i;
>
> + xa_erase_irq(&mdev->qp_table_rq, qp->ibqp.qp_num);
> + if (refcount_dec_and_test(&qp->refcount))
> + complete(&qp->free);
> + wait_for_completion(&qp->free);

Strange logic. Why not do:

	if (!refcount_dec_and_test(&qp->refcount))
		wait_for_completion(&qp->free);
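
In other words, the teardown in mana_ib_destroy_rc_qp() would read roughly
like this (a sketch of how I understand it, not the exact patch):

	/* No new event handler invocation can find the QP after this. */
	xa_erase_irq(&mdev->qp_table_rq, qp->ibqp.qp_num);

	/*
	 * Drop the table's reference. If mana_ib_event_handler() still holds
	 * one, wait for it to drop the last reference and call complete().
	 */
	if (!refcount_dec_and_test(&qp->refcount))
		wait_for_completion(&qp->free);

If refcount_dec_and_test() returns true here, no handler holds a reference,
so there is nothing to wait for and the extra complete() is not needed.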