RE: [PATCH rdma-next 05/13] RDMA/mana_ib: Create and destroy UD/GSI QP
From: Long Li
Date: Thu Jan 23 2025 - 00:41:00 EST
> Subject: [PATCH rdma-next 05/13] RDMA/mana_ib: Create and destroy UD/GSI QP
>
> From: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
>
> Implement HW requests to create and destroy UD/GSI QPs.
> A UD/GSI QP has send and receive queues.
>
> Signed-off-by: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
> Reviewed-by: Shiraz Saleem <shirazsaleem@xxxxxxxxxxxxx>
Reviewed-by: Long Li <longli@xxxxxxxxxxxxx>
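
For anyone following along, the expected call sequence from the verbs QP
creation path would look roughly like the sketch below. The caller name,
the queue-setup step, and passing attr->qp_type as the last argument are
my assumptions; the actual verbs-level caller lands later in this series.

/* Hypothetical caller sketch, not part of this patch. */
static int create_ud_qp_sketch(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
                               struct ib_qp_init_attr *attr, u32 doorbell)
{
        int err;

        /*
         * qp->ud_qp.queues[i].gdma_region must already hold valid DMA
         * regions (one per mana_ud_queue_type entry) before the HW
         * request is issued.
         */
        err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell,
                                      attr->qp_type);
        if (err)
                return err;

        return 0;
}
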
> ---
> drivers/infiniband/hw/mana/main.c | 58 ++++++++++++++++++++++++++++
> drivers/infiniband/hw/mana/mana_ib.h | 49 +++++++++++++++++++++++
> 2 files changed, 107 insertions(+)
>
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index f2f6bb3..b0c55cb 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -1013,3 +1013,61 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> }
> return 0;
> }
> +
> +int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
> +                            struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
> +{
> + struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
> + struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
> + struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
> + struct gdma_context *gc = mdev_to_gc(mdev);
> + struct mana_rnic_create_udqp_resp resp = {};
> + struct mana_rnic_create_udqp_req req = {};
> + int err, i;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> + req.adapter = mdev->adapter_handle;
> + req.pd_handle = pd->pd_handle;
> + req.send_cq_handle = send_cq->cq_handle;
> + req.recv_cq_handle = recv_cq->cq_handle;
> + for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
> + req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
> + req.doorbell_page = doorbell;
> + req.max_send_wr = attr->cap.max_send_wr;
> + req.max_recv_wr = attr->cap.max_recv_wr;
> + req.max_send_sge = attr->cap.max_send_sge;
> + req.max_recv_sge = attr->cap.max_recv_sge;
> + req.qp_type = type;
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> + if (err) {
> + ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
> + return err;
> + }
> + qp->qp_handle = resp.qp_handle;
> + for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
> + qp->ud_qp.queues[i].id = resp.queue_ids[i];
> + /* The GDMA regions are now owned by the RNIC QP handle */
> + qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
> + }
> + return 0;
> +}
> +
> +int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> +{
> + struct mana_rnic_destroy_udqp_resp resp = {0};
> + struct mana_rnic_destroy_udqp_req req = {0};
> + struct gdma_context *gc = mdev_to_gc(mdev);
> + int err;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> + req.adapter = mdev->adapter_handle;
> + req.qp_handle = qp->qp_handle;
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> + if (err) {
> + ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
> + return err;
> + }
> + return 0;
> +}
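
One detail worth spelling out here: create sets each queue's gdma_region
to GDMA_INVALID_DMA_REGION on success because the RNIC QP handle now owns
the regions, so teardown has to release the HW QP first and only then
free the software queues. A sketch of that ordering, assuming the
mana_ib_destroy_queue() helper already in the driver:

/* Hypothetical teardown sketch, not part of this patch. */
static void destroy_ud_qp_sketch(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
        int i;

        /* Releases the HW QP together with the DMA regions it took over. */
        mana_ib_gd_destroy_ud_qp(mdev, qp);

        /*
         * No double free: gdma_region was invalidated at create time,
         * so destroying the software queues only releases what is left.
         */
        for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
                mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
}
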
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index 79ebd95..5e470f1 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -115,6 +115,17 @@ struct mana_ib_rc_qp {
> struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX];
> };
>
> +enum mana_ud_queue_type {
> + MANA_UD_SEND_QUEUE = 0,
> + MANA_UD_RECV_QUEUE,
> + MANA_UD_QUEUE_TYPE_MAX,
> +};
> +
> +struct mana_ib_ud_qp {
> + struct mana_ib_queue queues[MANA_UD_QUEUE_TYPE_MAX];
> + u32 sq_psn;
> +};
> +
> struct mana_ib_qp {
> struct ib_qp ibqp;
>
> @@ -122,6 +133,7 @@ struct mana_ib_qp {
> union {
> struct mana_ib_queue raw_sq;
> struct mana_ib_rc_qp rc_qp;
> + struct mana_ib_ud_qp ud_qp;
> };
>
> /* The port on the IB device, starting with 1 */
> @@ -146,6 +158,8 @@ enum mana_ib_command_code {
> MANA_IB_DESTROY_ADAPTER = 0x30003,
> MANA_IB_CONFIG_IP_ADDR = 0x30004,
> MANA_IB_CONFIG_MAC_ADDR = 0x30005,
> + MANA_IB_CREATE_UD_QP = 0x30006,
> + MANA_IB_DESTROY_UD_QP = 0x30007,
> MANA_IB_CREATE_CQ = 0x30008,
> MANA_IB_DESTROY_CQ = 0x30009,
> MANA_IB_CREATE_RC_QP = 0x3000a,
> @@ -297,6 +311,37 @@ struct mana_rnic_destroy_rc_qp_resp {
> struct gdma_resp_hdr hdr;
> }; /* HW Data */
>
> +struct mana_rnic_create_udqp_req {
> + struct gdma_req_hdr hdr;
> + mana_handle_t adapter;
> + mana_handle_t pd_handle;
> + mana_handle_t send_cq_handle;
> + mana_handle_t recv_cq_handle;
> + u64 dma_region[MANA_UD_QUEUE_TYPE_MAX];
> + u32 qp_type;
> + u32 doorbell_page;
> + u32 max_send_wr;
> + u32 max_recv_wr;
> + u32 max_send_sge;
> + u32 max_recv_sge;
> +}; /* HW Data */
> +
> +struct mana_rnic_create_udqp_resp {
> + struct gdma_resp_hdr hdr;
> + mana_handle_t qp_handle;
> + u32 queue_ids[MANA_UD_QUEUE_TYPE_MAX];
> +}; /* HW Data */
> +
> +struct mana_rnic_destroy_udqp_req {
> + struct gdma_req_hdr hdr;
> + mana_handle_t adapter;
> + mana_handle_t qp_handle;
> +}; /* HW Data */
> +
> +struct mana_rnic_destroy_udqp_resp {
> + struct gdma_resp_hdr hdr;
> +}; /* HW Data */
> +
> struct mana_ib_ah_attr {
> u8 src_addr[16];
> u8 dest_addr[16];
> @@ -483,4 +528,8 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
> int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
>                             struct ib_qp_init_attr *attr, u32 doorbell, u64 flags);
> int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
> +
> +int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
> +                            struct ib_qp_init_attr *attr, u32 doorbell, u32 type);
> +int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
> #endif
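
Also noting that mana_ib_ud_qp.sq_psn is added here but not consumed by
this patch; I assume a later modify_qp patch in the series records it
along these lines (sketch, not from this patch):

        if (attr_mask & IB_QP_SQ_PSN)
                qp->ud_qp.sq_psn = attr->sq_psn;
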
> --
> 2.43.0