RE: [PATCH rdma-next v2 1/1] RDMA/mana_ib: memory windows
From: Long Li
Date: Wed Mar 25 2026 - 13:27:57 EST
> From: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
>
> Implement .alloc_mw() and .dealloc_mw() for mana device.
>
> Signed-off-by: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
Reviewed-by: Long Li <longli@xxxxxxxxxxxxx>
> ---
> v2: fixed comments. Cleaned up the use of mana_gd_send_request()
> drivers/infiniband/hw/mana/device.c | 3 ++
> drivers/infiniband/hw/mana/mana_ib.h | 8 +++++
> drivers/infiniband/hw/mana/mr.c | 53
> +++++++++++++++++++++++++++-
> include/net/mana/gdma.h | 5 +++
> 4 files changed, 68 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index ccc2279ca..9811570ab 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -17,6 +17,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
> .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
>
> .add_gid = mana_ib_gd_add_gid,
> + .alloc_mw = mana_ib_alloc_mw,
> .alloc_pd = mana_ib_alloc_pd,
> .alloc_ucontext = mana_ib_alloc_ucontext,
> .create_ah = mana_ib_create_ah,
> @@ -24,6 +25,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
> .create_qp = mana_ib_create_qp,
> .create_rwq_ind_table = mana_ib_create_rwq_ind_table,
> .create_wq = mana_ib_create_wq,
> + .dealloc_mw = mana_ib_dealloc_mw,
> .dealloc_pd = mana_ib_dealloc_pd,
> .dealloc_ucontext = mana_ib_dealloc_ucontext,
> .del_gid = mana_ib_gd_del_gid,
> @@ -53,6 +55,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
>
> INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
> INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
> + INIT_RDMA_OBJ_SIZE(ib_mw, mana_ib_mw, ibmw),
> INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
> INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
> INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index a7c8c0fd7..c9c94e86a 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -125,6 +125,11 @@ struct mana_ib_ah {
> dma_addr_t dma_handle;
> };
>
> +struct mana_ib_mw {
> + struct ib_mw ibmw;
> + mana_handle_t mw_handle;
> +};
> +
> struct mana_ib_mr {
> struct ib_mr ibmr;
> struct ib_umem *umem;
> @@ -736,6 +741,9 @@ void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
> int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
> int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
>
> +int mana_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
> +int mana_ib_dealloc_mw(struct ib_mw *mw);
> +
> struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
> u64 length,
> u64 iova, int fd, int mr_access_flags,
> struct ib_dmah *dmah,
> diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
> index 9613b225d..02236488f 100644
> --- a/drivers/infiniband/hw/mana/mr.c
> +++ b/drivers/infiniband/hw/mana/mr.c
> @@ -6,7 +6,7 @@
> #include "mana_ib.h"
>
> #define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
> -			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
> +			IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND | IB_ZERO_BASED)
>
> #define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
>
> @@ -27,6 +27,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
> if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
> flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
>
> + if (access_flags & IB_ACCESS_MW_BIND)
> + flags |= GDMA_ACCESS_FLAG_BIND_MW;
> +
> return flags;
> }
>
> @@ -304,6 +307,54 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
> 	return ERR_PTR(err);
> }
>
> +static int mana_ib_gd_create_mw(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
> +				struct ib_mw *ibmw)
> +{
> +	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
> + struct gdma_context *gc = mdev_to_gc(dev);
> + struct gdma_create_mr_response resp = {};
> + struct gdma_create_mr_request req = {};
> + int err;
> +
> +	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), sizeof(resp));
> + req.pd_handle = pd->pd_handle;
> +
> + switch (mw->ibmw.type) {
> + case IB_MW_TYPE_1:
> + req.mr_type = GDMA_MR_TYPE_MW1;
> + break;
> + case IB_MW_TYPE_2:
> + req.mr_type = GDMA_MR_TYPE_MW2;
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> +	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> + if (err)
> + return err;
> +
> + mw->ibmw.rkey = resp.rkey;
> + mw->mw_handle = resp.mr_handle;
> +
> + return 0;
> +}
> +
> +int mana_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
> +{
> +	struct mana_ib_dev *mdev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
> +	struct mana_ib_pd *pd = container_of(ibmw->pd, struct mana_ib_pd, ibpd);
> +
> +	return mana_ib_gd_create_mw(mdev, pd, ibmw);
> +}
> +
> +int mana_ib_dealloc_mw(struct ib_mw *ibmw)
> +{
> +	struct mana_ib_dev *dev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
> +	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
> +
> +	return mana_ib_gd_destroy_mr(dev, mw->mw_handle);
> +}
> +
> int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
> {
> 	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
>
> diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
> index 766f4fb25..fc6468ac7 100644
> --- a/include/net/mana/gdma.h
> +++ b/include/net/mana/gdma.h
> @@ -778,6 +778,7 @@ enum gdma_mr_access_flags {
> GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
> GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
> GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
> + GDMA_ACCESS_FLAG_BIND_MW = BIT_ULL(5),
> };
>
> /* GDMA_CREATE_DMA_REGION */
> @@ -870,6 +871,10 @@ enum gdma_mr_type {
> GDMA_MR_TYPE_ZBVA = 4,
> /* Device address MRs */
> GDMA_MR_TYPE_DM = 5,
> + /* Memory Window type 1 */
> + GDMA_MR_TYPE_MW1 = 6,
> + /* Memory Window type 2 */
> + GDMA_MR_TYPE_MW2 = 7,
> };
>
> struct gdma_create_mr_params {
> --
> 2.43.0