Re: [net-next PATCH v2 2/4] octeontx2-af: npa: cn20k: Add DPC support
From: Simon Horman
Date: Fri Mar 20 2026 - 13:15:40 EST
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
octeontx2-af: npa: cn20k: Add DPC support
CN20k introduces 32 diagnostic and performance counters that are shared
across all NPA LFs. This patch adds new mbox messages npa_dpc_alloc/free
to allocate and map counters to LFs, and ensures LF to DPC counter
mappings are cleared at LF free/teardown.
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
> index c963f43dc7b0..1def2504872f 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
[ ... ]
> @@ -46,3 +48,117 @@ int rvu_npa_halo_hwctx_disable(struct npa_aq_enq_req *req)
>
>  	return 0;
>  }
> +
> +int npa_cn20k_dpc_alloc(struct rvu *rvu, struct npa_cn20k_dpc_alloc_req *req,
> +			struct npa_cn20k_dpc_alloc_rsp *rsp)
> +{
> +	struct rvu_hwinfo *hw = rvu->hw;
> +	u16 pcifunc = req->hdr.pcifunc;
> +	int cntr, lf, blkaddr, ridx;
> +	struct rvu_block *block;
> +	struct rvu_pfvf *pfvf;
> +	u64 val, lfmask;
> +
> +	pfvf = rvu_get_pfvf(rvu, pcifunc);
> +
> +	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
> +	if (!pfvf->npalf || blkaddr < 0)
> +		return NPA_AF_ERR_AF_LF_INVALID;
> +
> +	block = &hw->block[blkaddr];
> +	lf = rvu_get_lf(rvu, block, pcifunc, 0);
> +	if (lf < 0)
> +		return NPA_AF_ERR_AF_LF_INVALID;
> +
> +	/* allocate a new counter */
> +	cntr = rvu_alloc_rsrc(&rvu->npa_dpc);
               ^^^^
Are rvu_alloc_rsrc() and rvu_free_rsrc() calls synchronized here?
These functions use non-atomic find_first_zero_bit() + __set_bit() and
__clear_bit() operations on the bitmap. Looking at other callers like
rvu_mbox_handler_attach_resources() in rvu.c:
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	...
	mutex_lock(&rvu->rsrc_lock);
	for (q_cnt = 0; q_cnt < attach->npa_msixoff; q_cnt++) {
		msix = rvu_alloc_rsrc(&pfvf->msix);
		...
	}
	mutex_unlock(&rvu->rsrc_lock);
	...
}
Similarly, rvu_mbox_handler_nix_bandprof_alloc() and
rvu_mbox_handler_mcs_alloc_resources() also acquire rvu->rsrc_lock
before calling rvu_alloc_rsrc(). rvu_mbox_handler_npc_mcam_alloc_counter()
uses mcam->lock.
Since npa_cn20k_dpc_alloc() can be invoked by the mbox handler
rvu_mbox_handler_npa_cn20k_dpc_alloc() concurrently from different
PF/VF workqueue items in __rvu_mbox_handler(), two threads could race:
Thread 1                                Thread 2
npa_cn20k_dpc_alloc()                   npa_cn20k_dpc_alloc()
find_first_zero_bit(npa_dpc) = 0        find_first_zero_bit(npa_dpc) = 0
__set_bit(0, npa_dpc)                   __set_bit(0, npa_dpc)
return cntr = 0                         return cntr = 0
Both threads get counter 0, leading to duplicate allocation. The same
race exists in npa_cn20k_dpc_free() with rvu_free_rsrc().
Should these calls be protected with mutex_lock(&rvu->rsrc_lock), as is
done in the other resource allocation paths?
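For illustration, here is a minimal sketch of that approach, assuming
rvu->rsrc_lock is the appropriate lock for the npa_dpc bitmap (as it is
for the other rvu_alloc_rsrc()/rvu_free_rsrc() callers shown above):

	/* Sketch only: serialise npa_dpc bitmap updates the same way the
	 * other rvu_alloc_rsrc()/rvu_free_rsrc() callers do.
	 */
	mutex_lock(&rvu->rsrc_lock);
	cntr = rvu_alloc_rsrc(&rvu->npa_dpc);
	mutex_unlock(&rvu->rsrc_lock);
	if (cntr < 0)
		return cntr;

with a matching lock/unlock around rvu_free_rsrc(&rvu->npa_dpc, ...) in
npa_cn20k_dpc_free().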
> +	if (cntr < 0)
> +		return cntr;
> +	rsp->cntr_id = cntr;
> +
> +	/* DPC counter config */
> +	rvu_write64(rvu, blkaddr, NPA_AF_DPCX_CFG(cntr), req->dpc_conf);
> +
> +	/* 0 to 63 lfs -> idx 0, 64 - 127 lfs -> idx 1 */
> +	ridx = lf >> 6;
> +	lfmask = BIT_ULL(ridx ? lf - NPA_DPC_LFS_PER_REG : lf);
> +
> +	ridx = 2 * cntr + ridx;
> +	/* Give permission for LF access */
> +	val = rvu_read64(rvu, blkaddr, NPA_AF_DPC_PERMITX(ridx));
> +	val |= lfmask;
> +	rvu_write64(rvu, blkaddr, NPA_AF_DPC_PERMITX(ridx), val);
> +
> +	return 0;
> +}
> +
> +int rvu_mbox_handler_npa_cn20k_dpc_alloc(struct rvu *rvu,
> +					 struct npa_cn20k_dpc_alloc_req *req,
> +					 struct npa_cn20k_dpc_alloc_rsp *rsp)
> +{
> +	return npa_cn20k_dpc_alloc(rvu, req, rsp);
> +}
> +
> +int npa_cn20k_dpc_free(struct rvu *rvu, struct npa_cn20k_dpc_free_req *req)
> +{
> +	struct rvu_hwinfo *hw = rvu->hw;
> +	u16 pcifunc = req->hdr.pcifunc;
> +	int cntr, lf, blkaddr, ridx;
> +	struct rvu_block *block;
> +	struct rvu_pfvf *pfvf;
> +	u64 val, lfmask;
> +
> +	pfvf = rvu_get_pfvf(rvu, pcifunc);
> +
> +	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
> +	if (!pfvf->npalf || blkaddr < 0)
> +		return NPA_AF_ERR_AF_LF_INVALID;
> +
> +	block = &hw->block[blkaddr];
> +	lf = rvu_get_lf(rvu, block, pcifunc, 0);
> +	if (lf < 0)
> +		return NPA_AF_ERR_AF_LF_INVALID;
> +
> +	if (req->cntr_id >= NPA_DPC_MAX)
> +		return NPA_AF_ERR_PARAM;
> +
> +	/* 0 to 63 lfs -> idx 0, 64 - 127 lfs -> idx 1 */
> +	ridx = lf >> 6;
> +	lfmask = BIT_ULL(ridx ? lf - NPA_DPC_LFS_PER_REG : lf);
> +	cntr = req->cntr_id;
> +
> +	ridx = 2 * cntr + ridx;
> +
> +	val = rvu_read64(rvu, blkaddr, NPA_AF_DPC_PERMITX(ridx));
> +	/* Check if the counter is allotted to this LF */
> +	if (!(val & lfmask))
> +		return 0;
> +
> +	/* Revert permission */
> +	val &= ~lfmask;
> +	rvu_write64(rvu, blkaddr, NPA_AF_DPC_PERMITX(ridx), val);
> +
> +	/* Free this counter */
> +	rvu_free_rsrc(&rvu->npa_dpc, req->cntr_id);
> +
> +	return 0;
> +}
[ ... ]