RE: Re: Re: [PATCH net-next] octeontx2-pf: Add support for page pool

From: Ratheesh Kannoth
Date: Tue May 16 2023 - 05:23:53 EST



> -----Original Message-----
> From: Paolo Abeni <pabeni@xxxxxxxxxx>
> Sent: Tuesday, May 16, 2023 2:21 PM
> To: Ratheesh Kannoth <rkannoth@xxxxxxxxxxx>; Yunsheng Lin
> <linyunsheng@xxxxxxxxxx>; netdev@xxxxxxxxxxxxxxx; linux-
> kernel@xxxxxxxxxxxxxxx
> Cc: Sunil Kovvuri Goutham <sgoutham@xxxxxxxxxxx>;
> davem@xxxxxxxxxxxxx; edumazet@xxxxxxxxxx; kuba@xxxxxxxxxx
> Subject: Re: Re: [PATCH net-next] octeontx2-pf: Add support for page
> pool
> >
> > > ....
> > > > @@ -1170,15 +1199,24 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
> > > >          /* Free SQB and RQB pointers from the aura pool */
> > > >          for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
> > > >                  iova = otx2_aura_allocptr(pfvf, pool_id);
> > > > +                pool = &pfvf->qset.pool[pool_id];
> > > >                  while (iova) {
> > > >                          if (type == AURA_NIX_RQ)
> > > >                                  iova -= OTX2_HEAD_ROOM;
> > > >
> > > >                          pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
> > > > -                        dma_unmap_page_attrs(pfvf->dev, iova, size,
> > > > -                                             DMA_FROM_DEVICE,
> > > > -                                             DMA_ATTR_SKIP_CPU_SYNC);
> > > > -                        put_page(virt_to_page(phys_to_virt(pa)));
> > > > +                        page = virt_to_page(phys_to_virt(pa));
> > >
> > > virt_to_page() seems ok for an order-0 page allocated from the page
> > > pool, as is done now, but it may break for order-1+ pages, since
> > > page_pool_put_page() expects the head page of a compound page or a
> > > base page. Maybe add a comment for that, or use virt_to_head_page()
> > > explicitly.
> > Thanks !!.
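Will change this to virt_to_head_page() in v2 so that order-1+ (compound)
pages are handled correctly as well; roughly (untested, on top of this hunk):

                        pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
                        /* take the head page so a compound page is returned
                         * to the page pool / page allocator as one unit
                         */
                        page = virt_to_head_page(phys_to_virt(pa));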
> > >
> > > > +
> > > > +                        if (pool->page_pool) {
> > > > +                                page_pool_put_page(pool->page_pool, page, size, true);
> > >
> > > page_pool_put_full_page() seems more appropriate here, as the
> > > PP_FLAG_DMA_SYNC_DEV flag is not set; even if it were set, the whole
> > > page would need to be synced instead of just a frag.
> > Agree.
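Will switch to page_pool_put_full_page() in v2; the if (pool->page_pool)
branch would then become roughly (untested):

                                /* return the whole page to the pool; no frag
                                 * size is passed and no DMA sync is requested
                                 */
                                page_pool_put_full_page(pool->page_pool, page, true);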
> > >
> > >
> > > > +                        } else {
> > > > +                                dma_unmap_page_attrs(pfvf->dev, iova, size,
> > > > +                                                     DMA_FROM_DEVICE,
> > > > +                                                     DMA_ATTR_SKIP_CPU_SYNC);
> > > > +
> > > > +                                put_page(page);
> > > > +                        }
> > > > +
> > > >                          iova = otx2_aura_allocptr(pfvf, pool_id);
> > > >                  }
> > > >          }
> > > > @@ -1196,6 +1234,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
> > > >                  pool = &pfvf->qset.pool[pool_id];
> > > >                  qmem_free(pfvf->dev, pool->stack);
> > > >                  qmem_free(pfvf->dev, pool->fc_addr);
> > > > +                page_pool_destroy(pool->page_pool);
> > > > +                pool->page_pool = NULL;
> > > >          }
> > > >          devm_kfree(pfvf->dev, pfvf->qset.pool);
> > > >          pfvf->qset.pool = NULL;
> > > > @@ -1279,8 +1319,10 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
> > > >  }
> > > >
> > > >  static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
> > > > -                          int stack_pages, int numptrs, int buf_size)
> > > > +                          int stack_pages, int numptrs, int buf_size,
> > > > +                          int type)
> > > >  {
> > > > +        struct page_pool_params pp_params = { 0 };
> > > >          struct npa_aq_enq_req *aq;
> > > >          struct otx2_pool *pool;
> > > >          int err;
> > > > @@ -1324,6 +1366,22 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
> > > >          aq->ctype = NPA_AQ_CTYPE_POOL;
> > > >          aq->op = NPA_AQ_INSTOP_INIT;
> > > >
> > > > +        if (type != AURA_NIX_RQ) {
> > > > +                pool->page_pool = NULL;
> > > > +                return 0;
> > > > +        }
> > > > +
> > > > +        pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
> > > > +        pp_params.pool_size = numptrs;
> > > > +        pp_params.nid = NUMA_NO_NODE;
> > > > +        pp_params.dev = pfvf->dev;
> > > > +        pp_params.dma_dir = DMA_FROM_DEVICE;
> > > > +        pool->page_pool = page_pool_create(&pp_params);
> > > > +        if (!pool->page_pool) {
> > > > +                netdev_err(pfvf->netdev, "Creation of page pool failed\n");
> > > > +                return -EFAULT;
> > > > +        }
> > > > +
> > > >          return 0;
> > > >  }
> > > >
> > > > @@ -1358,7 +1416,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
> > > >
> > > >                  /* Initialize pool context */
> > > >                  err = otx2_pool_init(pfvf, pool_id, stack_pages,
> > > > -                                     num_sqbs, hw->sqb_size);
> > > > +                                     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
> > > >                  if (err)
> > > >                          goto fail;
> > > >          }
> > > > @@ -1421,7 +1479,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
> > > >          }
> > > >          for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
> > > >                  err = otx2_pool_init(pfvf, pool_id, stack_pages,
> > > > -                                     num_ptrs, pfvf->rbsize);
> > > > +                                     num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
> > > >                  if (err)
> > > >                          goto fail;
> > > >          }
> > >
> > > ...
> > >
> > > > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > > > index 7045fedfd73a..df5f45aa6980 100644
> > > > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > > > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
> > > > @@ -217,9 +217,10 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
> > > >          skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
> > > >                          va - page_address(page) + off,
> > > >                          len - off, pfvf->rbsize);
> > > > -
> > > > +#ifndef CONFIG_PAGE_POOL
> > >
> > > Most drivers do 'select PAGE_POOL' in Kconfig when adding page pool
> > > support; is there any reason this driver does not do it here?
> > We thought about it. The idea was that the user should be able to use
> > the driver without PAGE_POOL support.
>
> Uhm... the above looks like a questionable choice: page pool is a small
> piece of infrastructure, and the performance delta should be significant.
>
Okay. Will select PAGE_POOL in Kconfig.

> Anyway, if you really want to use such a strategy, please be consistent
> and guard every relevant chunk of code with compiler guards. It would
> likely be better to provide dummy helpers for the few page_pool functions
> still missing them when !CONFIG_PAGE_POOL.

As page pool brings a performance improvement and will now be selected from the driver's Kconfig, I am inclined not to implement the dummy functions.
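For reference, the kind of fallback helpers you describe would look roughly
like the sketch below in include/net/page_pool.h (illustrative only; I have
not checked exactly which helpers still lack !CONFIG_PAGE_POOL stubs):

#else /* !CONFIG_PAGE_POOL */

/* illustrative stub so callers compile when page pool is built out;
 * callers must still handle the error return
 */
static inline struct page_pool *page_pool_create(const struct page_pool_params *params)
{
        return ERR_PTR(-EOPNOTSUPP);
}

#endif /* CONFIG_PAGE_POOL */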

>
> >
> Cheers,
>
> Paolo