RE: [PATCH v2 net-next 13/14] net: dsa: netc: initialize buffer bool table and implement flow-control
From: Wei Fang
Date: Tue Mar 24 2026 - 21:59:00 EST
> > +static u32 netc_get_buffer_pool_num(struct netc_switch *priv)
> > +{
> > + return netc_base_rd(&priv->regs, NETC_BPCAPR) & BPCAPR_NUM_BP;
> > +}
> > +
> > +static void netc_port_set_pbpmcr(struct netc_port *np, u64 mapping)
> > +{
> > + u32 pbpmcr0 = lower_32_bits(mapping);
> > + u32 pbpmcr1 = upper_32_bits(mapping);
> > +
> > + netc_port_wr(np, NETC_PBPMCR0, pbpmcr0);
> > + netc_port_wr(np, NETC_PBPMCR1, pbpmcr1);
> > +}
> > +
> > +static void netc_ipv_to_buffer_pool_mapping(struct netc_switch *priv)
> > +{
> > + int num_port_bp = priv->num_bp / priv->info->num_ports;
> > + int q = NETC_IPV_NUM / num_port_bp;
> > ^^^^
>
> Can this cause a division by zero? If netc_get_buffer_pool_num() reads
From a hardware perspective, num_bp is always greater than num_ports, so num_port_bp
will not be zero. Let me think about how to improve this to avoid such a false positive.
> the hardware register NETC_BPCAPR and returns 0, then num_port_bp would
> be
> 0 and this division would crash the kernel.
>
> The call chain is:
>
> netc_setup()
> netc_switch_bpt_default_config()
> priv->num_bp = netc_get_buffer_pool_num()
> netc_ipv_to_buffer_pool_mapping()
> num_port_bp = priv->num_bp / priv->info->num_ports;
> q = NETC_IPV_NUM / num_port_bp; <- division by zero
>
> Should netc_switch_bpt_default_config() validate that priv->num_bp is
> non-zero before proceeding?
>
> > + int r = NETC_IPV_NUM % num_port_bp;
> > + int num = q + r;
> > +
> > + /* IPV-to-buffer-pool mapping per port:
>
> [ ... ]
>
> > +static int netc_switch_bpt_default_config(struct netc_switch *priv)
> > +{
> > + priv->num_bp = netc_get_buffer_pool_num(priv);
> > + priv->bpt_list = devm_kcalloc(priv->dev, priv->num_bp,
> > + sizeof(struct bpt_cfge_data),
> > + GFP_KERNEL);
> > + if (!priv->bpt_list)
> > + return -ENOMEM;
> > +
> > + /* Initialize the maximum threshold of each buffer pool entry */
> > + for (int i = 0; i < priv->num_bp; i++) {
> > + struct bpt_cfge_data *cfge = &priv->bpt_list[i];
> > + int err;
> > +
> > + cfge->max_thresh = cpu_to_le16(NETC_BP_THRESH);
> > + err = ntmp_bpt_update_entry(&priv->ntmp, i, cfge);
> > + if (err)
> > + return err;
> > + }
> > +
> > + netc_ipv_to_buffer_pool_mapping(priv);
> > +
> > + return 0;
> > +}
> > +
>
> [ ... ]
>
> > @@ -1224,6 +1313,40 @@ static void netc_port_set_hd_flow_control(struct
> netc_port *np, bool en)
> > en ? PM_CMD_CFG_HD_FCEN : 0);
> > }
> >
> > +static void netc_port_set_tx_pause(struct netc_port *np, bool tx_pause)
> > +{
> > + struct netc_switch *priv = np->switch_priv;
> > + int port = np->dp->index;
> > + int i, j, num_bp;
> > +
> > + num_bp = priv->num_bp / priv->info->num_ports;
> > + for (i = 0, j = port * num_bp; i < num_bp; i++, j++) {
> > + struct bpt_cfge_data *cfge;
> > +
> > + cfge = &priv->bpt_list[j];
> > + if (tx_pause) {
> > + cfge->fc_on_thresh = cpu_to_le16(NETC_FC_THRESH_ON);
> > + cfge->fc_off_thresh = cpu_to_le16(NETC_FC_THRESH_OFF);
> > + cfge->fccfg_sbpen = FIELD_PREP(BPT_FC_CFG,
> > + BPT_FC_CFG_EN_BPFC);
> > + cfge->fc_ports = cpu_to_le32(BIT(port));
> > + } else {
> > + cfge->fc_on_thresh = cpu_to_le16(0);
> > + cfge->fc_off_thresh = cpu_to_le16(0);
> > + cfge->fccfg_sbpen = 0;
> > + cfge->fc_ports = cpu_to_le32(0);
> > + }
> > +
> > + ntmp_bpt_update_entry(&priv->ntmp, j, cfge);
> > ^^^^
>
> Should the return value of ntmp_bpt_update_entry() be checked here?
>
> Simon Horman noted in v1 review that other NTMP table operations in the
> driver propagate errors from the update functions. While this was fixed
> in netc_switch_bpt_default_config() in v2, the same issue remains here.
>
> If the hardware update fails (for example, DMA allocation failure or
> hardware timeout), the driver's bpt_list will have flow control values that
> do not match the actual hardware state, potentially causing incorrect flow
> control behavior.
Okay, I will improve it.