Re: [PATCH v7 net-next 08/11] net/nebula-matrix: add vsi resource implementation
From: Paolo Abeni
Date: Thu Mar 12 2026 - 08:05:24 EST
On 3/10/26 1:09 PM, illusion.wang wrote:
> +static int nbl_dped_init(struct nbl_hw_mgt *hw_mgt)
> +{
> + nbl_hw_wr32(hw_mgt, NBL_DPED_VLAN_OFFSET, 0xC);
> + nbl_hw_wr32(hw_mgt, NBL_DPED_DSCP_OFFSET_0, 0x8);
> + nbl_hw_wr32(hw_mgt, NBL_DPED_DSCP_OFFSET_1, 0x4);
> +
> + // dped checksum offload
Minor nit: use /* */ for comments.
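i.e. something like:

	/* DPED checksum offload */
	nbl_configure_dped_checksum(hw_mgt);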
> + nbl_configure_dped_checksum(hw_mgt);
> +
> + return 0;
> +}
> +
> +static int nbl_uped_init(struct nbl_hw_mgt *hw_mgt)
> +{
> + struct ped_hw_edit_profile hw_edit;
> +
> + nbl_hw_rd_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u32 *)&hw_edit,
> + sizeof(hw_edit));
> + hw_edit.l3_len = 0;
> + nbl_hw_wr_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u32 *)&hw_edit,
> + sizeof(hw_edit));
> +
> + nbl_hw_rd_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u32 *)&hw_edit,
> + sizeof(hw_edit));
> + hw_edit.l3_len = 1;
> + nbl_hw_wr_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u32 *)&hw_edit,
> + sizeof(hw_edit));
> +
> + return 0;
> +}
> +
> +static void nbl_shaping_eth_init(struct nbl_hw_mgt *hw_mgt, u8 eth_id, u8 speed)
> +{
> + struct nbl_shaping_dvn_dport dvn_dport = { 0 };
> + struct nbl_shaping_dport dport = { 0 };
> + u32 rate, half_rate;
> +
> + if (speed == NBL_FW_PORT_SPEED_100G) {
> + rate = NBL_SHAPING_DPORT_100G_RATE;
> + half_rate = NBL_SHAPING_DPORT_HALF_100G_RATE;
> + } else {
> + rate = NBL_SHAPING_DPORT_25G_RATE;
> + half_rate = NBL_SHAPING_DPORT_HALF_25G_RATE;
> + }
> +
> + dport.cir = rate;
> + dport.pir = rate;
> + dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
> + dport.cbs = dport.depth;
> + dport.pbs = dport.depth;
> + dport.valid = 1;
> +
> + dvn_dport.cir = half_rate;
> + dvn_dport.pir = rate;
> + dvn_dport.depth = dport.depth;
> + dvn_dport.cbs = dvn_dport.depth;
> + dvn_dport.pbs = dvn_dport.depth;
> + dvn_dport.valid = 1;
> +
> + nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u32 *)&dport,
> + sizeof(dport));
> + nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id),
> + (u32 *)&dvn_dport, sizeof(dvn_dport));
> +}
> +
> +static int nbl_shaping_init(struct nbl_hw_mgt *hw_mgt, u8 speed)
> +{
> +#define NBL_SHAPING_FLUSH_INTERVAL 128
> + struct nbl_shaping_net net_shaping = { 0 };
> + struct dsch_psha_en psha_en = { 0 };
> + int i;
> +
> + for (i = 0; i < NBL_MAX_ETHERNET; i++)
> + nbl_shaping_eth_init(hw_mgt, i, speed);
> +
> + psha_en.en = 0xF;
> + nbl_hw_wr_regs(hw_mgt, NBL_DSCH_PSHA_EN_ADDR, (u32 *)&psha_en,
> + sizeof(psha_en));
> +
> + for (i = 0; i < NBL_MAX_FUNC; i++) {
> + nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_NET_REG(i),
> + (u32 *)&net_shaping, sizeof(net_shaping));
> + if ((i % NBL_SHAPING_FLUSH_INTERVAL) == 0)
> + nbl_flush_writes(hw_mgt);
> + }
> + nbl_flush_writes(hw_mgt);
> + return 0;
> +}
> +
> +static int nbl_dsch_qid_max_init(struct nbl_hw_mgt *hw_mgt)
> +{
> + struct dsch_vn_quanta quanta = { 0 };
> +
> + quanta.h_qua = NBL_HOST_QUANTA;
> + quanta.e_qua = NBL_ECPU_QUANTA;
> + nbl_hw_wr_regs(hw_mgt, NBL_DSCH_VN_QUANTA_ADDR, (u32 *)&quanta,
> + sizeof(quanta));
> + nbl_hw_wr32(hw_mgt, NBL_DSCH_HOST_QID_MAX, NBL_MAX_QUEUE_ID);
> +
> + nbl_hw_wr32(hw_mgt, NBL_DVN_ECPU_QUEUE_NUM, 0);
> + nbl_hw_wr32(hw_mgt, NBL_UVN_ECPU_QUEUE_NUM, 0);
> +
> + return 0;
> +}
> +
> +static int nbl_ustore_init(struct nbl_hw_mgt *hw_mgt, u8 eth_num)
> +{
> + struct nbl_ustore_port_drop_th drop_th = { 0 };
> + struct ustore_pkt_len pkt_len;
> + int i;
> +
> + nbl_hw_rd_regs(hw_mgt, NBL_USTORE_PKT_LEN_ADDR, (u32 *)&pkt_len,
> + sizeof(pkt_len));
> + /* min arp packet length 42 (14 + 28) */
> + pkt_len.min = 42;
> + nbl_hw_wr_regs(hw_mgt, NBL_USTORE_PKT_LEN_ADDR, (u32 *)&pkt_len,
> + sizeof(pkt_len));
> +
> + drop_th.en = 1;
> + if (eth_num == 1)
> + drop_th.disc_th = NBL_USTORE_SIGNLE_ETH_DROP_TH;
Another minor nit: s/SIGNLE/SINGLE/ in the macro name.
> + else if (eth_num == 2)
> + drop_th.disc_th = NBL_USTORE_DUAL_ETH_DROP_TH;
> + else
> + drop_th.disc_th = NBL_USTORE_QUAD_ETH_DROP_TH;
> +
> + for (i = 0; i < 4; i++)
> + nbl_hw_wr_regs(hw_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i),
> + (u32 *)&drop_th, sizeof(drop_th));
> +
> + for (i = 0; i < NBL_MAX_ETHERNET; i++) {
> + nbl_hw_rd32(hw_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(i));
> + nbl_hw_rd32(hw_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(i));
> + }
> +
> + return 0;
> +}
> +
> +static int nbl_dstore_init(struct nbl_hw_mgt *hw_mgt, u8 speed)
> +{
> + struct dstore_port_drop_th drop_th;
> + struct dstore_d_dport_fc_th fc_th;
> + struct dstore_disc_bp_th bp_th;
> + int i;
> +
> + for (i = 0; i < 6; i++) {
> + nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i),
> + (u32 *)&drop_th, sizeof(drop_th));
> + drop_th.en = 0;
> + nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i),
> + (u32 *)&drop_th, sizeof(drop_th));
> + }
> +
> + nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_DISC_BP_TH, (u32 *)&bp_th,
> + sizeof(bp_th));
> + bp_th.en = 1;
> + nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_DISC_BP_TH, (u32 *)&bp_th,
> + sizeof(bp_th));
> +
> + for (i = 0; i < 4; i++) {
> + nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i),
> + (u32 *)&fc_th, sizeof(fc_th));
> + if (speed == NBL_FW_PORT_SPEED_100G) {
> + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G;
> + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G;
> + } else {
> + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH;
> + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH;
> + }
> +
> + fc_th.fc_en = 1;
> + nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i),
> + (u32 *)&fc_th, sizeof(fc_th));
> + }
> +
> + return 0;
> +}
> +
> +static void nbl_dvn_descreq_num_cfg(struct nbl_hw_mgt *hw_mgt, u32 descreq_num)
> +{
> + u32 split_ring_prefect_num = (descreq_num >> 16) & 0xffff;
> + u32 packet_ring_prefect_num = descreq_num & 0xffff;
> + struct nbl_dvn_descreq_num_cfg num_cfg = { 0 };
> +
> + packet_ring_prefect_num =
> + packet_ring_prefect_num > 32 ? 32 : packet_ring_prefect_num;
> + packet_ring_prefect_num =
> + packet_ring_prefect_num < 8 ? 8 : packet_ring_prefect_num;
> + num_cfg.packed_l1_num = (packet_ring_prefect_num - 8) / 4;
> +
> + split_ring_prefect_num =
> + split_ring_prefect_num > 16 ? 16 : split_ring_prefect_num;
> + split_ring_prefect_num =
> + split_ring_prefect_num < 8 ? 8 : split_ring_prefect_num;
> + num_cfg.avring_cfg_num = split_ring_prefect_num > 8 ? 1 : 0;
Minor nit: prefer human-readable macro names to the magic numbers above
(8, 16, 32). Also, s/prefect/prefetch/ in the variable names.
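Since the ternaries above are just open-coded clamping, clamp() from
<linux/minmax.h> would pair nicely with such macros; a possible shape
(the macro names below are purely illustrative, pick whatever fits the
driver's conventions):

	#define NBL_DVN_PKT_PREFETCH_MIN	8
	#define NBL_DVN_PKT_PREFETCH_MAX	32
	#define NBL_DVN_SPLIT_PREFETCH_MIN	8
	#define NBL_DVN_SPLIT_PREFETCH_MAX	16

	/* clamp the prefetch numbers to the HW-supported ranges */
	packet_ring_prefetch_num = clamp(packet_ring_prefetch_num,
					 NBL_DVN_PKT_PREFETCH_MIN,
					 NBL_DVN_PKT_PREFETCH_MAX);
	split_ring_prefetch_num = clamp(split_ring_prefetch_num,
					NBL_DVN_SPLIT_PREFETCH_MIN,
					NBL_DVN_SPLIT_PREFETCH_MAX);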
/P