Re: [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA
From: kernel test robot
Date: Tue Nov 03 2020 - 02:27:42 EST
Hi George,
I love your patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git c43fd36f7fec6c227c5e8a8ddd7d3fe97472182f
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/b407a9eab03c85981a41a1e03c88d04036a860d6
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
git checkout b407a9eab03c85981a41a1e03c88d04036a860d6
# save the attached .config to the linux build tree
make W=1 ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:18:5: warning: no previous prototype for 'rvu_report_pair_start' [-Wmissing-prototypes]
18 | int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
| ^~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:29:5: warning: no previous prototype for 'rvu_report_pair_end' [-Wmissing-prototypes]
29 | int rvu_report_pair_end(struct devlink_fmsg *fmsg)
| ^~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:201:5: warning: no previous prototype for 'rvu_npa_register_interrupts' [-Wmissing-prototypes]
201 | int rvu_npa_register_interrupts(struct rvu *rvu)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
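These W=1 warnings flag functions defined with external linkage but no prior declaration. A common way to address them is to mark functions used only within rvu_devlink.c as static, and to declare functions called from other files in a header that rvu_devlink.c includes. A minimal sketch follows, assuming rvu_report_pair_start()/rvu_report_pair_end() are file-local helpers and that rvu.h is the shared header where rvu_npa_register_interrupts() should be declared (both are assumptions, not confirmed against the patch):

/* rvu_devlink.c: updated signatures giving file-local helpers internal linkage */
static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name);
static int rvu_report_pair_end(struct devlink_fmsg *fmsg);

/* rvu.h (assumed shared header): prototype for the function used outside this file */
int rvu_npa_register_interrupts(struct rvu *rvu);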
vim +/rvu_report_pair_start +18 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
17
> 18 int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
19 {
20 int err;
21
22 err = devlink_fmsg_pair_nest_start(fmsg, name);
23 if (err)
24 return err;
25
26 return devlink_fmsg_obj_nest_start(fmsg);
27 }
28
> 29 int rvu_report_pair_end(struct devlink_fmsg *fmsg)
30 {
31 int err;
32
33 err = devlink_fmsg_obj_nest_end(fmsg);
34 if (err)
35 return err;
36
37 return devlink_fmsg_pair_nest_end(fmsg);
38 }
39
40 static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
41 {
42 struct rvu_npa_event_cnt *npa_event_count;
43 struct rvu_devlink *rvu_dl = rvu_irq;
44 struct rvu *rvu;
45 int blkaddr;
46 u64 intr;
47
48 rvu = rvu_dl->rvu;
49 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
50 if (blkaddr < 0)
51 return IRQ_NONE;
52
53 npa_event_count = rvu_dl->npa_event_cnt;
54 intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
55
56 if (intr & BIT_ULL(0))
57 npa_event_count->unmap_slot_count++;
58 /* Clear interrupts */
59 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
60 return IRQ_HANDLED;
61 }
62
63 static int rvu_npa_inpq_to_cnt(u16 in,
64 struct rvu_npa_event_cnt *npa_event_count)
65 {
66 switch (in) {
67 case 0:
68 return 0;
69 case BIT(NPA_INPQ_NIX0_RX):
70 return npa_event_count->free_dis_nix0_rx_count++;
71 case BIT(NPA_INPQ_NIX0_TX):
72 return npa_event_count->free_dis_nix0_tx_count++;
73 case BIT(NPA_INPQ_NIX1_RX):
74 return npa_event_count->free_dis_nix1_rx_count++;
75 case BIT(NPA_INPQ_NIX1_TX):
76 return npa_event_count->free_dis_nix1_tx_count++;
77 case BIT(NPA_INPQ_SSO):
78 return npa_event_count->free_dis_sso_count++;
79 case BIT(NPA_INPQ_TIM):
80 return npa_event_count->free_dis_tim_count++;
81 case BIT(NPA_INPQ_DPI):
82 return npa_event_count->free_dis_dpi_count++;
83 case BIT(NPA_INPQ_AURA_OP):
84 return npa_event_count->free_dis_aura_count++;
85 case BIT(NPA_INPQ_INTERNAL_RSV):
86 return npa_event_count->free_dis_rsvd_count++;
87 }
88
89 return npa_event_count->alloc_dis_rsvd_count++;
90 }
91
92 static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
93 {
94 struct rvu_npa_event_cnt *npa_event_count;
95 struct rvu_devlink *rvu_dl = rvu_irq;
96 struct rvu *rvu;
97 int blkaddr, val;
98 u64 intr;
99
100 rvu = rvu_dl->rvu;
101 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
102 if (blkaddr < 0)
103 return IRQ_NONE;
104
105 npa_event_count = rvu_dl->npa_event_cnt;
106 intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
107
108 if (intr & BIT_ULL(32))
109 npa_event_count->unmap_pf_count++;
110
111 val = FIELD_GET(GENMASK(31, 16), intr);
112 rvu_npa_inpq_to_cnt(val, npa_event_count);
113
114 val = FIELD_GET(GENMASK(15, 0), intr);
115 rvu_npa_inpq_to_cnt(val, npa_event_count);
116
117 /* Clear interrupts */
118 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
119 return IRQ_HANDLED;
120 }
121
122 static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
123 {
124 struct rvu_npa_event_cnt *npa_event_count;
125 struct rvu_devlink *rvu_dl = rvu_irq;
126 struct rvu *rvu;
127 int blkaddr;
128 u64 intr;
129
130 rvu = rvu_dl->rvu;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
132 if (blkaddr < 0)
133 return IRQ_NONE;
134
135 npa_event_count = rvu_dl->npa_event_cnt;
136 intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
137
138 if (intr & BIT_ULL(14))
139 npa_event_count->aq_inst_count++;
140
141 if (intr & BIT_ULL(13))
142 npa_event_count->aq_res_count++;
143
144 if (intr & BIT_ULL(12))
145 npa_event_count->aq_db_count++;
146
147 /* Clear interrupts */
148 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
149 return IRQ_HANDLED;
150 }
151
152 static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
153 {
154 struct rvu_npa_event_cnt *npa_event_count;
155 struct rvu_devlink *rvu_dl = rvu_irq;
156 struct rvu *rvu;
157 int blkaddr;
158 u64 intr;
159
160 rvu = rvu_dl->rvu;
161 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
162 if (blkaddr < 0)
163 return IRQ_NONE;
164
165 npa_event_count = rvu_dl->npa_event_cnt;
166 intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
167
168 if (intr & BIT_ULL(34))
169 npa_event_count->poison_aq_inst_count++;
170
171 if (intr & BIT_ULL(33))
172 npa_event_count->poison_aq_res_count++;
173
174 if (intr & BIT_ULL(32))
175 npa_event_count->poison_aq_cxt_count++;
176
177 /* Clear interrupts */
178 rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
179 return IRQ_HANDLED;
180 }
181
182 static bool rvu_npa_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
183 const char *name, irq_handler_t fn)
184 {
185 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
186 int rc;
187
188 WARN_ON(rvu->irq_allocated[offset]);
189 rvu->irq_allocated[offset] = false;
190 sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
191 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
192 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
193 if (rc)
194 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
195 else
196 rvu->irq_allocated[offset] = true;
197
198 return rvu->irq_allocated[offset];
199 }
200
> 201 int rvu_npa_register_interrupts(struct rvu *rvu)
202 {
203 int blkaddr, base;
204 bool rc;
205
206 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
207 if (blkaddr < 0)
208 return blkaddr;
209
210 /* Get NPA AF MSIX vectors offset. */
211 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
212 if (!base) {
213 dev_warn(rvu->dev,
214 "Failed to get NPA_AF_INT vector offsets\n");
215 return 0;
216 }
217
218 /* Register and enable NPA_AF_RVU_INT interrupt */
219 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_RVU,
220 "NPA_AF_RVU_INT",
221 rvu_npa_af_rvu_intr_handler);
222 if (!rc)
223 goto err;
224 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
225
226 /* Register and enable NPA_AF_GEN_INT interrupt */
227 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_GEN,
228 "NPA_AF_RVU_GEN",
229 rvu_npa_af_gen_intr_handler);
230 if (!rc)
231 goto err;
232 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
233
234 /* Register and enable NPA_AF_ERR_INT interrupt */
235 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_AF_ERR,
236 "NPA_AF_ERR_INT",
237 rvu_npa_af_err_intr_handler);
238 if (!rc)
239 goto err;
240 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
241
242 /* Register and enable NPA_AF_RAS interrupt */
243 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_POISON,
244 "NPA_AF_RAS",
245 rvu_npa_af_ras_intr_handler);
246 if (!rc)
247 goto err;
248 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
249
250 return 0;
251 err:
252 rvu_npa_unregister_interrupts(rvu);
253 return rc;
254 }
255
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx