1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
3 */
4
5 #include <inttypes.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_malloc.h>
9
10 #include "otx2_ethdev.h"
11
12 static void
nix_lf_err_irq(void * param)13 nix_lf_err_irq(void *param)
14 {
15 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
16 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
17 uint64_t intr;
18
19 intr = otx2_read64(dev->base + NIX_LF_ERR_INT);
20 if (intr == 0)
21 return;
22
23 otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
24
25 /* Clear interrupt */
26 otx2_write64(intr, dev->base + NIX_LF_ERR_INT);
27
28 /* Dump registers to std out */
29 otx2_nix_reg_dump(dev, NULL);
30 otx2_nix_queues_ctx_dump(eth_dev);
31 }
32
33 static int
nix_lf_register_err_irq(struct rte_eth_dev * eth_dev)34 nix_lf_register_err_irq(struct rte_eth_dev *eth_dev)
35 {
36 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
37 struct rte_intr_handle *handle = &pci_dev->intr_handle;
38 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
39 int rc, vec;
40
41 vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
42
43 /* Clear err interrupt */
44 otx2_nix_err_intr_enb_dis(eth_dev, false);
45 /* Set used interrupt vectors */
46 rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec);
47 /* Enable all dev interrupt except for RQ_DISABLED */
48 otx2_nix_err_intr_enb_dis(eth_dev, true);
49
50 return rc;
51 }
52
53 static void
nix_lf_unregister_err_irq(struct rte_eth_dev * eth_dev)54 nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev)
55 {
56 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
57 struct rte_intr_handle *handle = &pci_dev->intr_handle;
58 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
59 int vec;
60
61 vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
62
63 /* Clear err interrupt */
64 otx2_nix_err_intr_enb_dis(eth_dev, false);
65 otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec);
66 }
67
68 static void
nix_lf_ras_irq(void * param)69 nix_lf_ras_irq(void *param)
70 {
71 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
72 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
73 uint64_t intr;
74
75 intr = otx2_read64(dev->base + NIX_LF_RAS);
76 if (intr == 0)
77 return;
78
79 otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
80
81 /* Clear interrupt */
82 otx2_write64(intr, dev->base + NIX_LF_RAS);
83
84 /* Dump registers to std out */
85 otx2_nix_reg_dump(dev, NULL);
86 otx2_nix_queues_ctx_dump(eth_dev);
87 }
88
89 static int
nix_lf_register_ras_irq(struct rte_eth_dev * eth_dev)90 nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev)
91 {
92 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
93 struct rte_intr_handle *handle = &pci_dev->intr_handle;
94 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
95 int rc, vec;
96
97 vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
98
99 /* Clear err interrupt */
100 otx2_nix_ras_intr_enb_dis(eth_dev, false);
101 /* Set used interrupt vectors */
102 rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec);
103 /* Enable dev interrupt */
104 otx2_nix_ras_intr_enb_dis(eth_dev, true);
105
106 return rc;
107 }
108
109 static void
nix_lf_unregister_ras_irq(struct rte_eth_dev * eth_dev)110 nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
111 {
112 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
113 struct rte_intr_handle *handle = &pci_dev->intr_handle;
114 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
115 int vec;
116
117 vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
118
119 /* Clear err interrupt */
120 otx2_nix_ras_intr_enb_dis(eth_dev, false);
121 otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
122 }
123
/* Fetch and acknowledge a per-queue interrupt status via the NIX
 * *_OP_INT register at @off.
 *
 * The queue index is placed in bits 63:44 of the op word and the
 * atomic add issues the read; bit 42 of the result signals OP_ERR.
 * The low byte of the result holds the pending interrupt bits, which
 * are written back (filtered through @mask) to clear them.
 *
 * Returns the pending interrupt bits (low byte), or 0 on OP_ERR.
 */
static inline uint8_t
nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
			   uint32_t off, uint64_t mask)
{
	uint64_t reg, wdata;
	uint8_t qint;

	/* Encode the queue selector for the op register */
	wdata = (uint64_t)q << 44;
	reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));

	if (reg & BIT_ULL(42) /* OP_ERR */) {
		otx2_err("Failed execute irq get off=0x%x", off);
		return 0;
	}

	/* Low byte carries the interrupt cause bits */
	qint = reg & 0xff;
	/* Drop the fields @mask excludes before the clearing write-back */
	wdata &= mask;
	otx2_write64(wdata | qint, dev->base + off);

	return qint;
}
145
146 static inline uint8_t
nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev * dev,uint16_t rq)147 nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
148 {
149 return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
150 }
151
152 static inline uint8_t
nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev * dev,uint16_t cq)153 nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
154 {
155 return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
156 }
157
158 static inline uint8_t
nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev * dev,uint16_t sq)159 nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
160 {
161 return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
162 }
163
164 static inline void
nix_lf_sq_debug_reg(struct otx2_eth_dev * dev,uint32_t off)165 nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
166 {
167 uint64_t reg;
168
169 reg = otx2_read64(dev->base + off);
170 if (reg & BIT_ULL(44))
171 otx2_err("SQ=%d err_code=0x%x",
172 (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
173 }
174
175 static void
nix_lf_cq_irq(void * param)176 nix_lf_cq_irq(void *param)
177 {
178 struct otx2_qint *cint = (struct otx2_qint *)param;
179 struct rte_eth_dev *eth_dev = cint->eth_dev;
180 struct otx2_eth_dev *dev;
181
182 dev = otx2_eth_pmd_priv(eth_dev);
183 /* Clear interrupt */
184 otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_INT(cint->qintx));
185 }
186
/* QINT handler: scans the RQs, CQs and SQs folded onto this QINT,
 * logs and acknowledges each per-queue cause, then clears the QINT
 * summary register itself and dumps state for diagnosis.
 */
static void
nix_lf_q_irq(void *param)
{
	struct otx2_qint *qint = (struct otx2_qint *)param;
	struct rte_eth_dev *eth_dev = qint->eth_dev;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint8_t irq, qintx = qint->qintx;
	int q, cq, rq, sq;
	uint64_t intr;

	intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
	if (intr == 0)
		return;

	otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
		 intr, qintx, dev->pf, dev->vf);

	/* Handle RQ interrupts */
	for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
		/* Queues map onto the available QINTs modulo dev->qints */
		rq = q % dev->qints;
		irq = nix_lf_rq_irq_get_and_clear(dev, rq);

		if (irq & BIT_ULL(NIX_RQINT_DROP))
			otx2_err("RQ=%d NIX_RQINT_DROP", rq);

		if (irq & BIT_ULL(NIX_RQINT_RED))
			otx2_err("RQ=%d NIX_RQINT_RED", rq);
	}

	/* Handle CQ interrupts */
	for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
		cq = q % dev->qints;
		irq = nix_lf_cq_irq_get_and_clear(dev, cq);

		if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
			otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
			otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
			otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
	}

	/* Handle SQ interrupts; each cause has a matching debug register */
	for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
		sq = q % dev->qints;
		irq = nix_lf_sq_irq_get_and_clear(dev, sq);

		if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
			otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
			/* NOTE(review): dumps NIX_LF_SEND_ERR_DBG like the
			 * SEND_ERR case above — confirm this is the intended
			 * debug register for SQB_ALLOC_FAIL.
			 */
			nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
		}
	}

	/* Clear the QINT summary interrupt last */
	otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));

	/* Dump registers to std out */
	otx2_nix_reg_dump(dev, NULL);
	otx2_nix_queues_ctx_dump(eth_dev);
}
261
262 int
oxt2_nix_register_queue_irqs(struct rte_eth_dev * eth_dev)263 oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
264 {
265 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
266 struct rte_intr_handle *handle = &pci_dev->intr_handle;
267 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
268 int vec, q, sqs, rqs, qs, rc = 0;
269
270 /* Figure out max qintx required */
271 rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
272 sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
273 qs = RTE_MAX(rqs, sqs);
274
275 dev->configured_qints = qs;
276
277 for (q = 0; q < qs; q++) {
278 vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
279
280 /* Clear QINT CNT */
281 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
282
283 /* Clear interrupt */
284 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
285
286 dev->qints_mem[q].eth_dev = eth_dev;
287 dev->qints_mem[q].qintx = q;
288
289 /* Sync qints_mem update */
290 rte_smp_wmb();
291
292 /* Register queue irq vector */
293 rc = otx2_register_irq(handle, nix_lf_q_irq,
294 &dev->qints_mem[q], vec);
295 if (rc)
296 break;
297
298 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
299 otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
300 /* Enable QINT interrupt */
301 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
302 }
303
304 return rc;
305 }
306
307 void
oxt2_nix_unregister_queue_irqs(struct rte_eth_dev * eth_dev)308 oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
309 {
310 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
311 struct rte_intr_handle *handle = &pci_dev->intr_handle;
312 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
313 int vec, q;
314
315 for (q = 0; q < dev->configured_qints; q++) {
316 vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
317
318 /* Clear QINT CNT */
319 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
320 otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
321
322 /* Clear interrupt */
323 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
324
325 /* Unregister queue irq vector */
326 otx2_unregister_irq(handle, nix_lf_q_irq,
327 &dev->qints_mem[q], vec);
328 }
329 }
330
331 int
oxt2_nix_register_cq_irqs(struct rte_eth_dev * eth_dev)332 oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
333 {
334 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
335 struct rte_intr_handle *handle = &pci_dev->intr_handle;
336 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
337 uint8_t rc = 0, vec, q;
338
339 dev->configured_cints = RTE_MIN(dev->cints,
340 eth_dev->data->nb_rx_queues);
341
342 for (q = 0; q < dev->configured_cints; q++) {
343 vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
344
345 /* Clear CINT CNT */
346 otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
347
348 /* Clear interrupt */
349 otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
350
351 dev->cints_mem[q].eth_dev = eth_dev;
352 dev->cints_mem[q].qintx = q;
353
354 /* Sync cints_mem update */
355 rte_smp_wmb();
356
357 /* Register queue irq vector */
358 rc = otx2_register_irq(handle, nix_lf_cq_irq,
359 &dev->cints_mem[q], vec);
360 if (rc) {
361 otx2_err("Fail to register CQ irq, rc=%d", rc);
362 return rc;
363 }
364
365 if (!handle->intr_vec) {
366 handle->intr_vec = rte_zmalloc("intr_vec",
367 dev->configured_cints *
368 sizeof(int), 0);
369 if (!handle->intr_vec) {
370 otx2_err("Failed to allocate %d rx intr_vec",
371 dev->configured_cints);
372 return -ENOMEM;
373 }
374 }
375 /* VFIO vector zero is resereved for misc interrupt so
376 * doing required adjustment. (b13bfab4cd)
377 */
378 handle->intr_vec[q] = RTE_INTR_VEC_RXTX_OFFSET + vec;
379
380 /* Configure CQE interrupt coalescing parameters */
381 otx2_write64(((CQ_CQE_THRESH_DEFAULT) |
382 (CQ_CQE_THRESH_DEFAULT << 32) |
383 (CQ_TIMER_THRESH_DEFAULT << 48)),
384 dev->base + NIX_LF_CINTX_WAIT((q)));
385
386 /* Keeping the CQ interrupt disabled as the rx interrupt
387 * feature needs to be enabled/disabled on demand.
388 */
389 }
390
391 return rc;
392 }
393
394 void
oxt2_nix_unregister_cq_irqs(struct rte_eth_dev * eth_dev)395 oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev)
396 {
397 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
398 struct rte_intr_handle *handle = &pci_dev->intr_handle;
399 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
400 int vec, q;
401
402 for (q = 0; q < dev->configured_cints; q++) {
403 vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
404
405 /* Clear CINT CNT */
406 otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
407
408 /* Clear interrupt */
409 otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
410
411 /* Unregister queue irq vector */
412 otx2_unregister_irq(handle, nix_lf_cq_irq,
413 &dev->cints_mem[q], vec);
414 }
415 }
416
417 int
otx2_nix_register_irqs(struct rte_eth_dev * eth_dev)418 otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
419 {
420 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
421 int rc;
422
423 if (dev->nix_msixoff == MSIX_VECTOR_INVALID) {
424 otx2_err("Invalid NIXLF MSIX vector offset vector: 0x%x",
425 dev->nix_msixoff);
426 return -EINVAL;
427 }
428
429 /* Register lf err interrupt */
430 rc = nix_lf_register_err_irq(eth_dev);
431 /* Register RAS interrupt */
432 rc |= nix_lf_register_ras_irq(eth_dev);
433
434 return rc;
435 }
436
/* Tear down the LF error and RAS interrupt handlers registered by
 * otx2_nix_register_irqs().
 */
void
otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev)
{
	nix_lf_unregister_err_irq(eth_dev);
	nix_lf_unregister_ras_irq(eth_dev);
}
443
444 int
otx2_nix_rx_queue_intr_enable(struct rte_eth_dev * eth_dev,uint16_t rx_queue_id)445 otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
446 uint16_t rx_queue_id)
447 {
448 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
449
450 /* Enable CINT interrupt */
451 otx2_write64(BIT_ULL(0), dev->base +
452 NIX_LF_CINTX_ENA_W1S(rx_queue_id));
453
454 return 0;
455 }
456
457 int
otx2_nix_rx_queue_intr_disable(struct rte_eth_dev * eth_dev,uint16_t rx_queue_id)458 otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
459 uint16_t rx_queue_id)
460 {
461 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
462
463 /* Clear and disable CINT interrupt */
464 otx2_write64(BIT_ULL(0), dev->base +
465 NIX_LF_CINTX_ENA_W1C(rx_queue_id));
466
467 return 0;
468 }
469
/* Enable (@enb true) or disable (@enb false) the NIX LF error
 * interrupts via the W1S/W1C enable registers.
 */
void
otx2_nix_err_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* Enable all nix lf error interrupts except
	 * RQ_DISABLED (bit 11) and CQ_DISABLED (bit 24).
	 */
	if (enb)
		otx2_write64(~(BIT_ULL(11) | BIT_ULL(24)),
			     dev->base + NIX_LF_ERR_INT_ENA_W1S);
	else
		/* Mask everything */
		otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
}
484
485 void
otx2_nix_ras_intr_enb_dis(struct rte_eth_dev * eth_dev,bool enb)486 otx2_nix_ras_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb)
487 {
488 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
489
490 if (enb)
491 otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S);
492 else
493 otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
494 }
495