/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static void
nix_err_intr_enb_dis(struct nix *nix, bool enb)
{
	/* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
	if (enb)
		plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
			    nix->base + NIX_LF_ERR_INT_ENA_W1S);
	else
		plt_write64(~0ull, nix->base + NIX_LF_ERR_INT_ENA_W1C);
}

static void
nix_ras_intr_enb_dis(struct nix *nix, bool enb)
{
	if (enb)
		plt_write64(~0ull, nix->base + NIX_LF_RAS_ENA_W1S);
	else
		plt_write64(~0ull, nix->base + NIX_LF_RAS_ENA_W1C);
}
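
/*
 * Editorial note (illustrative, not part of the driver): the two helpers
 * above rely on the hardware W1S/W1C convention - writing a 1 to a bit in
 * the *_ENA_W1S register enables that interrupt source, while writing a 1
 * to the same bit in *_ENA_W1C disables it; 0 bits are left untouched, so
 * no read-modify-write is needed. A minimal sketch of masking a single
 * error source (the helper name and the bit argument are hypothetical):
 *
 *	static void
 *	nix_err_irq_bit_enb_dis(struct nix *nix, uint8_t bit, bool enb)
 *	{
 *		uint64_t off = enb ? NIX_LF_ERR_INT_ENA_W1S :
 *				     NIX_LF_ERR_INT_ENA_W1C;
 *
 *		// Only the selected bit is affected by the write
 *		plt_write64(BIT_ULL(bit), nix->base + off);
 *	}
 */
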
void
roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* Enable CINT interrupt */
	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1S(rx_queue_id));
}

void
roc_nix_rx_queue_intr_disable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* Clear and disable CINT interrupt */
	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(rx_queue_id));
}
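
/*
 * Usage sketch (illustrative, not part of the driver): an ethdev layer
 * built on top of ROC would typically wire its Rx interrupt ops to the
 * two helpers above; the interrupt is armed while an lcore waits on the
 * event fd and disarmed again before polling resumes. The callback names
 * below are hypothetical.
 *
 *	static int
 *	example_rx_queue_intr_enable(struct roc_nix *roc_nix, uint16_t qid)
 *	{
 *		// Arm the completion interrupt (CINT) for this Rx queue
 *		roc_nix_rx_queue_intr_enable(roc_nix, qid);
 *		return 0;
 *	}
 *
 *	static int
 *	example_rx_queue_intr_disable(struct roc_nix *roc_nix, uint16_t qid)
 *	{
 *		// Disarm the CINT before going back to busy polling
 *		roc_nix_rx_queue_intr_disable(roc_nix, qid);
 *		return 0;
 *	}
 */
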
void
roc_nix_err_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	nix_err_intr_enb_dis(nix, enb);
}

void
roc_nix_ras_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	nix_ras_intr_enb_dis(nix, enb);
}

static void
nix_lf_err_irq(void *param)
{
	struct nix *nix = (struct nix *)param;
	struct dev *dev = &nix->dev;
	uint64_t intr;

	intr = plt_read64(nix->base + NIX_LF_ERR_INT);
	if (intr == 0)
		return;

	plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Clear interrupt */
	plt_write64(intr, nix->base + NIX_LF_ERR_INT);
	/* Dump registers to stdout */
	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
}

static int
nix_lf_register_err_irq(struct nix *nix)
{
	struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
	int rc, vec;

	vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
	/* Disable error interrupts while the handler is being registered */
	nix_err_intr_enb_dis(nix, false);
	/* Register the error interrupt handler on the vector */
	rc = dev_irq_register(handle, nix_lf_err_irq, nix, vec);
	/* Enable all error interrupts except RQ_DISABLED and CQ_DISABLED */
	nix_err_intr_enb_dis(nix, true);

	return rc;
}
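
/*
 * Design note (illustrative, not part of the driver): err/RAS handler
 * registration above follows a disable -> register -> enable sequence so
 * the vector cannot fire before its handler is in place. A condensed
 * sketch of the same idea, here unmasking only on success (the helper
 * name is hypothetical):
 *
 *	static int
 *	example_register_err_irq(struct nix *nix)
 *	{
 *		struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
 *		int vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
 *		int rc;
 *
 *		nix_err_intr_enb_dis(nix, false);	// mask while wiring up
 *		rc = dev_irq_register(handle, nix_lf_err_irq, nix, vec);
 *		if (rc == 0)
 *			nix_err_intr_enb_dis(nix, true);	// unmask on success
 *		return rc;
 *	}
 */
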
static void
nix_lf_unregister_err_irq(struct nix *nix)
{
	struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
	int vec;

	vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
	/* Disable error interrupts */
	nix_err_intr_enb_dis(nix, false);
	dev_irq_unregister(handle, nix_lf_err_irq, nix, vec);
}

static void
nix_lf_ras_irq(void *param)
{
	struct nix *nix = (struct nix *)param;
	struct dev *dev = &nix->dev;
	uint64_t intr;

	intr = plt_read64(nix->base + NIX_LF_RAS);
	if (intr == 0)
		return;

	plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
	/* Clear interrupt */
	plt_write64(intr, nix->base + NIX_LF_RAS);

	/* Dump registers to stdout */
	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
}

static int
nix_lf_register_ras_irq(struct nix *nix)
{
	struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
	int rc, vec;

	vec = nix->msixoff + NIX_LF_INT_VEC_POISON;
	/* Disable RAS interrupts while the handler is being registered */
	nix_ras_intr_enb_dis(nix, false);
	/* Register the RAS interrupt handler on the vector */
	rc = dev_irq_register(handle, nix_lf_ras_irq, nix, vec);
	/* Enable RAS interrupts */
	nix_ras_intr_enb_dis(nix, true);

	return rc;
}

static void
nix_lf_unregister_ras_irq(struct nix *nix)
{
	struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
	int vec;

	vec = nix->msixoff + NIX_LF_INT_VEC_POISON;
	/* Disable RAS interrupts */
	nix_ras_intr_enb_dis(nix, false);
	dev_irq_unregister(handle, nix_lf_ras_irq, nix, vec);
}

static inline uint8_t
nix_lf_q_irq_get_and_clear(struct nix *nix, uint16_t q, uint32_t off,
			   uint64_t mask)
{
	uint64_t reg, wdata;
	uint8_t qint;

	wdata = (uint64_t)q << 44;
	reg = roc_atomic64_add_nosync(wdata, (int64_t *)(nix->base + off));

	if (reg & BIT_ULL(42) /* OP_ERR */) {
		plt_err("Failed to execute irq get, off=0x%x", off);
		return 0;
	}
	qint = reg & 0xff;
	wdata &= mask;
	plt_write64(wdata | qint, nix->base + off);

	return qint;
}
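
/*
 * Worked note (illustrative, not part of the driver): NIX_LF_{RQ,CQ,SQ}_OP_INT
 * are hardware "operation" registers, so a queue's interrupt state is read
 * with an atomic 64-bit add rather than a plain load:
 *
 *	wdata = (uint64_t)q << 44;	// queue index in bits [63:44]
 *	reg = roc_atomic64_add_nosync(wdata, (int64_t *)(nix->base + off));
 *	// bit 42 of reg -> OP_ERR, the operation itself failed
 *	// bits [7:0]    -> pending interrupt bits for queue q
 *
 * Writing back (wdata & mask) | qint to the same offset is a
 * write-1-to-clear of exactly the bits that were observed, so an interrupt
 * that becomes pending between the read and the write is not lost. For
 * example, with q = 2 and only bit 0 pending, wdata is 0x200000000000 and
 * the clearing write is 0x200000000001.
 */
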
static inline uint8_t
nix_lf_rq_irq_get_and_clear(struct nix *nix, uint16_t rq)
{
	return nix_lf_q_irq_get_and_clear(nix, rq, NIX_LF_RQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_cq_irq_get_and_clear(struct nix *nix, uint16_t cq)
{
	return nix_lf_q_irq_get_and_clear(nix, cq, NIX_LF_CQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_sq_irq_get_and_clear(struct nix *nix, uint16_t sq)
{
	return nix_lf_q_irq_get_and_clear(nix, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
}

static inline bool
nix_lf_is_sqb_null(struct dev *dev, int q)
{
	bool is_sqb_null = false;
	volatile void *ctx;
	int rc;

	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, &ctx);
	if (rc) {
		plt_err("Failed to get sq context");
	} else if (roc_model_is_cn9k()) {
		is_sqb_null =
			((__io struct nix_sq_ctx_s *)ctx)->next_sqb == 0;
	} else {
		is_sqb_null =
			((__io struct nix_cn10k_sq_ctx_s *)ctx)->next_sqb == 0;
	}

	return is_sqb_null;
}

static inline uint8_t
nix_lf_sq_debug_reg(struct nix *nix, uint32_t off)
{
	uint8_t err = 0;
	uint64_t reg;

	reg = plt_read64(nix->base + off);
	if (reg & BIT_ULL(44)) {
		err = reg & 0xff;
		/* Clear valid bit */
		plt_write64(BIT_ULL(44), nix->base + off);
	}

	return err;
}
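
/*
 * Note (illustrative, not part of the driver): the SQ debug registers read
 * by nix_lf_sq_debug_reg() (NIX_LF_SQ_OP_ERR_DBG, NIX_LF_MNQ_ERR_DBG,
 * NIX_LF_SEND_ERR_DBG) latch one fault at a time: bit 44 acts as a valid
 * flag and bits [7:0] hold the error code. The helper only trusts the low
 * byte when the valid bit is set, then writes bit 44 back
 * (write-1-to-clear) so the next fault can be latched. Caller-side sketch:
 *
 *	uint8_t err = nix_lf_sq_debug_reg(nix, NIX_LF_SEND_ERR_DBG);
 *
 *	if (err)	// non-zero error code implies the valid bit was set
 *		plt_err("send error, errcode 0x%x", err);
 */
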
static void
nix_lf_cq_irq(void *param)
{
	struct nix_qint *cint = (struct nix_qint *)param;
	struct nix *nix = cint->nix;

	/* Clear interrupt */
	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_INT(cint->qintx));
}

static void
nix_lf_q_irq(void *param)
{
	struct nix_qint *qint = (struct nix_qint *)param;
	uint8_t irq, qintx = qint->qintx;
	int q, cq, rq, sq, intr_cb = 0;
	struct nix *nix = qint->nix;
	struct dev *dev = &nix->dev;
	uint64_t intr;
	uint8_t rc;

	intr = plt_read64(nix->base + NIX_LF_QINTX_INT(qintx));
	if (intr == 0)
		return;

	plt_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d", intr, qintx,
		dev->pf, dev->vf);

	/* Handle RQ interrupts */
	for (q = 0; q < nix->nb_rx_queues; q++) {
		rq = q % nix->qints;
		irq = nix_lf_rq_irq_get_and_clear(nix, rq);

		if (irq & BIT_ULL(NIX_RQINT_DROP))
			plt_err("RQ=%d NIX_RQINT_DROP", rq);

		if (irq & BIT_ULL(NIX_RQINT_RED))
			plt_err("RQ=%d NIX_RQINT_RED", rq);
	}

	/* Handle CQ interrupts */
	for (q = 0; q < nix->nb_rx_queues; q++) {
		cq = q % nix->qints;
		irq = nix_lf_cq_irq_get_and_clear(nix, cq);

		if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
			plt_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
			plt_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
			plt_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_CPT_DROP))
			plt_err("CQ=%d NIX_CQERRINT_CPT_DROP", cq);
	}

	/* Handle SQ interrupts */
	for (q = 0; q < nix->nb_tx_queues; q++) {
		sq = q % nix->qints;
		irq = nix_lf_sq_irq_get_and_clear(nix, sq);

		/* Detect LMT store error */
		rc = nix_lf_sq_debug_reg(nix, NIX_LF_SQ_OP_ERR_DBG);
		if (rc)
			plt_err("SQ=%d NIX_SQINT_LMT_ERR, errcode %x", sq, rc);

		/* Detect Meta-descriptor enqueue error */
		rc = nix_lf_sq_debug_reg(nix, NIX_LF_MNQ_ERR_DBG);
		if (rc) {
			plt_err("SQ=%d NIX_SQINT_MNQ_ERR, errcode %x", sq, rc);
			intr_cb = 1;
		}

		/* Detect Send error */
		rc = nix_lf_sq_debug_reg(nix, NIX_LF_SEND_ERR_DBG);
		if (rc)
			plt_err("SQ=%d NIX_SQINT_SEND_ERR, errcode %x", sq, rc);

		/* Detect SQB fault, read SQ context to check SQB NULL case */
		if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL) ||
		    nix_lf_is_sqb_null(dev, q))
			plt_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
	}

	/* Clear interrupt */
	plt_write64(intr, nix->base + NIX_LF_QINTX_INT(qintx));

	/* Dump registers to stdout */
	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);

	/* Call reset callback */
	if (intr_cb && dev->ops->q_err_cb)
		dev->ops->q_err_cb(nix_priv_to_roc_nix(nix), NULL);
}

int
roc_nix_register_queue_irqs(struct roc_nix *roc_nix)
{
	int vec, q, sqs, rqs, qs, rc = 0;
	struct plt_intr_handle *handle;
	struct nix *nix;

	nix = roc_nix_to_nix_priv(roc_nix);
	handle = nix->pci_dev->intr_handle;

	/* Figure out max qintx required */
	rqs = PLT_MIN(nix->qints, nix->nb_rx_queues);
	sqs = PLT_MIN(nix->qints, nix->nb_tx_queues);
	qs = PLT_MAX(rqs, sqs);

	nix->configured_qints = qs;

	nix->qints_mem =
		plt_zmalloc(nix->configured_qints * sizeof(struct nix_qint), 0);
	if (nix->qints_mem == NULL)
		return -ENOMEM;

	for (q = 0; q < qs; q++) {
		vec = nix->msixoff + NIX_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));

		/* Clear interrupt */
		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(q));

		nix->qints_mem[q].nix = nix;
		nix->qints_mem[q].qintx = q;

		/* Sync qints_mem update */
		plt_wmb();

		/* Register queue irq vector */
		rc = dev_irq_register(handle, nix_lf_q_irq, &nix->qints_mem[q],
				      vec);
		if (rc)
			break;

		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix->base + NIX_LF_QINTX_INT(q));
		/* Enable QINT interrupt */
		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1S(q));
	}

	return rc;
}
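
/*
 * Usage sketch (illustrative, not part of the driver): QINT vectors are
 * registered once per configured qint; since several RQs/CQs/SQs can map
 * onto one qint (q % nix->qints in the handler above), the handler rescans
 * every queue when a qint fires. roc_nix_unregister_queue_irqs() is the
 * teardown counterpart. The wrapper below is hypothetical.
 *
 *	static int
 *	example_setup_queue_err_irqs(struct roc_nix *roc_nix)
 *	{
 *		int rc;
 *
 *		// One handler instance (struct nix_qint) per QINT vector
 *		rc = roc_nix_register_queue_irqs(roc_nix);
 *		if (rc)
 *			plt_err("QINT registration failed, rc=%d", rc);
 *		return rc;
 *	}
 */
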
void
roc_nix_unregister_queue_irqs(struct roc_nix *roc_nix)
{
	struct plt_intr_handle *handle;
	struct nix *nix;
	int vec, q;

	nix = roc_nix_to_nix_priv(roc_nix);
	handle = nix->pci_dev->intr_handle;

	for (q = 0; q < nix->configured_qints; q++) {
		vec = nix->msixoff + NIX_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix->base + NIX_LF_QINTX_INT(q));

		/* Clear interrupt */
		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(q));

		/* Unregister queue irq vector */
		dev_irq_unregister(handle, nix_lf_q_irq, &nix->qints_mem[q],
				   vec);
	}
	nix->configured_qints = 0;

	plt_free(nix->qints_mem);
	nix->qints_mem = NULL;
}

int
roc_nix_register_cq_irqs(struct roc_nix *roc_nix)
{
	struct plt_intr_handle *handle;
	int rc = 0, vec, q;
	struct nix *nix;

	nix = roc_nix_to_nix_priv(roc_nix);
	handle = nix->pci_dev->intr_handle;

	nix->configured_cints = PLT_MIN(nix->cints, nix->nb_rx_queues);

	nix->cints_mem =
		plt_zmalloc(nix->configured_cints * sizeof(struct nix_qint), 0);
	if (nix->cints_mem == NULL)
		return -ENOMEM;

	for (q = 0; q < nix->configured_cints; q++) {
		vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;

		/* Clear CINT CNT */
		plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));

		/* Clear interrupt */
		plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));

		nix->cints_mem[q].nix = nix;
		nix->cints_mem[q].qintx = q;

		/* Sync cints_mem update */
		plt_wmb();

		/* Register CQ irq vector */
		rc = dev_irq_register(handle, nix_lf_cq_irq, &nix->cints_mem[q],
				      vec);
		if (rc) {
			plt_err("Failed to register CQ irq, rc=%d", rc);
			return rc;
		}

		rc = plt_intr_vec_list_alloc(handle, "cnxk",
					     nix->configured_cints);
		if (rc) {
			plt_err("Failed to allocate intr vec list, rc=%d", rc);
			return rc;
		}
		/* VFIO vector zero is reserved for misc interrupt so
		 * doing required adjustment. (b13bfab4cd)
		 */
		if (plt_intr_vec_list_index_set(handle, q,
						PLT_INTR_VEC_RXTX_OFFSET + vec))
			return -1;

		/* Configure CQE interrupt coalescing parameters */
		plt_write64(((CQ_CQE_THRESH_DEFAULT) |
			     (CQ_CQE_THRESH_DEFAULT << 32) |
			     (CQ_TIMER_THRESH_DEFAULT << 48)),
			    nix->base + NIX_LF_CINTX_WAIT((q)));

		/* Keep the CQ interrupt disabled here; the Rx interrupt
		 * feature is enabled/disabled on demand.
		 */
	}

	return rc;
}
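
/*
 * Note (illustrative, not part of the driver): per the write above,
 * NIX_LF_CINTX_WAIT() packs the coalescing parameters as a CQE-count
 * threshold in the low field, the same count replicated into the field at
 * bit 32, and a timer threshold in the field at bit 48. A sketch of
 * composing a custom value (cqe_thresh/timer_thresh are hypothetical
 * variables; exact field widths are not spelled out here):
 *
 *	uint64_t wait_val = (uint64_t)cqe_thresh |
 *			    ((uint64_t)cqe_thresh << 32) |
 *			    ((uint64_t)timer_thresh << 48);
 *
 *	plt_write64(wait_val, nix->base + NIX_LF_CINTX_WAIT(q));
 *
 * The CINT itself stays disabled at registration time; it is armed per Rx
 * queue on demand via roc_nix_rx_queue_intr_enable().
 */
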
void
roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix)
{
	struct plt_intr_handle *handle;
	struct nix *nix;
	int vec, q;

	nix = roc_nix_to_nix_priv(roc_nix);
	handle = nix->pci_dev->intr_handle;

	for (q = 0; q < nix->configured_cints; q++) {
		vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;

		/* Clear CINT CNT */
		plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));

		/* Clear interrupt */
		plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));

		/* Unregister queue irq vector */
		dev_irq_unregister(handle, nix_lf_cq_irq, &nix->cints_mem[q],
				   vec);
	}

	plt_intr_vec_list_free(handle);
	plt_free(nix->cints_mem);
}

int
nix_register_irqs(struct nix *nix)
{
	int rc;

	if (nix->msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid NIXLF MSIX vector offset: 0x%x",
			nix->msixoff);
		return NIX_ERR_PARAM;
	}

	/* Register lf err interrupt */
	rc = nix_lf_register_err_irq(nix);
	/* Register RAS interrupt */
	rc |= nix_lf_register_ras_irq(nix);

	return rc;
}
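
/*
 * Ordering sketch (assumption, not taken from this file): nix_register_irqs()
 * and nix_unregister_irqs() are the internal entry points for the per-LF
 * err/RAS vectors and would typically run during LF init/teardown, once the
 * MSI-X offset is known, while the QINT/CINT vectors are registered later
 * through the public APIs above. A hypothetical bring-up order:
 *
 *	rc = nix_register_irqs(nix);			// err + RAS vectors
 *	if (!rc)
 *		rc = roc_nix_register_queue_irqs(roc_nix);	// QINT vectors
 *	if (!rc)
 *		rc = roc_nix_register_cq_irqs(roc_nix);		// CINT vectors
 */
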
void
nix_unregister_irqs(struct nix *nix)
{
	nix_lf_unregister_err_irq(nix);
	nix_lf_unregister_ras_irq(nix);
}