/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define WORK_LIMIT 1000

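/* Drain pending work from the inline dev's SSO GWS: issue a GET_WORK
 * request, poll SSOW_LF_GWS_WQE0 until bit 63 (the get-work pending
 * indication) clears, then hand each WQE to the registered work_cb.
 * Bounded by WORK_LIMIT so the IRQ handler cannot loop indefinitely
 * under sustained traffic.
 */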
static void
nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
{
	uintptr_t getwrk_op = inl_dev->ssow_base + SSOW_LF_GWS_OP_GET_WORK0;
	uintptr_t tag_wqe_op = inl_dev->ssow_base + SSOW_LF_GWS_WQE0;
	uint32_t wdata = BIT(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint16_t cnt = 0;
	uint64_t work;

again:
	/* Try to do get work */
	gw.get_work = wdata;
	plt_write64(gw.u64[0], getwrk_op);
	do {
		roc_load_pair(gw.u64[0], gw.u64[1], tag_wqe_op);
	} while (gw.u64[0] & BIT_ULL(63));

	work = gw.u64[1];
	/* Do we have any work? */
	if (work) {
		if (inl_dev->work_cb)
			inl_dev->work_cb(gw.u64, inl_dev->cb_args, false);
		else
			plt_warn("Undelivered inl dev work gw0: %p gw1: %p",
				 (void *)gw.u64[0], (void *)gw.u64[1]);
		cnt++;
		if (cnt < WORK_LIMIT)
			goto again;
	}

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

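/* Dump NIX LF general, Rx/Tx stat and interrupt summary registers for
 * debugging; called from the error/queue IRQ handlers below.
 */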
static int
nix_inl_nix_reg_dump(struct nix_inl_dev *inl_dev)
{
	uintptr_t nix_base = inl_dev->nix_base;

	/* General registers */
	nix_lf_gen_reg_dump(nix_base, NULL);

	/* Rx, Tx stat registers */
	nix_lf_stat_reg_dump(nix_base, NULL, inl_dev->lf_tx_stats,
			     inl_dev->lf_rx_stats);

	/* Intr registers */
	nix_lf_int_reg_dump(nix_base, NULL, inl_dev->qints, inl_dev->cints);

	return 0;
}

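/* SSO HWGRP IRQ handler: bit 1 of SSO_LF_GGRP_INT flags executable
 * work, which is drained via nix_inl_sso_work_cb(); any other cause is
 * only logged. Writing the read value back clears the interrupt.
 */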
static void
nix_inl_sso_hwgrp_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t sso_base = inl_dev->sso_base;
	uint64_t intr;

	intr = plt_read64(sso_base + SSO_LF_GGRP_INT);
	if (intr == 0)
		return;

	/* Check for work executable interrupt */
	if (intr & BIT(1))
		nix_inl_sso_work_cb(inl_dev);

	if (intr & ~BIT(1))
		plt_err("GGRP 0 GGRP_INT=0x%" PRIx64, intr);

	/* Clear interrupt */
	plt_write64(intr, sso_base + SSO_LF_GGRP_INT);
}

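/* SSO HWS (work slot) IRQ handler: nothing is recoverable here, so the
 * cause is logged and the interrupt cleared.
 */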
static void
nix_inl_sso_hws_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uint64_t intr;

	intr = plt_read64(ssow_base + SSOW_LF_GWS_INT);
	if (intr == 0)
		return;

	plt_err("GWS 0 GWS_INT=0x%" PRIx64, intr);

	/* Clear interrupt */
	plt_write64(intr, ssow_base + SSOW_LF_GWS_INT);
}

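/* Register and enable the SSOW (work slot) and SSO (HWGRP) IRQs for
 * the inline device, and arm the HWGRP interrupt threshold so pending
 * work raises an interrupt after a timeout instead of needing a
 * polling thread.
 */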
int
nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uintptr_t sso_base = inl_dev->sso_base;
	uint16_t sso_msixoff, ssow_msixoff;
	int rc;

	ssow_msixoff = inl_dev->ssow_msixoff;
	sso_msixoff = inl_dev->sso_msixoff;
	if (sso_msixoff == MSIX_VECTOR_INVALID ||
	    ssow_msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid SSO/SSOW MSIX offsets (0x%x, 0x%x)",
			sso_msixoff, ssow_msixoff);
		return -EINVAL;
	}

	/*
	 * Setup SSOW interrupt
	 */

	/* Clear SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
	/* Register interrupt with vfio */
	rc = dev_irq_register(handle, nix_inl_sso_hws_irq, inl_dev,
			      ssow_msixoff + SSOW_LF_INT_VEC_IOP);
	/* Set SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1S);

	/*
	 * Setup SSO/HWGRP interrupt
	 */

	/* Clear SSO interrupt enable */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
	/* Register IRQ */
	rc |= dev_irq_register(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
			       sso_msixoff + SSO_LF_INT_VEC_GRP);
	/* Enable hw interrupt */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1S);

	/* Setup threshold for work exec interrupt to 100us timeout
	 * based on time counter.
	 */
	plt_write64(BIT_ULL(63) | (10ULL << 48), sso_base + SSO_LF_GGRP_INT_THR);

	return rc;
}

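/* Inverse of nix_inl_sso_register_irqs(): mask both interrupt sources,
 * clear the work-exec threshold and unregister the handlers.
 */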
void
nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uintptr_t sso_base = inl_dev->sso_base;
	uint16_t sso_msixoff, ssow_msixoff;

	ssow_msixoff = inl_dev->ssow_msixoff;
	sso_msixoff = inl_dev->sso_msixoff;

	/* Clear SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
	/* Clear SSO/HWGRP interrupt enable */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
	/* Clear SSO threshold */
	plt_write64(0, sso_base + SSO_LF_GGRP_INT_THR);

	/* Unregister IRQ */
	dev_irq_unregister(handle, nix_inl_sso_hws_irq, (void *)inl_dev,
			   ssow_msixoff + SSOW_LF_INT_VEC_IOP);
	dev_irq_unregister(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
			   sso_msixoff + SSO_LF_INT_VEC_GRP);
}

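/* Per-QINT IRQ handler: read and report the QINT cause, then walk all
 * RQs via NIX_LF_RQ_OP_INT (RQ index shifted left by 44 in the atomic
 * operand) to fetch, log and acknowledge DROP/RED events, and finally
 * dump registers and RQ contexts for debug.
 */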
static void
nix_inl_nix_q_irq(void *param)
{
	struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
	struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	uint16_t qint = qints_mem->qint;
	volatile void *ctx;
	uint64_t reg, intr;
	uint64_t wdata;
	uint8_t irq;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
	if (intr == 0)
		return;

	plt_err("Queue_intr=0x%" PRIx64 " qintx %d pf=%d, vf=%d", intr, qint,
		dev->pf, dev->vf);

	/* Handle RQ interrupts */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		/* Get and clear RQ interrupts */
		wdata = (uint64_t)q << 44;
		reg = roc_atomic64_add_nosync(wdata,
					      (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
		if (reg & BIT_ULL(42) /* OP_ERR */) {
			plt_err("Failed to get rq_int");
			return;
		}
		irq = reg & 0xff;
		plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);

		if (irq & BIT_ULL(NIX_RQINT_DROP))
			plt_err("RQ=%d NIX_RQINT_DROP", q);

		if (irq & BIT_ULL(NIX_RQINT_RED))
			plt_err("RQ=%d NIX_RQINT_RED", q);
	}

	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));

	/* Dump registers to std out */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx);
	}
}

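/* RAS (poison) IRQ handler: log and clear the cause, then dump
 * registers and RQ contexts for debug.
 */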
static void
nix_inl_nix_ras_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	volatile void *ctx;
	uint64_t intr;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_RAS);
	if (intr == 0)
		return;

	plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_RAS);

	/* Dump registers to std out */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx);
	}
}

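/* NIX LF error IRQ handler: same flow as the RAS handler, keyed off
 * NIX_LF_ERR_INT instead.
 */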
static void
nix_inl_nix_err_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	volatile void *ctx;
	uint64_t intr;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_ERR_INT);
	if (intr == 0)
		return;

	plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_ERR_INT);

	/* Dump registers to std out */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx);
	}
}

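/* Register NIX LF error, RAS and per-queue (QINT) IRQs. One QINT
 * vector is set up per RQ, capped by the number of QINTs available;
 * per-vector context (inl_dev + qint index) is kept in qints_mem and
 * released in nix_inl_nix_unregister_irqs().
 */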
int
nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t nix_base = inl_dev->nix_base;
	struct nix_inl_qint *qints_mem;
	int rc, q, ret = 0;
	uint16_t msixoff;
	int qints;

	msixoff = inl_dev->nix_msixoff;
	if (msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid NIXLF MSIX vector offset: 0x%x", msixoff);
		return -EINVAL;
	}

	/* Disable err interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
	/* Disable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

	/* Register err irq */
	rc = dev_irq_register(handle, nix_inl_nix_err_irq, inl_dev,
			      msixoff + NIX_LF_INT_VEC_ERR_INT);
	rc |= dev_irq_register(handle, nix_inl_nix_ras_irq, inl_dev,
			       msixoff + NIX_LF_INT_VEC_POISON);

	/* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
	plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
		    nix_base + NIX_LF_ERR_INT_ENA_W1S);
	/* Enable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);

	/* Setup queue irq for RQs */
	qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
	qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
	if (!qints_mem) {
		plt_err("Failed to allocate memory for %d qints", qints);
		return -ENOMEM;
	}

	inl_dev->configured_qints = qints;
	inl_dev->qints_mem = qints_mem;

	for (q = 0; q < qints; q++) {
		/* Clear QINT CNT, interrupt */
		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));

		/* Fill in IRQ context before the vector can fire */
		qints_mem[q].inl_dev = inl_dev;
		qints_mem[q].qint = q;

		/* Register queue irq vector */
		ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
				       msixoff + NIX_LF_INT_VEC_QINT_START + q);
		if (ret)
			break;

		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
		/* Enable QINT interrupt */
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
	}

	rc |= ret;
	return rc;
}

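/* Tear down everything nix_inl_nix_register_irqs() set up: mask and
 * unregister the error/RAS IRQs and each QINT vector, then free the
 * qints_mem context array.
 */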
void
nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
	uintptr_t nix_base = inl_dev->nix_base;
	uint16_t msixoff;
	int q;

	msixoff = inl_dev->nix_msixoff;
	/* Disable err interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
	/* Disable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

	dev_irq_unregister(handle, nix_inl_nix_err_irq, inl_dev,
			   msixoff + NIX_LF_INT_VEC_ERR_INT);
	dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
			   msixoff + NIX_LF_INT_VEC_POISON);

	for (q = 0; q < inl_dev->configured_qints; q++) {
		/* Clear QINT CNT */
		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));

		/* Disable QINT interrupt */
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));

		/* Unregister queue irq vector */
		dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
				   msixoff + NIX_LF_INT_VEC_QINT_START + q);
	}

	plt_free(inl_dev->qints_mem);
	inl_dev->qints_mem = NULL;
}