/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

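/*
 * NPA LF interrupt support: registration and handling of the LF error
 * interrupt, the RAS (poison) interrupt and the per-queue (QINT) error
 * interrupts. The *_ENA_W1S registers enable interrupt lines
 * (write-1-to-set) and the *_ENA_W1C registers disable them
 * (write-1-to-clear).
 */
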
static void
npa_err_irq(void *param)
{
	struct npa_lf *lf = (struct npa_lf *)param;
	uint64_t intr;

	intr = plt_read64(lf->base + NPA_LF_ERR_INT);
	if (intr == 0)
		return;

	plt_err("Err_intr=0x%" PRIx64 "", intr);

	/* Clear interrupt */
	plt_write64(intr, lf->base + NPA_LF_ERR_INT);
}

static int
npa_register_err_irq(struct npa_lf *lf)
{
	struct plt_intr_handle *handle = lf->intr_handle;
	int rc, vec;

	vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;

	/* Disable err interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
	/* Register err interrupt vector */
	rc = dev_irq_register(handle, npa_err_irq, lf, vec);

	/* Enable hw interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1S);

	return rc;
}

static void
npa_unregister_err_irq(struct npa_lf *lf)
{
	struct plt_intr_handle *handle = lf->intr_handle;
	int vec;

	vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;

	/* Disable err interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
	dev_irq_unregister(handle, npa_err_irq, lf, vec);
}

static void
npa_ras_irq(void *param)
{
	struct npa_lf *lf = (struct npa_lf *)param;
	uint64_t intr;

	intr = plt_read64(lf->base + NPA_LF_RAS);
	if (intr == 0)
		return;

	plt_err("Ras_intr=0x%" PRIx64 "", intr);

	/* Clear interrupt */
	plt_write64(intr, lf->base + NPA_LF_RAS);
}

static int
npa_register_ras_irq(struct npa_lf *lf)
{
	struct plt_intr_handle *handle = lf->intr_handle;
	int rc, vec;

	vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;

	/* Disable RAS interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
	/* Register RAS interrupt vector */
	rc = dev_irq_register(handle, npa_ras_irq, lf, vec);
	/* Enable hw interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1S);

	return rc;
}

static void
npa_unregister_ras_irq(struct npa_lf *lf)
{
	int vec;
	struct plt_intr_handle *handle = lf->intr_handle;

	vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;

	/* Disable RAS interrupt */
	plt_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
	dev_irq_unregister(handle, npa_ras_irq, lf, vec);
}

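/*
 * Read and clear the per-queue interrupt bits for pool/aura 'q' through the
 * OP_INT register at offset 'off': the queue index goes into bits [63:44]
 * of the op word, the 64-bit atomic add returns the register contents
 * (bit 42 signals an op error, bits [7:0] carry the interrupt causes), and
 * the causes are written back to the same register to clear them.
 */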
static inline uint8_t
npa_q_irq_get_and_clear(struct npa_lf *lf, uint32_t q, uint32_t off,
			uint64_t mask)
{
	uint64_t reg, wdata;
	uint8_t qint;

	wdata = (uint64_t)q << 44;
	reg = roc_atomic64_add_nosync(wdata, (int64_t *)(lf->base + off));

	if (reg & BIT_ULL(42) /* OP_ERR */) {
		plt_err("Failed to execute irq get off=0x%x", off);
		return 0;
	}

	qint = reg & 0xff;
	wdata &= mask;
	plt_write64(wdata | qint, lf->base + off);

	return qint;
}

static inline uint8_t
npa_pool_irq_get_and_clear(struct npa_lf *lf, uint32_t p)
{
	return npa_q_irq_get_and_clear(lf, p, NPA_LF_POOL_OP_INT, ~0xff00);
}

static inline uint8_t
npa_aura_irq_get_and_clear(struct npa_lf *lf, uint32_t a)
{
	return npa_q_irq_get_and_clear(lf, a, NPA_LF_AURA_OP_INT, ~0xff00);
}

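/*
 * QINT handler: a QINT line aggregates pool and aura error interrupts.
 * Walk the enabled pools and auras, log and clear their per-queue error
 * causes, clear the QINT summary register and dump the NPA context for
 * debugging.
 */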
static void
npa_q_irq(void *param)
{
	struct npa_qint *qint = (struct npa_qint *)param;
	struct npa_lf *lf = qint->lf;
	uint8_t irq, qintx = qint->qintx;
	uint32_t q, pool, aura;
	uint64_t intr;

	intr = plt_read64(lf->base + NPA_LF_QINTX_INT(qintx));
	if (intr == 0)
		return;

	plt_err("queue_intr=0x%" PRIx64 " qintx=%d", intr, qintx);

	/* Handle pool queue interrupts */
	for (q = 0; q < lf->nr_pools; q++) {
		/* Skip disabled POOL */
		if (plt_bitmap_get(lf->npa_bmp, q))
			continue;

		pool = q % lf->qints;
		irq = npa_pool_irq_get_and_clear(lf, pool);

		if (irq & BIT_ULL(NPA_POOL_ERR_INT_OVFLS))
			plt_err("Pool=%d NPA_POOL_ERR_INT_OVFLS", pool);

		if (irq & BIT_ULL(NPA_POOL_ERR_INT_RANGE))
			plt_err("Pool=%d NPA_POOL_ERR_INT_RANGE", pool);

		if (irq & BIT_ULL(NPA_POOL_ERR_INT_PERR))
			plt_err("Pool=%d NPA_POOL_ERR_INT_PERR", pool);
	}

	/* Handle aura queue interrupts */
	for (q = 0; q < lf->nr_pools; q++) {
		/* Skip disabled AURA */
		if (plt_bitmap_get(lf->npa_bmp, q))
			continue;

		aura = q % lf->qints;
		irq = npa_aura_irq_get_and_clear(lf, aura);

		if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_OVER))
			plt_err("Aura=%d NPA_AURA_ERR_INT_ADD_OVER", aura);

		if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_UNDER))
			plt_err("Aura=%d NPA_AURA_ERR_INT_ADD_UNDER", aura);

		if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_FREE_UNDER))
			plt_err("Aura=%d NPA_AURA_ERR_INT_FREE_UNDER", aura);

		if (irq & BIT_ULL(NPA_AURA_ERR_INT_POOL_DIS))
			plt_err("Aura=%d NPA_AURA_ERR_INT_POOL_DIS", aura);
	}

	/* Clear interrupt */
	plt_write64(intr, lf->base + NPA_LF_QINTX_INT(qintx));
	roc_npa_ctx_dump();
}

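/*
 * One struct npa_qint context per QINT line is taken from lf->npa_qint_mem
 * and handed to npa_q_irq(); counters and pending bits are cleared before
 * each line is enabled.
 */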
static int
npa_register_queue_irqs(struct npa_lf *lf)
{
	struct plt_intr_handle *handle = lf->intr_handle;
	int vec, q, qs, rc = 0;

	/* Figure out max qintx required */
	qs = PLT_MIN(lf->qints, lf->nr_pools);

	for (q = 0; q < qs; q++) {
		vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));

		/* Disable QINT interrupt */
		plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));

		struct npa_qint *qintmem = lf->npa_qint_mem;

		qintmem += q;

		qintmem->lf = lf;
		qintmem->qintx = q;

		/* Sync qints_mem update */
		plt_wmb();

		/* Register queue irq vector */
		rc = dev_irq_register(handle, npa_q_irq, qintmem, vec);
		if (rc)
			break;

		plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
		plt_write64(0, lf->base + NPA_LF_QINTX_INT(q));
		/* Enable QINT interrupt */
		plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1S(q));
	}

	return rc;
}

static void
npa_unregister_queue_irqs(struct npa_lf *lf)
{
	struct plt_intr_handle *handle = lf->intr_handle;
	int vec, q, qs;

	/* Figure out max qintx required */
	qs = PLT_MIN(lf->qints, lf->nr_pools);

	for (q = 0; q < qs; q++) {
		vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		plt_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
		plt_write64(0, lf->base + NPA_LF_QINTX_INT(q));

		/* Disable QINT interrupt */
		plt_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));

		struct npa_qint *qintmem = lf->npa_qint_mem;

		qintmem += q;

		/* Unregister queue irq vector */
		dev_irq_unregister(handle, npa_q_irq, qintmem, vec);

		qintmem->lf = NULL;
		qintmem->qintx = 0;
	}
}

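/*
 * Entry points. Illustrative usage only (the callers live elsewhere in the
 * driver): once lf->base, lf->intr_handle, lf->npa_msixoff, lf->npa_qint_mem,
 * lf->qints and lf->nr_pools are populated, a caller would typically do:
 *
 *	rc = npa_register_irqs(lf);
 *	if (rc)
 *		goto fail;
 *	...
 *	npa_unregister_irqs(lf);
 */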
int
npa_register_irqs(struct npa_lf *lf)
{
	int rc;

	if (lf->npa_msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid NPALF MSIX vector offset: 0x%x",
			lf->npa_msixoff);
		return NPA_ERR_PARAM;
	}

	/* Register lf err interrupt */
	rc = npa_register_err_irq(lf);
	/* Register RAS interrupt */
	rc |= npa_register_ras_irq(lf);
	/* Register queue interrupts */
	rc |= npa_register_queue_irqs(lf);

	return rc;
}

void
npa_unregister_irqs(struct npa_lf *lf)
{
	npa_unregister_err_irq(lf);
	npa_unregister_ras_irq(lf);
	npa_unregister_queue_irqs(lf);
}