/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define WORK_LIMIT 1000

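/*
 * Drain pending work from the inline dev SSO group via the GWS GETWORK
 * interface and hand each entry to the registered work_cb. At most
 * WORK_LIMIT entries are consumed per call so the interrupt handler
 * cannot spin forever; leftover work is expected to re-raise the GGRP
 * threshold interrupt and be drained on the next invocation.
 */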
static void
nix_inl_sso_work_cb(struct nix_inl_dev *inl_dev)
{
	uintptr_t getwrk_op = inl_dev->ssow_base + SSOW_LF_GWS_OP_GET_WORK0;
	uintptr_t tag_wqe_op = inl_dev->ssow_base + SSOW_LF_GWS_WQE0;
	uint32_t wdata = BIT(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint16_t cnt = 0;
	uint64_t work;

again:
	/* Issue a GETWORK request */
	gw.get_work = wdata;
	plt_write64(gw.u64[0], getwrk_op);

	/* Poll until the response is ready (bit 63 is the pending bit) */
	do {
		roc_load_pair(gw.u64[0], gw.u64[1], tag_wqe_op);
	} while (gw.u64[0] & BIT_ULL(63));

	work = gw.u64[1];
	/* Do we have any work? */
	if (work) {
		if (inl_dev->work_cb)
			inl_dev->work_cb(gw.u64, inl_dev->cb_args, false);
		else
			plt_warn("Undelivered inl dev work gw0: %p gw1: %p",
				 (void *)gw.u64[0], (void *)gw.u64[1]);
		cnt++;
		if (cnt < WORK_LIMIT)
			goto again;
	}

	inl_dev->sso_work_cnt += cnt;
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

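/* Dump general, Rx/Tx stat and interrupt registers of the inline dev NIX LF */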
static int
nix_inl_nix_reg_dump(struct nix_inl_dev *inl_dev)
{
	uintptr_t nix_base = inl_dev->nix_base;

	/* General registers */
	nix_lf_gen_reg_dump(nix_base, NULL);

	/* Rx, Tx stat registers */
	nix_lf_stat_reg_dump(nix_base, NULL, inl_dev->lf_tx_stats,
			     inl_dev->lf_rx_stats);

	/* Intr registers */
	nix_lf_int_reg_dump(nix_base, NULL, inl_dev->qints, inl_dev->cints);

	return 0;
}

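/*
 * SSO HWGRP (GGRP) interrupt handler. Bit 1 of SSO_LF_GGRP_INT is the
 * work-executable interrupt armed via SSO_LF_GGRP_INT_THR below; it
 * triggers a drain of the group. Any other bit is only logged.
 */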
static void
nix_inl_sso_hwgrp_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t sso_base = inl_dev->sso_base;
	uint64_t intr;

	intr = plt_read64(sso_base + SSO_LF_GGRP_INT);
	if (intr == 0)
		return;

	/* Check for work executable interrupt */
	if (intr & BIT(1))
		nix_inl_sso_work_cb(inl_dev);

	if (intr & ~BIT(1))
		plt_err("GGRP 0 GGRP_INT=0x%" PRIx64, intr);

	/* Clear interrupt */
	plt_write64(intr, sso_base + SSO_LF_GGRP_INT);
}

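/*
 * SSO HWS (GWS) interrupt handler. No GWS interrupt appears to be
 * expected in normal operation, so whatever is latched is logged as an
 * error and cleared.
 */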
static void
nix_inl_sso_hws_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uint64_t intr;

	intr = plt_read64(ssow_base + SSOW_LF_GWS_INT);
	if (intr == 0)
		return;

	plt_err("GWS 0 GWS_INT=0x%" PRIx64, intr);

	/* Clear interrupt */
	plt_write64(intr, ssow_base + SSOW_LF_GWS_INT);
}

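/*
 * Register the SSOW (HWS) and SSO (HWGRP) interrupt vectors for the
 * inline device and arm a time-based work-executable threshold so that
 * work queued to the group raises an interrupt.
 */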
int
nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uintptr_t sso_base = inl_dev->sso_base;
	uint16_t sso_msixoff, ssow_msixoff;
	int rc;

	ssow_msixoff = inl_dev->ssow_msixoff;
	sso_msixoff = inl_dev->sso_msixoff;
	if (sso_msixoff == MSIX_VECTOR_INVALID ||
	    ssow_msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid SSO/SSOW MSIX offsets (0x%x, 0x%x)",
			sso_msixoff, ssow_msixoff);
		return -EINVAL;
	}

	/*
	 * Setup SSOW interrupt
	 */

	/* Clear SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
	/* Register interrupt with vfio */
	rc = dev_irq_register(handle, nix_inl_sso_hws_irq, inl_dev,
			      ssow_msixoff + SSOW_LF_INT_VEC_IOP);
	/* Set SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1S);

	/*
	 * Setup SSO/HWGRP interrupt
	 */

	/* Clear SSO interrupt enable */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
	/* Register IRQ */
	rc |= dev_irq_register(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
			       sso_msixoff + SSO_LF_INT_VEC_GRP);
	/* Enable hw interrupt */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1S);

	/* Setup threshold for work exec interrupt to 100us timeout
	 * based on time counter.
	 */
	plt_write64(BIT_ULL(63) | (10ULL << 48),
		    sso_base + SSO_LF_GGRP_INT_THR);

	return rc;
}

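/* Mask SSOW/SSO interrupts, clear the threshold and unregister both vectors */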
void
nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t ssow_base = inl_dev->ssow_base;
	uintptr_t sso_base = inl_dev->sso_base;
	uint16_t sso_msixoff, ssow_msixoff;

	ssow_msixoff = inl_dev->ssow_msixoff;
	sso_msixoff = inl_dev->sso_msixoff;

	/* Clear SSOW interrupt enable */
	plt_write64(~0ull, ssow_base + SSOW_LF_GWS_INT_ENA_W1C);
	/* Clear SSO/HWGRP interrupt enable */
	plt_write64(~0ull, sso_base + SSO_LF_GGRP_INT_ENA_W1C);
	/* Clear SSO threshold */
	plt_write64(0, sso_base + SSO_LF_GGRP_INT_THR);

	/* Unregister IRQ */
	dev_irq_unregister(handle, nix_inl_sso_hws_irq, (void *)inl_dev,
			   ssow_msixoff + SSOW_LF_INT_VEC_IOP);
	dev_irq_unregister(handle, nix_inl_sso_hwgrp_irq, (void *)inl_dev,
			   sso_msixoff + SSO_LF_INT_VEC_GRP);
}

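/*
 * Per-QINT interrupt handler. NIX_LF_RQ_OP_INT is accessed through an
 * atomic add in which the operand selects the RQ:
 *
 *	wdata = (uint64_t)rq_idx << 44;
 *	reg = roc_atomic64_add_nosync(wdata, ...NIX_LF_RQ_OP_INT);
 *
 * The result carries OP_ERR in bit 42 and the latched per-RQ interrupt
 * bits in [7:0]; writing those bits back with the same rq_idx clears
 * them. The RQ contexts are then dumped for debugging.
 */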
static void
nix_inl_nix_q_irq(void *param)
{
	struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
	struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	uint16_t qint = qints_mem->qint;
	volatile void *ctx;
	uint64_t reg, intr;
	uint64_t wdata;
	uint8_t irq;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
	if (intr == 0)
		return;

	plt_err("Queue_intr=0x%" PRIx64 " qintx %d pf=%d, vf=%d", intr, qint,
		dev->pf, dev->vf);

	/* Handle RQ interrupts */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		/* Get and clear RQ interrupts */
		wdata = (uint64_t)q << 44;
		reg = roc_atomic64_add_nosync(wdata,
					      (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
		if (reg & BIT_ULL(42) /* OP_ERR */) {
			plt_err("Failed to get rq_int");
			return;
		}
		irq = reg & 0xff;
		plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);

		if (irq & BIT_ULL(NIX_RQINT_DROP))
			plt_err("RQ=%d NIX_RQINT_DROP", q);

		if (irq & BIT_ULL(NIX_RQINT_RED))
			plt_err("RQ=%d NIX_RQINT_RED", q);
	}

	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));

	/* Dump registers to stdout */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx, NULL);
	}
}

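/* RAS (poison) interrupt handler: log, clear and dump LF state for debug */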
static void
nix_inl_nix_ras_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	volatile void *ctx;
	uint64_t intr;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_RAS);
	if (intr == 0)
		return;

	plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_RAS);

	/* Dump registers to stdout */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx, NULL);
	}
}

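/* NIX LF error interrupt handler: log, clear and dump LF state for debug */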
static void
nix_inl_nix_err_irq(void *param)
{
	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
	uintptr_t nix_base = inl_dev->nix_base;
	struct dev *dev = &inl_dev->dev;
	volatile void *ctx;
	uint64_t intr;
	int rc, q;

	intr = plt_read64(nix_base + NIX_LF_ERR_INT);
	if (intr == 0)
		return;

	plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Clear interrupt */
	plt_write64(intr, nix_base + NIX_LF_ERR_INT);

	/* Dump registers to stdout */
	nix_inl_nix_reg_dump(inl_dev);

	/* Dump RQs */
	for (q = 0; q < inl_dev->nb_rqs; q++) {
		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
		if (rc) {
			plt_err("Failed to get rq %d context, rc=%d", q, rc);
			continue;
		}
		nix_lf_rq_dump(ctx, NULL);
	}
}

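/*
 * Register error, RAS and per-queue (QINT) interrupt vectors for the
 * inline device NIX LF. One QINT vector is set up per RQ, capped at the
 * number of QINTs the LF provides; each vector gets its own
 * nix_inl_qint context so the handler knows which QINT fired.
 */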
int
nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	uintptr_t nix_base = inl_dev->nix_base;
	struct nix_inl_qint *qints_mem;
	int rc, q, ret = 0;
	uint16_t msixoff;
	int qints;

	msixoff = inl_dev->nix_msixoff;
	if (msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid NIXLF MSIX vector offset: 0x%x", msixoff);
		return -EINVAL;
	}

	/* Disable err interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
	/* Disable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

	/* Register err irq */
	rc = dev_irq_register(handle, nix_inl_nix_err_irq, inl_dev,
			      msixoff + NIX_LF_INT_VEC_ERR_INT);
	rc |= dev_irq_register(handle, nix_inl_nix_ras_irq, inl_dev,
			       msixoff + NIX_LF_INT_VEC_POISON);

	/* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
	plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
		    nix_base + NIX_LF_ERR_INT_ENA_W1S);
	/* Enable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);

	/* Setup queue irq for RQ's */
	qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
	qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
	if (!qints_mem) {
		plt_err("Failed to allocate memory for %d qints", qints);
		return -ENOMEM;
	}

	inl_dev->configured_qints = qints;
	inl_dev->qints_mem = qints_mem;

	for (q = 0; q < qints; q++) {
		/* Fill in the context before the vector can fire */
		qints_mem[q].inl_dev = inl_dev;
		qints_mem[q].qint = q;

		/* Clear QINT CNT, interrupt */
		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));

		/* Register queue irq vector */
		ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
				       msixoff + NIX_LF_INT_VEC_QINT_START + q);
		if (ret)
			break;

		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
		/* Enable QINT interrupt */
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
	}

	rc |= ret;
	return rc;
}

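/*
 * Mask error/RAS interrupts, tear down every registered QINT vector and
 * free the per-QINT contexts allocated at registration time.
 */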
void
nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
{
	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
	struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
	uintptr_t nix_base = inl_dev->nix_base;
	uint16_t msixoff;
	int q;

	msixoff = inl_dev->nix_msixoff;
	/* Disable err interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_ERR_INT_ENA_W1C);
	/* Disable RAS interrupts */
	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1C);

	dev_irq_unregister(handle, nix_inl_nix_err_irq, inl_dev,
			   msixoff + NIX_LF_INT_VEC_ERR_INT);
	dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
			   msixoff + NIX_LF_INT_VEC_POISON);

	for (q = 0; q < inl_dev->configured_qints; q++) {
		/* Clear QINT CNT */
		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));

		/* Disable QINT interrupt */
		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));

		/* Unregister queue irq vector */
		dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
				   msixoff + NIX_LF_INT_VEC_QINT_START + q);
	}

	plt_free(inl_dev->qints_mem);
	inl_dev->qints_mem = NULL;
}