xref: /dpdk/drivers/common/cnxk/roc_nix_inl_dev.c (revision edc13af9a8bbdca92bd8974165df2ff7049f45b7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include "roc_api.h"
6 #include "roc_priv.h"
7 
8 #include <unistd.h>
9 
10 #define NIX_AURA_DROP_PC_DFLT 40
11 
12 /* Default Rx Config for Inline NIX LF */
13 #define NIX_INL_LF_RX_CFG                                                      \
14 	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
15 	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
16 	 ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_OL3)
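/* The flags above roughly select: drop packets with receive errors
 * (DROP_RE), enable L2 and inner/outer L3 length error checks, and
 * disable alignment padding on Rx (DIS_APAD).
 */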
17 
18 #define INL_NIX_RX_STATS(val) plt_read64(inl_dev->nix_base + NIX_LF_RX_STATX(val))
19 
20 extern uint32_t soft_exp_consumer_cnt;
21 static bool soft_exp_poll_thread_exit = true;
22 
23 uint16_t
24 nix_inl_dev_pffunc_get(void)
25 {
26 	struct idev_cfg *idev = idev_get_cfg();
27 	struct nix_inl_dev *inl_dev;
28 
29 	if (idev != NULL) {
30 		inl_dev = idev->nix_inl_dev;
31 		if (inl_dev)
32 			return inl_dev->dev.pf_func;
33 	}
34 	return 0;
35 }
36 
37 uint16_t
38 roc_nix_inl_dev_pffunc_get(void)
39 {
40 	return nix_inl_dev_pffunc_get();
41 }
42 
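/* Selftest work callback: the selftest enqueues two tagged work items to
 * the inline device's SSO HWGRP. gw[1] carries the work pointer and bit 0
 * of gw[0] (the tag) selects which slot of the result array to fill.
 */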
43 static void
44 nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
45 {
46 	uintptr_t work = gw[1];
47 
48 	(void)soft_exp_event;
49 	*((uintptr_t *)args + (gw[0] & 0x1)) = work;
50 
51 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
52 }
53 
54 static int
55 nix_inl_selftest(void)
56 {
57 	struct idev_cfg *idev = idev_get_cfg();
58 	roc_nix_inl_sso_work_cb_t save_cb;
59 	static uintptr_t work_arr[2];
60 	struct nix_inl_dev *inl_dev;
61 	void *save_cb_args;
62 	uint64_t add_work0;
63 	int rc = 0;
64 
65 	if (idev == NULL)
66 		return -ENOTSUP;
67 
68 	inl_dev = idev->nix_inl_dev;
69 	if (inl_dev == NULL)
70 		return -ENOTSUP;
71 
72 	plt_info("Performing nix inl self test");
73 
74 	/* Save the current work callback and switch to the selftest callback */
75 	save_cb = inl_dev->work_cb;
76 	save_cb_args = inl_dev->cb_args;
77 	inl_dev->work_cb = nix_inl_selftest_work_cb;
78 	inl_dev->cb_args = work_arr;
79 
80 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
81 
82 #define WORK_MAGIC1 0x335577ff0
83 #define WORK_MAGIC2 0xdeadbeef0
84 
85 	/* Add work */
86 	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
87 	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
88 	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
89 	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);
90 
91 	plt_delay_ms(10000);
92 
93 	/* Check if we got expected work */
94 	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
95 		plt_err("Failed to get expected work, [0]=%p [1]=%p",
96 			(void *)work_arr[0], (void *)work_arr[1]);
97 		rc = -EFAULT;
98 		goto exit;
99 	}
100 
101 	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
102 		 (void *)work_arr[1]);
103 
104 exit:
105 	/* Restore state */
106 	inl_dev->work_cb = save_cb;
107 	inl_dev->cb_args = save_cb_args;
108 	return rc;
109 }
110 
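/* Request a CPT CTX cache sync via mailbox so that SA context cached by
 * CPT is flushed back to memory; used during inline device teardown
 * before the inbound SA memory is released.
 */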
111 static int
112 nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
113 {
114 	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
115 	struct msg_req *req;
116 	int rc;
117 
118 	req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
119 	if (req == NULL) {
120 		rc = -ENOSPC;
121 		goto exit;
122 	}
123 
124 	rc = mbox_process(mbox);
125 exit:
126 	mbox_put(mbox);
127 	return rc;
128 }
129 
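/* Program (or clear) the NIX inline IPsec LF configuration: inbound SA
 * table base, per-SA size as a power of two, SA index width and maximum
 * frame size. sa_idx_w is log2 of the SA count; for example, an
 * inb_spi_mask of 0xFFF gives max_sa = 0x1000 and sa_idx_w = 12.
 */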
130 static int
131 nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
132 {
133 	struct nix_inline_ipsec_lf_cfg *lf_cfg;
134 	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
135 	uint64_t max_sa;
136 	uint32_t sa_w;
137 	int rc;
138 
139 	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
140 	if (lf_cfg == NULL) {
141 		rc = -ENOSPC;
142 		goto exit;
143 	}
144 
145 	if (ena) {
146 
147 		max_sa = inl_dev->inb_spi_mask + 1;
148 		sa_w = plt_log2_u32(max_sa);
149 
150 		lf_cfg->enable = 1;
151 		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
152 		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
153 		/* CN9K SA size is different */
154 		if (roc_model_is_cn9k())
155 			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
156 		else
157 			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
158 		lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
159 		lf_cfg->ipsec_cfg0.sa_pow2_size =
160 			plt_log2_u32(inl_dev->inb_sa_sz);
161 
162 		lf_cfg->ipsec_cfg0.tag_const = 0;
163 		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
164 	} else {
165 		lf_cfg->enable = 0;
166 	}
167 
168 	rc = mbox_process(mbox);
169 exit:
170 	mbox_put(mbox);
171 	return rc;
172 }
173 
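/* Attach and initialize the inline device's CPT LF (SE, SE_IE and AE
 * engine groups). This LF is mainly used to submit control opcodes such
 * as SA writes/flushes. On parts with the CTX fetch errata, the context
 * inline length is derived from the inbound SA HW size.
 */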
174 static int
175 nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
176 {
177 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
178 	struct dev *dev = &inl_dev->dev;
179 	bool ctx_ilen_valid = false;
180 	uint8_t eng_grpmask;
181 	uint8_t ctx_ilen = 0;
182 	int rc;
183 
184 	if (!inl_dev->attach_cptlf)
185 		return 0;
186 
187 	/* Alloc CPT LF */
188 	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
189 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
190 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
191 	if (roc_errata_cpt_has_ctx_fetch_issue()) {
192 		ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1;
193 		ctx_ilen_valid = true;
194 	}
195 
196 	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
197 			   ctx_ilen);
198 	if (rc) {
199 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
200 		return rc;
201 	}
202 
203 	/* Setup CPT LF for submitting control opcode */
204 	lf = &inl_dev->cpt_lf;
205 	lf->lf_id = 0;
206 	lf->nb_desc = 0; /* Set to default */
207 	lf->dev = &inl_dev->dev;
208 	lf->msixoff = inl_dev->cpt_msixoff;
209 	lf->pci_dev = inl_dev->pci_dev;
210 
211 	rc = cpt_lf_init(lf);
212 	if (rc) {
213 		plt_err("Failed to initialize CPT LF, rc=%d", rc);
214 		goto lf_free;
215 	}
216 
217 	roc_cpt_iq_enable(lf);
218 	return 0;
219 lf_free:
220 	rc |= cpt_lfs_free(dev);
221 	return rc;
222 }
223 
224 static int
225 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
226 {
227 	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
228 	struct dev *dev = &inl_dev->dev;
229 	int rc;
230 
231 	if (!inl_dev->attach_cptlf)
232 		return 0;
233 
234 	/* Cleanup CPT LF queue */
235 	cpt_lf_fini(lf);
236 
237 	/* Free LF resources */
238 	rc = cpt_lfs_free(dev);
239 	if (!rc)
240 		lf->dev = NULL;
241 	else
242 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
243 	return rc;
244 }
245 
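/* SSO setup for the inline device: allocate one SSOW HWS and one SSO
 * HWGRP, size the XAQ aura from the in-unit entry count, register SSO
 * error and work IRQs, link the HWS to the HWGRP and finally enable the
 * HWGRP via SSO_LF_GGRP_QCTL.
 */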
246 static int
247 nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
248 {
249 	struct sso_lf_alloc_rsp *sso_rsp;
250 	struct dev *dev = &inl_dev->dev;
251 	uint16_t hwgrp[1] = {0};
252 	int rc;
253 
254 	/* Alloc SSOW LF */
255 	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
256 	if (rc) {
257 		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
258 		return rc;
259 	}
260 
261 	/* Alloc HWGRP LF */
262 	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
263 	if (rc) {
264 		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
265 		goto free_ssow;
266 	}
267 
268 	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
269 	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
270 	inl_dev->iue = sso_rsp->in_unit_entries;
271 
272 	inl_dev->nb_xae = inl_dev->iue;
273 	rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
274 				     inl_dev->xae_waes, inl_dev->xaq_buf_size,
275 				     1);
276 	if (rc) {
277 		plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
278 		goto free_sso;
279 	}
280 
281 	/* Setup XAQ for HWGRPs */
282 	rc = sso_hwgrp_alloc_xaq(dev, roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
283 	if (rc) {
284 		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
285 		goto destroy_pool;
286 	}
287 
288 	/* Register SSO, SSOW error and work IRQs */
289 	rc = nix_inl_sso_register_irqs(inl_dev);
290 	if (rc) {
291 		plt_err("Failed to register sso irq's, rc=%d", rc);
292 		goto release_xaq;
293 	}
294 
295 	/* Setup hwgrp->hws link */
296 	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, true);
297 
298 	/* Enable HWGRP */
299 	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
300 
301 	return 0;
302 
303 release_xaq:
304 	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
305 destroy_pool:
306 	sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
307 free_sso:
308 	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
309 free_ssow:
310 	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
311 	return rc;
312 }
313 
314 static int
315 nix_inl_sso_release(struct nix_inl_dev *inl_dev)
316 {
317 	uint16_t hwgrp[1] = {0};
318 
319 	/* Disable HWGRP */
320 	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
321 
322 	/* Unregister SSO/SSOW IRQs */
323 	nix_inl_sso_unregister_irqs(inl_dev);
324 
325 	/* Unlink hws */
326 	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, false);
327 
328 	/* Release XAQ aura */
329 	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
330 
331 	/* Free SSO, SSOW LFs */
332 	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
333 	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);
334 
335 	/* Free the XAQ aura */
336 	sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);
337 
338 	return 0;
339 }
340 
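/* NIX LF setup for the inline device. The inbound SA table is sized to
 * the next power of two of the SPI range: max_sa =
 * plt_align32pow2(max_spi - min_spi + 1). For example, a 0..4095 SPI
 * range gives 4096 SAs and an SPI mask of 0xFFF. The table is allocated
 * contiguously with ROC_NIX_INL_SA_BASE_ALIGN alignment and handed to
 * hardware via nix_inl_nix_ipsec_cfg().
 */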
341 static int
342 nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
343 {
344 	uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
345 	uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
346 	struct dev *dev = &inl_dev->dev;
347 	struct mbox *mbox = dev->mbox;
348 	struct nix_lf_alloc_rsp *rsp;
349 	struct nix_lf_alloc_req *req;
350 	struct nix_hw_info *hw_info;
351 	struct roc_nix_rq *rqs;
352 	uint64_t max_sa, i;
353 	size_t inb_sa_sz;
354 	int rc = -ENOSPC;
355 	void *sa;
356 
357 	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
358 
359 	/* Alloc NIX LF and its RQ/SQ/CQ resources */
360 	req = mbox_alloc_msg_nix_lf_alloc(mbox_get(mbox));
361 	if (req == NULL) {
362 		mbox_put(mbox);
363 		return rc;
364 	}
365 	/* One RQ per port is used unless channel masking is in effect */
366 	req->rq_cnt = inl_dev->nb_rqs;
367 	req->sq_cnt = 1;
368 	req->cq_cnt = 1;
369 	/* XQESZ is W16 */
370 	req->xqe_sz = NIX_XQESZ_W16;
371 	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
372 	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
373 	req->rss_grps = ROC_NIX_RSS_GRPS;
374 	req->npa_func = idev_npa_pffunc_get();
375 	req->sso_func = dev->pf_func;
376 	req->rx_cfg = NIX_INL_LF_RX_CFG;
377 	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
378 
379 	if (roc_errata_nix_has_no_drop_re())
380 		req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;
381 
382 	rc = mbox_process_msg(mbox, (void *)&rsp);
383 	if (rc) {
384 		plt_err("Failed to alloc lf, rc=%d", rc);
385 		mbox_put(mbox);
386 		return rc;
387 	}
388 
389 	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
390 	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
391 	inl_dev->qints = rsp->qints;
392 	inl_dev->cints = rsp->cints;
393 	mbox_put(mbox);
394 
395 	/* Get VWQE info if supported */
396 	if (roc_model_is_cn10k()) {
397 		mbox_alloc_msg_nix_get_hw_info(mbox_get(mbox));
398 		rc = mbox_process_msg(mbox, (void *)&hw_info);
399 		if (rc) {
400 			plt_err("Failed to get HW info, rc=%d", rc);
401 			mbox_put(mbox);
402 			goto lf_free;
403 		}
404 		inl_dev->vwqe_interval = hw_info->vwqe_delay;
405 		mbox_put(mbox);
406 	}
407 
408 	/* Register nix interrupts */
409 	rc = nix_inl_nix_register_irqs(inl_dev);
410 	if (rc) {
411 		plt_err("Failed to register nix irq's, rc=%d", rc);
412 		goto lf_free;
413 	}
414 
415 	/* CN9K SA size is different */
416 	if (roc_model_is_cn9k())
417 		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
418 	else
419 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
420 
421 	/* Alloc contiguous memory for inbound SAs */
422 	inl_dev->inb_sa_sz = inb_sa_sz;
423 	inl_dev->inb_spi_mask = max_sa - 1;
424 	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
425 					   ROC_NIX_INL_SA_BASE_ALIGN);
426 	if (!inl_dev->inb_sa_base) {
427 		plt_err("Failed to allocate memory for Inbound SA");
428 		rc = -ENOMEM;
429 		goto unregister_irqs;
430 	}
431 
432 	if (roc_model_is_cn10k()) {
433 		for (i = 0; i < max_sa; i++) {
434 			sa = ((uint8_t *)inl_dev->inb_sa_base) +
435 			     (i * inb_sa_sz);
436 			roc_ot_ipsec_inb_sa_init(sa, true);
437 		}
438 	}
439 	/* Setup device-specific inbound SA table */
440 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
441 	if (rc) {
442 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
443 		goto free_mem;
444 	}
445 
446 	/* Allocate memory for RQs */
447 	rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
448 	if (!rqs) {
449 		plt_err("Failed to allocate memory for RQ's");
		rc = -ENOMEM;
450 		goto free_mem;
451 	}
452 	inl_dev->rqs = rqs;
453 
454 	return 0;
455 free_mem:
456 	plt_free(inl_dev->inb_sa_base);
457 	inl_dev->inb_sa_base = NULL;
458 unregister_irqs:
459 	nix_inl_nix_unregister_irqs(inl_dev);
460 lf_free:
461 	mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
462 	rc |= mbox_process(mbox);
463 	mbox_put(mbox);
464 	return rc;
465 }
466 
467 static int
468 nix_inl_nix_release(struct nix_inl_dev *inl_dev)
469 {
470 	struct dev *dev = &inl_dev->dev;
471 	struct mbox *mbox = dev->mbox;
472 	struct nix_lf_free_req *req;
473 	struct ndc_sync_op *ndc_req;
474 	int rc = -ENOSPC;
475 
476 	/* Disable Inbound processing */
477 	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
478 	if (rc)
479 		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);
480 
481 	/* Sync NDC-NIX for LF */
482 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
483 	if (ndc_req == NULL) {
484 		mbox_put(mbox);
485 		return -ENOSPC;
486 	}
487 	ndc_req->nix_lf_rx_sync = 1;
488 	rc = mbox_process(mbox);
489 	if (rc)
490 		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);
491 	mbox_put(mbox);
492 
493 	/* Unregister IRQs */
494 	nix_inl_nix_unregister_irqs(inl_dev);
495 
496 	/* By default, all associated MCAM rules are deleted on LF free */
497 	req = mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
498 	if (req == NULL) {
499 		mbox_put(mbox);
500 		return -ENOSPC;
501 	}
502 
503 	rc = mbox_process(mbox);
504 	if (rc) {
505 		mbox_put(mbox);
506 		return rc;
507 	}
508 	mbox_put(mbox);
509 
510 	plt_free(inl_dev->rqs);
511 	plt_free(inl_dev->inb_sa_base);
512 	inl_dev->rqs = NULL;
513 	inl_dev->inb_sa_base = NULL;
514 	return 0;
515 }
516 
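/* Attach NIX LF, SSO HWS/HWGRP and (optionally) a CPT LF to this device
 * through the RVU AF mailbox, then fetch their MSIX vector offsets.
 * BAR2 addresses are derived as bar2 + (block_addr << 20), i.e. each RVU
 * block's LF region sits at a 1 MB-aligned BAR2 offset.
 */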
517 static int
518 nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
519 {
520 	struct msix_offset_rsp *msix_rsp;
521 	struct dev *dev = &inl_dev->dev;
522 	struct mbox *mbox = mbox_get(dev->mbox);
523 	struct rsrc_attach_req *req;
524 	uint64_t nix_blkaddr;
525 	int rc = -ENOSPC;
526 
527 	req = mbox_alloc_msg_attach_resources(mbox);
528 	if (req == NULL)
529 		goto exit;
530 	req->modify = true;
531 	/* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
532 	req->nixlf = true;
533 	req->ssow = 1;
534 	req->sso = 1;
535 	if (inl_dev->attach_cptlf) {
536 		req->cptlfs = 1;
537 		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
538 	}
539 
540 	rc = mbox_process(dev->mbox);
541 	if (rc)
542 		goto exit;
543 
544 	/* Get MSIX vector offsets */
545 	mbox_alloc_msg_msix_offset(mbox);
546 	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
547 	if (rc)
548 		goto exit;
549 
550 	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
551 	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
552 	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
553 	inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];
554 
555 	nix_blkaddr = nix_get_blkaddr(dev);
556 	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
557 
558 	/* Update base addresses for LFs */
559 	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
560 	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
561 	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
562 	inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);
563 
564 	rc = 0;
565 exit:
566 	mbox_put(mbox);
567 	return rc;
568 }
569 
570 static int
571 nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
572 {
573 	struct dev *dev = &inl_dev->dev;
574 	struct mbox *mbox = mbox_get(dev->mbox);
575 	struct rsrc_detach_req *req;
576 	int rc = -ENOSPC;
577 
578 	req = mbox_alloc_msg_detach_resources(mbox);
579 	if (req == NULL)
580 		goto exit;
581 	req->partial = true;
582 	req->nixlf = true;
583 	req->ssow = true;
584 	req->sso = true;
585 	req->cptlfs = !!inl_dev->attach_cptlf;
586 
587 	rc = mbox_process(dev->mbox);
588 exit:
589 	mbox_put(mbox);
590 	return rc;
591 }
592 
593 static int
594 nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
595 {
596 	uintptr_t sso_base = inl_dev->sso_base;
597 	int wait_ms = 3000;
598 
599 	while (wait_ms > 0) {
600 		/* Break when empty */
601 		if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
602 		    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
603 			return 0;
604 
605 		plt_delay_us(1000);
606 		wait_ms -= 1;
607 	}
608 
609 	return -ETIMEDOUT;
610 }
611 
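/* Grow the inline device's XAQ pool when a new packet pool aura starts
 * using it, so that the XAQ can back an event for every buffer in that
 * aura. RQs are disabled and the SSO drained first, nb_xae is bumped by
 * the aura's buffer limit, the XAQ aura is re-created, and RQs are
 * re-enabled. Passing an aura_handle of 0 first resets nb_xae to the
 * in-unit entry count.
 */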
612 int
613 roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
614 {
615 	struct idev_cfg *idev = idev_get_cfg();
616 	struct nix_inl_dev *inl_dev;
617 	int rc, i;
618 
619 	if (idev == NULL)
620 		return 0;
621 
622 	inl_dev = idev->nix_inl_dev;
623 	/* Nothing to do if no inline device */
624 	if (!inl_dev)
625 		return 0;
626 
627 	if (!aura_handle) {
628 		inl_dev->nb_xae = inl_dev->iue;
629 		goto no_pool;
630 	}
631 
632 	/* Nothing to do if this aura is already accounted for */
633 	for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
634 		if (inl_dev->pkt_pools[i] == aura_handle)
635 			return 0;
636 	}
637 
638 no_pool:
639 	/* Disable RQs that are in use */
640 	for (i = 0; i < inl_dev->nb_rqs; i++) {
641 		if (!inl_dev->rqs[i].inl_dev_refs)
642 			continue;
643 		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
644 		if (rc) {
645 			plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
646 				rc);
647 			return rc;
648 		}
649 	}
650 
651 	/* Wait for in-flight events to be drained */
652 	rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
653 	if (rc) {
654 		plt_err("Timeout waiting for inline device event cleanup");
655 		goto exit;
656 	}
657 
658 	/* Disable HWGRP */
659 	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
660 
661 	inl_dev->pkt_pools_cnt++;
662 	inl_dev->pkt_pools =
663 		plt_realloc(inl_dev->pkt_pools,
664 			    sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
665 	if (!inl_dev->pkt_pools)
666 		inl_dev->pkt_pools_cnt = 0;
667 	else
668 		inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
669 	inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);
670 
671 	/* Realloc XAQ aura */
672 	rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
673 				     inl_dev->nb_xae, inl_dev->xae_waes,
674 				     inl_dev->xaq_buf_size, 1);
675 	if (rc) {
676 		plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
677 		return rc;
678 	}
679 
680 	/* Setup XAQ for HWGRPs */
681 	rc = sso_hwgrp_alloc_xaq(&inl_dev->dev,
682 				 roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
683 	if (rc) {
684 		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
685 		return rc;
686 	}
687 
688 	/* Enable HWGRP */
689 	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
690 
691 exit:
692 	/* Re-enable RQs */
693 	for (i = 0; i < inl_dev->nb_rqs; i++) {
694 		if (!inl_dev->rqs[i].inl_dev_refs)
695 			continue;
696 
697 		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
698 		if (rc)
699 			plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
700 				rc);
701 	}
702 
703 	return rc;
704 }
705 
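/* Poll one SA soft-expiry error ring. Word 0 of the ring holds the
 * head/tail positions; each subsequent word, once non-zero, encodes the
 * outbound SA address as (data1 << 51) | (data0 << 7). For every valid
 * entry the registered work callback is invoked with a soft-expiry event
 * ((port_id << 8) | 0x1), the slot is cleared and the consumer position
 * in word 0 is advanced.
 */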
706 static void
707 inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
708 {
709 	union roc_ot_ipsec_err_ring_head head;
710 	struct roc_ot_ipsec_outb_sa *sa;
711 	uint16_t head_l, tail_l;
712 	uint64_t *ring_base;
713 	uint32_t port_id;
714 
715 	port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
716 	ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
717 	if (!ring_base) {
718 		plt_err("Invalid soft exp ring base");
719 		return;
720 	}
721 
722 	head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
723 	head_l = head.s.head_pos;
724 	tail_l = head.s.tail_pos;
725 
726 	while (tail_l != head_l) {
727 		union roc_ot_ipsec_err_ring_entry entry;
728 		int poll_counter = 0;
729 
730 		while (poll_counter++ <
731 		       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
732 			plt_delay_us(20);
733 			entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
734 						    __ATOMIC_ACQUIRE);
735 			if (likely(entry.u64))
736 				break;
737 		}
738 
739 		entry.u64 = plt_be_to_cpu_64(entry.u64);
740 		sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
741 						      << 51) |
742 						     (entry.s.data0 << 7));
743 
744 		if (sa != NULL) {
745 			uint64_t tmp = ~(uint32_t)0x0;
746 			inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
747 			__atomic_store_n(ring_base + tail_l + 1, 0ULL,
748 					 __ATOMIC_RELAXED);
749 			__atomic_fetch_add((uint32_t *)ring_base, 1,
750 					   __ATOMIC_ACQ_REL);
751 		} else
752 			plt_err("Invalid SA");
753 
754 		tail_l++;
755 	}
756 }
757 
758 static uint32_t
759 nix_inl_outb_poll_thread(void *args)
760 {
761 	struct nix_inl_dev *inl_dev = args;
762 	uint32_t poll_freq;
763 	uint32_t i;
764 	bool bit;
765 
766 	poll_freq = inl_dev->soft_exp_poll_freq;
767 
768 	while (!soft_exp_poll_thread_exit) {
769 		if (soft_exp_consumer_cnt) {
770 			for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
771 				bit = plt_bitmap_get(
772 					inl_dev->soft_exp_ring_bmap, i);
773 				if (bit)
774 					inl_outb_soft_exp_poll(inl_dev, i);
775 			}
776 		}
777 		usleep(poll_freq);
778 	}
779 
780 	return 0;
781 }
782 
783 static int
784 nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
785 {
786 	struct plt_bitmap *bmap;
787 	size_t bmap_sz;
788 	uint32_t i;
789 	void *mem;
790 	int rc;
791 
792 	/* Allocate a bitmap that the poll thread uses to find which
793 	 * soft expiry rings (and hence which port_ids) are active
794 	 */
795 	bmap_sz =
796 		plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
797 	mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
798 	if (mem == NULL) {
799 		plt_err("soft expiry ring bmap alloc failed");
800 		rc = -ENOMEM;
801 		goto exit;
802 	}
803 
804 	bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
805 	if (!bmap) {
806 		plt_err("soft expiry ring bmap init failed");
807 		plt_free(mem);
808 		rc = -ENOMEM;
809 		goto exit;
810 	}
811 
812 	inl_dev->soft_exp_ring_bmap_mem = mem;
813 	inl_dev->soft_exp_ring_bmap = bmap;
814 	inl_dev->sa_soft_exp_ring = plt_zmalloc(
815 		ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
816 	if (!inl_dev->sa_soft_exp_ring) {
817 		plt_err("soft expiry ring pointer array alloc failed");
818 		plt_free(mem);
819 		rc = -ENOMEM;
820 		goto exit;
821 	}
822 
823 	for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
824 		plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);
825 
826 	soft_exp_consumer_cnt = 0;
827 	soft_exp_poll_thread_exit = false;
828 	rc = plt_thread_create_control(&inl_dev->soft_exp_poll_thread,
829 			"outb-poll", nix_inl_outb_poll_thread, inl_dev);
830 	if (rc) {
831 		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
832 		plt_free(inl_dev->soft_exp_ring_bmap_mem);
		plt_free(inl_dev->sa_soft_exp_ring);
833 	}
834 
835 exit:
836 	return rc;
837 }
838 
839 int
840 roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
841 {
842 	struct idev_cfg *idev = idev_get_cfg();
843 	struct nix_inl_dev *inl_dev = NULL;
844 
845 	if (stats == NULL)
846 		return NIX_ERR_PARAM;
847 
848 	if (idev && idev->nix_inl_dev)
849 		inl_dev = idev->nix_inl_dev;
850 
851 	if (!inl_dev)
852 		return -EINVAL;
853 
854 	stats->rx_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_OCTS);
855 	stats->rx_ucast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_UCAST);
856 	stats->rx_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_BCAST);
857 	stats->rx_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_MCAST);
858 	stats->rx_drop = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP);
859 	stats->rx_drop_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP_OCTS);
860 	stats->rx_fcs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_FCS);
861 	stats->rx_err = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_ERR);
862 	stats->rx_drop_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_BCAST);
863 	stats->rx_drop_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_MCAST);
864 	stats->rx_drop_l3_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3BCAST);
865 	stats->rx_drop_l3_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3MCAST);
866 
867 	return 0;
868 }
869 
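/* Bring up the inline device on the given PCI device. Rough sequence:
 * dev_init() -> attach LFs -> NIX LF setup -> SSO setup -> CPT setup ->
 * optional soft-expiry poll thread -> optional selftest -> optional
 * MCAM entry pre-allocation for IPsec rules.
 *
 * A minimal caller sketch (illustrative only; the field values shown are
 * assumptions, not requirements):
 *
 *	struct roc_nix_inl_dev inl = { 0 };
 *
 *	inl.pci_dev = pci_dev;
 *	inl.ipsec_in_min_spi = 0;
 *	inl.ipsec_in_max_spi = 4095;
 *	inl.attach_cptlf = true;
 *	rc = roc_nix_inl_dev_init(&inl);
 */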
870 int
871 roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
872 {
873 	struct plt_pci_device *pci_dev;
874 	struct nix_inl_dev *inl_dev;
875 	struct idev_cfg *idev;
876 	int start_index;
877 	int resp_count;
878 	int rc, i;
879 
880 	pci_dev = roc_inl_dev->pci_dev;
881 
882 	/* Skip probe if already done */
883 	idev = idev_get_cfg();
884 	if (idev == NULL)
885 		return -ENOTSUP;
886 
887 	if (idev->nix_inl_dev) {
888 		plt_info("Skipping device %s, inline device already probed",
889 			 pci_dev->name);
890 		return -EEXIST;
891 	}
892 
893 	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);
894 
895 	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
896 	memset(inl_dev, 0, sizeof(*inl_dev));
897 
898 	inl_dev->pci_dev = pci_dev;
899 	inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
900 	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
901 	inl_dev->selftest = roc_inl_dev->selftest;
902 	inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
903 	inl_dev->channel = roc_inl_dev->channel;
904 	inl_dev->chan_mask = roc_inl_dev->chan_mask;
905 	inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
906 	inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
907 	inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
908 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
909 	inl_dev->set_soft_exp_poll = !!roc_inl_dev->soft_exp_poll_freq;
910 	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
911 	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
912 	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
913 	inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;
914 
915 	if (roc_inl_dev->spb_drop_pc)
916 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
917 	if (roc_inl_dev->lpb_drop_pc)
918 		inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;
919 
920 	/* Initialize base device */
921 	rc = dev_init(&inl_dev->dev, pci_dev);
922 	if (rc) {
923 		plt_err("Failed to init roc device");
924 		goto error;
925 	}
926 
927 	/* Attach LF resources */
928 	rc = nix_inl_lf_attach(inl_dev);
929 	if (rc) {
930 		plt_err("Failed to attach LF resources, rc=%d", rc);
931 		goto dev_cleanup;
932 	}
933 
934 	/* Setup NIX LF */
935 	rc = nix_inl_nix_setup(inl_dev);
936 	if (rc)
937 		goto lf_detach;
938 
939 	/* Setup SSO LF */
940 	rc = nix_inl_sso_setup(inl_dev);
941 	if (rc)
942 		goto nix_release;
943 
944 	/* Setup CPT LF */
945 	rc = nix_inl_cpt_setup(inl_dev, false);
946 	if (rc)
947 		goto sso_release;
948 
949 	if (inl_dev->set_soft_exp_poll) {
950 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
951 		if (rc)
952 			goto cpt_release;
953 	}
954 
955 	/* Perform selftest if asked for */
956 	if (inl_dev->selftest) {
957 		rc = nix_inl_selftest();
958 		if (rc)
959 			goto cpt_release;
960 	}
961 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;
962 
963 	if (inl_dev->max_ipsec_rules && roc_inl_dev->is_multi_channel) {
964 		inl_dev->ipsec_index =
965 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
966 		if (inl_dev->ipsec_index == NULL) {
967 			rc = NPC_ERR_NO_MEM;
968 			goto cpt_release;
969 		}
970 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
971 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
972 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
973 		if (rc) {
974 			plt_free(inl_dev->ipsec_index);
975 			goto cpt_release;
976 		}
977 
978 		start_index = inl_dev->ipsec_index[0];
979 		for (i = 0; i < resp_count; i++)
980 			inl_dev->ipsec_index[i] = start_index + i;
981 
982 		inl_dev->curr_ipsec_idx = 0;
983 		inl_dev->alloc_ipsec_rules = resp_count;
984 	}
985 
986 	idev->nix_inl_dev = inl_dev;
987 
988 	return 0;
989 cpt_release:
990 	rc |= nix_inl_cpt_release(inl_dev);
991 sso_release:
992 	rc |= nix_inl_sso_release(inl_dev);
993 nix_release:
994 	rc |= nix_inl_nix_release(inl_dev);
995 lf_detach:
996 	rc |= nix_inl_lf_detach(inl_dev);
997 dev_cleanup:
998 	rc |= dev_fini(&inl_dev->dev, pci_dev);
999 error:
1000 	return rc;
1001 }
1002 
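/* Tear down the inline device in roughly the reverse order of init:
 * free any unconsumed pre-allocated IPsec MCAM entries, stop the
 * soft-expiry poll thread, sync the CPT CTX cache, then release the
 * CPT/SSO/NIX LFs, detach resources and finalize the base device.
 */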
1003 int
1004 roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
1005 {
1006 	struct plt_pci_device *pci_dev;
1007 	struct nix_inl_dev *inl_dev;
1008 	struct idev_cfg *idev;
1009 	uint32_t i;
1010 	int rc;
1011 
1012 	idev = idev_get_cfg();
1013 	if (idev == NULL)
1014 		return 0;
1015 
1016 	if (!idev->nix_inl_dev ||
1017 	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
1018 		return 0;
1019 
1020 	inl_dev = idev->nix_inl_dev;
1021 	pci_dev = inl_dev->pci_dev;
1022 
1023 	if (inl_dev->ipsec_index && roc_inl_dev->is_multi_channel) {
1024 		for (i = inl_dev->curr_ipsec_idx; i < inl_dev->alloc_ipsec_rules; i++)
1025 			npc_mcam_free_entry(inl_dev->dev.mbox, inl_dev->ipsec_index[i]);
1026 		plt_free(inl_dev->ipsec_index);
1027 	}
1028 
1029 	if (inl_dev->set_soft_exp_poll) {
1030 		soft_exp_poll_thread_exit = true;
1031 		rte_thread_join(inl_dev->soft_exp_poll_thread, NULL);
1032 		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
1033 		plt_free(inl_dev->soft_exp_ring_bmap_mem);
1034 		plt_free(inl_dev->sa_soft_exp_ring);
1035 	}
1036 
1037 	/* Flush Inbound CTX cache entries */
1038 	nix_inl_cpt_ctx_cache_sync(inl_dev);
1039 
1040 	/* Release CPT */
1041 	rc = nix_inl_cpt_release(inl_dev);
1042 
1043 	/* Release SSO */
1044 	rc |= nix_inl_sso_release(inl_dev);
1045 
1046 	/* Release NIX */
1047 	rc |= nix_inl_nix_release(inl_dev);
1048 
1049 	/* Detach LFs */
1050 	rc |= nix_inl_lf_detach(inl_dev);
1051 
1052 	/* Cleanup mbox */
1053 	rc |= dev_fini(&inl_dev->dev, pci_dev);
1054 	if (rc)
1055 		return rc;
1056 
1057 	idev->nix_inl_dev = NULL;
1058 	return 0;
1059 }
1060 
1061 int
1062 roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
1063 {
1064 	struct idev_cfg *idev = idev_get_cfg();
1065 	struct nix_inl_dev *inl_dev = NULL;
1066 
1067 	if (!idev || !idev->nix_inl_dev)
1068 		return -ENOENT;
1069 	inl_dev = idev->nix_inl_dev;
1070 
1071 	if (inl_dev->cpt_lf.dev != NULL)
1072 		return -EBUSY;
1073 
1074 	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
1075 }
1076 
1077 int
1078 roc_nix_inl_dev_cpt_release(void)
1079 {
1080 	struct idev_cfg *idev = idev_get_cfg();
1081 	struct nix_inl_dev *inl_dev = NULL;
1082 
1083 	if (!idev || !idev->nix_inl_dev)
1084 		return -ENOENT;
1085 	inl_dev = idev->nix_inl_dev;
1086 
1087 	if (inl_dev->cpt_lf.dev == NULL)
1088 		return 0;
1089 
1090 	return nix_inl_cpt_release(inl_dev);
1091 }
1092