xref: /dpdk/drivers/common/cnxk/roc_nix_inl_dev.c (revision 03b152389fb15f96e25d9acd87b84c9c22cf8b2b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include "roc_api.h"
6 #include "roc_priv.h"
7 
8 #include <unistd.h>
9 
10 #define NIX_AURA_DROP_PC_DFLT 40
11 
12 /* Default Rx Config for Inline NIX LF */
13 #define NIX_INL_LF_RX_CFG                                                      \
14 	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
15 	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
16 	 ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_OL3)
17 
18 #define INL_NIX_RX_STATS(val) plt_read64(inl_dev->nix_base + NIX_LF_RX_STATX(val))
19 
20 extern uint32_t soft_exp_consumer_cnt;
21 static bool soft_exp_poll_thread_exit = true;
22 
23 uint16_t
24 nix_inl_dev_pffunc_get(void)
25 {
26 	struct idev_cfg *idev = idev_get_cfg();
27 	struct nix_inl_dev *inl_dev;
28 
29 	if (idev != NULL) {
30 		inl_dev = idev->nix_inl_dev;
31 		if (inl_dev)
32 			return inl_dev->dev.pf_func;
33 	}
34 	return 0;
35 }
36 
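/*
 * Selftest work callback: stash the received work pointer (gw[1]) into the
 * caller-provided two-entry array, indexed by bit 0 of the tag word (gw[0]),
 * so that nix_inl_selftest() can verify both injected work entries.
 */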
37 static void
38 nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
39 {
40 	uintptr_t work = gw[1];
41 
42 	(void)soft_exp_event;
43 	*((uintptr_t *)args + (gw[0] & 0x1)) = work;
44 
45 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
46 }
47 
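/*
 * Basic sanity check of the inline device SSO path: temporarily swap in the
 * selftest work callback, inject two work entries carrying known magic values
 * via store-pair to the SSO LF base, then verify that both values reached the
 * callback before restoring the original callback and arguments.
 */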
48 static int
49 nix_inl_selftest(void)
50 {
51 	struct idev_cfg *idev = idev_get_cfg();
52 	roc_nix_inl_sso_work_cb_t save_cb;
53 	static uintptr_t work_arr[2];
54 	struct nix_inl_dev *inl_dev;
55 	void *save_cb_args;
56 	uint64_t add_work0;
57 	int rc = 0;
58 
59 	if (idev == NULL)
60 		return -ENOTSUP;
61 
62 	inl_dev = idev->nix_inl_dev;
63 	if (inl_dev == NULL)
64 		return -ENOTSUP;
65 
66 	plt_info("Performing nix inl self test");
67 
68 	/* Save and update cb to test cb */
69 	save_cb = inl_dev->work_cb;
70 	save_cb_args = inl_dev->cb_args;
71 	inl_dev->work_cb = nix_inl_selftest_work_cb;
72 	inl_dev->cb_args = work_arr;
73 
74 	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
75 
76 #define WORK_MAGIC1 0x335577ff0
77 #define WORK_MAGIC2 0xdeadbeef0
78 
79 	/* Add work */
80 	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
81 	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
82 	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
83 	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);
84 
85 	plt_delay_ms(10000);
86 
87 	/* Check if we got expected work */
88 	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
89 		plt_err("Failed to get expected work, [0]=%p [1]=%p",
90 			(void *)work_arr[0], (void *)work_arr[1]);
91 		rc = -EFAULT;
92 		goto exit;
93 	}
94 
95 	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
96 		 (void *)work_arr[1]);
97 
98 exit:
99 	/* Restore state */
100 	inl_dev->work_cb = save_cb;
101 	inl_dev->cb_args = save_cb_args;
102 	return rc;
103 }
104 
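/*
 * Issue a CPT context cache sync request over mailbox; used at teardown to
 * flush any cached inbound SA contexts.
 */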
105 static int
106 nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
107 {
108 	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
109 	struct msg_req *req;
110 	int rc;
111 
112 	req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
113 	if (req == NULL) {
114 		rc = -ENOSPC;
115 		goto exit;
116 	}
117 
118 	rc = mbox_process(mbox);
119 exit:
120 	mbox_put(mbox);
121 	return rc;
122 }
123 
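/*
 * Enable or disable inline IPsec processing on the inline NIX LF. When
 * enabling, program the inbound SA table base, the SA index width and maximum
 * index derived from the SPI mask, the SA size and the model-specific maximum
 * frame length.
 */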
124 static int
125 nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
126 {
127 	struct nix_inline_ipsec_lf_cfg *lf_cfg;
128 	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
129 	uint64_t max_sa;
130 	uint32_t sa_w;
131 	int rc;
132 
133 	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
134 	if (lf_cfg == NULL) {
135 		rc = -ENOSPC;
136 		goto exit;
137 	}
138 
139 	if (ena) {
141 		max_sa = inl_dev->inb_spi_mask + 1;
142 		sa_w = plt_log2_u32(max_sa);
143 
144 		lf_cfg->enable = 1;
145 		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
146 		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
147 		/* CN9K SA size is different */
148 		if (roc_model_is_cn9k())
149 			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
150 		else
151 			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
152 		lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
153 		lf_cfg->ipsec_cfg0.sa_pow2_size =
154 			plt_log2_u32(inl_dev->inb_sa_sz);
155 
156 		lf_cfg->ipsec_cfg0.tag_const = 0;
157 		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
158 	} else {
159 		lf_cfg->enable = 0;
160 	}
161 
162 	rc = mbox_process(mbox);
163 exit:
164 	mbox_put(mbox);
165 	return rc;
166 }
167 
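/*
 * Allocate and initialize the CPT LF(s) backing the inline device and publish
 * each queue's descriptor count, flow-control address, I/O address, LMT base
 * and register base in q_info[], which roc_nix_inl_dev_qptr_get() hands out.
 */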
168 static int
169 nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
170 {
171 	struct roc_nix_inl_dev_q *q_info;
172 	struct dev *dev = &inl_dev->dev;
173 	bool ctx_ilen_valid = false;
174 	struct roc_cpt_lf *lf;
175 	uint8_t eng_grpmask;
176 	uint8_t ctx_ilen = 0;
177 	int rc;
178 
179 	if (!inl_dev->attach_cptlf)
180 		return 0;
181 
182 	/* Alloc CPT LF */
183 	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
184 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
185 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
186 	if (roc_errata_cpt_has_ctx_fetch_issue()) {
187 		ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1;
188 		ctx_ilen_valid = true;
189 	}
190 
191 	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
192 			   ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
193 	if (rc) {
194 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
195 		return rc;
196 	}
197 
198 	for (int i = 0; i < inl_dev->nb_cptlf; i++) {
199 		/* Setup CPT LF for submitting control opcode */
200 		lf = &inl_dev->cpt_lf[i];
201 		lf->lf_id = i;
202 		lf->nb_desc = 0; /* Set to default */
203 		lf->dev = &inl_dev->dev;
204 		lf->msixoff = inl_dev->cpt_msixoff[i];
205 		lf->pci_dev = inl_dev->pci_dev;
206 
207 		rc = cpt_lf_init(lf);
208 		if (rc) {
209 			plt_err("Failed to initialize CPT LF, rc=%d", rc);
210 			goto lf_free;
211 		}
212 
213 		q_info = &inl_dev->q_info[i];
214 		q_info->nb_desc = lf->nb_desc;
215 		q_info->fc_addr = lf->fc_addr;
216 		q_info->io_addr = lf->io_addr;
217 		q_info->lmt_base = lf->lmt_base;
218 		q_info->rbase = lf->rbase;
219 
220 		roc_cpt_iq_enable(lf);
221 	}
222 	return 0;
223 lf_free:
224 	rc |= cpt_lfs_free(dev);
225 	return rc;
226 }
227 
228 static int
229 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
230 {
231 	struct dev *dev = &inl_dev->dev;
232 	int rc, i;
233 
234 	if (!inl_dev->attach_cptlf)
235 		return 0;
236 
237 	/* Cleanup CPT LF queue */
238 	for (i = 0; i < inl_dev->nb_cptlf; i++)
239 		cpt_lf_fini(&inl_dev->cpt_lf[i]);
240 
241 	/* Free LF resources */
242 	rc = cpt_lfs_free(dev);
243 	if (!rc) {
244 		for (i = 0; i < inl_dev->nb_cptlf; i++)
245 			inl_dev->cpt_lf[i].dev = NULL;
246 	} else
247 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
248 	return rc;
249 }
250 
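/*
 * Bring up the SSO resources of the inline device: one HWS and one HWGRP LF,
 * an XAQ aura sized from the in-unit entry count, the SSO/SSOW IRQs, the
 * HWS<->HWGRP link, and finally HWGRP enable via SSO_LF_GGRP_QCTL.
 */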
251 static int
252 nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
253 {
254 	struct sso_lf_alloc_rsp *sso_rsp;
255 	struct dev *dev = &inl_dev->dev;
256 	uint16_t hwgrp[1] = {0};
257 	int rc;
258 
259 	/* Alloc SSOW LF */
260 	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
261 	if (rc) {
262 		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
263 		return rc;
264 	}
265 
266 	/* Alloc HWGRP LF */
267 	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
268 	if (rc) {
269 		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
270 		goto free_ssow;
271 	}
272 
273 	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
274 	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
275 	inl_dev->iue = sso_rsp->in_unit_entries;
276 
277 	inl_dev->nb_xae = inl_dev->iue;
278 	rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
279 				     inl_dev->xae_waes, inl_dev->xaq_buf_size,
280 				     1);
281 	if (rc) {
282 		plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
283 		goto free_sso;
284 	}
285 
286 	/* Setup xaq for hwgrps */
287 	rc = sso_hwgrp_alloc_xaq(dev, roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
288 	if (rc) {
289 		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
290 		goto destroy_pool;
291 	}
292 
293 	/* Register SSO, SSOW error and work IRQs */
294 	rc = nix_inl_sso_register_irqs(inl_dev);
295 	if (rc) {
296 		plt_err("Failed to register sso irq's, rc=%d", rc);
297 		goto release_xaq;
298 	}
299 
300 	/* Setup hwgrp->hws link */
301 	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, true);
302 
303 	/* Enable HWGRP */
304 	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
305 
306 	return 0;
307 
308 release_xaq:
309 	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
310 destroy_pool:
311 	sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
312 free_sso:
313 	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
314 free_ssow:
315 	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
316 	return rc;
317 }
318 
319 static int
320 nix_inl_sso_release(struct nix_inl_dev *inl_dev)
321 {
322 	uint16_t hwgrp[1] = {0};
323 
324 	/* Disable HWGRP */
325 	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
326 
327 	/* Unregister SSO/SSOW IRQs */
328 	nix_inl_sso_unregister_irqs(inl_dev);
329 
330 	/* Unlink hws */
331 	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, false);
332 
333 	/* Release XAQ aura */
334 	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
335 
336 	/* Free SSO, SSOW LFs */
337 	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
338 	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);
339 
340 	/* Free the XAQ aura */
341 	sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);
342 
343 	return 0;
344 }
345 
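/*
 * Bring up the inline NIX LF: allocate the LF with one RQ per port (or a
 * single RQ when channel masking is in use), register its IRQs, allocate and
 * initialize the contiguous inbound SA table sized from the SPI range, and
 * program the inline IPsec LF configuration.
 */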
346 static int
347 nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
348 {
349 	uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
350 	uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
351 	struct dev *dev = &inl_dev->dev;
352 	struct mbox *mbox = dev->mbox;
353 	struct nix_lf_alloc_rsp *rsp;
354 	struct nix_lf_alloc_req *req;
355 	struct nix_hw_info *hw_info;
356 	struct roc_nix_rq *rqs;
357 	uint64_t max_sa, i;
358 	size_t inb_sa_sz;
359 	int rc = -ENOSPC;
360 	void *sa;
361 
362 	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
363 
364 	/* Alloc NIX LF along with the RQs needed for inline inbound traffic */
365 	req = mbox_alloc_msg_nix_lf_alloc(mbox_get(mbox));
366 	if (req == NULL) {
367 		mbox_put(mbox);
368 		return rc;
369 	}
370 	/* Use one RQ per port unless channel masking is in use */
371 	req->rq_cnt = inl_dev->nb_rqs;
372 	req->sq_cnt = 1;
373 	req->cq_cnt = 1;
374 	/* XQESZ is W16 */
375 	req->xqe_sz = NIX_XQESZ_W16;
376 	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
377 	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
378 	req->rss_grps = ROC_NIX_RSS_GRPS;
379 	req->npa_func = idev_npa_pffunc_get();
380 	req->sso_func = dev->pf_func;
381 	req->rx_cfg = NIX_INL_LF_RX_CFG;
382 	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
383 
384 	if (roc_errata_nix_has_no_drop_re())
385 		req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;
386 
387 	rc = mbox_process_msg(mbox, (void *)&rsp);
388 	if (rc) {
389 		plt_err("Failed to alloc lf, rc=%d", rc);
390 		mbox_put(mbox);
391 		return rc;
392 	}
393 
394 	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
395 	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
396 	inl_dev->qints = rsp->qints;
397 	inl_dev->cints = rsp->cints;
398 	mbox_put(mbox);
399 
400 	/* Get VWQE info if supported */
401 	if (roc_model_is_cn10k()) {
402 		mbox_alloc_msg_nix_get_hw_info(mbox_get(mbox));
403 		rc = mbox_process_msg(mbox, (void *)&hw_info);
404 		if (rc) {
405 			plt_err("Failed to get HW info, rc=%d", rc);
406 			mbox_put(mbox);
407 			goto lf_free;
408 		}
409 		inl_dev->vwqe_interval = hw_info->vwqe_delay;
410 		mbox_put(mbox);
411 	}
412 
413 	/* Register nix interrupts */
414 	rc = nix_inl_nix_register_irqs(inl_dev);
415 	if (rc) {
416 		plt_err("Failed to register nix irq's, rc=%d", rc);
417 		goto lf_free;
418 	}
419 
420 	/* CN9K SA size is different */
421 	if (roc_model_is_cn9k())
422 		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
423 	else if (inl_dev->custom_inb_sa)
424 		inb_sa_sz = ROC_NIX_INL_INB_CUSTOM_SA_SZ;
425 	else
426 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
427 
428 	/* Alloc contiguous memory for Inbound SAs */
429 	inl_dev->inb_sa_sz = inb_sa_sz;
430 	inl_dev->inb_spi_mask = max_sa - 1;
431 	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
432 					   ROC_NIX_INL_SA_BASE_ALIGN);
433 	if (!inl_dev->inb_sa_base) {
434 		plt_err("Failed to allocate memory for Inbound SA");
435 		rc = -ENOMEM;
436 		goto unregister_irqs;
437 	}
438 
439 	if (roc_model_is_cn10k()) {
440 		for (i = 0; i < max_sa; i++) {
441 			sa = ((uint8_t *)inl_dev->inb_sa_base) +
442 			     (i * inb_sa_sz);
443 			roc_ot_ipsec_inb_sa_init(sa, true);
444 		}
445 	}
446 	/* Set up the device-specific inbound SA table */
447 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
448 	if (rc) {
449 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
450 		goto free_mem;
451 	}
452 
453 	/* Allocate memory for RQs */
454 	rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
455 	if (!rqs) {
456 		plt_err("Failed to allocate memory for RQs");
		rc = -ENOMEM;
457 		goto free_mem;
458 	}
459 	inl_dev->rqs = rqs;
460 
461 	return 0;
462 free_mem:
463 	plt_free(inl_dev->inb_sa_base);
464 	inl_dev->inb_sa_base = NULL;
465 unregister_irqs:
466 	nix_inl_nix_unregister_irqs(inl_dev);
467 lf_free:
468 	mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
469 	rc |= mbox_process(mbox);
470 	mbox_put(mbox);
471 	return rc;
472 }
473 
474 static int
475 nix_inl_nix_release(struct nix_inl_dev *inl_dev)
476 {
477 	struct dev *dev = &inl_dev->dev;
478 	struct mbox *mbox = dev->mbox;
479 	struct nix_lf_free_req *req;
480 	struct ndc_sync_op *ndc_req;
481 	int rc = -ENOSPC;
482 
483 	/* Disable Inbound processing */
484 	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
485 	if (rc)
486 		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);
487 
488 	/* Sync NDC-NIX for LF */
489 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
490 	if (ndc_req == NULL) {
491 		mbox_put(mbox);
492 		return rc;
493 	}
494 	ndc_req->nix_lf_rx_sync = 1;
495 	rc = mbox_process(mbox);
496 	if (rc)
497 		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);
498 	mbox_put(mbox);
499 
500 	/* Unregister IRQs */
501 	nix_inl_nix_unregister_irqs(inl_dev);
502 
503 	/* By default, all associated MCAM rules are deleted */
504 	req = mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
505 	if (req == NULL) {
506 		mbox_put(mbox);
507 		return -ENOSPC;
508 	}
509 
510 	rc = mbox_process(mbox);
511 	if (rc) {
512 		mbox_put(mbox);
513 		return rc;
514 	}
515 	mbox_put(mbox);
516 
517 	plt_free(inl_dev->rqs);
518 	plt_free(inl_dev->inb_sa_base);
519 	inl_dev->rqs = NULL;
520 	inl_dev->inb_sa_base = NULL;
521 	return 0;
522 }
523 
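/*
 * Attach the LF resources needed by the inline device (NIX LF, SSO HWS and
 * HWGRP, and optionally CPT LFs), fetch their MSIX vector offsets and derive
 * the BAR2 base address of each block.
 */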
524 static int
525 nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
526 {
527 	struct msix_offset_rsp *msix_rsp;
528 	struct dev *dev = &inl_dev->dev;
529 	struct mbox *mbox = mbox_get(dev->mbox);
530 	struct rsrc_attach_req *req;
531 	uint64_t nix_blkaddr;
532 	int rc = -ENOSPC;
533 
534 	req = mbox_alloc_msg_attach_resources(mbox);
535 	if (req == NULL)
536 		goto exit;
537 	req->modify = true;
538 	/* Attach 1 NIX LF, 1 SSO HWS, 1 SSO HWGRP and optionally CPT LFs */
539 	req->nixlf = true;
540 	req->ssow = 1;
541 	req->sso = 1;
542 	if (inl_dev->attach_cptlf) {
543 		req->cptlfs = inl_dev->nb_cptlf;
544 		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
545 	}
546 
547 	rc = mbox_process(dev->mbox);
548 	if (rc)
549 		goto exit;
550 
551 	/* Get MSIX vector offsets */
552 	mbox_alloc_msg_msix_offset(mbox);
553 	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
554 	if (rc)
555 		goto exit;
556 
557 	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
558 	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
559 	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
560 
561 	for (int i = 0; i < inl_dev->nb_cptlf; i++)
562 		inl_dev->cpt_msixoff[i] = msix_rsp->cptlf_msixoff[i];
563 
564 	nix_blkaddr = nix_get_blkaddr(dev);
565 	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
566 
567 	/* Update base addresses for LFs */
568 	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
569 	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
570 	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
571 	inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);
572 
573 	rc = 0;
574 exit:
575 	mbox_put(mbox);
576 	return rc;
577 }
578 
579 static int
580 nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
581 {
582 	struct dev *dev = &inl_dev->dev;
583 	struct mbox *mbox = mbox_get(dev->mbox);
584 	struct rsrc_detach_req *req;
585 	int rc = -ENOSPC;
586 
587 	req = mbox_alloc_msg_detach_resources(mbox);
588 	if (req == NULL)
589 		goto exit;
590 	req->partial = true;
591 	req->nixlf = true;
592 	req->ssow = true;
593 	req->sso = true;
594 	req->cptlfs = !!inl_dev->attach_cptlf;
595 
596 	rc = mbox_process(dev->mbox);
597 exit:
598 	mbox_put(mbox);
599 	return rc;
600 }
601 
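/* Poll the SSO XAQ and AQ counters until both read zero, giving up after
 * roughly three seconds.
 */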
602 static int
603 nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
604 {
605 	uintptr_t sso_base = inl_dev->sso_base;
606 	int wait_ms = 3000;
607 
608 	while (wait_ms > 0) {
609 		/* Break when empty */
610 		if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
611 		    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
612 			return 0;
613 
614 		plt_delay_us(1000);
615 		wait_ms -= 1;
616 	}
617 
618 	return -ETIMEDOUT;
619 }
620 
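/*
 * Resize the inline device XAQ aura when a new packet pool starts using the
 * inline device: quiesce the inline RQs and the HWGRP, rebuild the XAQ aura
 * with room for the new pool's buffers, then re-enable everything on exit.
 */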
621 int
622 roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
623 {
624 	struct idev_cfg *idev = idev_get_cfg();
625 	struct nix_inl_dev *inl_dev;
626 	int rc, i;
627 
628 	if (idev == NULL)
629 		return 0;
630 
631 	inl_dev = idev->nix_inl_dev;
632 	/* Nothing to do if no inline device */
633 	if (!inl_dev)
634 		return 0;
635 
636 	if (!aura_handle) {
637 		inl_dev->nb_xae = inl_dev->iue;
638 		goto no_pool;
639 	}
640 
641 	/* Check if aura is already considered */
642 	for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
643 		if (inl_dev->pkt_pools[i] == aura_handle)
644 			return 0;
645 	}
646 
647 no_pool:
648 	/* Disable RQ if enabled */
649 	for (i = 0; i < inl_dev->nb_rqs; i++) {
650 		if (!inl_dev->rqs[i].inl_dev_refs)
651 			continue;
652 		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
653 		if (rc) {
654 			plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
655 				rc);
656 			return rc;
657 		}
658 	}
659 
660 	/* Wait for events to be removed */
661 	rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
662 	if (rc) {
663 		plt_err("Timeout waiting for inline device event cleanup");
664 		goto exit;
665 	}
666 
667 	/* Disable HWGRP */
668 	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
669 
670 	inl_dev->pkt_pools_cnt++;
671 	inl_dev->pkt_pools =
672 		plt_realloc(inl_dev->pkt_pools,
673 			    sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
674 	if (!inl_dev->pkt_pools)
675 		inl_dev->pkt_pools_cnt = 0;
676 	else
677 		inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
678 	inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);
679 
680 	/* Realloc XAQ aura */
681 	rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
682 				     inl_dev->nb_xae, inl_dev->xae_waes,
683 				     inl_dev->xaq_buf_size, 1);
684 	if (rc) {
685 		plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
686 		return rc;
687 	}
688 
689 	/* Setup xaq for hwgrps */
690 	rc = sso_hwgrp_alloc_xaq(&inl_dev->dev,
691 				 roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
692 	if (rc) {
693 		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
694 		return rc;
695 	}
696 
697 	/* Enable HWGRP */
698 	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
699 
700 exit:
701 	/* Re-enable RQs */
702 	for (i = 0; i < inl_dev->nb_rqs; i++) {
703 		if (!inl_dev->rqs[i].inl_dev_refs)
704 			continue;
705 
706 		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
707 		if (rc)
708 			plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
709 				rc);
710 	}
711 
712 	return rc;
713 }
714 
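/*
 * Drain one outbound SA soft-expiry ring. The first 64-bit word of the ring
 * holds the head/tail indices; each subsequent entry encodes an outbound SA
 * pointer, which is passed to the registered work callback along with the
 * port derived from the ring index.
 */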
715 static void
716 inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
717 {
718 	union roc_ot_ipsec_err_ring_head head;
719 	struct roc_ot_ipsec_outb_sa *sa;
720 	uint16_t head_l, tail_l;
721 	uint64_t *ring_base;
722 	uint32_t port_id;
723 
724 	port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
725 	ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
726 	if (!ring_base) {
727 		plt_err("Invalid soft exp ring base");
728 		return;
729 	}
730 
731 	head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
732 	head_l = head.s.head_pos;
733 	tail_l = head.s.tail_pos;
734 
735 	while (tail_l != head_l) {
736 		union roc_ot_ipsec_err_ring_entry entry;
737 		int poll_counter = 0;
738 
739 		while (poll_counter++ <
740 		       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
741 			plt_delay_us(20);
742 			entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
743 						    __ATOMIC_ACQUIRE);
744 			if (likely(entry.u64))
745 				break;
746 		}
747 
748 		entry.u64 = plt_be_to_cpu_64(entry.u64);
749 		sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
750 						      << 51) |
751 						     (entry.s.data0 << 7));
752 
753 		if (sa != NULL) {
754 			uint64_t tmp = ~(uint32_t)0x0;
755 			inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
756 			__atomic_store_n(ring_base + tail_l + 1, 0ULL,
757 					 __ATOMIC_RELAXED);
758 			__atomic_fetch_add((uint32_t *)ring_base, 1,
759 					   __ATOMIC_ACQ_REL);
760 		} else
761 			plt_err("Invalid SA");
762 
763 		tail_l++;
764 	}
765 }
766 
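/*
 * Control thread that periodically scans the soft-expiry ring bitmap and
 * drains every active ring until asked to exit.
 */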
767 static uint32_t
768 nix_inl_outb_poll_thread(void *args)
769 {
770 	struct nix_inl_dev *inl_dev = args;
771 	uint32_t poll_freq;
772 	uint32_t i;
773 	bool bit;
774 
775 	poll_freq = inl_dev->soft_exp_poll_freq;
776 
777 	while (!soft_exp_poll_thread_exit) {
778 		if (soft_exp_consumer_cnt) {
779 			for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
780 				bit = plt_bitmap_get(
781 					inl_dev->soft_exp_ring_bmap, i);
782 				if (bit)
783 					inl_outb_soft_exp_poll(inl_dev, i);
784 			}
785 		}
786 		usleep(poll_freq);
787 	}
788 
789 	return 0;
790 }
791 
792 static int
793 nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
794 {
795 	struct plt_bitmap *bmap;
796 	size_t bmap_sz;
797 	uint32_t i;
798 	void *mem;
799 	int rc;
800 
801 	/* Allocate a bitmap that the poll thread uses to find the active
802 	 * soft-expiry rings; the port_id is derived from the ring index.
803 	 */
804 	bmap_sz =
805 		plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
806 	mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
807 	if (mem == NULL) {
808 		plt_err("soft expiry ring bmap alloc failed");
809 		rc = -ENOMEM;
810 		goto exit;
811 	}
812 
813 	bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
814 	if (!bmap) {
815 		plt_err("soft expiry ring bmap init failed");
816 		plt_free(mem);
817 		rc = -ENOMEM;
818 		goto exit;
819 	}
820 
821 	inl_dev->soft_exp_ring_bmap_mem = mem;
822 	inl_dev->soft_exp_ring_bmap = bmap;
823 	inl_dev->sa_soft_exp_ring = plt_zmalloc(
824 		ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
825 	if (!inl_dev->sa_soft_exp_ring) {
826 		plt_err("soft expiry ring pointer array alloc failed");
827 		plt_free(mem);
828 		rc = -ENOMEM;
829 		goto exit;
830 	}
831 
832 	for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
833 		plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);
834 
835 	soft_exp_consumer_cnt = 0;
836 	soft_exp_poll_thread_exit = false;
837 	rc = plt_thread_create_control(&inl_dev->soft_exp_poll_thread,
838 			"outb-poll", nix_inl_outb_poll_thread, inl_dev);
839 	if (rc) {
840 		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
841 		plt_free(inl_dev->soft_exp_ring_bmap_mem);
842 	}
843 
844 exit:
845 	return rc;
846 }
847 
848 void *
849 roc_nix_inl_dev_qptr_get(uint8_t qid)
850 {
851 	struct idev_cfg *idev = idev_get_cfg();
852 	struct nix_inl_dev *inl_dev = NULL;
853 
854 	if (idev)
855 		inl_dev = idev->nix_inl_dev;
856 
857 	if (!inl_dev) {
858 		plt_err("Inline Device could not be detected");
859 		return NULL;
860 	}
861 	if (!inl_dev->attach_cptlf) {
862 		plt_err("No CPT LFs are attached to Inline Device");
863 		return NULL;
864 	}
865 	if (qid >= inl_dev->nb_cptlf) {
866 		plt_err("Invalid qid: %u total queues: %d", qid, inl_dev->nb_cptlf);
867 		return NULL;
868 	}
869 	return &inl_dev->q_info[qid];
870 }
871 
872 int
873 roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
874 {
875 	struct idev_cfg *idev = idev_get_cfg();
876 	struct nix_inl_dev *inl_dev = NULL;
877 
878 	if (stats == NULL)
879 		return NIX_ERR_PARAM;
880 
881 	if (idev && idev->nix_inl_dev)
882 		inl_dev = idev->nix_inl_dev;
883 
884 	if (!inl_dev)
885 		return -EINVAL;
886 
887 	stats->rx_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_OCTS);
888 	stats->rx_ucast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_UCAST);
889 	stats->rx_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_BCAST);
890 	stats->rx_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_MCAST);
891 	stats->rx_drop = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP);
892 	stats->rx_drop_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP_OCTS);
893 	stats->rx_fcs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_FCS);
894 	stats->rx_err = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_ERR);
895 	stats->rx_drop_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_BCAST);
896 	stats->rx_drop_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_MCAST);
897 	stats->rx_drop_l3_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3BCAST);
898 	stats->rx_drop_l3_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3MCAST);
899 
900 	return 0;
901 }
902 
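/*
 * Probe and set up the inline device on the given PCI device: attach LFs,
 * bring up NIX, SSO and CPT resources, optionally start the soft-expiry poll
 * thread and run the selftest, and pre-allocate MCAM entries for inline IPsec
 * rules when requested.
 */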
903 int
904 roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
905 {
906 	struct plt_pci_device *pci_dev;
907 	struct nix_inl_dev *inl_dev;
908 	struct idev_cfg *idev;
909 	int start_index;
910 	int resp_count;
911 	int rc, i;
912 
913 	pci_dev = roc_inl_dev->pci_dev;
914 
915 	/* Skip probe if already done */
916 	idev = idev_get_cfg();
917 	if (idev == NULL)
918 		return -ENOTSUP;
919 
920 	if (idev->nix_inl_dev) {
921 		plt_info("Skipping device %s, inline device already probed",
922 			 pci_dev->name);
923 		return -EEXIST;
924 	}
925 
926 	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);
927 
928 	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
929 	memset(inl_dev, 0, sizeof(*inl_dev));
930 
931 	inl_dev->pci_dev = pci_dev;
932 	inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
933 	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
934 	inl_dev->selftest = roc_inl_dev->selftest;
935 	inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
936 	inl_dev->channel = roc_inl_dev->channel;
937 	inl_dev->chan_mask = roc_inl_dev->chan_mask;
938 	inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
939 	inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
940 	inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
941 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
942 	inl_dev->set_soft_exp_poll = !!roc_inl_dev->soft_exp_poll_freq;
943 	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
944 	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
945 	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
946 	inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;
947 	inl_dev->custom_inb_sa = roc_inl_dev->custom_inb_sa;
948 
949 	if (roc_inl_dev->rx_inj_ena) {
950 		inl_dev->rx_inj_ena = 1;
951 		inl_dev->nb_cptlf = NIX_INL_CPT_LF;
952 	} else
953 		inl_dev->nb_cptlf = 1;
954 
955 	if (roc_inl_dev->spb_drop_pc)
956 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
957 	if (roc_inl_dev->lpb_drop_pc)
958 		inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;
959 
960 	/* Initialize base device */
961 	rc = dev_init(&inl_dev->dev, pci_dev);
962 	if (rc) {
963 		plt_err("Failed to init roc device");
964 		goto error;
965 	}
966 
967 	/* Attach LF resources */
968 	rc = nix_inl_lf_attach(inl_dev);
969 	if (rc) {
970 		plt_err("Failed to attach LF resources, rc=%d", rc);
971 		goto dev_cleanup;
972 	}
973 
974 	/* Setup NIX LF */
975 	rc = nix_inl_nix_setup(inl_dev);
976 	if (rc)
977 		goto lf_detach;
978 
979 	/* Setup SSO LF */
980 	rc = nix_inl_sso_setup(inl_dev);
981 	if (rc)
982 		goto nix_release;
983 
984 	/* Setup CPT LF */
985 	rc = nix_inl_cpt_setup(inl_dev, false);
986 	if (rc)
987 		goto sso_release;
988 
989 	if (inl_dev->set_soft_exp_poll) {
990 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
991 		if (rc)
992 			goto cpt_release;
993 	}
994 
995 	/* Perform selftest if asked for */
996 	if (inl_dev->selftest) {
997 		rc = nix_inl_selftest();
998 		if (rc)
999 			goto cpt_release;
1000 	}
1001 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;
1002 
1003 	if (inl_dev->max_ipsec_rules && roc_inl_dev->is_multi_channel) {
1004 		inl_dev->ipsec_index =
1005 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
1006 		if (inl_dev->ipsec_index == NULL) {
1007 			rc = NPC_ERR_NO_MEM;
1008 			goto cpt_release;
1009 		}
1010 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
1011 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
1012 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
1013 		if (rc) {
1014 			plt_free(inl_dev->ipsec_index);
1015 			goto cpt_release;
1016 		}
1017 
1018 		start_index = inl_dev->ipsec_index[0];
1019 		for (i = 0; i < resp_count; i++)
1020 			inl_dev->ipsec_index[i] = start_index + i;
1021 
1022 		inl_dev->curr_ipsec_idx = 0;
1023 		inl_dev->alloc_ipsec_rules = resp_count;
1024 	}
1025 
1026 	idev->nix_inl_dev = inl_dev;
1027 
1028 	return 0;
1029 cpt_release:
1030 	rc |= nix_inl_cpt_release(inl_dev);
1031 sso_release:
1032 	rc |= nix_inl_sso_release(inl_dev);
1033 nix_release:
1034 	rc |= nix_inl_nix_release(inl_dev);
1035 lf_detach:
1036 	rc |= nix_inl_lf_detach(inl_dev);
1037 dev_cleanup:
1038 	rc |= dev_fini(&inl_dev->dev, pci_dev);
1039 error:
1040 	return rc;
1041 }
1042 
1043 int
1044 roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
1045 {
1046 	struct plt_pci_device *pci_dev;
1047 	struct nix_inl_dev *inl_dev;
1048 	struct idev_cfg *idev;
1049 	uint32_t i;
1050 	int rc;
1051 
1052 	idev = idev_get_cfg();
1053 	if (idev == NULL)
1054 		return 0;
1055 
1056 	if (!idev->nix_inl_dev ||
1057 	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
1058 		return 0;
1059 
1060 	inl_dev = idev->nix_inl_dev;
1061 	pci_dev = inl_dev->pci_dev;
1062 
1063 	if (inl_dev->ipsec_index && roc_inl_dev->is_multi_channel) {
1064 		for (i = inl_dev->curr_ipsec_idx; i < inl_dev->alloc_ipsec_rules; i++)
1065 			npc_mcam_free_entry(inl_dev->dev.mbox, inl_dev->ipsec_index[i]);
1066 		plt_free(inl_dev->ipsec_index);
1067 	}
1068 
1069 	if (inl_dev->set_soft_exp_poll) {
1070 		soft_exp_poll_thread_exit = true;
1071 		plt_thread_join(inl_dev->soft_exp_poll_thread, NULL);
1072 		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
1073 		plt_free(inl_dev->soft_exp_ring_bmap_mem);
1074 		plt_free(inl_dev->sa_soft_exp_ring);
1075 	}
1076 
1077 	/* Flush Inbound CTX cache entries */
1078 	nix_inl_cpt_ctx_cache_sync(inl_dev);
1079 
1080 	/* Release CPT */
1081 	rc = nix_inl_cpt_release(inl_dev);
1082 
1083 	/* Release SSO */
1084 	rc |= nix_inl_sso_release(inl_dev);
1085 
1086 	/* Release NIX */
1087 	rc |= nix_inl_nix_release(inl_dev);
1088 
1089 	/* Detach LFs */
1090 	rc |= nix_inl_lf_detach(inl_dev);
1091 
1092 	/* Cleanup mbox */
1093 	rc |= dev_fini(&inl_dev->dev, pci_dev);
1094 	if (rc)
1095 		return rc;
1096 
1097 	idev->nix_inl_dev = NULL;
1098 	return 0;
1099 }
1100 
1101 int
1102 roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
1103 {
1104 	struct idev_cfg *idev = idev_get_cfg();
1105 	struct nix_inl_dev *inl_dev = NULL;
1106 
1107 	if (!idev || !idev->nix_inl_dev)
1108 		return -ENOENT;
1109 	inl_dev = idev->nix_inl_dev;
1110 
1111 	if (inl_dev->cpt_lf[0].dev != NULL)
1112 		return -EBUSY;
1113 
1114 	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
1115 }
1116 
1117 int
1118 roc_nix_inl_dev_cpt_release(void)
1119 {
1120 	struct idev_cfg *idev = idev_get_cfg();
1121 	struct nix_inl_dev *inl_dev = NULL;
1122 
1123 	if (!idev || !idev->nix_inl_dev)
1124 		return -ENOENT;
1125 	inl_dev = idev->nix_inl_dev;
1126 
1127 	if (inl_dev->cpt_lf[0].dev == NULL)
1128 		return 0;
1129 
1130 	return nix_inl_cpt_release(inl_dev);
1131 }
1132