/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#include <unistd.h>

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
	 ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_OL3)

#define INL_NIX_RX_STATS(val) plt_read64(inl_dev->nix_base + NIX_LF_RX_STATX(val))

extern uint32_t soft_exp_consumer_cnt;
static bool soft_exp_poll_thread_exit = true;

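/* Return the PF_FUNC of the inline NIX device, or 0 if none is probed */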
uint16_t
nix_inl_dev_pffunc_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			return inl_dev->dev.pf_func;
	}
	return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
	return nix_inl_dev_pffunc_get();
}

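/* Selftest work callback: store the received work pointer into the test
 * array slot selected by bit 0 of gw[0].
 */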
static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	uintptr_t work = gw[1];

	(void)soft_exp_event;
	*((uintptr_t *)args + (gw[0] & 0x1)) = work;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

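/* Basic selftest: temporarily install a test callback, inject two tagged
 * work entries through the SSO add-work interface and verify that the
 * callback observed both magic values.
 */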
static int
nix_inl_selftest(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	roc_nix_inl_sso_work_cb_t save_cb;
	static uintptr_t work_arr[2];
	struct nix_inl_dev *inl_dev;
	void *save_cb_args;
	uint64_t add_work0;
	int rc = 0;

	if (idev == NULL)
		return -ENOTSUP;

	inl_dev = idev->nix_inl_dev;
	if (inl_dev == NULL)
		return -ENOTSUP;

	plt_info("Performing nix inl self test");

	/* Save and update cb to test cb */
	save_cb = inl_dev->work_cb;
	save_cb_args = inl_dev->cb_args;
	inl_dev->work_cb = nix_inl_selftest_work_cb;
	inl_dev->cb_args = work_arr;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

	/* Add work */
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

	plt_delay_ms(10000);

	/* Check if we got expected work */
	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
		plt_err("Failed to get expected work, [0]=%p [1]=%p",
			(void *)work_arr[0], (void *)work_arr[1]);
		rc = -EFAULT;
		goto exit;
	}

	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
		 (void *)work_arr[1]);

exit:
	/* Restore state */
	inl_dev->work_cb = save_cb;
	inl_dev->cb_args = save_cb_args;
	return rc;
}

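/* Request AF to sync the CPT context cache via mailbox */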
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

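/* Enable or disable inline inbound IPsec processing on the NIX LF:
 * program the SA base address, SA size, SA index width and max frame size.
 */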
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
	struct nix_inline_ipsec_lf_cfg *lf_cfg;
	struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
	uint64_t max_sa;
	uint32_t sa_w;
	int rc;

	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	if (lf_cfg == NULL) {
		rc = -ENOSPC;
		goto exit;
	}

	if (ena) {

		max_sa = inl_dev->inb_spi_mask + 1;
		sa_w = plt_log2_u32(max_sa);

		lf_cfg->enable = 1;
		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
		/* CN9K max HW frame size is different */
		if (roc_model_is_cn9k())
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
		else
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
		lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
		lf_cfg->ipsec_cfg0.sa_pow2_size =
			plt_log2_u32(inl_dev->inb_sa_sz);

		lf_cfg->ipsec_cfg0.tag_const = 0;
		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
	} else {
		lf_cfg->enable = 0;
	}

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

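/* Allocate and initialize the CPT LF(s) used by the inline device */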
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
{
	struct dev *dev = &inl_dev->dev;
	bool ctx_ilen_valid = false;
	struct roc_cpt_lf *lf;
	uint8_t eng_grpmask;
	uint8_t ctx_ilen = 0;
	int rc;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Alloc CPT LF */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	if (roc_errata_cpt_has_ctx_fetch_issue()) {
		ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1;
		ctx_ilen_valid = true;
	}

	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
			   ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		return rc;
	}

	for (int i = 0; i < inl_dev->nb_cptlf; i++) {
		/* Setup CPT LF for submitting control opcode */
		lf = &inl_dev->cpt_lf[i];
		lf->lf_id = i;
		lf->nb_desc = 0; /* Set to default */
		lf->dev = &inl_dev->dev;
		lf->msixoff = inl_dev->cpt_msixoff[i];
		lf->pci_dev = inl_dev->pci_dev;

		rc = cpt_lf_init(lf);
		if (rc) {
			plt_err("Failed to initialize CPT LF, rc=%d", rc);
			goto lf_free;
		}

		roc_cpt_iq_enable(lf);
	}
	return 0;
lf_free:
	rc |= cpt_lfs_free(dev);
	return rc;
}

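/* Tear down the CPT LF queues and free the LF resources */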
static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	int rc, i;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Cleanup CPT LF queue */
	for (i = 0; i < inl_dev->nb_cptlf; i++)
		cpt_lf_fini(&inl_dev->cpt_lf[i]);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (!rc) {
		for (i = 0; i < inl_dev->nb_cptlf; i++)
			inl_dev->cpt_lf[i].dev = NULL;
	} else
		plt_err("Failed to free CPT LF resources, rc=%d", rc);
	return rc;
}

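/* Allocate SSO HWS and HWGRP LFs, set up the XAQ aura, register SSO IRQs
 * and link the HWS to the HWGRP.
 */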
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
	struct sso_lf_alloc_rsp *sso_rsp;
	struct dev *dev = &inl_dev->dev;
	uint16_t hwgrp[1] = {0};
	int rc;

	/* Alloc SSOW LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
	if (rc) {
		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
		return rc;
	}

	/* Alloc HWGRP LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
	if (rc) {
		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
		goto free_ssow;
	}

	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
	inl_dev->iue = sso_rsp->in_unit_entries;

	inl_dev->nb_xae = inl_dev->iue;
	rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
				     inl_dev->xae_waes, inl_dev->xaq_buf_size,
				     1);
	if (rc) {
		plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
		goto free_sso;
	}

	/* Setup xaq for hwgrps */
	rc = sso_hwgrp_alloc_xaq(dev, roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
	if (rc) {
		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
		goto destroy_pool;
	}

	/* Register SSO, SSOW error and work IRQs */
	rc = nix_inl_sso_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register sso irq's, rc=%d", rc);
		goto release_xaq;
	}

	/* Setup hwgrp->hws link */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, true);

	/* Enable HWGRP */
	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	return 0;

release_xaq:
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
	sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
	return rc;
}

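/* Undo nix_inl_sso_setup(): disable the HWGRP, unregister IRQs, unlink the
 * HWS and free the SSO LFs and XAQ aura.
 */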
static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
	uint16_t hwgrp[1] = {0};

	/* Disable HWGRP */
	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	/* Unregister SSO/SSOW IRQs */
	nix_inl_sso_unregister_irqs(inl_dev);

	/* Unlink hws */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, false);

	/* Release XAQ aura */
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);

	/* Free SSO, SSOW LFs */
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

	/* Free the XAQ aura */
	sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

	return 0;
}

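/* Allocate the NIX LF used by the inline device, register its IRQs,
 * allocate the inbound SA table and the per-port RQ array.
 */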
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
	uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
	uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_alloc_rsp *rsp;
	struct nix_lf_alloc_req *req;
	struct nix_hw_info *hw_info;
	struct roc_nix_rq *rqs;
	uint64_t max_sa, i;
	size_t inb_sa_sz;
	int rc = -ENOSPC;
	void *sa;

	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

	/* Alloc NIX LF needed for single RQ */
	req = mbox_alloc_msg_nix_lf_alloc(mbox_get(mbox));
	if (req == NULL) {
		mbox_put(mbox);
		return rc;
	}
	/* We will have one RQ per port unless channel masking is in use */
	req->rq_cnt = inl_dev->nb_rqs;
	req->sq_cnt = 1;
	req->cq_cnt = 1;
	/* XQESZ is W16 */
	req->xqe_sz = NIX_XQESZ_W16;
	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
	req->rss_grps = ROC_NIX_RSS_GRPS;
	req->npa_func = idev_npa_pffunc_get();
	req->sso_func = dev->pf_func;
	req->rx_cfg = NIX_INL_LF_RX_CFG;
	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

	if (roc_errata_nix_has_no_drop_re())
		req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to alloc lf, rc=%d", rc);
		mbox_put(mbox);
		return rc;
	}

	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
	inl_dev->qints = rsp->qints;
	inl_dev->cints = rsp->cints;
	mbox_put(mbox);

	/* Get VWQE info if supported */
	if (roc_model_is_cn10k()) {
		mbox_alloc_msg_nix_get_hw_info(mbox_get(mbox));
		rc = mbox_process_msg(mbox, (void *)&hw_info);
		if (rc) {
			plt_err("Failed to get HW info, rc=%d", rc);
			mbox_put(mbox);
			goto lf_free;
		}
		inl_dev->vwqe_interval = hw_info->vwqe_delay;
		mbox_put(mbox);
	}

	/* Register nix interrupts */
	rc = nix_inl_nix_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register nix irq's, rc=%d", rc);
		goto lf_free;
	}

	/* CN9K SA is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SAs */
	inl_dev->inb_sa_sz = inb_sa_sz;
	inl_dev->inb_spi_mask = max_sa - 1;
	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
					   ROC_NIX_INL_SA_BASE_ALIGN);
	if (!inl_dev->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		rc = -ENOMEM;
		goto unregister_irqs;
	}

	if (roc_model_is_cn10k()) {
		for (i = 0; i < max_sa; i++) {
			sa = ((uint8_t *)inl_dev->inb_sa_base) +
			     (i * inb_sa_sz);
			roc_ot_ipsec_inb_sa_init(sa, true);
		}
	}
	/* Setup device specific inb SA table */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	/* Allocate memory for RQs */
	rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
	if (!rqs) {
		plt_err("Failed to allocate memory for RQ's");
		rc = -ENOMEM;
		goto free_mem;
	}
	inl_dev->rqs = rqs;

	return 0;
free_mem:
	plt_free(inl_dev->inb_sa_base);
	inl_dev->inb_sa_base = NULL;
unregister_irqs:
	nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
	mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
	rc |= mbox_process(mbox);
	mbox_put(mbox);
	return rc;
}

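/* Disable inbound IPsec, sync NDC-NIX, free the NIX LF and release the
 * SA table and RQ memory.
 */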
static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc = -ENOSPC;

	/* Disable Inbound processing */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
	if (rc)
		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

	/* Sync NDC-NIX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
	if (ndc_req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}
	ndc_req->nix_lf_rx_sync = 1;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);
	mbox_put(mbox);

	/* Unregister IRQs */
	nix_inl_nix_unregister_irqs(inl_dev);

	/* By default all associated mcam rules are deleted */
	req = mbox_alloc_msg_nix_lf_free(mbox_get(mbox));
	if (req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	rc = mbox_process(mbox);
	if (rc) {
		mbox_put(mbox);
		return rc;
	}
	mbox_put(mbox);

	plt_free(inl_dev->rqs);
	plt_free(inl_dev->inb_sa_base);
	inl_dev->rqs = NULL;
	inl_dev->inb_sa_base = NULL;
	return 0;
}

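/* Attach NIX, SSO/SSOW and optional CPT LFs to this PF_FUNC and record
 * their MSIX offsets and BAR2 base addresses.
 */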
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
	struct msix_offset_rsp *msix_rsp;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	struct rsrc_attach_req *req;
	uint64_t nix_blkaddr;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		goto exit;
	req->modify = true;
	/* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
	req->nixlf = true;
	req->ssow = 1;
	req->sso = 1;
	if (inl_dev->attach_cptlf) {
		req->cptlfs = inl_dev->nb_cptlf;
		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
	}

	rc = mbox_process(dev->mbox);
	if (rc)
		goto exit;

	/* Get MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
	if (rc)
		goto exit;

	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];

	for (int i = 0; i < inl_dev->nb_cptlf; i++)
		inl_dev->cpt_msixoff[i] = msix_rsp->cptlf_msixoff[i];

	nix_blkaddr = nix_get_blkaddr(dev);
	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

	/* Update base addresses for LFs */
	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
	inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

	rc = 0;
exit:
	mbox_put(mbox);
	return rc;
}

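/* Detach the LF resources attached in nix_inl_lf_attach() */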
static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = mbox_get(dev->mbox);
	struct rsrc_detach_req *req;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		goto exit;
	req->partial = true;
	req->nixlf = true;
	req->ssow = true;
	req->sso = true;
	req->cptlfs = !!inl_dev->attach_cptlf;

	rc = mbox_process(dev->mbox);
exit:
	mbox_put(mbox);
	return rc;
}

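/* Poll the HWGRP XAQ/AQ counts for up to ~3 seconds until all pending
 * work is drained.
 */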
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
	uintptr_t sso_base = inl_dev->sso_base;
	int wait_ms = 3000;

	while (wait_ms > 0) {
		/* Break when empty */
		if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
		    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
			return 0;

		plt_delay_us(1000);
		wait_ms -= 1;
	}

	return -ETIMEDOUT;
}

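/* Grow the SSO XAQ aura to account for a new packet pool. RQs are disabled
 * and the HWGRP quiesced while the aura is reinitialized.
 */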
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	int rc, i;

	if (idev == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Nothing to do if no inline device */
	if (!inl_dev)
		return 0;

	if (!aura_handle) {
		inl_dev->nb_xae = inl_dev->iue;
		goto no_pool;
	}

	/* Check if aura is already considered */
	for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
		if (inl_dev->pkt_pools[i] == aura_handle)
			return 0;
	}

no_pool:
	/* Disable RQ if enabled */
	for (i = 0; i < inl_dev->nb_rqs; i++) {
		if (!inl_dev->rqs[i].inl_dev_refs)
			continue;
		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
		if (rc) {
			plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
				rc);
			return rc;
		}
	}

	/* Wait for events to be removed */
	rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
	if (rc) {
		plt_err("Timeout waiting for inline device event cleanup");
		goto exit;
	}

	/* Disable HWGRP */
	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	inl_dev->pkt_pools_cnt++;
	inl_dev->pkt_pools =
		plt_realloc(inl_dev->pkt_pools,
			    sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
	if (!inl_dev->pkt_pools)
		inl_dev->pkt_pools_cnt = 0;
	else
		inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
	inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

	/* Realloc XAQ aura */
	rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
				     inl_dev->nb_xae, inl_dev->xae_waes,
				     inl_dev->xaq_buf_size, 1);
	if (rc) {
		plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
		return rc;
	}

	/* Setup xaq for hwgrps */
	rc = sso_hwgrp_alloc_xaq(&inl_dev->dev,
				 roc_npa_aura_handle_to_aura(inl_dev->xaq.aura_handle), 1);
	if (rc) {
		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
		return rc;
	}

	/* Enable HWGRP */
	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
	/* Re-enable RQs */
	for (i = 0; i < inl_dev->nb_rqs; i++) {
		if (!inl_dev->rqs[i].inl_dev_refs)
			continue;

		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
		if (rc)
			plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
				rc);
	}

	return rc;
}

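/* Drain one outbound SA soft-expiry ring and invoke the registered work
 * callback for every expired SA found.
 */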
static void
inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
{
	union roc_ot_ipsec_err_ring_head head;
	struct roc_ot_ipsec_outb_sa *sa;
	uint16_t head_l, tail_l;
	uint64_t *ring_base;
	uint32_t port_id;

	port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
	ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
	if (!ring_base) {
		plt_err("Invalid soft exp ring base");
		return;
	}

	head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
	head_l = head.s.head_pos;
	tail_l = head.s.tail_pos;

	while (tail_l != head_l) {
		union roc_ot_ipsec_err_ring_entry entry;
		int poll_counter = 0;

		while (poll_counter++ <
		       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
			plt_delay_us(20);
			entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
						    __ATOMIC_ACQUIRE);
			if (likely(entry.u64))
				break;
		}

		entry.u64 = plt_be_to_cpu_64(entry.u64);
		sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
						      << 51) |
						     (entry.s.data0 << 7));

		if (sa != NULL) {
			uint64_t tmp = ~(uint32_t)0x0;
			inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
			__atomic_store_n(ring_base + tail_l + 1, 0ULL,
					 __ATOMIC_RELAXED);
			__atomic_fetch_add((uint32_t *)ring_base, 1,
					   __ATOMIC_ACQ_REL);
		} else
			plt_err("Invalid SA");

		tail_l++;
	}
}

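/* Control thread body: periodically scan all active soft-expiry rings */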
static uint32_t
nix_inl_outb_poll_thread(void *args)
{
	struct nix_inl_dev *inl_dev = args;
	uint32_t poll_freq;
	uint32_t i;
	bool bit;

	poll_freq = inl_dev->soft_exp_poll_freq;

	while (!soft_exp_poll_thread_exit) {
		if (soft_exp_consumer_cnt) {
			for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
				bit = plt_bitmap_get(
					inl_dev->soft_exp_ring_bmap, i);
				if (bit)
					inl_outb_soft_exp_poll(inl_dev, i);
			}
		}
		usleep(poll_freq);
	}

	return 0;
}

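/* Allocate the soft-expiry ring bitmap and pointer array and start the
 * poll thread.
 */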
static int
nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
{
	struct plt_bitmap *bmap;
	size_t bmap_sz;
	uint32_t i;
	void *mem;
	int rc;

	/* Allocate a bitmap that the poll thread uses to get the port_id
	 * corresponding to each inl_outb_soft_exp_ring.
	 */
	bmap_sz =
		plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
	mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
	if (mem == NULL) {
		plt_err("soft expiry ring bmap alloc failed");
		rc = -ENOMEM;
		goto exit;
	}

	bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
	if (!bmap) {
		plt_err("soft expiry ring bmap init failed");
		plt_free(mem);
		rc = -ENOMEM;
		goto exit;
	}

	inl_dev->soft_exp_ring_bmap_mem = mem;
	inl_dev->soft_exp_ring_bmap = bmap;
	inl_dev->sa_soft_exp_ring = plt_zmalloc(
		ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
	if (!inl_dev->sa_soft_exp_ring) {
		plt_err("soft expiry ring pointer array alloc failed");
		plt_free(mem);
		rc = -ENOMEM;
		goto exit;
	}

	for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
		plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);

	soft_exp_consumer_cnt = 0;
	soft_exp_poll_thread_exit = false;
	rc = plt_thread_create_control(&inl_dev->soft_exp_poll_thread,
			"outb-poll", nix_inl_outb_poll_thread, inl_dev);
	if (rc) {
		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
		plt_free(inl_dev->soft_exp_ring_bmap_mem);
	}

exit:
	return rc;
}

int
roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;

	if (stats == NULL)
		return NIX_ERR_PARAM;

	if (idev && idev->nix_inl_dev)
		inl_dev = idev->nix_inl_dev;

	if (!inl_dev)
		return -EINVAL;

	stats->rx_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_OCTS);
	stats->rx_ucast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_UCAST);
	stats->rx_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_BCAST);
	stats->rx_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_MCAST);
	stats->rx_drop = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP);
	stats->rx_drop_octs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP_OCTS);
	stats->rx_fcs = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_FCS);
	stats->rx_err = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_ERR);
	stats->rx_drop_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_BCAST);
	stats->rx_drop_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = INL_NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3MCAST);

	return 0;
}

int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	int start_index;
	int resp_count;
	int rc, i;

	pci_dev = roc_inl_dev->pci_dev;

	/* Skip probe if already done */
	idev = idev_get_cfg();
	if (idev == NULL)
		return -ENOTSUP;

	if (idev->nix_inl_dev) {
		plt_info("Skipping device %s, inline device already probed",
			 pci_dev->name);
		return -EEXIST;
	}

	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
	memset(inl_dev, 0, sizeof(*inl_dev));

	inl_dev->pci_dev = pci_dev;
	inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
	inl_dev->selftest = roc_inl_dev->selftest;
	inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
	inl_dev->channel = roc_inl_dev->channel;
	inl_dev->chan_mask = roc_inl_dev->chan_mask;
	inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
	inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
	inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
	inl_dev->set_soft_exp_poll = !!roc_inl_dev->soft_exp_poll_freq;
	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
	inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;

	if (roc_inl_dev->rx_inj_ena) {
		inl_dev->rx_inj_ena = 1;
		inl_dev->nb_cptlf = NIX_INL_CPT_LF;
	} else
		inl_dev->nb_cptlf = 1;

	if (roc_inl_dev->spb_drop_pc)
		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
	if (roc_inl_dev->lpb_drop_pc)
		inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

	/* Initialize base device */
	rc = dev_init(&inl_dev->dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto error;
	}

	/* Attach LF resources */
	rc = nix_inl_lf_attach(inl_dev);
	if (rc) {
		plt_err("Failed to attach LF resources, rc=%d", rc);
		goto dev_cleanup;
	}

	/* Setup NIX LF */
	rc = nix_inl_nix_setup(inl_dev);
	if (rc)
		goto lf_detach;

	/* Setup SSO LF */
	rc = nix_inl_sso_setup(inl_dev);
	if (rc)
		goto nix_release;

	/* Setup CPT LF */
	rc = nix_inl_cpt_setup(inl_dev, false);
	if (rc)
		goto sso_release;

	if (inl_dev->set_soft_exp_poll) {
		rc = nix_inl_outb_poll_thread_setup(inl_dev);
		if (rc)
			goto cpt_release;
	}

	/* Perform selftest if asked for */
	if (inl_dev->selftest) {
		rc = nix_inl_selftest();
		if (rc)
			goto cpt_release;
	}
	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;

	if (inl_dev->max_ipsec_rules && roc_inl_dev->is_multi_channel) {
		inl_dev->ipsec_index =
			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
		if (inl_dev->ipsec_index == NULL) {
			rc = NPC_ERR_NO_MEM;
			goto cpt_release;
		}
		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
		if (rc) {
			plt_free(inl_dev->ipsec_index);
			goto cpt_release;
		}

		start_index = inl_dev->ipsec_index[0];
		for (i = 0; i < resp_count; i++)
			inl_dev->ipsec_index[i] = start_index + i;

		inl_dev->curr_ipsec_idx = 0;
		inl_dev->alloc_ipsec_rules = resp_count;
	}

	idev->nix_inl_dev = inl_dev;

	return 0;
cpt_release:
	rc |= nix_inl_cpt_release(inl_dev);
sso_release:
	rc |= nix_inl_sso_release(inl_dev);
nix_release:
	rc |= nix_inl_nix_release(inl_dev);
lf_detach:
	rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
	rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
	return rc;
}

int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	uint32_t i;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return 0;

	if (!idev->nix_inl_dev ||
	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
		return 0;

	inl_dev = idev->nix_inl_dev;
	pci_dev = inl_dev->pci_dev;

	if (inl_dev->ipsec_index && roc_inl_dev->is_multi_channel) {
		for (i = inl_dev->curr_ipsec_idx; i < inl_dev->alloc_ipsec_rules; i++)
			npc_mcam_free_entry(inl_dev->dev.mbox, inl_dev->ipsec_index[i]);
		plt_free(inl_dev->ipsec_index);
	}

	if (inl_dev->set_soft_exp_poll) {
		soft_exp_poll_thread_exit = true;
		plt_thread_join(inl_dev->soft_exp_poll_thread, NULL);
		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
		plt_free(inl_dev->soft_exp_ring_bmap_mem);
		plt_free(inl_dev->sa_soft_exp_ring);
	}

	/* Flush Inbound CTX cache entries */
	nix_inl_cpt_ctx_cache_sync(inl_dev);

	/* Release CPT */
	rc = nix_inl_cpt_release(inl_dev);

	/* Release SSO */
	rc |= nix_inl_sso_release(inl_dev);

	/* Release NIX */
	rc |= nix_inl_nix_release(inl_dev);

	/* Detach LFs */
	rc |= nix_inl_lf_detach(inl_dev);

	/* Cleanup mbox */
	rc |= dev_fini(&inl_dev->dev, pci_dev);
	if (rc)
		return rc;

	idev->nix_inl_dev = NULL;
	return 0;
}

int
roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;

	if (!idev || !idev->nix_inl_dev)
		return -ENOENT;
	inl_dev = idev->nix_inl_dev;

	if (inl_dev->cpt_lf[0].dev != NULL)
		return -EBUSY;

	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
}

int
roc_nix_inl_dev_cpt_release(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;

	if (!idev || !idev->nix_inl_dev)
		return -ENOENT;
	inl_dev = idev->nix_inl_dev;

	if (inl_dev->cpt_lf[0].dev == NULL)
		return 0;

	return nix_inl_cpt_release(inl_dev);
}
1103