xref: /dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c (revision d524a5526efa6b4cc01d13d8d50785c08d9b6891)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
9 #include <rte_pmd_cnxk.h>
10 
11 #include <cn10k_ethdev.h>
12 #include <cn10k_rx.h>
13 #include <cnxk_ethdev_mcs.h>
14 #include <cnxk_security.h>
15 #include <roc_priv.h>
16 
17 PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
18 		  offsetof(struct roc_ot_ipsec_inb_sa, ctx.ar_winbits));
19 
20 PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
21 		  offsetof(struct roc_ot_ipsec_outb_sa, ctx.mib_pkts));
22 
23 PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
24 PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN);
25 
26 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
27 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
28 PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
29 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
30 
/* Single-slot callback invoked when Rx offload flags change at runtime
 * (e.g. when an OOP session forces NIX_RX_REAS_F in session_create below).
 */
cnxk_ethdev_rx_offload_cb_t cnxk_ethdev_rx_offload_cb;
/* Register the Rx offload-flags-change callback. Last registration wins;
 * no locking is performed here.
 */
void
cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb)
{
	cnxk_ethdev_rx_offload_cb = cb;
}
37 
/* Crypto transforms supported by the CN10K inline IPsec offload.
 * Referenced by every IPsec security capability entry via
 * .crypto_capabilities. Key/digest/IV ranges reflect what the inline
 * CPT engine accepts for each algorithm.
 */
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 16,
					.increment = 8
				}
			}, }
		}, }
	},
	{	/* AES-XCBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{ .sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0,
				},
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 16,
					.max = 32,
					.increment = 16
				},
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 24,
					.max = 48,
					.increment = 24
					},
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
			}, }
		}, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 11,
					.max = 13,
					.increment = 1
				}
			}, }
		}, }
	},
	{	/* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
			}, },
		}, },
	},
	{	/* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, },
		}, }
	},

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
327 
328 static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] = {
329 	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
330 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
331 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
332 		.ipsec = {
333 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
334 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
335 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
336 			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
337 			.options = {
338 				.udp_encap = 1,
339 				.udp_ports_verify = 1,
340 				.copy_df = 1,
341 				.copy_dscp = 1,
342 				.copy_flabel = 1,
343 				.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR,
344 				.dec_ttl = 1,
345 				.ip_csum_enable = 1,
346 				.l4_csum_enable = 1,
347 				.stats = 1,
348 				.esn = 1,
349 				.ingress_oop = 1,
350 			},
351 		},
352 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
353 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
354 	},
355 	{	/* IPsec Inline Protocol ESP Tunnel Egress */
356 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
357 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
358 		.ipsec = {
359 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
360 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
361 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
362 			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
363 			.options = {
364 				.iv_gen_disable = 1,
365 				.udp_encap = 1,
366 				.udp_ports_verify = 1,
367 				.copy_df = 1,
368 				.copy_dscp = 1,
369 				.copy_flabel = 1,
370 				.dec_ttl = 1,
371 				.ip_csum_enable = 1,
372 				.l4_csum_enable = 1,
373 				.stats = 1,
374 				.esn = 1,
375 			},
376 		},
377 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
378 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
379 	},
380 	{	/* IPsec Inline Protocol ESP Transport Egress */
381 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
382 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
383 		.ipsec = {
384 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
385 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
386 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
387 			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
388 			.options = {
389 				.iv_gen_disable = 1,
390 				.udp_encap = 1,
391 				.udp_ports_verify = 1,
392 				.copy_df = 1,
393 				.copy_dscp = 1,
394 				.dec_ttl = 1,
395 				.ip_csum_enable = 1,
396 				.l4_csum_enable = 1,
397 				.stats = 1,
398 				.esn = 1,
399 				.ingress_oop = 1,
400 			},
401 		},
402 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
403 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
404 	},
405 	{	/* IPsec Inline Protocol ESP Transport Ingress */
406 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
407 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
408 		.ipsec = {
409 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
410 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
411 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
412 			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
413 			.options = {
414 				.udp_encap = 1,
415 				.udp_ports_verify = 1,
416 				.copy_df = 1,
417 				.copy_dscp = 1,
418 				.dec_ttl = 1,
419 				.ip_csum_enable = 1,
420 				.l4_csum_enable = 1,
421 				.stats = 1,
422 				.esn = 1,
423 				.ingress_oop = 1,
424 			},
425 		},
426 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
427 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
428 	},
429 };
430 
/* MACsec inline-protocol capabilities: one entry per supported cipher
 * variant (AES-GCM 128/256 and their XPN forms). SC/SA/session limits
 * and the replay window size come from the ROC MCS layer constants.
 */
static const struct rte_security_capability cn10k_eth_sec_macsec_capabilities[] = {
	{	/* MACsec Inline Protocol, AES-GCM-128 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_128,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-256 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_256,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-XPN-128 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_128,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-XPN-256 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_256,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
};
517 
/* Combined capability array length: IPsec entries + MACsec entries + 1.
 * NOTE(review): the "+1" is presumably a list terminator entry filled in
 * when cn10k_eth_sec_capabilities is populated elsewhere — not visible in
 * this chunk; confirm against the init code.
 */
#define SEC_CAPS_LEN (RTE_DIM(cn10k_eth_sec_ipsec_capabilities) + \
		RTE_DIM(cn10k_eth_sec_macsec_capabilities) + 1)

/* Combined (IPsec + MACsec) capabilities returned by capabilities_get. */
static struct rte_security_capability cn10k_eth_sec_capabilities[SEC_CAPS_LEN];
522 
523 static inline void
524 cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
525 {
526 	struct rte_mbuf *next;
527 
528 	if (!mbuf)
529 		return;
530 	do {
531 		next = mbuf->next;
532 		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
533 		mbuf = next;
534 	} while (mbuf != NULL);
535 }
536 
537 void
538 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
539 {
540 	struct rte_eth_event_ipsec_desc desc;
541 	struct cn10k_sec_sess_priv sess_priv;
542 	struct cn10k_outb_priv_data *priv;
543 	struct roc_ot_ipsec_outb_sa *sa;
544 	struct cpt_cn10k_res_s *res;
545 	struct rte_eth_dev *eth_dev;
546 	struct cnxk_eth_dev *dev;
547 	static uint64_t warn_cnt;
548 	uint16_t dlen_adj, rlen;
549 	struct rte_mbuf *mbuf;
550 	uintptr_t sa_base;
551 	uintptr_t nixtx;
552 	uint8_t port;
553 
554 	RTE_SET_USED(args);
555 
556 	switch ((gw[0] >> 28) & 0xF) {
557 	case RTE_EVENT_TYPE_ETHDEV:
558 		/* Event from inbound inline dev due to IPSEC packet bad L4 */
559 		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
560 		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
561 		cnxk_pktmbuf_free_no_cache(mbuf);
562 		return;
563 	case RTE_EVENT_TYPE_CPU:
564 		/* Check for subtype */
565 		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
566 			/* Event from outbound inline error */
567 			mbuf = (struct rte_mbuf *)gw[1];
568 			break;
569 		}
570 		/* Fall through */
571 	default:
572 		if (soft_exp_event & 0x1) {
573 			sa = (struct roc_ot_ipsec_outb_sa *)args;
574 			priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
575 			desc.metadata = (uint64_t)priv->userdata;
576 			if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
577 				desc.subtype =
578 					RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
579 			else
580 				desc.subtype =
581 					RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
582 			eth_dev = &rte_eth_devices[soft_exp_event >> 8];
583 			rte_eth_dev_callback_process(eth_dev,
584 				RTE_ETH_EVENT_IPSEC, &desc);
585 		} else {
586 			plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
587 				gw[0], gw[1]);
588 		}
589 		return;
590 	}
591 
592 	/* Get ethdev port from tag */
593 	port = gw[0] & 0xFF;
594 	eth_dev = &rte_eth_devices[port];
595 	dev = cnxk_eth_pmd_priv(eth_dev);
596 
597 	sess_priv.u64 = *rte_security_dynfield(mbuf);
598 	/* Calculate dlen adj */
599 	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
600 	rlen = (dlen_adj + sess_priv.roundup_len) +
601 	       (sess_priv.roundup_byte - 1);
602 	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
603 	rlen += sess_priv.partial_len;
604 	dlen_adj = rlen - dlen_adj;
605 
606 	/* Find the res area residing on next cacheline after end of data */
607 	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
608 	nixtx += BIT_ULL(7);
609 	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
610 	res = (struct cpt_cn10k_res_s *)nixtx;
611 
612 	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
613 		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
614 
615 	sess_priv.u64 = *rte_security_dynfield(mbuf);
616 
617 	sa_base = dev->outb.sa_base;
618 	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
619 	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
620 
621 	memset(&desc, 0, sizeof(desc));
622 
623 	switch (res->uc_compcode) {
624 	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
625 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
626 		break;
627 	case ROC_IE_OT_UCC_ERR_SA_EXPIRED:
628 		if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
629 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
630 		else
631 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
632 		break;
633 	case ROC_IE_OT_UCC_ERR_PKT_IP:
634 		warn_cnt++;
635 		if (warn_cnt % 10000 == 0)
636 			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
637 				 " sa_index %u (total warnings %" PRIu64 ")",
638 				 mbuf, sess_priv.sa_idx, warn_cnt);
639 		desc.subtype = -res->uc_compcode;
640 		break;
641 	default:
642 		warn_cnt++;
643 		if (warn_cnt % 10000 == 0)
644 			plt_warn("Outbound error, mbuf %p, sa_index %u,"
645 				 " compcode %x uc %x,"
646 				 " (total warnings %" PRIu64 ")",
647 				 mbuf, sess_priv.sa_idx, res->compcode,
648 				 res->uc_compcode, warn_cnt);
649 		desc.subtype = -res->uc_compcode;
650 		break;
651 	}
652 
653 	desc.metadata = (uint64_t)priv->userdata;
654 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
655 	cnxk_pktmbuf_free_no_cache(mbuf);
656 }
657 
658 static void
659 outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
660 {
661 	uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
662 	char *iv_str = strdup(__iv_str);
663 	char *iv_b = NULL, len = 16;
664 	char *save;
665 	int i;
666 
667 	if (!iv_str)
668 		return;
669 
670 	if (outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM ||
671 	    outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_CTR ||
672 	    outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_CCM ||
673 	    outb_sa->w2.s.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
674 		memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
675 		memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));
676 
677 		iv_dbg = outb_sa->iv.s.iv_dbg1;
678 		for (i = 0; i < 4; i++) {
679 			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
680 			if (!iv_b)
681 				break;
682 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
683 		}
684 		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
685 
686 		iv_dbg = outb_sa->iv.s.iv_dbg2;
687 		for (i = 0; i < 4; i++) {
688 			iv_b = strtok_r(NULL, ",", &save);
689 			if (!iv_b)
690 				break;
691 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
692 		}
693 		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
694 
695 	} else {
696 		iv_dbg = outb_sa->iv.iv_dbg;
697 		memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));
698 
699 		for (i = 0; i < len; i++) {
700 			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
701 			if (!iv_b)
702 				break;
703 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
704 		}
705 		*(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
706 		*(uint64_t *)&iv_dbg[8] =
707 			rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
708 	}
709 
710 	/* Update source of IV */
711 	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
712 	free(iv_str);
713 }
714 
715 static int
716 cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
717 				struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
718 				struct rte_security_ipsec_xform *ipsec_xfrm,
719 				uint32_t sa_idx)
720 {
721 	uint64_t *ring_base, ring_addr;
722 
723 	if (ipsec_xfrm->life.bytes_soft_limit |
724 	    ipsec_xfrm->life.packets_soft_limit) {
725 		ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
726 		if (ring_base == NULL)
727 			return -ENOTSUP;
728 
729 		ring_addr = ring_base[sa_idx >>
730 				      ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
731 		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
732 		sa->ctx.err_ctl.s.address = ring_addr >> 3;
733 		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
734 	}
735 
736 	return 0;
737 }
738 
/* rte_security session_create op for inline protocol offload.
 *
 * MACsec sessions are delegated to cnxk_eth_macsec_session_create();
 * IPsec sessions are built here for both directions. The SA image is
 * first prepared in a scratch buffer (dev->{inb,outb}.sa_dptr) and then
 * synced to the hardware SA area with roc_nix_inl_ctx_write(), all under
 * the per-direction spinlock (plus the inline-dev lock for inbound when
 * an inline device is in use).
 *
 * @return 0 on success, negative errno on failure. All error paths after
 * the lock is taken go through the 'err' label, which releases the locks
 * and logs the message accumulated in tbuf.
 */
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec = SECURITY_GET_SESS_PRIV(sess);
	struct roc_nix *nix = &dev->nix;
	bool inbound, inl_dev;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
		return cnxk_eth_macsec_session_create(dev, conf, sess);
	else if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	/* Session-based SA management is not supported when the application
	 * manages inbound SAs itself (custom_inb_sa).
	 */
	if (nix->custom_inb_sa)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	/* Register the IP reassembly dynfield/dynflag once per device. */
	if (conf->ipsec.options.ip_reassembly_en &&
			dev->reass_dynfield_off < 0) {
		if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
					&dev->reass_dynflag_bit) < 0)
			return -rte_errno;
	}

	if (conf->ipsec.options.ingress_oop &&
	    rte_security_oop_dynfield_offset < 0) {
		/* Register for security OOP dynfield if required */
		if (rte_security_oop_dynfield_register() < 0)
			return -rte_errno;
	}

	/* We cannot support inbound reassembly and OOP together */
	if (conf->ipsec.options.ip_reassembly_en &&
	    conf->ipsec.options.ingress_oop) {
		plt_err("Cannot support Inbound reassembly and OOP together");
		return -ENOTSUP;
	}

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exits */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
		struct cn10k_inb_priv_data *inb_priv;
		uint32_t spi_mask;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa, inline dev "
				 "not found or spi not in range");
			rc = -ENOTSUP;
			goto err;
		} else if (!sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto err;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto err;
		}

		/* Build the SA image in the scratch buffer first; it is
		 * synced to the hardware SA below via roc_nix_inl_ctx_write.
		 */
		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto err;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa_dptr->w1.s.cookie =
			rte_cpu_to_be_32(ipsec->spi & spi_mask);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			inb_sa_dptr->w0.s.count_mib_bytes = 1;
			inb_sa_dptr->w0.s.count_mib_pkts = 1;
		}

		/* Enable out-of-place processing */
		if (ipsec->options.ingress_oop)
			inb_sa_dptr->w0.s.pkt_format =
				ROC_IE_OT_SA_PKT_FMT_FULL;

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;
		eth_sec->inb_oop = !!ipsec->options.ingress_oop;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			goto err;

		if (conf->ipsec.options.ip_reassembly_en) {
			inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
		}

		if (ipsec->options.ingress_oop)
			dev->inb.nb_oop++;

		/* Update function pointer to handle OOP sessions */
		if (dev->inb.nb_oop &&
		    !(dev->rx_offload_flags & NIX_RX_REAS_F)) {
			dev->rx_offload_flags |= NIX_RX_REAS_F;
			cn10k_eth_set_rx_function(eth_dev);
			if (cnxk_ethdev_rx_offload_cb)
				cnxk_ethdev_rx_offload_cb(eth_dev->data->port_id,
							  NIX_RX_REAS_F);
		}
	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
		struct cn10k_outb_priv_data *outb_priv;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
		if (rc)
			goto err;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			/* NOTE(review): OR-ing a second negative errno into rc
			 * can mangle the original code — confirm intent.
			 */
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		/* Debug hook: override the IV from the environment when the
		 * application disabled per-packet IV generation.
		 */
		if (conf->ipsec.options.iv_gen_disable == 1) {
			iv_str = getenv("ETH_SEC_IV_OVR");
			if (iv_str)
				outb_dbg_iv_update(outb_sa_dptr, iv_str);
		}
		/* Fill outbound sa misc params */
		rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
						     outb_sa, ipsec, sa_idx);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outb sa misc params, rc=%d",
				 rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			outb_sa_dptr->w0.s.count_mib_bytes = 1;
			outb_sa_dptr->w0.s.count_mib_pkts = 1;
		}

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
		/* Propagate inner checksum enable from SA to fast path */
		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
				    !ipsec->options.l4_csum_enable);
		sess_priv.dec_ttl = ipsec->options.dec_ttl;
		if (roc_feature_nix_has_inl_ipsec_mseg() &&
		    dev->outb.cpt_eng_caps & BIT_ULL(35))
			sess_priv.nixtx_off = 1;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			goto err;
	}
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	sess->fast_mdata = sess_priv.u64;

	return 0;
err:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	if (rc)
		plt_err("%s", tbuf);
	return rc;
}
1036 
/* rte_security session_destroy op.
 *
 * MACsec sessions are delegated to cnxk_eth_macsec_session_destroy().
 * For IPsec, the hardware SA is overwritten with a freshly initialized
 * (disabled) image via roc_nix_inl_ctx_write() under the per-direction
 * lock, the session is unlinked from the device list, and (outbound) the
 * SA index is returned to the pool.
 *
 * @return 0 on success, -ENOENT if the session is unknown, -ENOTSUP when
 * custom inbound SA management is enabled.
 */
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_macsec_sess *macsec_sess;
	struct cnxk_eth_sec_sess *eth_sec;
	rte_spinlock_t *lock;
	void *sa_dptr;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec) {
		/* Not an IPsec session; try MACsec before giving up. */
		macsec_sess = cnxk_eth_macsec_sess_get_by_sess(dev, sess);
		if (macsec_sess)
			return cnxk_eth_macsec_session_destroy(dev, sess);
		return -ENOENT;
	}
	if (dev->nix.custom_inb_sa)
		return -ENOTSUP;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Same lock order as session_create: direction lock, then inl dev. */
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		/* Disable SA */
		sa_dptr = dev->inb.sa_dptr;
		roc_ot_ipsec_inb_sa_init(sa_dptr, true);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_inb_sa));
		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
		if (eth_sec->inb_oop)
			dev->inb.nb_oop--;

		/* Clear offload flags if was used by OOP */
		if (!dev->inb.nb_oop && !dev->inb.reass_en &&
		    dev->rx_offload_flags & NIX_RX_REAS_F) {
			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
			cn10k_eth_set_rx_function(eth_dev);
		}
	} else {
		/* Disable SA */
		sa_dptr = dev->outb.sa_dptr;
		roc_ot_ipsec_outb_sa_init(sa_dptr);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_outb_sa));
		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	return 0;
}
1106 
1107 static const struct rte_security_capability *
1108 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
1109 {
1110 	return cn10k_eth_sec_capabilities;
1111 }
1112 
1113 static int
1114 cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
1115 			     struct rte_security_session_conf *conf)
1116 {
1117 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1118 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1119 	struct rte_security_ipsec_xform *ipsec;
1120 	struct cn10k_sec_sess_priv sess_priv;
1121 	struct rte_crypto_sym_xform *crypto;
1122 	struct cnxk_eth_sec_sess *eth_sec;
1123 	bool inbound;
1124 	int rc;
1125 
1126 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1127 	    conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
1128 		return -ENOENT;
1129 
1130 	ipsec = &conf->ipsec;
1131 	crypto = conf->crypto_xform;
1132 	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
1133 
1134 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1135 	if (!eth_sec)
1136 		return -ENOENT;
1137 
1138 	eth_sec->spi = conf->ipsec.spi;
1139 
1140 	if (inbound) {
1141 		struct roc_ot_ipsec_inb_sa *inb_sa_dptr, *inb_sa;
1142 		struct cn10k_inb_priv_data *inb_priv;
1143 
1144 		inb_sa = eth_sec->sa;
1145 		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
1146 		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
1147 		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
1148 
1149 		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
1150 					       true);
1151 		if (rc)
1152 			return -EINVAL;
1153 		/* Use cookie for original data */
1154 		inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie;
1155 
1156 		if (ipsec->options.stats == 1) {
1157 			/* Enable mib counters */
1158 			inb_sa_dptr->w0.s.count_mib_bytes = 1;
1159 			inb_sa_dptr->w0.s.count_mib_pkts = 1;
1160 		}
1161 
1162 		/* Enable out-of-place processing */
1163 		if (ipsec->options.ingress_oop)
1164 			inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL;
1165 
1166 		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
1167 					   eth_sec->inb,
1168 					   sizeof(struct roc_ot_ipsec_inb_sa));
1169 		if (rc)
1170 			return -EINVAL;
1171 
1172 		/* Save userdata in inb private area */
1173 		inb_priv->userdata = conf->userdata;
1174 	} else {
1175 		struct roc_ot_ipsec_outb_sa *outb_sa_dptr, *outb_sa;
1176 		struct cn10k_outb_priv_data *outb_priv;
1177 		struct cnxk_ipsec_outb_rlens *rlens;
1178 
1179 		outb_sa = eth_sec->sa;
1180 		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
1181 		rlens = &outb_priv->rlens;
1182 		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
1183 		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
1184 
1185 		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
1186 		if (rc)
1187 			return -EINVAL;
1188 
1189 		/* Save rlen info */
1190 		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
1191 
1192 		if (ipsec->options.stats == 1) {
1193 			/* Enable mib counters */
1194 			outb_sa_dptr->w0.s.count_mib_bytes = 1;
1195 			outb_sa_dptr->w0.s.count_mib_pkts = 1;
1196 		}
1197 
1198 		sess_priv.u64 = 0;
1199 		sess_priv.sa_idx = outb_priv->sa_idx;
1200 		sess_priv.roundup_byte = rlens->roundup_byte;
1201 		sess_priv.roundup_len = rlens->roundup_len;
1202 		sess_priv.partial_len = rlens->partial_len;
1203 		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
1204 		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
1205 		/* Propagate inner checksum enable from SA to fast path */
1206 		sess_priv.chksum =
1207 			(!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable);
1208 		sess_priv.dec_ttl = ipsec->options.dec_ttl;
1209 		if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35))
1210 			sess_priv.nixtx_off = 1;
1211 
1212 		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
1213 					   eth_sec->inb,
1214 					   sizeof(struct roc_ot_ipsec_outb_sa));
1215 		if (rc)
1216 			return -EINVAL;
1217 
1218 		/* Save userdata */
1219 		outb_priv->userdata = conf->userdata;
1220 		sess->fast_mdata = sess_priv.u64;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 static int
1227 cn10k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
1228 			    struct rte_security_stats *stats)
1229 {
1230 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1231 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1232 	struct cnxk_macsec_sess *macsec_sess;
1233 	struct cnxk_eth_sec_sess *eth_sec;
1234 	int rc;
1235 
1236 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1237 	if (eth_sec == NULL) {
1238 		macsec_sess = cnxk_eth_macsec_sess_get_by_sess(dev, sess);
1239 		if (macsec_sess)
1240 			return cnxk_eth_macsec_session_stats_get(dev, macsec_sess, stats);
1241 		return -EINVAL;
1242 	}
1243 
1244 	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
1245 			    ROC_NIX_INL_SA_OP_FLUSH);
1246 	if (rc)
1247 		return -EINVAL;
1248 	rte_delay_ms(1);
1249 
1250 	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
1251 
1252 	if (eth_sec->inb) {
1253 		stats->ipsec.ipackets =
1254 			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
1255 		stats->ipsec.ibytes =
1256 			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
1257 	} else {
1258 		stats->ipsec.opackets =
1259 			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
1260 		stats->ipsec.obytes =
1261 			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 static void
1268 eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
1269 		 const struct rte_security_capability *caps, uint32_t nb_caps)
1270 {
1271 	PLT_VERIFY(*idx + nb_caps < SEC_CAPS_LEN);
1272 
1273 	rte_memcpy(&eth_sec_caps[*idx], caps, nb_caps * sizeof(caps[0]));
1274 	*idx += nb_caps;
1275 }
1276 
1277 static uint16_t __rte_hot
1278 cn10k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
1279 			    struct rte_security_session **sess, uint16_t nb_pkts)
1280 {
1281 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1282 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1283 
1284 	return cn10k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
1285 }
1286 
1287 static int
1288 cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
1289 {
1290 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1291 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1292 	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
1293 	struct cnxk_ethdev_inj_cfg *inj_cfg;
1294 	struct roc_nix *nix = &dev->nix;
1295 	struct roc_cpt_lf *inl_lf;
1296 	uint64_t sa_base;
1297 
1298 	if (!rte_eth_dev_is_valid_port(port_id))
1299 		return -EINVAL;
1300 
1301 	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
1302 		return -EBUSY;
1303 
1304 	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
1305 		return -ENOTSUP;
1306 
1307 	roc_idev_nix_rx_inject_set(port_id, enable);
1308 
1309 	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
1310 	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
1311 
1312 	inj_cfg = &dev->inj_cfg;
1313 	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
1314 	inj_cfg->io_addr = inl_lf->io_addr;
1315 	inj_cfg->lmt_base = nix->lmt_base;
1316 	channel = roc_nix_get_base_chan(nix);
1317 	pf_func = roc_idev_nix_inl_dev_pffunc_get();
1318 	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
1319 
1320 	return 0;
1321 }
1322 
#define CPT_LMTST_BURST 32
/* Submit 'nb_inst' CPT instructions to the inline device queue 'q' via
 * LMTST stores. Returns nb_inst on success, or 0 when the flow-control
 * check says the queue cannot take all of them (nothing is submitted).
 */
static uint16_t
cn10k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
{
	uintptr_t lbase = q->lmt_base;
	uint8_t lnum, shft, loff;
	uint16_t left, burst;
	rte_iova_t io_addr;
	uint16_t lmt_id;

	/* Check the flow control to avoid the queue overflow */
	if (cnxk_nix_inl_fc_check(q->fc_addr, &q->fc_addr_sw, q->nb_desc, nb_inst))
		return 0;

	io_addr = q->io_addr;
	ROC_LMT_CPT_BASE_ID_GET(lbase, lmt_id);

	left = nb_inst;
again:
	/* Issue at most CPT_LMTST_BURST instructions per LMTST */
	burst = left > CPT_LMTST_BURST ? CPT_LMTST_BURST : left;

	lnum = 0;
	loff = 0;
	shft = 16;
	/* Stage the burst into the LMT region (reused on every iteration) */
	memcpy(PLT_PTR_CAST(lbase), inst, burst * sizeof(struct cpt_inst_s));
	/* NOTE(review): looks like two instructions share one LMT line —
	 * 'lnum' counts full lines, 'loff' flags an odd trailing instruction,
	 * and 'shft' accumulates the per-line size encoding for the steorl
	 * data word. Confirm against cn10k_nix_sec_steorl()'s format.
	 */
	loff = (burst % 2) ? 1 : 0;
	lnum = (burst / 2);
	shft = shft + (lnum * 3);

	left -= burst;
	/* Trigger the LMTST for this burst */
	cn10k_nix_sec_steorl(io_addr, lmt_id, lnum, loff, shft);
	/* Order the store before staging the next burst into the same lines */
	rte_io_wmb();
	if (left) {
		/* Advance past the instructions just submitted */
		inst = RTE_PTR_ADD(inst, burst * sizeof(struct cpt_inst_s));
		goto again;
	}
	return nb_inst;
}
1361 
1362 void
1363 cn10k_eth_sec_ops_override(void)
1364 {
1365 	static int init_once;
1366 	uint32_t idx = 0;
1367 
1368 	if (init_once)
1369 		return;
1370 	init_once = 1;
1371 
1372 	if (roc_feature_nix_has_inl_ipsec())
1373 		eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx,
1374 				 cn10k_eth_sec_ipsec_capabilities,
1375 				 RTE_DIM(cn10k_eth_sec_ipsec_capabilities));
1376 
1377 	if (roc_feature_nix_has_macsec())
1378 		eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx,
1379 				 cn10k_eth_sec_macsec_capabilities,
1380 				 RTE_DIM(cn10k_eth_sec_macsec_capabilities));
1381 
1382 	cn10k_eth_sec_capabilities[idx].action = RTE_SECURITY_ACTION_TYPE_NONE;
1383 
1384 	/* Update platform specific ops */
1385 	cnxk_eth_sec_ops.macsec_sa_create = cnxk_eth_macsec_sa_create;
1386 	cnxk_eth_sec_ops.macsec_sc_create = cnxk_eth_macsec_sc_create;
1387 	cnxk_eth_sec_ops.macsec_sa_destroy = cnxk_eth_macsec_sa_destroy;
1388 	cnxk_eth_sec_ops.macsec_sc_destroy = cnxk_eth_macsec_sc_destroy;
1389 	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
1390 	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
1391 	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
1392 	cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;
1393 	cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
1394 	cnxk_eth_sec_ops.macsec_sc_stats_get = cnxk_eth_macsec_sc_stats_get;
1395 	cnxk_eth_sec_ops.macsec_sa_stats_get = cnxk_eth_macsec_sa_stats_get;
1396 	cnxk_eth_sec_ops.rx_inject_configure = cn10k_eth_sec_rx_inject_config;
1397 	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn10k_eth_sec_inb_rx_inject;
1398 
1399 	/* Update platform specific rte_pmd_cnxk ops */
1400 	cnxk_pmd_ops.inl_dev_submit = cn10k_inl_dev_submit;
1401 }
1402