xref: /dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
9 #include <rte_pmd_cnxk.h>
10 
11 #include <cn10k_ethdev.h>
12 #include <cn10k_rx.h>
13 #include <cnxk_ethdev_mcs.h>
14 #include <cnxk_security.h>
15 #include <roc_priv.h>
16 
17 PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
18 		  offsetof(struct roc_ot_ipsec_inb_sa, ctx.ar_winbits));
19 
20 PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
21 		  offsetof(struct roc_ot_ipsec_outb_sa, ctx.mib_pkts));
22 
23 PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
24 PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN);
25 
26 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
27 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
28 PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
29 PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
30 
/* Crypto transforms supported by the inline IPsec engine. Every entry of the
 * security capability tables below points at this array through
 * .crypto_capabilities. Terminated by the END_OF_CAPABILITIES sentinel.
 */
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 16,
					.increment = 8
				}
			}, }
		}, }
	},
	{	/* AES-XCBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{ .sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0,
				},
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 16,
					.max = 32,
					.increment = 16
				},
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 24,
					.max = 48,
					.increment = 24
					},
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
			}, }
		}, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 11,
					.max = 13,
					.increment = 1
				}
			}, }
		}, }
	},
	{	/* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
			}, },
		}, },
	},
	{	/* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, },
		}, }
	},

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
320 
/* Inline-protocol IPsec capabilities: ESP in tunnel and transport mode, for
 * both directions. All four entries share the crypto transform list above.
 */
static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.copy_flabel = 1,
				.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
				.ingress_oop = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.iv_gen_disable = 1,
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.copy_flabel = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.iv_gen_disable = 1,
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
				/* NOTE(review): an ingress_oop flag on an
				 * egress capability looks inconsistent with
				 * the tunnel-egress entry above, which omits
				 * it — confirm whether this is intentional.
				 */
				.ingress_oop = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
				.ingress_oop = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
};
423 
/* Inline-protocol MACsec capabilities: one entry per supported AES-GCM
 * key-size / XPN variant; all four share identical SC/SA/session limits and
 * feature bits.
 */
static const struct rte_security_capability cn10k_eth_sec_macsec_capabilities[] = {
	{	/* MACsec Inline Protocol, AES-GCM-128 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_128,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-256 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_256,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-XPN-128 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_128,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
	{	/* MACsec Inline Protocol, AES-GCM-XPN-256 algo */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_MACSEC,
		.macsec = {
			.mtu = ROC_MCS_MAX_MTU,
			.alg = RTE_SECURITY_MACSEC_ALG_GCM_XPN_256,
			.max_nb_sc = 128,
			.max_nb_sa = 256,
			.max_nb_sess = 256,
			.replay_win_sz = ROC_MCS_MAX_AR_WINSZ,
			.relative_sectag_insert = 1,
			.fixed_sectag_insert = 1,
			.icv_include_da_sa = 1,
			.ctrl_port_enable = 1,
			.preserve_sectag = 1,
			.preserve_icv = 1,
			.validate_frames = 1,
			.re_key = 1,
			.anti_replay = 1,
		},
	},
};
510 
/* Merged capability array handed out by cn10k_eth_sec_capabilities_get().
 * Sized for both tables above plus one extra slot — presumably an
 * end-of-list terminator entry; the code that populates this array is not
 * in this chunk, so confirm against it.
 */
#define SEC_CAPS_LEN (RTE_DIM(cn10k_eth_sec_ipsec_capabilities) + \
		RTE_DIM(cn10k_eth_sec_macsec_capabilities) + 1)

static struct rte_security_capability cn10k_eth_sec_capabilities[SEC_CAPS_LEN];
515 
516 static inline void
517 cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
518 {
519 	struct rte_mbuf *next;
520 
521 	if (!mbuf)
522 		return;
523 	do {
524 		next = mbuf->next;
525 		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
526 		mbuf = next;
527 	} while (mbuf != NULL);
528 }
529 
/* SSO work callback for inline IPsec notifications.
 *
 * Decodes the event type from gw[0] bits [31:28] and handles:
 *  - RTE_EVENT_TYPE_ETHDEV: inbound packet dropped by the inline device
 *    (bad L4) — just free the mbuf back to its aura.
 *  - RTE_EVENT_TYPE_CPU with CNXK_ETHDEV_SEC_OUTB_EV_SUB subtype: outbound
 *    inline error — parse the CPT result placed after the packet data,
 *    raise RTE_ETH_EVENT_IPSEC towards the application and free the mbuf.
 *  - otherwise: soft-expiry notification when soft_exp_event bit 0 is set;
 *    args then carries the outbound SA and soft_exp_event >> 8 the port id.
 */
void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	struct rte_eth_event_ipsec_desc desc;
	struct cn10k_sec_sess_priv sess_priv;
	struct cn10k_outb_priv_data *priv;
	struct roc_ot_ipsec_outb_sa *sa;
	struct cpt_cn10k_res_s *res;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	/* Rate-limit counter for the warning logs below; not thread-safe by
	 * design (only used to throttle log volume).
	 */
	static uint64_t warn_cnt;
	uint16_t dlen_adj, rlen;
	struct rte_mbuf *mbuf;
	uintptr_t sa_base;
	uintptr_t nixtx;
	uint8_t port;

	RTE_SET_USED(args);

	/* Event type lives in gw[0] bits [31:28] */
	switch ((gw[0] >> 28) & 0xF) {
	case RTE_EVENT_TYPE_ETHDEV:
		/* Event from inbound inline dev due to IPSEC packet bad L4 */
		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
		cnxk_pktmbuf_free_no_cache(mbuf);
		return;
	case RTE_EVENT_TYPE_CPU:
		/* Check for subtype */
		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
			/* Event from outbound inline error */
			mbuf = (struct rte_mbuf *)gw[1];
			break;
		}
		/* Fall through */
	default:
		if (soft_exp_event & 0x1) {
			/* Soft life expiry: args is the outbound SA */
			sa = (struct roc_ot_ipsec_outb_sa *)args;
			priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
			desc.metadata = (uint64_t)priv->userdata;
			if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
				desc.subtype =
					RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
			else
				desc.subtype =
					RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
			eth_dev = &rte_eth_devices[soft_exp_event >> 8];
			rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_IPSEC, &desc);
		} else {
			/* NOTE(review): %lx assumes 64-bit long; PRIx64 would
			 * be portable across ABIs — confirm build targets.
			 */
			plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
				gw[0], gw[1]);
		}
		return;
	}

	/* Get ethdev port from tag */
	port = gw[0] & 0xFF;
	eth_dev = &rte_eth_devices[port];
	dev = cnxk_eth_pmd_priv(eth_dev);

	sess_priv.u64 = *rte_security_dynfield(mbuf);
	/* Calculate dlen adj */
	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
	rlen = (dlen_adj + sess_priv.roundup_len) +
	       (sess_priv.roundup_byte - 1);
	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
	rlen += sess_priv.partial_len;
	dlen_adj = rlen - dlen_adj;

	/* Find the res area residing on next cacheline after end of data */
	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
	res = (struct cpt_cn10k_res_s *)nixtx;

	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

	/* NOTE(review): duplicate of the dynfield read above — redundant but
	 * harmless as far as this function shows.
	 */
	sess_priv.u64 = *rte_security_dynfield(mbuf);

	sa_base = dev->outb.sa_base;
	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

	memset(&desc, 0, sizeof(desc));

	/* Map the CPT microcode completion code to an IPSEC event subtype */
	switch (res->uc_compcode) {
	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
		break;
	case ROC_IE_OT_UCC_ERR_SA_EXPIRED:
		if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
		else
			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
		break;
	case ROC_IE_OT_UCC_ERR_PKT_IP:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
				 " sa_index %u (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, warn_cnt);
		desc.subtype = -res->uc_compcode;
		break;
	default:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, mbuf %p, sa_index %u,"
				 " compcode %x uc %x,"
				 " (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, res->compcode,
				 res->uc_compcode, warn_cnt);
		desc.subtype = -res->uc_compcode;
		break;
	}

	desc.metadata = (uint64_t)priv->userdata;
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
	cnxk_pktmbuf_free_no_cache(mbuf);
}
650 
651 static void
652 outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
653 {
654 	uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
655 	char *iv_str = strdup(__iv_str);
656 	char *iv_b = NULL, len = 16;
657 	char *save;
658 	int i;
659 
660 	if (!iv_str)
661 		return;
662 
663 	if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
664 	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
665 	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
666 	    outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
667 		memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
668 		memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));
669 
670 		iv_dbg = outb_sa->iv.s.iv_dbg1;
671 		for (i = 0; i < 4; i++) {
672 			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
673 			if (!iv_b)
674 				break;
675 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
676 		}
677 		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
678 
679 		iv_dbg = outb_sa->iv.s.iv_dbg2;
680 		for (i = 0; i < 4; i++) {
681 			iv_b = strtok_r(NULL, ",", &save);
682 			if (!iv_b)
683 				break;
684 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
685 		}
686 		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
687 
688 	} else {
689 		iv_dbg = outb_sa->iv.iv_dbg;
690 		memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));
691 
692 		for (i = 0; i < len; i++) {
693 			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
694 			if (!iv_b)
695 				break;
696 			iv_dbg[i] = strtoul(iv_b, NULL, 0);
697 		}
698 		*(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
699 		*(uint64_t *)&iv_dbg[8] =
700 			rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
701 	}
702 
703 	/* Update source of IV */
704 	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
705 	free(iv_str);
706 }
707 
708 static int
709 cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
710 				struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
711 				struct rte_security_ipsec_xform *ipsec_xfrm,
712 				uint32_t sa_idx)
713 {
714 	uint64_t *ring_base, ring_addr;
715 
716 	if (ipsec_xfrm->life.bytes_soft_limit |
717 	    ipsec_xfrm->life.packets_soft_limit) {
718 		ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
719 		if (ring_base == NULL)
720 			return -ENOTSUP;
721 
722 		ring_addr = ring_base[sa_idx >>
723 				      ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
724 		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
725 		sa->ctx.err_ctl.s.address = ring_addr >> 3;
726 		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
727 	}
728 
729 	return 0;
730 }
731 
/* rte_security .session_create callback for inline-protocol sessions.
 *
 * MACsec requests are delegated to cnxk_eth_macsec_session_create();
 * IPsec requests build either an inbound SA (located by SPI in the
 * NIX/inline-dev SA table) or an outbound SA (index allocated from the
 * device pool), write it through the context cache and publish the
 * fast-path metadata in sess->fast_mdata.
 *
 * Locking: the per-direction spinlock is taken before any `goto err`,
 * and the inline-dev lock is additionally held for inbound sessions on
 * an inline device; both are released on every exit path past that point.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec = SECURITY_GET_SESS_PRIV(sess);
	struct roc_nix *nix = &dev->nix;
	bool inbound, inl_dev;
	rte_spinlock_t *lock;
	/* Deferred error message; printed once at the err: label */
	char tbuf[128] = {0};
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
		return cnxk_eth_macsec_session_create(dev, conf, sess);
	else if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	/* Lazily register the mbuf dynfields needed for IP reassembly */
	if (conf->ipsec.options.ip_reassembly_en &&
			dev->reass_dynfield_off < 0) {
		if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
					&dev->reass_dynflag_bit) < 0)
			return -rte_errno;
	}

	if (conf->ipsec.options.ingress_oop &&
	    rte_security_oop_dynfield_offset < 0) {
		/* Register for security OOP dynfield if required */
		if (rte_security_oop_dynfield_register() < 0)
			return -rte_errno;
	}

	/* We cannot support inbound reassembly and OOP together */
	if (conf->ipsec.options.ip_reassembly_en &&
	    conf->ipsec.options.ingress_oop) {
		plt_err("Cannot support Inbound reassembly and OOP together");
		return -ENOTSUP;
	}

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exits */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
		struct cn10k_inb_priv_data *inb_priv;
		uint32_t spi_mask;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa, inline dev "
				 "not found or spi not in range");
			rc = -ENOTSUP;
			goto err;
		} else if (!sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto err;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto err;
		}

		/* Build the SA image in a scratch buffer first; it is synced
		 * to the live SA via roc_nix_inl_ctx_write() below.
		 */
		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto err;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa_dptr->w1.s.cookie =
			rte_cpu_to_be_32(ipsec->spi & spi_mask);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			inb_sa_dptr->w0.s.count_mib_bytes = 1;
			inb_sa_dptr->w0.s.count_mib_pkts = 1;
		}

		/* Enable out-of-place processing */
		if (ipsec->options.ingress_oop)
			inb_sa_dptr->w0.s.pkt_format =
				ROC_IE_OT_SA_PKT_FMT_FULL;

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;
		eth_sec->inb_oop = !!ipsec->options.ingress_oop;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			goto err;

		if (conf->ipsec.options.ip_reassembly_en) {
			inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
		}

		if (ipsec->options.ingress_oop)
			dev->inb.nb_oop++;

		/* Update function pointer to handle OOP sessions */
		if (dev->inb.nb_oop &&
		    !(dev->rx_offload_flags & NIX_RX_REAS_F)) {
			dev->rx_offload_flags |= NIX_RX_REAS_F;
			cn10k_eth_set_rx_function(eth_dev);
		}
	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
		struct cn10k_outb_priv_data *outb_priv;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
		if (rc)
			goto err;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			/* Release the just-allocated SA index on failure */
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		/* Debug hook: fixed IV override from the environment */
		if (conf->ipsec.options.iv_gen_disable == 1) {
			iv_str = getenv("ETH_SEC_IV_OVR");
			if (iv_str)
				outb_dbg_iv_update(outb_sa_dptr, iv_str);
		}
		/* Fill outbound sa misc params */
		rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
						     outb_sa, ipsec, sa_idx);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outb sa misc params, rc=%d",
				 rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			outb_sa_dptr->w0.s.count_mib_bytes = 1;
			outb_sa_dptr->w0.s.count_mib_pkts = 1;
		}

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
		/* Propagate inner checksum enable from SA to fast path */
		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
				    !ipsec->options.l4_csum_enable);
		sess_priv.dec_ttl = ipsec->options.dec_ttl;
		if (roc_feature_nix_has_inl_ipsec_mseg() &&
		    dev->outb.cpt_eng_caps & BIT_ULL(35))
			sess_priv.nixtx_off = 1;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			goto err;
	}
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	sess->fast_mdata = sess_priv.u64;

	return 0;
err:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	if (rc)
		plt_err("%s", tbuf);
	return rc;
}
1023 
/* rte_security .session_destroy callback.
 *
 * MACsec sessions are delegated to cnxk_eth_macsec_session_destroy().
 * IPsec sessions are disabled by writing a freshly-initialized (invalid)
 * SA image through the context cache, then unlinked from the per-direction
 * session list; outbound sessions also release their SA index.
 *
 * Returns 0 on success, -ENOENT when the session is unknown.
 */
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_macsec_sess *macsec_sess;
	struct cnxk_eth_sec_sess *eth_sec;
	rte_spinlock_t *lock;
	void *sa_dptr;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec) {
		/* Not an IPsec session; try MACsec before giving up */
		macsec_sess = cnxk_eth_macsec_sess_get_by_sess(dev, sess);
		if (macsec_sess)
			return cnxk_eth_macsec_session_destroy(dev, sess);
		return -ENOENT;
	}

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		/* Disable SA */
		sa_dptr = dev->inb.sa_dptr;
		roc_ot_ipsec_inb_sa_init(sa_dptr, true);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_inb_sa));
		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
		if (eth_sec->inb_oop)
			dev->inb.nb_oop--;

		/* Clear offload flags if was used by OOP */
		if (!dev->inb.nb_oop && !dev->inb.reass_en &&
		    dev->rx_offload_flags & NIX_RX_REAS_F) {
			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
			cn10k_eth_set_rx_function(eth_dev);
		}
	} else {
		/* Disable SA */
		sa_dptr = dev->outb.sa_dptr;
		roc_ot_ipsec_outb_sa_init(sa_dptr);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_outb_sa));
		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	return 0;
}
1091 
/* .capabilities_get security op - returns the capability array assembled
 * at init time by cn10k_eth_sec_ops_override().
 */
static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn10k_eth_sec_capabilities;
}
1097 
1098 static int
1099 cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
1100 			     struct rte_security_session_conf *conf)
1101 {
1102 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1103 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1104 	struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
1105 	struct rte_security_ipsec_xform *ipsec;
1106 	struct rte_crypto_sym_xform *crypto;
1107 	struct cnxk_eth_sec_sess *eth_sec;
1108 	bool inbound;
1109 	int rc;
1110 
1111 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1112 	    conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
1113 		return -ENOENT;
1114 
1115 	ipsec = &conf->ipsec;
1116 	crypto = conf->crypto_xform;
1117 	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
1118 
1119 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1120 	if (!eth_sec)
1121 		return -ENOENT;
1122 
1123 	eth_sec->spi = conf->ipsec.spi;
1124 
1125 	if (inbound) {
1126 		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
1127 		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
1128 
1129 		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
1130 					       true);
1131 		if (rc)
1132 			return -EINVAL;
1133 
1134 		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
1135 					   eth_sec->inb,
1136 					   sizeof(struct roc_ot_ipsec_inb_sa));
1137 		if (rc)
1138 			return -EINVAL;
1139 	} else {
1140 		struct roc_ot_ipsec_outb_sa *outb_sa_dptr;
1141 
1142 		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
1143 		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
1144 
1145 		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
1146 		if (rc)
1147 			return -EINVAL;
1148 		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
1149 					   eth_sec->inb,
1150 					   sizeof(struct roc_ot_ipsec_outb_sa));
1151 		if (rc)
1152 			return -EINVAL;
1153 	}
1154 
1155 	return 0;
1156 }
1157 
1158 int
1159 rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
1160 			union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
1161 {
1162 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1163 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1164 	struct cnxk_eth_sec_sess *eth_sec;
1165 	int rc;
1166 
1167 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1168 	if (eth_sec == NULL)
1169 		return -EINVAL;
1170 
1171 	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
1172 			    ROC_NIX_INL_SA_OP_FLUSH);
1173 	if (rc)
1174 		return -EINVAL;
1175 	rte_delay_ms(1);
1176 	memcpy(data, eth_sec->sa, len);
1177 
1178 	return 0;
1179 }
1180 
1181 int
1182 rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
1183 			 union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
1184 {
1185 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1186 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1187 	struct cnxk_eth_sec_sess *eth_sec;
1188 	int rc = -EINVAL;
1189 
1190 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1191 	if (eth_sec == NULL)
1192 		return rc;
1193 	rc = roc_nix_inl_ctx_write(&dev->nix, data, eth_sec->sa, eth_sec->inb,
1194 				   len);
1195 	if (rc)
1196 		return rc;
1197 
1198 	return 0;
1199 }
1200 
1201 void *
1202 rte_pmd_cnxk_inl_ipsec_res(struct rte_mbuf *mbuf)
1203 {
1204 	const union nix_rx_parse_u *rx;
1205 	uint16_t desc_size;
1206 	uintptr_t wqe;
1207 
1208 	if (!mbuf || !(mbuf->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD))
1209 		return NULL;
1210 
1211 	wqe = (uintptr_t)(mbuf + 1);
1212 	rx = (const union nix_rx_parse_u *)(wqe + 8);
1213 	desc_size = (rx->desc_sizem1 + 1) * 16;
1214 
1215 	/* cpt_res_s sits after SG list at 16B aligned address */
1216 	return (void *)(wqe + 64 + desc_size);
1217 }
1218 
1219 static int
1220 cn10k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
1221 			    struct rte_security_stats *stats)
1222 {
1223 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1224 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1225 	struct cnxk_macsec_sess *macsec_sess;
1226 	struct cnxk_eth_sec_sess *eth_sec;
1227 	int rc;
1228 
1229 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
1230 	if (eth_sec == NULL) {
1231 		macsec_sess = cnxk_eth_macsec_sess_get_by_sess(dev, sess);
1232 		if (macsec_sess)
1233 			return cnxk_eth_macsec_session_stats_get(dev, macsec_sess, stats);
1234 		return -EINVAL;
1235 	}
1236 
1237 	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
1238 			    ROC_NIX_INL_SA_OP_FLUSH);
1239 	if (rc)
1240 		return -EINVAL;
1241 	rte_delay_ms(1);
1242 
1243 	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
1244 
1245 	if (eth_sec->inb) {
1246 		stats->ipsec.ipackets =
1247 			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
1248 		stats->ipsec.ibytes =
1249 			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
1250 	} else {
1251 		stats->ipsec.opackets =
1252 			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
1253 		stats->ipsec.obytes =
1254 			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
1255 	}
1256 
1257 	return 0;
1258 }
1259 
1260 static void
1261 eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
1262 		 const struct rte_security_capability *caps, uint32_t nb_caps)
1263 {
1264 	PLT_VERIFY(*idx + nb_caps < SEC_CAPS_LEN);
1265 
1266 	rte_memcpy(&eth_sec_caps[*idx], caps, nb_caps * sizeof(caps[0]));
1267 	*idx += nb_caps;
1268 }
1269 
1270 static uint16_t __rte_hot
1271 cn10k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
1272 			    struct rte_security_session **sess, uint16_t nb_pkts)
1273 {
1274 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1275 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1276 
1277 	return cn10k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
1278 }
1279 
1280 static int
1281 cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
1282 {
1283 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1284 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1285 	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
1286 	struct cnxk_ethdev_inj_cfg *inj_cfg;
1287 	struct roc_nix *nix = &dev->nix;
1288 	struct roc_cpt_lf *inl_lf;
1289 	uint64_t sa_base;
1290 
1291 	if (!rte_eth_dev_is_valid_port(port_id))
1292 		return -EINVAL;
1293 
1294 	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
1295 		return -EBUSY;
1296 
1297 	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
1298 		return -ENOTSUP;
1299 
1300 	roc_idev_nix_rx_inject_set(port_id, enable);
1301 
1302 	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
1303 	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
1304 
1305 	inj_cfg = &dev->inj_cfg;
1306 	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
1307 	inj_cfg->io_addr = inl_lf->io_addr;
1308 	inj_cfg->lmt_base = nix->lmt_base;
1309 	channel = roc_nix_get_base_chan(nix);
1310 	pf_func = roc_nix_inl_dev_pffunc_get();
1311 	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
1312 
1313 	return 0;
1314 }
1315 
1316 void
1317 cn10k_eth_sec_ops_override(void)
1318 {
1319 	static int init_once;
1320 	uint32_t idx = 0;
1321 
1322 	if (init_once)
1323 		return;
1324 	init_once = 1;
1325 
1326 	if (roc_feature_nix_has_inl_ipsec())
1327 		eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx,
1328 				 cn10k_eth_sec_ipsec_capabilities,
1329 				 RTE_DIM(cn10k_eth_sec_ipsec_capabilities));
1330 
1331 	if (roc_feature_nix_has_macsec())
1332 		eth_sec_caps_add(cn10k_eth_sec_capabilities, &idx,
1333 				 cn10k_eth_sec_macsec_capabilities,
1334 				 RTE_DIM(cn10k_eth_sec_macsec_capabilities));
1335 
1336 	cn10k_eth_sec_capabilities[idx].action = RTE_SECURITY_ACTION_TYPE_NONE;
1337 
1338 	/* Update platform specific ops */
1339 	cnxk_eth_sec_ops.macsec_sa_create = cnxk_eth_macsec_sa_create;
1340 	cnxk_eth_sec_ops.macsec_sc_create = cnxk_eth_macsec_sc_create;
1341 	cnxk_eth_sec_ops.macsec_sa_destroy = cnxk_eth_macsec_sa_destroy;
1342 	cnxk_eth_sec_ops.macsec_sc_destroy = cnxk_eth_macsec_sc_destroy;
1343 	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
1344 	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
1345 	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
1346 	cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;
1347 	cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
1348 	cnxk_eth_sec_ops.macsec_sc_stats_get = cnxk_eth_macsec_sc_stats_get;
1349 	cnxk_eth_sec_ops.macsec_sa_stats_get = cnxk_eth_macsec_sa_stats_get;
1350 	cnxk_eth_sec_ops.rx_inject_configure = cn10k_eth_sec_rx_inject_config;
1351 	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn10k_eth_sec_inb_rx_inject;
1352 }
1353