/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

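/*
 * Fill the rte_security IPsec xform from the SA: tunnel endpoint
 * addresses and TTL/hop limit for tunnel mode, plus the replay window,
 * ESN, UDP encapsulation and HW reassembly options.
 */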
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;
			tunnel->ipv6.src_addr = sa->src.ip.ip6;
			tunnel->ipv6.dst_addr = sa->dst.ip.ip6;
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
	if (IS_HW_REASSEMBLY_EN(sa->flags))
		ipsec->options.ip_reassembly_en = 1;
}

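/*
 * Walk the cryptodev capability array (terminated by an entry with op
 * RTE_CRYPTO_OP_TYPE_UNDEFINED) and check that the given symmetric
 * xform (AEAD, cipher or auth) is supported with the requested key,
 * digest, AAD and IV lengths. Returns 0 on a match, -ENOTSUP otherwise.
 */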
static inline int
verify_crypto_xform(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &capabilities[j++])->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				crypto_cap->sym.xform_type == crypto_xform->type) {
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
					crypto_cap->sym.aead.algo == crypto_xform->aead.algo) {
				if (rte_cryptodev_sym_capability_check_aead(&crypto_cap->sym,
						crypto_xform->aead.key.length,
						crypto_xform->aead.digest_length,
						crypto_xform->aead.aad_length,
						crypto_xform->aead.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
					crypto_cap->sym.cipher.algo == crypto_xform->cipher.algo) {
				if (rte_cryptodev_sym_capability_check_cipher(&crypto_cap->sym,
						crypto_xform->cipher.key.length,
						crypto_xform->cipher.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
					crypto_cap->sym.auth.algo == crypto_xform->auth.algo) {
				if (rte_cryptodev_sym_capability_check_auth(&crypto_cap->sym,
						crypto_xform->auth.key.length,
						crypto_xform->auth.digest_length,
						crypto_xform->auth.iv.length) == 0)
					return 0;
			}
		}
	}

	return -ENOTSUP;
}

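/*
 * Verify a single xform, or both halves of a chained xform
 * (e.g. cipher + auth), against the device capabilities.
 */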
static inline int
verify_crypto_capabilities(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	if (crypto_xform->next != NULL)
		return (verify_crypto_xform(capabilities, crypto_xform) ||
		    verify_crypto_xform(capabilities, crypto_xform->next));
	else
		return verify_crypto_xform(capabilities, crypto_xform);
}

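/*
 * Check each IPsec option requested for the SA (ESN, UDP encapsulation,
 * header copy/verify flags, checksum offloads, replay window size, ...)
 * against what the security device advertises.
 */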
static inline int
verify_ipsec_capabilities(struct rte_security_ipsec_xform *ipsec_xform,
		const struct rte_security_capability *sec_cap)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		RTE_LOG(DEBUG, USER1,
			"UDP encapsulation ports verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		RTE_LOG(DEBUG, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		RTE_LOG(DEBUG, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		RTE_LOG(DEBUG, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		RTE_LOG(DEBUG, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		RTE_LOG(DEBUG, USER1, "Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		RTE_LOG(DEBUG, USER1, "Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			RTE_LOG(DEBUG, USER1, "Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

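/*
 * Look up the security capability matching the session configuration
 * and verify both its crypto and IPsec capabilities. On success,
 * optionally report the device offload flags through ol_flags.
 */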
static inline int
verify_security_capabilities(void *ctx,
		struct rte_security_session_conf *sess_conf,
		uint32_t *ol_flags)
{
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;

	sec_cap_idx.action = sess_conf->action_type;
	sec_cap_idx.protocol = sess_conf->protocol;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;

	sec_cap = rte_security_capability_get(ctx, &sec_cap_idx);
	if (sec_cap == NULL)
		return -ENOTSUP;

	if (verify_crypto_capabilities(sec_cap->crypto_capabilities,
				sess_conf->crypto_xform))
		return -ENOTSUP;

	if (verify_ipsec_capabilities(&sess_conf->ipsec, sec_cap))
		return -ENOTSUP;

	if (ol_flags != NULL)
		*ol_flags = sec_cap->ol_flags;

	return 0;
}

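/*
 * Create a lookaside session for the SA: find the cryptodev queue pair
 * mapped to the SA's algorithms on each lcore, create a security
 * session (lookaside protocol) or a symmetric crypto session on that
 * device, and attach event crypto adapter metadata when event mode is
 * enabled.
 */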
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	enum rte_crypto_op_sess_type sess_type;
	struct rte_cryptodev_info cdev_info;
	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
	struct ipsec_ctx *ipsec_ctx;
	struct cdev_key key = { 0 };
	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
					"No cryptodev: core %u, cipher_algo %u, "
					"auth_algo %u, aead_algo %u\n",
					key.lcore_id,
					key.cipher_algo,
					key.auth_algo,
					key.aead_algo);
			return ret;
		}

		/* Verify that all cores use the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The mapping process assigns an SA to the first cryptodev
		 * that matches its requirements, so this is a sanity check,
		 * not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			struct rte_cryptodev_info dev_info_1, dev_info_2;

			rte_cryptodev_info_get(cdev_id, &dev_info_1);
			rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
					&dev_info_2);
			if (dev_info_1.driver_id == dev_info_2.driver_id) {
				RTE_LOG(WARNING, IPSEC,
					"SA mapped to multiple cryptodevs for SPI %d\n",
					sa->spi);
			} else {
				RTE_LOG(WARNING, IPSEC,
					"SA mapped to multiple cryptodevs of different types for SPI %d\n",
					sa->spi);
			}
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			void *ctx = rte_cryptodev_get_sec_ctx(cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			if (verify_security_capabilities(ctx, &sess_conf, NULL)) {
				RTE_LOG(ERR, IPSEC,
					"Requested security session config not supported\n");
				return -1;
			}

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
			ips->security.ctx = ctx;

			sess = ips->security.ses;
			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(cdev_id, &info);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}

		if (verify_crypto_capabilities(info.capabilities, sa->xforms)) {
			RTE_LOG(ERR, IPSEC,
				"Requested crypto session config not supported\n");
			return -1;
		}

		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(cdev_id,
				sa->xforms, skt_ctx->session_pool);
		/* Guard against session allocation failure */
		if (ips->crypto.ses == NULL) {
			RTE_LOG(ERR, IPSEC, "Crypto session init failed\n");
			return -1;
		}

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	/* Set up metadata required by the event crypto adapter */
	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
		union rte_event_crypto_metadata m_data;
		const struct eventdev_params *eventdev_conf;

		eventdev_conf = &(em_conf->eventdev_config[0]);
		memset(&m_data, 0, sizeof(m_data));

		/* Fill in response information */
		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
		m_data.response_info.op = RTE_EVENT_OP_NEW;
		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;

		/* Fill in request information */
		m_data.request_info.cdev_id = cdev_id;
		m_data.request_info.queue_pair_id = 0;

		/* Attach meta info to session */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
				sess_type, &m_data, sizeof(m_data));
	}

	return 0;
}

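/*
 * Create an inline session on the ethernet device: build the security
 * session configuration from the SA and, for inline crypto, install
 * the rte_flow rule that applies the session to the SA's ESP traffic.
 */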
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	void *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			sess_conf.ipsec.tunnel.ipv6.src_addr = sa->src.ip.ip6;
			sess_conf.ipsec.tunnel.ipv6.dst_addr = sa->dst.ip.ip6;
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			sess_conf.ipsec.tunnel.ipv6.src_addr = sa->src.ip.ip6;
			sess_conf.ipsec.tunnel.ipv6.dst_addr = sa->dst.ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		int ret = 0;

		sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		ips->security.ctx = sec_ctx;
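		/* Build the flow pattern ETH / IPv4|IPv6 [/ UDP] / ESP,
		 * matching this SA's addresses and SPI.
		 */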
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;
			sa->ipv6_spec.hdr.dst_addr = sa->dst.ip.ip6;
			sa->ipv6_spec.hdr.src_addr = sa->src.ip.ip6;
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create a flow if the default flow has
			 * already been created for this port.
			 */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create IPsec flow: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When a
		 * packet is received, this userdata is retrieved using the
		 * metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations tied to the security session, such as
		 * rte_eth_event, so that the userdata can be used to
		 * uniquely identify the security parameters involved.
		 */
		sess_conf.userdata = (void *) sa;

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
					&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		ips->security.ctx = sec_ctx;
	}

	return 0;
}

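/*
 * Install a flow director rule for an inbound tunnel mode SA, steering
 * ESP traffic matched on IP addresses and SPI to the SA's configured
 * RX queue.
 */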
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		sa->ipv6_spec.hdr.dst_addr = sa->dst.ip.ip6;
		sa->ipv6_spec.hdr.src_addr = sa->src.ip.ip6;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Flush the queue pair's buffered crypto ops to the cryptodev PMD,
 * dropping the packets whose ops could not be enqueued.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

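/*
 * Buffer a crypto op on the queue pair and flush the buffer once a
 * full burst has accumulated.
 */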
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

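/*
 * Prepare each packet according to its SA's session type: attach the
 * crypto/security session and queue the op to the mapped cryptodev
 * queue pair for lookaside processing, or set packet metadata and
 * collect the packet for inline processing.
 */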
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			/* Drop ESP-in-UDP packets on SAs that don't have
			 * UDP encapsulation enabled.
			 */
			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC,
				"CPU crypto is not supported by the legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cqp[ipsec_ctx->lcore_id] != NULL);
		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}

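/*
 * Drain packets collected for inline processing, applying the
 * post-processing transform to each one.
 */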
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

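/*
 * Poll the lcore's cryptodev queue pairs in round-robin order, apply
 * the post-processing transform to completed ops and return the
 * surviving packets.
 */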
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

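/*
 * Inbound path: look up the SA for each packet, enqueue packets for
 * ESP inbound processing and drain any inline-processed packets.
 */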
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

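/*
 * Outbound path: look up SAs by index, enqueue packets for ESP
 * outbound processing and drain any inline-processed packets.
 */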
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}