/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

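/*
 * Fill in the security IPsec xform from the SA configuration:
 * tunnel endpoints (IPv4/IPv6), replay window size, ESN and
 * UDP encapsulation options.
 */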
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
	if (IS_HW_REASSEMBLY_EN(sa->flags))
		ipsec->options.ip_reassembly_en = 1;
}

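/*
 * Walk the cryptodev capability list and check that the device can handle
 * the given symmetric transform (AEAD, cipher or auth) with the requested
 * key, digest, AAD and IV lengths.
 */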
static inline int
verify_crypto_xform(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &capabilities[j++])->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				crypto_cap->sym.xform_type == crypto_xform->type) {
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
					crypto_cap->sym.aead.algo == crypto_xform->aead.algo) {
				if (rte_cryptodev_sym_capability_check_aead(&crypto_cap->sym,
						crypto_xform->aead.key.length,
						crypto_xform->aead.digest_length,
						crypto_xform->aead.aad_length,
						crypto_xform->aead.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
					crypto_cap->sym.cipher.algo == crypto_xform->cipher.algo) {
				if (rte_cryptodev_sym_capability_check_cipher(&crypto_cap->sym,
						crypto_xform->cipher.key.length,
						crypto_xform->cipher.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
					crypto_cap->sym.auth.algo == crypto_xform->auth.algo) {
				if (rte_cryptodev_sym_capability_check_auth(&crypto_cap->sym,
						crypto_xform->auth.key.length,
						crypto_xform->auth.digest_length,
						crypto_xform->auth.iv.length) == 0)
					return 0;
			}
		}
	}

	return -ENOTSUP;
}

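/*
 * Verify a transform and, if present, the chained transform against the
 * device capabilities.
 */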
static inline int
verify_crypto_capabilities(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	if (crypto_xform->next != NULL)
		return (verify_crypto_xform(capabilities, crypto_xform) ||
		    verify_crypto_xform(capabilities, crypto_xform->next));
	else
		return verify_crypto_xform(capabilities, crypto_xform);
}

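/*
 * Check each IPsec option requested for the SA against the security
 * capabilities reported by the device.
 */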
static inline int
verify_ipsec_capabilities(struct rte_security_ipsec_xform *ipsec_xform,
		const struct rte_security_capability *sec_cap)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		RTE_LOG(DEBUG, USER1,
			"UDP encapsulation ports verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		RTE_LOG(DEBUG, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		RTE_LOG(DEBUG, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		RTE_LOG(DEBUG, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		RTE_LOG(DEBUG, USER1, "Stats are not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		RTE_LOG(DEBUG, USER1, "Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		RTE_LOG(DEBUG, USER1, "Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			RTE_LOG(DEBUG, USER1, "Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

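/*
 * Look up the security capability matching the session configuration and
 * verify both its crypto and IPsec capabilities; on success, optionally
 * return the device offload flags through ol_flags.
 */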
static inline int
verify_security_capabilities(void *ctx,
		struct rte_security_session_conf *sess_conf,
		uint32_t *ol_flags)
{
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;

	sec_cap_idx.action = sess_conf->action_type;
	sec_cap_idx.protocol = sess_conf->protocol;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;

	sec_cap = rte_security_capability_get(ctx, &sec_cap_idx);
	if (sec_cap == NULL)
		return -ENOTSUP;

	if (verify_crypto_capabilities(sec_cap->crypto_capabilities,
				sess_conf->crypto_xform))
		return -ENOTSUP;

	if (verify_ipsec_capabilities(&sess_conf->ipsec, sec_cap))
		return -ENOTSUP;

	if (ol_flags != NULL)
		*ol_flags = sec_cap->ol_flags;

	return 0;
}

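/*
 * Create a lookaside (crypto or protocol) session for the SA: find the
 * cryptodev/queue pair mapped to this SA on each lcore, create the
 * security or crypto session on it, and attach event crypto adapter
 * metadata when the adapter is enabled.
 */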
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	enum rte_crypto_op_sess_type sess_type;
	struct rte_cryptodev_info cdev_info;
	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
	struct ipsec_ctx *ipsec_ctx;
	struct cdev_key key = { 0 };
	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
					"No cryptodev: core %u, cipher_algo %u, "
					"auth_algo %u, aead_algo %u\n",
					key.lcore_id,
					key.cipher_algo,
					key.auth_algo,
					key.aead_algo);
			return ret;
		}

		/* Verify that all cores use the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The current mapping process assigns the SA to the first
		 * cryptodev that matches its requirements, so this is a
		 * double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			struct rte_cryptodev_info dev_info_1, dev_info_2;
			rte_cryptodev_info_get(cdev_id, &dev_info_1);
			rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
					&dev_info_2);
			if (dev_info_1.driver_id == dev_info_2.driver_id) {
				RTE_LOG(WARNING, IPSEC,
					"SA mapped to multiple cryptodevs for SPI %d\n",
					sa->spi);
			} else {
				RTE_LOG(WARNING, IPSEC,
					"SA mapped to multiple cryptodevs of different types for SPI %d\n",
					sa->spi);
			}
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			void *ctx = rte_cryptodev_get_sec_ctx(cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			if (verify_security_capabilities(ctx, &sess_conf, NULL)) {
				RTE_LOG(ERR, IPSEC,
					"Requested security session config not supported\n");
				return -1;
			}

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed\n");
				return -1;
			}
			ips->security.ctx = ctx;

			sess = ips->security.ses;
			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(cdev_id, &info);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}

		if (verify_crypto_capabilities(info.capabilities, sa->xforms)) {
			RTE_LOG(ERR, IPSEC,
				"Requested crypto session config not supported\n");
			return -1;
		}

		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(cdev_id,
				sa->xforms, skt_ctx->session_pool);
		if (ips->crypto.ses == NULL) {
			RTE_LOG(ERR, IPSEC, "Crypto session init failed\n");
			return -1;
		}

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	/* Set up the metadata required by the event crypto adapter */
	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
		union rte_event_crypto_metadata m_data;
		const struct eventdev_params *eventdev_conf;

		eventdev_conf = &(em_conf->eventdev_config[0]);
		memset(&m_data, 0, sizeof(m_data));

		/* Fill in response information */
		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
		m_data.response_info.op = RTE_EVENT_OP_NEW;
		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;

		/* Fill in request information */
		m_data.request_info.cdev_id = cdev_id;
		m_data.request_info.queue_pair_id = 0;

		/* Attach metadata to the session */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
				sess_type, &m_data, sizeof(m_data));
	}

	return 0;
}

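/*
 * Create an inline (crypto or protocol) session for the SA on its Ethernet
 * port and, for inline crypto, install the rte_flow rule that directs
 * matching ESP traffic to the security session.
 */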
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	void *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		int ret = 0;

		sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create a flow if the default flow has
			 * already been created.
			 */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create IPsec flow: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		sec_ctx = rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata is retrieved using
		 * the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to the
		 * security session. In such cases, the userdata can be
		 * used to uniquely identify the security parameters.
		 */
		sess_conf.userdata = (void *) sa;

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
					&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		ips->security.ctx = sec_ctx;
	}

	return 0;
}

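/*
 * Install a flow director rule that steers inbound ESP traffic for this
 * SA (matched on tunnel endpoints and SPI) to its configured Rx queue.
 */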
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
		.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Enqueue buffered crypto ops to the PMD queue pair. Packets whose
 * crypto ops cannot be enqueued are dropped.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

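/* Buffer one crypto op; flush the burst once MAX_PKT_BURST ops are queued */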
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

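/*
 * Prepare a crypto op for each packet according to the action type of its
 * SA and either queue it to the SA's cryptodev queue pair or, for inline
 * sessions, move the packet to the offload list.
 */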
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC,
				"CPU crypto is not supported by the legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cqp[ipsec_ctx->lcore_id] != NULL);
		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}

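/*
 * Drain the inline-offload list: run the post-processing transform on each
 * packet and return the survivors, up to max_pkts, through pkts[].
 */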
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

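/*
 * Poll the crypto queue pairs round-robin, post-process completed crypto
 * ops and collect up to max_pkts resulting packets in pkts[].
 */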
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

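/*
 * Public inbound/outbound entry points: look up the SA for each packet,
 * enqueue the packets for crypto processing and return any packets that
 * have already completed inline processing. The *_cqp_dequeue() variants
 * poll the cryptodev queue pairs for completed lookaside operations.
 */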
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
1048