xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision 3c4898ef762eeb2578b9ae3d7f6e3a0e5cbca8c8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
7 #include <rte_esp.h>
8 #include <rte_ip.h>
9 #include <rte_security.h>
10 #include <rte_tcp.h>
11 #include <rte_udp.h>
12 
13 #include "test.h"
14 #include "test_cryptodev_security_ipsec.h"
15 
16 #define IV_LEN_MAX 16
17 #define UDP_CUSTOM_SPORT 4650
18 #define UDP_CUSTOM_DPORT 4660
19 
20 #ifndef IPVERSION
21 #define IPVERSION 4
22 #endif
23 
/* All ESP algorithm combinations under test: every AEAD alone, plus the
 * cross product of every cipher with every auth algorithm. Filled by
 * test_ipsec_alg_list_populate().
 */
struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

/* AH algorithm combinations: each non-NULL auth alone, plus each non-NULL
 * auth paired with the NULL cipher. Filled by
 * test_ipsec_ah_alg_list_populate(). Entry 0 of auth_list (NULL auth) is
 * excluded, hence (RTE_DIM(auth_list) - 1) per half.
 */
struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
29 
30 static bool
31 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
32 {
33 	/* The IP version number must be 4 */
34 	if (((pkt->version_ihl) >> 4) != 4)
35 		return false;
36 	/*
37 	 * The IP header length field must be large enough to hold the
38 	 * minimum length legal IP datagram (20 bytes = 5 words).
39 	 */
40 	if ((pkt->version_ihl & 0xf) < 5)
41 		return false;
42 
43 	/*
44 	 * The IP total length field must be large enough to hold the IP
45 	 * datagram header, whose length is specified in the IP header length
46 	 * field.
47 	 */
48 	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
49 		return false;
50 
51 	return true;
52 }
53 
54 static bool
55 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
56 {
57 	/* The IP version number must be 6 */
58 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
59 		return false;
60 
61 	return true;
62 }
63 
64 void
65 test_ipsec_alg_list_populate(void)
66 {
67 	unsigned long i, j, index = 0;
68 
69 	for (i = 0; i < RTE_DIM(aead_list); i++) {
70 		alg_list[index].param1 = &aead_list[i];
71 		alg_list[index].param2 = NULL;
72 		index++;
73 	}
74 
75 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
76 		for (j = 0; j < RTE_DIM(auth_list); j++) {
77 			alg_list[index].param1 = &cipher_list[i];
78 			alg_list[index].param2 = &auth_list[j];
79 			index++;
80 		}
81 	}
82 }
83 
84 void
85 test_ipsec_ah_alg_list_populate(void)
86 {
87 	unsigned long i, index = 0;
88 
89 	for (i = 1; i < RTE_DIM(auth_list); i++) {
90 		ah_alg_list[index].param1 = &auth_list[i];
91 		ah_alg_list[index].param2 = NULL;
92 		index++;
93 	}
94 
95 	for (i = 1; i < RTE_DIM(auth_list); i++) {
96 		/* NULL cipher */
97 		ah_alg_list[index].param1 = &cipher_list[0];
98 
99 		ah_alg_list[index].param2 = &auth_list[i];
100 		index++;
101 	}
102 }
103 
104 int
105 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
106 			   const struct rte_security_capability *sec_cap,
107 			   bool silent)
108 {
109 	/* Verify security capabilities */
110 
111 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
112 		if (!silent)
113 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
114 		return -ENOTSUP;
115 	}
116 
117 	if (ipsec_xform->options.udp_encap == 1 &&
118 	    sec_cap->ipsec.options.udp_encap == 0) {
119 		if (!silent)
120 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
121 		return -ENOTSUP;
122 	}
123 
124 	if (ipsec_xform->options.udp_ports_verify == 1 &&
125 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
126 		if (!silent)
127 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
128 				"verification is not supported\n");
129 		return -ENOTSUP;
130 	}
131 
132 	if (ipsec_xform->options.copy_dscp == 1 &&
133 	    sec_cap->ipsec.options.copy_dscp == 0) {
134 		if (!silent)
135 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
136 		return -ENOTSUP;
137 	}
138 
139 	if (ipsec_xform->options.copy_flabel == 1 &&
140 	    sec_cap->ipsec.options.copy_flabel == 0) {
141 		if (!silent)
142 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
143 		return -ENOTSUP;
144 	}
145 
146 	if (ipsec_xform->options.copy_df == 1 &&
147 	    sec_cap->ipsec.options.copy_df == 0) {
148 		if (!silent)
149 			RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
150 		return -ENOTSUP;
151 	}
152 
153 	if (ipsec_xform->options.dec_ttl == 1 &&
154 	    sec_cap->ipsec.options.dec_ttl == 0) {
155 		if (!silent)
156 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
157 		return -ENOTSUP;
158 	}
159 
160 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
161 		if (!silent)
162 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
163 		return -ENOTSUP;
164 	}
165 
166 	if (ipsec_xform->options.stats == 1 &&
167 	    sec_cap->ipsec.options.stats == 0) {
168 		if (!silent)
169 			RTE_LOG(INFO, USER1, "Stats is not supported\n");
170 		return -ENOTSUP;
171 	}
172 
173 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
174 	    (ipsec_xform->options.iv_gen_disable == 1) &&
175 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
176 		if (!silent)
177 			RTE_LOG(INFO, USER1,
178 				"Application provided IV is not supported\n");
179 		return -ENOTSUP;
180 	}
181 
182 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
183 	    (ipsec_xform->options.tunnel_hdr_verify >
184 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
185 		if (!silent)
186 			RTE_LOG(INFO, USER1,
187 				"Tunnel header verify is not supported\n");
188 		return -ENOTSUP;
189 	}
190 
191 	if (ipsec_xform->options.ip_csum_enable == 1 &&
192 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
193 		if (!silent)
194 			RTE_LOG(INFO, USER1,
195 				"Inner IP checksum is not supported\n");
196 		return -ENOTSUP;
197 	}
198 
199 	if (ipsec_xform->options.l4_csum_enable == 1 &&
200 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
201 		if (!silent)
202 			RTE_LOG(INFO, USER1,
203 				"Inner L4 checksum is not supported\n");
204 		return -ENOTSUP;
205 	}
206 
207 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
208 		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
209 			if (!silent)
210 				RTE_LOG(INFO, USER1,
211 					"Replay window size is not supported\n");
212 			return -ENOTSUP;
213 		}
214 	}
215 
216 	if (ipsec_xform->options.ingress_oop == 1 &&
217 	    sec_cap->ipsec.options.ingress_oop == 0) {
218 		if (!silent)
219 			RTE_LOG(INFO, USER1,
220 				"Inline Ingress OOP processing is not supported\n");
221 		return -ENOTSUP;
222 	}
223 
224 	return 0;
225 }
226 
227 int
228 test_ipsec_crypto_caps_aead_verify(
229 		const struct rte_security_capability *sec_cap,
230 		struct rte_crypto_sym_xform *aead)
231 {
232 	const struct rte_cryptodev_symmetric_capability *sym_cap;
233 	const struct rte_cryptodev_capabilities *crypto_cap;
234 	int j = 0;
235 
236 	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
237 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
238 		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
239 				crypto_cap->sym.xform_type == aead->type &&
240 				crypto_cap->sym.aead.algo == aead->aead.algo) {
241 			sym_cap = &crypto_cap->sym;
242 			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
243 					aead->aead.key.length,
244 					aead->aead.digest_length,
245 					aead->aead.aad_length,
246 					aead->aead.iv.length) == 0)
247 				return 0;
248 		}
249 	}
250 
251 	return -ENOTSUP;
252 }
253 
254 int
255 test_ipsec_crypto_caps_cipher_verify(
256 		const struct rte_security_capability *sec_cap,
257 		struct rte_crypto_sym_xform *cipher)
258 {
259 	const struct rte_cryptodev_symmetric_capability *sym_cap;
260 	const struct rte_cryptodev_capabilities *cap;
261 	int j = 0;
262 
263 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
264 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
265 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
266 				cap->sym.xform_type == cipher->type &&
267 				cap->sym.cipher.algo == cipher->cipher.algo) {
268 			sym_cap = &cap->sym;
269 			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
270 					cipher->cipher.key.length,
271 					cipher->cipher.iv.length) == 0)
272 				return 0;
273 		}
274 	}
275 
276 	return -ENOTSUP;
277 }
278 
279 int
280 test_ipsec_crypto_caps_auth_verify(
281 		const struct rte_security_capability *sec_cap,
282 		struct rte_crypto_sym_xform *auth)
283 {
284 	const struct rte_cryptodev_symmetric_capability *sym_cap;
285 	const struct rte_cryptodev_capabilities *cap;
286 	int j = 0;
287 
288 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
289 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
290 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
291 				cap->sym.xform_type == auth->type &&
292 				cap->sym.auth.algo == auth->auth.algo) {
293 			sym_cap = &cap->sym;
294 			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
295 					auth->auth.key.length,
296 					auth->auth.digest_length,
297 					auth->auth.iv.length) == 0)
298 				return 0;
299 		}
300 	}
301 
302 	return -ENOTSUP;
303 }
304 
305 void
306 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
307 			  struct ipsec_test_data *td_in)
308 {
309 	memcpy(td_in, td_out, sizeof(*td_in));
310 
311 	/* Populate output text of td_in with input text of td_out */
312 	memcpy(td_in->output_text.data, td_out->input_text.data,
313 	       td_out->input_text.len);
314 	td_in->output_text.len = td_out->input_text.len;
315 
316 	/* Populate input text of td_in with output text of td_out */
317 	memcpy(td_in->input_text.data, td_out->output_text.data,
318 	       td_out->output_text.len);
319 	td_in->input_text.len = td_out->output_text.len;
320 
321 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
322 
323 	if (td_in->aead) {
324 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
325 	} else {
326 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
327 		td_in->xform.chain.cipher.cipher.op =
328 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
329 	}
330 }
331 
332 static bool
333 is_ipv4(void *ip)
334 {
335 	struct rte_ipv4_hdr *ipv4 = ip;
336 	uint8_t ip_ver;
337 
338 	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
339 	if (ip_ver == IPVERSION)
340 		return true;
341 	else
342 		return false;
343 }
344 
345 static void
346 test_ipsec_csum_init(void *ip, bool l3, bool l4)
347 {
348 	struct rte_ipv4_hdr *ipv4;
349 	struct rte_tcp_hdr *tcp;
350 	struct rte_udp_hdr *udp;
351 	uint8_t next_proto;
352 	uint8_t size;
353 
354 	if (is_ipv4(ip)) {
355 		ipv4 = ip;
356 		size = sizeof(struct rte_ipv4_hdr);
357 		next_proto = ipv4->next_proto_id;
358 
359 		if (l3)
360 			ipv4->hdr_checksum = 0;
361 	} else {
362 		size = sizeof(struct rte_ipv6_hdr);
363 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
364 	}
365 
366 	if (l4) {
367 		switch (next_proto) {
368 		case IPPROTO_TCP:
369 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
370 			tcp->cksum = 0;
371 			break;
372 		case IPPROTO_UDP:
373 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
374 			udp->dgram_cksum = 0;
375 			break;
376 		default:
377 			return;
378 		}
379 	}
380 }
381 
/* Populate an array of IPsec test vectors from a template, according to
 * the requested algorithm parameters and test flags.
 *
 * param1 selects the AEAD (standalone) or cipher/auth algorithm;
 * param2 supplies the auth algorithm for cipher+auth chains and is
 * ignored otherwise. flags drive optional tweaks: payload resizing,
 * AH mode, tunnel/transport mode, checksum offloads, etc.
 */
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)

{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			/* CCM uses a 3-byte salt (vs 4 for GCM) */
			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				/* Auth-only (e.g. AH) configuration */
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				/* AES-GMAC additionally needs an IV */
				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				/* Cipher (param1) + auth (param2) chain */
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				/* AES-GMAC additionally needs an IV */
				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		/* Adjust the data to requested length */
		if (flags->plaintext_len && flags->ipv6) {
			/* Rebuild the inner packet as IPv6+TCP with a
			 * deterministic payload of the requested size.
			 */
			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			/* Clamp to buffer, subtract headers, enforce a
			 * minimum payload of 16 bytes.
			 */
			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv6_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 16)
				payload_len = 16;

			/* IPv6 */
			ip6->proto = IPPROTO_TCP;
			ip6->payload_len = sizeof(*tcp) + payload_len;
			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
				sizeof(struct rte_tcp_hdr);
		} else if (flags->plaintext_len) {
			/* Same as above for an IPv4+TCP inner packet;
			 * minimum payload here is 8 bytes.
			 */
			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv4_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 8)
				payload_len = 8;

			/* IPv4 */
			ip->next_proto_id = IPPROTO_TCP;
			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
			ip->total_length = rte_cpu_to_be_16(ip->total_length);
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
				sizeof(struct rte_tcp_hdr);
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			/* Mark the inner IPv4 packet as a fragment and
			 * refresh the header checksum accordingly.
			 */
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}
587 
588 void
589 test_ipsec_td_update(struct ipsec_test_data td_inb[],
590 		     const struct ipsec_test_data td_outb[],
591 		     int nb_td,
592 		     const struct ipsec_test_flags *flags)
593 {
594 	int i;
595 
596 	for (i = 0; i < nb_td; i++) {
597 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
598 		       td_outb[i].input_text.len);
599 		td_inb[i].output_text.len = td_outb->input_text.len;
600 
601 		if (flags->icv_corrupt) {
602 			int icv_pos = td_inb[i].input_text.len - 4;
603 			td_inb[i].input_text.data[icv_pos] += 1;
604 		}
605 
606 		if (flags->sa_expiry_pkts_hard)
607 			td_inb[i].ipsec_xform.life.packets_hard_limit =
608 					IPSEC_TEST_PACKETS_MAX - 1;
609 
610 		if (flags->udp_encap)
611 			td_inb[i].ipsec_xform.options.udp_encap = 1;
612 
613 		if (flags->udp_ports_verify)
614 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
615 
616 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
617 			flags->tunnel_hdr_verify;
618 
619 		if (flags->ip_csum)
620 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
621 
622 		if (flags->l4_csum)
623 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
624 
625 		/* Clear outbound specific flags */
626 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
627 	}
628 }
629 
630 void
631 test_ipsec_display_alg(const struct crypto_param *param1,
632 		       const struct crypto_param *param2)
633 {
634 	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
635 		printf("\t%s [%d]",
636 		       rte_cryptodev_get_aead_algo_string(param1->alg.aead),
637 		       param1->key_length * 8);
638 	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
639 		printf("\t%s",
640 		       rte_cryptodev_get_auth_algo_string(param1->alg.auth));
641 		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
642 			printf(" [%dB ICV]", param1->digest_length);
643 	} else {
644 		printf("\t%s",
645 		       rte_cryptodev_get_cipher_algo_string(param1->alg.cipher));
646 		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
647 			printf(" [%d]", param1->key_length * 8);
648 		printf(" %s",
649 		       rte_cryptodev_get_auth_algo_string(param2->alg.auth));
650 		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
651 			printf(" [%dB ICV]", param2->digest_length);
652 	}
653 	printf("\n");
654 }
655 
656 static int
657 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
658 {
659 	int len = 0;
660 
661 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
662 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
663 			if (td->ipsec_xform.tunnel.type ==
664 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
665 				len += sizeof(struct rte_ipv4_hdr);
666 			else
667 				len += sizeof(struct rte_ipv6_hdr);
668 		}
669 	}
670 
671 	return len;
672 }
673 
/* Check that the IV of an egress packet does not repeat any recently
 * seen IV, then record it for future comparisons.
 *
 * Uses function-static storage: up to IPSEC_TEST_PACKETS_MAX IVs are
 * retained across calls in a wrap-around queue (not thread-safe).
 * Returns TEST_SUCCESS, or TEST_FAILED on a repeated IV.
 */
static int
test_ipsec_iv_verify_push(const uint8_t *output_text, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	int i, iv_pos, iv_len;
	static int index;
	uint8_t *iv_tmp;

	/* For AEAD, the salt portion of the nonce is not carried in the
	 * packet, so only the remaining IV bytes are compared.
	 */
	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	/* IV immediately follows the ESP header (after any outer tunnel
	 * header on egress).
	 */
	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */

	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	/* Wrap around once the queue is full */
	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}
713 
714 static int
715 test_ipsec_l3_csum_verify(uint8_t *output_text)
716 {
717 	uint16_t actual_cksum, expected_cksum;
718 	struct rte_ipv4_hdr *ip;
719 
720 	ip = (struct rte_ipv4_hdr *)output_text;
721 
722 	if (!is_ipv4((void *)ip))
723 		return TEST_SKIPPED;
724 
725 	actual_cksum = ip->hdr_checksum;
726 
727 	ip->hdr_checksum = 0;
728 
729 	expected_cksum = rte_ipv4_cksum(ip);
730 
731 	if (actual_cksum != expected_cksum)
732 		return TEST_FAILED;
733 
734 	return TEST_SUCCESS;
735 }
736 
/* Recompute and compare the inner L4 (TCP/UDP) checksum of a decrypted
 * packet, for either IPv4 or IPv6 outer. The checksum field is zeroed
 * in place while recomputing. Non-TCP/UDP protocols fall through with
 * both checksums at 0 and therefore report success.
 */
static int
test_ipsec_l4_csum_verify(uint8_t *output_text)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = output_text;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		/* No extension headers expected in test vectors: L4 follows
		 * the fixed IPv6 header directly.
		 */
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
796 
797 static int
798 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
799 {
800 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
801 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
802 
803 	if (is_ipv4(received) && is_ipv4(expected)) {
804 		iph4_ex = expected;
805 		iph4_re = received;
806 		iph4_ex->time_to_live -= 1;
807 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
808 			return TEST_FAILED;
809 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
810 		iph6_ex = expected;
811 		iph6_re = received;
812 		iph6_ex->hop_limits -= 1;
813 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
814 			return TEST_FAILED;
815 	} else {
816 		printf("IP header version miss match\n");
817 		return TEST_FAILED;
818 	}
819 
820 	return TEST_SUCCESS;
821 }
822 
/* Compare a processed packet against the expected vector, honoring
 * flag-specific behavior: negative tests skip comparison entirely, UDP
 * encapsulation headers are stripped on egress, and inner checksum /
 * TTL-decrement tests use dedicated verification paths.
 *
 * Returns TEST_SUCCESS or TEST_FAILED.
 */
static int
test_ipsec_td_verify(uint8_t *output_text, uint32_t len, uint32_t ol_flags,
		const struct ipsec_test_data *td, bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
	uint32_t skip;
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	/* Skip over the UDP encapsulation header before comparing */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
			len, td->output_text.len);
		return TEST_FAILED;
	}

	/* A fragmented inner packet must not yield a fragmented output */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
				flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	/* Inner IP checksum test: mbuf offload flag must report GOOD and
	 * the recomputed checksum must match.
	 */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->ip_csum) {
		if (ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	/* Inner L4 checksum test, analogous to the L3 case above */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->l4_csum) {
		if (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	/* Skip the outer tunnel header (egress tunnel mode only) */
	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	/* Work on a local copy: flag-driven updates below mutate it */
	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
			"output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
922 
923 static int
924 test_ipsec_res_d_prepare(const uint8_t *output_text, uint32_t len,
925 		const struct ipsec_test_data *td, struct ipsec_test_data *res_d)
926 {
927 	memcpy(res_d, td, sizeof(*res_d));
928 
929 	memcpy(&res_d->input_text.data, output_text, len);
930 	res_d->input_text.len = len;
931 
932 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
933 	if (res_d->aead) {
934 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
935 	} else {
936 		res_d->xform.chain.cipher.cipher.op =
937 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
938 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
939 	}
940 
941 	return TEST_SUCCESS;
942 }
943 
944 static int
945 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
946 			     const struct ipsec_test_flags *flags)
947 {
948 	uint8_t tos, dscp;
949 	uint16_t f_off;
950 
951 	if (!is_valid_ipv4_pkt(iph4)) {
952 		printf("Tunnel outer header is not IPv4\n");
953 		return -1;
954 	}
955 
956 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
957 		printf("Tunnel outer header proto is not AH\n");
958 		return -1;
959 	}
960 
961 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
962 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
963 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
964 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
965 			printf("DF bit is not set\n");
966 			return -1;
967 		}
968 	} else {
969 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
970 			printf("DF bit is set\n");
971 			return -1;
972 		}
973 	}
974 
975 	tos = iph4->type_of_service;
976 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
977 
978 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
979 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
980 		if (dscp != TEST_IPSEC_DSCP_VAL) {
981 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
982 			       TEST_IPSEC_DSCP_VAL, dscp);
983 			return -1;
984 		}
985 	} else {
986 		if (dscp != 0) {
987 			printf("DSCP value is set [exp: 0, actual: %x]\n",
988 			       dscp);
989 			return -1;
990 		}
991 	}
992 
993 	return 0;
994 }
995 
996 static int
997 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
998 			     const struct ipsec_test_flags *flags)
999 {
1000 	uint32_t vtc_flow;
1001 	uint32_t flabel;
1002 	uint8_t dscp;
1003 
1004 	if (!is_valid_ipv6_pkt(iph6)) {
1005 		printf("Tunnel outer header is not IPv6\n");
1006 		return -1;
1007 	}
1008 
1009 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1010 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
1011 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
1012 
1013 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1014 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
1015 		if (dscp != TEST_IPSEC_DSCP_VAL) {
1016 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
1017 			       TEST_IPSEC_DSCP_VAL, dscp);
1018 			return -1;
1019 		}
1020 	} else {
1021 		if (dscp != 0) {
1022 			printf("DSCP value is set [exp: 0, actual: %x]\n",
1023 			       dscp);
1024 			return -1;
1025 		}
1026 	}
1027 
1028 	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
1029 
1030 	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1031 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1032 		if (flabel != TEST_IPSEC_FLABEL_VAL) {
1033 			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
1034 			       TEST_IPSEC_FLABEL_VAL, flabel);
1035 			return -1;
1036 		}
1037 	} else {
1038 		if (flabel != 0) {
1039 			printf("FLABEL value is set [exp: 0, actual: %x]\n",
1040 			       flabel);
1041 			return -1;
1042 		}
1043 	}
1044 
1045 	return 0;
1046 }
1047 
1048 int
1049 test_ipsec_post_process(const struct rte_mbuf *m, const struct ipsec_test_data *td,
1050 			struct ipsec_test_data *res_d, bool silent,
1051 			const struct ipsec_test_flags *flags)
1052 {
1053 	uint32_t len = rte_pktmbuf_pkt_len(m), data_len;
1054 	uint8_t output_text[IPSEC_TEXT_MAX_LEN];
1055 	const struct rte_mbuf *seg;
1056 	const uint8_t *output;
1057 	int ret;
1058 
1059 	memset(output_text, 0, IPSEC_TEXT_MAX_LEN);
1060 	/* Actual data in packet might be less in error cases,
1061 	 * hence take minimum of pkt_len and sum of data_len.
1062 	 * This is done to run through negative test cases.
1063 	 */
1064 	data_len = 0;
1065 	seg = m;
1066 	while (seg) {
1067 		data_len += seg->data_len;
1068 		seg = seg->next;
1069 	}
1070 	len = RTE_MIN(len, data_len);
1071 	/* Copy mbuf payload to continuous buffer */
1072 	output = rte_pktmbuf_read(m, 0, len, output_text);
1073 	if (output != output_text)
1074 		/* Single segment mbuf, copy manually */
1075 		memcpy(output_text, output, len);
1076 
1077 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1078 		const struct rte_ipv4_hdr *iph4;
1079 		const struct rte_ipv6_hdr *iph6;
1080 
1081 		if (flags->iv_gen) {
1082 			ret = test_ipsec_iv_verify_push(output_text, td);
1083 			if (ret != TEST_SUCCESS)
1084 				return ret;
1085 		}
1086 
1087 		iph4 = (const struct rte_ipv4_hdr *)output_text;
1088 
1089 		if (td->ipsec_xform.mode ==
1090 				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
1091 			if (flags->ipv6) {
1092 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1093 				if (is_valid_ipv6_pkt(iph6) == false) {
1094 					printf("Transport packet is not IPv6\n");
1095 					return TEST_FAILED;
1096 				}
1097 			} else {
1098 				if (is_valid_ipv4_pkt(iph4) == false) {
1099 					printf("Transport packet is not IPv4\n");
1100 					return TEST_FAILED;
1101 				}
1102 
1103 				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
1104 					printf("Transport IPv4 header proto is not AH\n");
1105 					return -1;
1106 				}
1107 			}
1108 		} else {
1109 			if (td->ipsec_xform.tunnel.type ==
1110 					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1111 				if (test_ipsec_iph4_hdr_validate(iph4, flags))
1112 					return TEST_FAILED;
1113 			} else {
1114 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1115 				if (test_ipsec_iph6_hdr_validate(iph6, flags))
1116 					return TEST_FAILED;
1117 			}
1118 		}
1119 	}
1120 
1121 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
1122 	   flags->udp_encap) {
1123 		const struct rte_ipv4_hdr *iph4;
1124 		const struct rte_ipv6_hdr *iph6;
1125 
1126 		if (td->ipsec_xform.tunnel.type ==
1127 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1128 			iph4 = (const struct rte_ipv4_hdr *)output_text;
1129 
1130 			if (iph4->next_proto_id != IPPROTO_UDP) {
1131 				printf("UDP header is not found\n");
1132 				return TEST_FAILED;
1133 			}
1134 
1135 			if (flags->udp_encap_custom_ports) {
1136 				const struct rte_udp_hdr *udph;
1137 
1138 				udph = (const struct rte_udp_hdr *)(output_text +
1139 					sizeof(struct rte_ipv4_hdr));
1140 				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
1141 				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
1142 					printf("UDP custom ports not matching.\n");
1143 					return TEST_FAILED;
1144 				}
1145 			}
1146 		} else {
1147 			iph6 = (const struct rte_ipv6_hdr *)output_text;
1148 
1149 			if (iph6->proto != IPPROTO_UDP) {
1150 				printf("UDP header is not found\n");
1151 				return TEST_FAILED;
1152 			}
1153 
1154 			if (flags->udp_encap_custom_ports) {
1155 				const struct rte_udp_hdr *udph;
1156 
1157 				udph = (const struct rte_udp_hdr *)(output_text +
1158 					sizeof(struct rte_ipv6_hdr));
1159 				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
1160 				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
1161 					printf("UDP custom ports not matching.\n");
1162 					return TEST_FAILED;
1163 				}
1164 			}
1165 		}
1166 	}
1167 
1168 	/*
1169 	 * In case of known vector tests & all inbound tests, res_d provided
1170 	 * would be NULL and output data need to be validated against expected.
1171 	 * For inbound, output_text would be plain packet and for outbound
1172 	 * output_text would IPsec packet. Validate by comparing against
1173 	 * known vectors.
1174 	 *
1175 	 * In case of combined mode tests, the output_text from outbound
1176 	 * operation (ie, IPsec packet) would need to be inbound processed to
1177 	 * obtain the plain text. Copy output_text to result data, 'res_d', so
1178 	 * that inbound processing can be done.
1179 	 */
1180 
1181 	if (res_d == NULL)
1182 		return test_ipsec_td_verify(output_text, len, m->ol_flags, td, silent, flags);
1183 	else
1184 		return test_ipsec_res_d_prepare(output_text, len, td, res_d);
1185 }
1186 
1187 int
1188 test_ipsec_status_check(const struct ipsec_test_data *td,
1189 			struct rte_crypto_op *op,
1190 			const struct ipsec_test_flags *flags,
1191 			enum rte_security_ipsec_sa_direction dir,
1192 			int pkt_num)
1193 {
1194 	int ret = TEST_SUCCESS;
1195 
1196 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1197 	    td->ar_packet) {
1198 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1199 			printf("Anti replay test case failed\n");
1200 			return TEST_FAILED;
1201 		} else {
1202 			return TEST_SUCCESS;
1203 		}
1204 	}
1205 
1206 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1207 	    flags->sa_expiry_pkts_hard &&
1208 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
1209 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1210 			printf("SA hard expiry (pkts) test failed\n");
1211 			return TEST_FAILED;
1212 		} else {
1213 			return TEST_SUCCESS;
1214 		}
1215 	}
1216 
1217 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1218 	    flags->tunnel_hdr_verify) {
1219 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1220 			printf("Tunnel header verify test case failed\n");
1221 			return TEST_FAILED;
1222 		} else {
1223 			return TEST_SUCCESS;
1224 		}
1225 	}
1226 
1227 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1228 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1229 			printf("ICV corruption test case failed\n");
1230 			ret = TEST_FAILED;
1231 		}
1232 	} else {
1233 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1234 			printf("Security op processing failed [pkt_num: %d]\n",
1235 			       pkt_num);
1236 			ret = TEST_FAILED;
1237 		}
1238 	}
1239 
1240 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1241 		if (!(op->aux_flags &
1242 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1243 			printf("SA soft expiry (pkts) test failed\n");
1244 			ret = TEST_FAILED;
1245 		}
1246 	}
1247 
1248 	return ret;
1249 }
1250 
1251 int
1252 test_ipsec_stats_verify(void *ctx,
1253 			void *sess,
1254 			const struct ipsec_test_flags *flags,
1255 			enum rte_security_ipsec_sa_direction dir)
1256 {
1257 	struct rte_security_stats stats = {0};
1258 	int ret = TEST_SUCCESS;
1259 
1260 	if (flags->stats_success) {
1261 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1262 			return TEST_FAILED;
1263 
1264 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1265 			if (stats.ipsec.opackets != 1 ||
1266 			    stats.ipsec.oerrors != 0)
1267 				ret = TEST_FAILED;
1268 		} else {
1269 			if (stats.ipsec.ipackets != 1 ||
1270 			    stats.ipsec.ierrors != 0)
1271 				ret = TEST_FAILED;
1272 		}
1273 	}
1274 
1275 	return ret;
1276 }
1277 
1278 int
1279 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1280 {
1281 	struct rte_ipv4_hdr *iph4;
1282 	struct rte_ipv6_hdr *iph6;
1283 	bool cksum_dirty = false;
1284 
1285 	iph4 = (struct rte_ipv4_hdr *)pkt;
1286 
1287 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1288 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1289 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1290 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1291 		uint16_t frag_off;
1292 
1293 		if (!is_ipv4(iph4)) {
1294 			printf("Invalid packet type\n");
1295 			return -1;
1296 		}
1297 
1298 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1299 
1300 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1301 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1302 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1303 		else
1304 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1305 
1306 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1307 		cksum_dirty = true;
1308 	}
1309 
1310 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1311 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1312 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1313 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1314 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1315 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1316 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1317 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1318 
1319 		if (is_ipv4(iph4)) {
1320 			uint8_t tos;
1321 
1322 			tos = iph4->type_of_service;
1323 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1324 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1325 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1326 					(TEST_IPSEC_DSCP_VAL << 2));
1327 			else
1328 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1329 
1330 			iph4->type_of_service = tos;
1331 			cksum_dirty = true;
1332 		} else {
1333 			uint32_t vtc_flow;
1334 
1335 			iph6 = (struct rte_ipv6_hdr *)pkt;
1336 
1337 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1338 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1339 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1340 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1341 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1342 			else
1343 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1344 
1345 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1346 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1347 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1348 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1349 			else
1350 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1351 
1352 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1353 		}
1354 	}
1355 
1356 	if (cksum_dirty && is_ipv4(iph4)) {
1357 		iph4->hdr_checksum = 0;
1358 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1359 	}
1360 
1361 	return 0;
1362 }
1363