/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

#define IV_LEN_MAX 16
#define UDP_CUSTOM_SPORT 4650
#define UDP_CUSTOM_DPORT 4660

#ifndef IPVERSION
#define IPVERSION 4
#endif

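/*
 * Algorithm combinations exercised by the IPsec tests: one entry per AEAD
 * algorithm plus one entry per cipher/auth pair, and a separate list for AH
 * holding auth-only and NULL-cipher + auth combinations.
 */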
struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];

static bool
is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
{
	/* The IP version number must be 4 */
	if (((pkt->version_ihl) >> 4) != 4)
		return false;
	/*
	 * The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return false;

	/*
	 * The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return false;

	return true;
}

static bool
is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
{
	/* The IP version number must be 6 */
	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
		return false;

	return true;
}

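/* Fill alg_list with every AEAD entry followed by every cipher + auth pair. */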
void
test_ipsec_alg_list_populate(void)
{
	unsigned long i, j, index = 0;

	for (i = 0; i < RTE_DIM(aead_list); i++) {
		alg_list[index].param1 = &aead_list[i];
		alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 0; i < RTE_DIM(cipher_list); i++) {
		for (j = 0; j < RTE_DIM(auth_list); j++) {
			alg_list[index].param1 = &cipher_list[i];
			alg_list[index].param2 = &auth_list[j];
			index++;
		}
	}
}

void
test_ipsec_ah_alg_list_populate(void)
{
	unsigned long i, index = 0;

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		ah_alg_list[index].param1 = &auth_list[i];
		ah_alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		/* NULL cipher */
		ah_alg_list[index].param1 = &cipher_list[0];

		ah_alg_list[index].param2 = &auth_list[i];
		index++;
	}
}

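/*
 * Check that every IPsec option requested in the xform (ESN, UDP
 * encapsulation, header copy options, checksum offload, replay window, etc.)
 * is advertised in the security capability; return -ENOTSUP otherwise.
 */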
int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			if (!silent)
				RTE_LOG(INFO, USER1,
					"Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

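/*
 * The three helpers below walk the crypto capability array of the security
 * capability and check that the AEAD/cipher/auth transform (algorithm, key,
 * digest, AAD and IV lengths) is supported.
 */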
int
test_ipsec_crypto_caps_aead_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *aead)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				crypto_cap->sym.xform_type == aead->type &&
				crypto_cap->sym.aead.algo == aead->aead.algo) {
			sym_cap = &crypto_cap->sym;
			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
					aead->aead.key.length,
					aead->aead.digest_length,
					aead->aead.aad_length,
					aead->aead.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_cipher_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *cipher)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type == cipher->type &&
				cap->sym.cipher.algo == cipher->cipher.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
					cipher->cipher.key.length,
					cipher->cipher.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_auth_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *auth)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type == auth->type &&
				cap->sym.auth.algo == auth->auth.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
					auth->auth.key.length,
					auth->auth.digest_length,
					auth->auth.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

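/*
 * Derive an inbound test vector from an outbound one by swapping input and
 * output text and flipping the SA direction and crypto operations.
 */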
void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}

static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
	if (ip_ver == IPVERSION)
		return true;
	else
		return false;
}

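/*
 * Clear the IPv4 and/or TCP/UDP checksum fields of the plain packet so that
 * checksum offload can be exercised.
 */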
static void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	uint8_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			return;
		}
	}
}

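/*
 * Build an array of test vectors from the given crypto parameters and test
 * flags, starting from the AES-256-GCM or AES-128-CBC/HMAC-SHA256 templates.
 */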
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		/* Adjust the data to requested length */
		if (flags->plaintext_len && flags->ipv6) {
			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv6_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 16)
				payload_len = 16;

			/* IPv6 */
			ip6->proto = IPPROTO_TCP;
			ip6->payload_len = sizeof(*tcp) + payload_len;
			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
				sizeof(struct rte_tcp_hdr);
		} else if (flags->plaintext_len) {
			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv4_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 8)
				payload_len = 8;

			/* IPv4 */
			ip->next_proto_id = IPPROTO_TCP;
			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
			ip->total_length = rte_cpu_to_be_16(ip->total_length);
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
				sizeof(struct rte_tcp_hdr);
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}

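/*
 * Refresh inbound vectors with the outbound results and apply inbound-only
 * settings (ICV corruption, hard expiry, UDP encapsulation, header verify).
 */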
void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;
			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
			flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}

void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		printf("\t%s [%d]",
		       rte_cryptodev_get_aead_algo_string(param1->alg.aead),
		       param1->key_length * 8);
	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		printf("\t%s",
		       rte_cryptodev_get_auth_algo_string(param1->alg.auth));
		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param1->digest_length);
	} else {
		printf("\t%s",
		       rte_cryptodev_get_cipher_algo_string(param1->alg.cipher));
		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
			printf(" [%d]", param1->key_length * 8);
		printf(" %s",
		       rte_cryptodev_get_auth_algo_string(param2->alg.auth));
		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param2->digest_length);
	}
	printf("\n");
}

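/* Outer IP header length added on egress in tunnel mode; 0 otherwise. */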
static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}

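/*
 * Check that the IV following the ESP header has not been generated before
 * by comparing against previously seen IVs, then queue it for later packets.
 */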
static int
test_ipsec_iv_verify_push(const uint8_t *output_text, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	int i, iv_pos, iv_len;
	static int index;
	uint8_t *iv_tmp;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */

	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}

static int
test_ipsec_l3_csum_verify(uint8_t *output_text)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = (struct rte_ipv4_hdr *)output_text;

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_l4_csum_verify(uint8_t *output_text)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = output_text;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
{
	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
	struct rte_ipv6_hdr *iph6_ex, *iph6_re;

	if (is_ipv4(received) && is_ipv4(expected)) {
		iph4_ex = expected;
		iph4_re = received;
		iph4_ex->time_to_live -= 1;
		if (iph4_re->time_to_live != iph4_ex->time_to_live)
			return TEST_FAILED;
	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
		iph6_ex = expected;
		iph6_re = received;
		iph6_ex->hop_limits -= 1;
		if (iph6_re->hop_limits != iph6_ex->hop_limits)
			return TEST_FAILED;
	} else {
		printf("IP header version mismatch\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

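/*
 * Compare the processed packet against the expected vector, accounting for
 * UDP encapsulation, checksum offload flags and TTL/hop limit decrement.
 */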
static int
test_ipsec_td_verify(uint8_t *output_text, uint32_t len, uint32_t ol_flags,
		const struct ipsec_test_data *td, bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
	uint32_t skip;
	int ret;

	/* For tests where an error status indicates success, skip output verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
			len, td->output_text.len);
		return TEST_FAILED;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
				flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->ip_csum) {
		if (ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->l4_csum) {
		if (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
			"output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_res_d_prepare(const uint8_t *output_text, uint32_t len,
		const struct ipsec_test_data *td, struct ipsec_test_data *res_d)
{
	memcpy(res_d, td, sizeof(*res_d));

	memcpy(&res_d->input_text.data, output_text, len);
	res_d->input_text.len = len;

	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	if (res_d->aead) {
		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		res_d->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	}

	return TEST_SUCCESS;
}

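/*
 * Validate the outer IPv4 tunnel header: protocol (AH when requested), DF bit
 * and DSCP value must match what the test flags dictate.
 */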
static int
test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
			     const struct ipsec_test_flags *flags)
{
	uint8_t tos, dscp;
	uint16_t f_off;

	if (!is_valid_ipv4_pkt(iph4)) {
		printf("Tunnel outer header is not IPv4\n");
		return -1;
	}

	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
		printf("Tunnel outer header proto is not AH\n");
		return -1;
	}

	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
			printf("DF bit is not set\n");
			return -1;
		}
	} else {
		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
			printf("DF bit is set\n");
			return -1;
		}
	}

	tos = iph4->type_of_service;
	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint32_t flabel;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;

	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
		if (flabel != TEST_IPSEC_FLABEL_VAL) {
			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_FLABEL_VAL, flabel);
			return -1;
		}
	} else {
		if (flabel != 0) {
			printf("FLABEL value is set [exp: 0, actual: %x]\n",
			       flabel);
			return -1;
		}
	}

	return 0;
}

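/*
 * Inspect the packet produced by the security operation: verify outer headers
 * and UDP encapsulation on egress, then either compare against the known
 * vector or prepare 'res_d' for a subsequent inbound pass.
 */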
int
test_ipsec_post_process(const struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint32_t len = rte_pktmbuf_pkt_len(m);
	uint8_t output_text[IPSEC_TEXT_MAX_LEN];
	const uint8_t *output;
	int ret;

	/* Copy mbuf payload to a contiguous buffer */
	output = rte_pktmbuf_read(m, 0, len, output_text);
	if (output != output_text)
		/* Single segment mbuf, copy manually */
		memcpy(output_text, output, len);

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(output_text, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;

			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv4_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;

			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv6_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		}
	}

	/*
	 * For known vector tests and all inbound tests, res_d is NULL and the
	 * output data needs to be validated against the expected vector. For
	 * inbound, output_text is the plain packet; for outbound, output_text
	 * is the IPsec packet. Validate by comparing against known vectors.
	 *
	 * For combined mode tests, the output_text from the outbound
	 * operation (i.e. the IPsec packet) needs to be inbound processed to
	 * obtain the plain text. Copy output_text to the result data, 'res_d',
	 * so that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(output_text, len, m->ol_flags, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(output_text, len, td, res_d);
}

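/*
 * Check the crypto op status against the expectation for the test case:
 * negative tests (anti-replay, hard expiry, tunnel header verify, ICV
 * corruption) must report an error status, everything else must succeed.
 */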
int
test_ipsec_status_check(const struct ipsec_test_data *td,
			struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    td->ar_packet) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Anti replay test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}

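/* For stats test cases, expect exactly one packet and no errors per direction. */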
int
test_ipsec_stats_verify(struct rte_security_ctx *ctx,
			void *sess,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir)
{
	struct rte_security_stats stats = {0};
	int ret = TEST_SUCCESS;

	if (flags->stats_success) {
		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
			return TEST_FAILED;

		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (stats.ipsec.opackets != 1 ||
			    stats.ipsec.oerrors != 0)
				ret = TEST_FAILED;
		} else {
			if (stats.ipsec.ipackets != 1 ||
			    stats.ipsec.ierrors != 0)
				ret = TEST_FAILED;
		}
	}

	return ret;
}

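/*
 * Patch the expected inner packet according to the DF, DSCP and flow label
 * flags and refresh the IPv4 header checksum when it is modified.
 */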
int
test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
{
	struct rte_ipv4_hdr *iph4;
	struct rte_ipv6_hdr *iph6;
	bool cksum_dirty = false;

	iph4 = (struct rte_ipv4_hdr *)pkt;

	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		uint16_t frag_off;

		if (!is_ipv4(iph4)) {
			printf("Invalid packet type\n");
			return -1;
		}

		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
			frag_off |= RTE_IPV4_HDR_DF_FLAG;
		else
			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;

		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
		cksum_dirty = true;
	}

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {

		if (is_ipv4(iph4)) {
			uint8_t tos;

			tos = iph4->type_of_service;
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				tos |= (RTE_IPV4_HDR_DSCP_MASK &
					(TEST_IPSEC_DSCP_VAL << 2));
			else
				tos &= ~RTE_IPV4_HDR_DSCP_MASK;

			iph4->type_of_service = tos;
			cksum_dirty = true;
		} else {
			uint32_t vtc_flow;

			iph6 = (struct rte_ipv6_hdr *)pkt;

			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
			else
				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;

			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
			else
				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;

			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
		}
	}

	if (cksum_dirty && is_ipv4(iph4)) {
		iph4->hdr_checksum = 0;
		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
	}

	return 0;
}