1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
7 #include <rte_esp.h>
8 #include <rte_ip.h>
9 #include <rte_security.h>
10 #include <rte_tcp.h>
11 #include <rte_udp.h>
12 
13 #include "test.h"
14 #include "test_cryptodev_security_ipsec.h"
15 
16 #define IV_LEN_MAX 16
17 #define UDP_CUSTOM_SPORT 4650
18 #define UDP_CUSTOM_DPORT 4660
19 
20 #ifndef IPVERSION
21 #define IPVERSION 4
22 #endif
23 
24 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
25 				  (RTE_DIM(cipher_list) *
26 				   RTE_DIM(auth_list))];
27 
28 struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
29 
30 static bool
31 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
32 {
33 	/* The IP version number must be 4 */
34 	if (((pkt->version_ihl) >> 4) != 4)
35 		return false;
36 	/*
37 	 * The IP header length field must be large enough to hold the
38 	 * minimum length legal IP datagram (20 bytes = 5 words).
39 	 */
40 	if ((pkt->version_ihl & 0xf) < 5)
41 		return false;
42 
43 	/*
44 	 * The IP total length field must be large enough to hold the IP
45 	 * datagram header, whose length is specified in the IP header length
46 	 * field.
47 	 */
48 	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
49 		return false;
50 
51 	return true;
52 }
53 
54 static bool
55 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
56 {
57 	/* The IP version number must be 6 */
58 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
59 		return false;
60 
61 	return true;
62 }
63 
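/*
 * Populate the global alg_list with every supported combination: one entry
 * per AEAD algorithm (param2 left NULL), followed by every cipher + auth
 * pair. The layout matches the sizing of the alg_list declaration above.
 */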
64 void
65 test_ipsec_alg_list_populate(void)
66 {
67 	unsigned long i, j, index = 0;
68 
69 	for (i = 0; i < RTE_DIM(aead_list); i++) {
70 		alg_list[index].param1 = &aead_list[i];
71 		alg_list[index].param2 = NULL;
72 		index++;
73 	}
74 
75 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
76 		for (j = 0; j < RTE_DIM(auth_list); j++) {
77 			alg_list[index].param1 = &cipher_list[i];
78 			alg_list[index].param2 = &auth_list[j];
79 			index++;
80 		}
81 	}
82 }
83 
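/*
 * Populate the AH algorithm list with auth-only entries followed by
 * NULL-cipher + auth entries. Index 0 of auth_list (presumably the NULL
 * auth algorithm) is skipped, matching the 2 * (RTE_DIM(auth_list) - 1)
 * sizing of ah_alg_list above.
 */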
84 void
85 test_ipsec_ah_alg_list_populate(void)
86 {
87 	unsigned long i, index = 0;
88 
89 	for (i = 1; i < RTE_DIM(auth_list); i++) {
90 		ah_alg_list[index].param1 = &auth_list[i];
91 		ah_alg_list[index].param2 = NULL;
92 		index++;
93 	}
94 
95 	for (i = 1; i < RTE_DIM(auth_list); i++) {
96 		/* NULL cipher */
97 		ah_alg_list[index].param1 = &cipher_list[0];
98 
99 		ah_alg_list[index].param2 = &auth_list[i];
100 		index++;
101 	}
102 }
103 
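/*
 * Compare each IPsec option requested in the SA configuration against the
 * options advertised by the security capability and return -ENOTSUP on the
 * first unsupported option, optionally logging the reason.
 */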
104 int
105 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
106 			   const struct rte_security_capability *sec_cap,
107 			   bool silent)
108 {
109 	/* Verify security capabilities */
110 
111 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
112 		if (!silent)
113 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
114 		return -ENOTSUP;
115 	}
116 
117 	if (ipsec_xform->options.udp_encap == 1 &&
118 	    sec_cap->ipsec.options.udp_encap == 0) {
119 		if (!silent)
120 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
121 		return -ENOTSUP;
122 	}
123 
124 	if (ipsec_xform->options.udp_ports_verify == 1 &&
125 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
126 		if (!silent)
127 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
128 				"verification is not supported\n");
129 		return -ENOTSUP;
130 	}
131 
132 	if (ipsec_xform->options.copy_dscp == 1 &&
133 	    sec_cap->ipsec.options.copy_dscp == 0) {
134 		if (!silent)
135 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
136 		return -ENOTSUP;
137 	}
138 
139 	if (ipsec_xform->options.copy_flabel == 1 &&
140 	    sec_cap->ipsec.options.copy_flabel == 0) {
141 		if (!silent)
142 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
143 		return -ENOTSUP;
144 	}
145 
146 	if (ipsec_xform->options.copy_df == 1 &&
147 	    sec_cap->ipsec.options.copy_df == 0) {
148 		if (!silent)
149 			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
150 		return -ENOTSUP;
151 	}
152 
153 	if (ipsec_xform->options.dec_ttl == 1 &&
154 	    sec_cap->ipsec.options.dec_ttl == 0) {
155 		if (!silent)
156 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
157 		return -ENOTSUP;
158 	}
159 
160 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
161 		if (!silent)
162 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
163 		return -ENOTSUP;
164 	}
165 
166 	if (ipsec_xform->options.stats == 1 &&
167 	    sec_cap->ipsec.options.stats == 0) {
168 		if (!silent)
169 			RTE_LOG(INFO, USER1, "Stats is not supported\n");
170 		return -ENOTSUP;
171 	}
172 
173 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
174 	    (ipsec_xform->options.iv_gen_disable == 1) &&
175 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
176 		if (!silent)
177 			RTE_LOG(INFO, USER1,
178 				"Application provided IV is not supported\n");
179 		return -ENOTSUP;
180 	}
181 
182 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
183 	    (ipsec_xform->options.tunnel_hdr_verify >
184 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
185 		if (!silent)
186 			RTE_LOG(INFO, USER1,
187 				"Tunnel header verify is not supported\n");
188 		return -ENOTSUP;
189 	}
190 
191 	if (ipsec_xform->options.ip_csum_enable == 1 &&
192 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
193 		if (!silent)
194 			RTE_LOG(INFO, USER1,
195 				"Inner IP checksum is not supported\n");
196 		return -ENOTSUP;
197 	}
198 
199 	if (ipsec_xform->options.l4_csum_enable == 1 &&
200 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
201 		if (!silent)
202 			RTE_LOG(INFO, USER1,
203 				"Inner L4 checksum is not supported\n");
204 		return -ENOTSUP;
205 	}
206 
207 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
208 		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
209 			if (!silent)
210 				RTE_LOG(INFO, USER1,
211 					"Replay window size is not supported\n");
212 			return -ENOTSUP;
213 		}
214 	}
215 
216 	return 0;
217 }
218 
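/*
 * Walk the crypto_capabilities array of the security capability (terminated
 * by an entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED) and accept the xform
 * if any matching AEAD capability passes
 * rte_cryptodev_sym_capability_check_aead(). The cipher and auth variants
 * below follow the same pattern.
 */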
219 int
220 test_ipsec_crypto_caps_aead_verify(
221 		const struct rte_security_capability *sec_cap,
222 		struct rte_crypto_sym_xform *aead)
223 {
224 	const struct rte_cryptodev_symmetric_capability *sym_cap;
225 	const struct rte_cryptodev_capabilities *crypto_cap;
226 	int j = 0;
227 
228 	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
229 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
230 		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
231 				crypto_cap->sym.xform_type == aead->type &&
232 				crypto_cap->sym.aead.algo == aead->aead.algo) {
233 			sym_cap = &crypto_cap->sym;
234 			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
235 					aead->aead.key.length,
236 					aead->aead.digest_length,
237 					aead->aead.aad_length,
238 					aead->aead.iv.length) == 0)
239 				return 0;
240 		}
241 	}
242 
243 	return -ENOTSUP;
244 }
245 
246 int
247 test_ipsec_crypto_caps_cipher_verify(
248 		const struct rte_security_capability *sec_cap,
249 		struct rte_crypto_sym_xform *cipher)
250 {
251 	const struct rte_cryptodev_symmetric_capability *sym_cap;
252 	const struct rte_cryptodev_capabilities *cap;
253 	int j = 0;
254 
255 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
256 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
257 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
258 				cap->sym.xform_type == cipher->type &&
259 				cap->sym.cipher.algo == cipher->cipher.algo) {
260 			sym_cap = &cap->sym;
261 			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
262 					cipher->cipher.key.length,
263 					cipher->cipher.iv.length) == 0)
264 				return 0;
265 		}
266 	}
267 
268 	return -ENOTSUP;
269 }
270 
271 int
272 test_ipsec_crypto_caps_auth_verify(
273 		const struct rte_security_capability *sec_cap,
274 		struct rte_crypto_sym_xform *auth)
275 {
276 	const struct rte_cryptodev_symmetric_capability *sym_cap;
277 	const struct rte_cryptodev_capabilities *cap;
278 	int j = 0;
279 
280 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
281 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
282 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
283 				cap->sym.xform_type == auth->type &&
284 				cap->sym.auth.algo == auth->auth.algo) {
285 			sym_cap = &cap->sym;
286 			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
287 					auth->auth.key.length,
288 					auth->auth.digest_length,
289 					auth->auth.iv.length) == 0)
290 				return 0;
291 		}
292 	}
293 
294 	return -ENOTSUP;
295 }
296 
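/*
 * Derive an inbound test vector from an outbound one: the outbound input
 * (plain text) becomes the expected inbound output and vice versa, and the
 * SA direction and crypto operations are flipped to decrypt/verify.
 */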
297 void
298 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
299 			  struct ipsec_test_data *td_in)
300 {
301 	memcpy(td_in, td_out, sizeof(*td_in));
302 
303 	/* Populate output text of td_in with input text of td_out */
304 	memcpy(td_in->output_text.data, td_out->input_text.data,
305 	       td_out->input_text.len);
306 	td_in->output_text.len = td_out->input_text.len;
307 
308 	/* Populate input text of td_in with output text of td_out */
309 	memcpy(td_in->input_text.data, td_out->output_text.data,
310 	       td_out->output_text.len);
311 	td_in->input_text.len = td_out->output_text.len;
312 
313 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
314 
315 	if (td_in->aead) {
316 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
317 	} else {
318 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
319 		td_in->xform.chain.cipher.cipher.op =
320 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
321 	}
322 }
323 
324 static bool
325 is_ipv4(void *ip)
326 {
327 	struct rte_ipv4_hdr *ipv4 = ip;
328 	uint8_t ip_ver;
329 
330 	ip_ver = (ipv4->version_ihl & 0xf0) >> 4;
331 	if (ip_ver == IPVERSION)
332 		return true;
333 	else
334 		return false;
335 }
336 
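/*
 * Zero the L3 and/or L4 checksum fields of the plain test packet so that
 * checksum generation by the PMD can be exercised and later verified.
 */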
337 static void
338 test_ipsec_csum_init(void *ip, bool l3, bool l4)
339 {
340 	struct rte_ipv4_hdr *ipv4;
341 	struct rte_tcp_hdr *tcp;
342 	struct rte_udp_hdr *udp;
343 	uint8_t next_proto;
344 	uint8_t size;
345 
346 	if (is_ipv4(ip)) {
347 		ipv4 = ip;
348 		size = sizeof(struct rte_ipv4_hdr);
349 		next_proto = ipv4->next_proto_id;
350 
351 		if (l3)
352 			ipv4->hdr_checksum = 0;
353 	} else {
354 		size = sizeof(struct rte_ipv6_hdr);
355 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
356 	}
357 
358 	if (l4) {
359 		switch (next_proto) {
360 		case IPPROTO_TCP:
361 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
362 			tcp->cksum = 0;
363 			break;
364 		case IPPROTO_UDP:
365 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
366 			udp->dgram_cksum = 0;
367 			break;
368 		default:
369 			return;
370 		}
371 	}
372 }
373 
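/*
 * Populate an array of test vectors from the given crypto parameters and
 * test flags. Each entry starts from a known-good template vector and is
 * then adjusted for the requested algorithms, payload length and SA
 * options. A typical caller (sketch, not taken from this file) would do:
 *
 *	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
 *
 *	test_ipsec_td_prepare(param1, param2, flags, td_outb,
 *			      IPSEC_TEST_PACKETS_MAX);
 */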
374 void
375 test_ipsec_td_prepare(const struct crypto_param *param1,
376 		      const struct crypto_param *param2,
377 		      const struct ipsec_test_flags *flags,
378 		      struct ipsec_test_data *td_array,
379 		      int nb_td)
380 
381 {
382 	struct ipsec_test_data *td;
383 	int i;
384 
385 	memset(td_array, 0, nb_td * sizeof(*td_array));
386 
387 	for (i = 0; i < nb_td; i++) {
388 		td = &td_array[i];
389 
390 		/* Prepare fields based on param */
391 
392 		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
393 			/* Copy template for packet & key fields */
394 			if (flags->ipv6)
395 				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
396 			else
397 				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
398 
399 			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
400 				td->salt.len = 3;
401 
402 			td->aead = true;
403 			td->xform.aead.aead.algo = param1->alg.aead;
404 			td->xform.aead.aead.key.length = param1->key_length;
405 		} else {
406 			/* Copy template for packet & key fields */
407 			if (flags->ipv6)
408 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
409 					sizeof(*td));
410 			else
411 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
412 					sizeof(*td));
413 
414 			td->aead = false;
415 
416 			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
417 				td->xform.chain.auth.auth.algo =
418 						param1->alg.auth;
419 				td->xform.chain.auth.auth.key.length =
420 						param1->key_length;
421 				td->xform.chain.auth.auth.digest_length =
422 						param1->digest_length;
423 				td->auth_only = true;
424 
425 				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
426 					td->xform.chain.auth.auth.iv.length =
427 						param1->iv_length;
428 					td->aes_gmac = true;
429 				}
430 			} else {
431 				td->xform.chain.cipher.cipher.algo =
432 						param1->alg.cipher;
433 				td->xform.chain.cipher.cipher.key.length =
434 						param1->key_length;
435 				td->xform.chain.cipher.cipher.iv.length =
436 						param1->iv_length;
437 				td->xform.chain.auth.auth.algo =
438 						param2->alg.auth;
439 				td->xform.chain.auth.auth.key.length =
440 						param2->key_length;
441 				td->xform.chain.auth.auth.digest_length =
442 						param2->digest_length;
443 
444 				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
445 					td->xform.chain.auth.auth.iv.length =
446 						param2->iv_length;
447 					td->aes_gmac = true;
448 				}
449 			}
450 		}
451 
452 		/* Adjust the data to requested length */
453 		if (flags->plaintext_len && flags->ipv6) {
454 			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
455 			struct rte_tcp_hdr *tcp;
456 			int64_t payload_len;
457 			uint8_t *data;
458 			int64_t i;
459 
460 			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
461 			payload_len -= sizeof(struct rte_ipv6_hdr);
462 			payload_len -= sizeof(struct rte_tcp_hdr);
463 			if (payload_len <= 16)
464 				payload_len = 16;
465 
466 			/* IPv6 */
467 			ip6->proto = IPPROTO_TCP;
468 			ip6->payload_len = sizeof(*tcp) + payload_len;
469 			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);
470 
471 			/* TCP */
472 			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
473 			data = (uint8_t *)(tcp + 1);
474 			for (i = 0; i < payload_len; i++)
475 				data[i] = i;
476 			tcp->cksum = 0;
477 			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
478 			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
479 				sizeof(struct rte_tcp_hdr);
480 		} else if (flags->plaintext_len) {
481 			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
482 			struct rte_tcp_hdr *tcp;
483 			int64_t payload_len;
484 			uint8_t *data;
485 			int64_t i;
486 
487 			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
488 			payload_len -= sizeof(struct rte_ipv4_hdr);
489 			payload_len -= sizeof(struct rte_tcp_hdr);
490 			if (payload_len <= 8)
491 				payload_len = 8;
492 
493 			/* IPv4 */
494 			ip->next_proto_id = IPPROTO_TCP;
495 			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
496 			ip->total_length = rte_cpu_to_be_16(ip->total_length);
497 			ip->hdr_checksum = 0;
498 			ip->hdr_checksum = rte_ipv4_cksum(ip);
499 
500 			/* TCP */
501 			tcp = (struct rte_tcp_hdr *)(ip + 1);
502 			data = (uint8_t *)(tcp + 1);
503 			for (i = 0; i < payload_len; i++)
504 				data[i] = i;
505 			tcp->cksum = 0;
506 			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
507 			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
508 				sizeof(struct rte_tcp_hdr);
509 		}
510 
511 		if (flags->ah) {
512 			td->ipsec_xform.proto =
513 					RTE_SECURITY_IPSEC_SA_PROTO_AH;
514 		}
515 
516 		if (flags->iv_gen)
517 			td->ipsec_xform.options.iv_gen_disable = 0;
518 
519 		if (flags->sa_expiry_pkts_soft)
520 			td->ipsec_xform.life.packets_soft_limit =
521 					IPSEC_TEST_PACKETS_MAX - 1;
522 
523 		if (flags->ip_csum) {
524 			td->ipsec_xform.options.ip_csum_enable = 1;
525 			test_ipsec_csum_init(&td->input_text.data, true, false);
526 		}
527 
528 		if (flags->l4_csum) {
529 			td->ipsec_xform.options.l4_csum_enable = 1;
530 			test_ipsec_csum_init(&td->input_text.data, false, true);
531 		}
532 
533 		if (flags->transport) {
534 			td->ipsec_xform.mode =
535 					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
536 		} else {
537 			td->ipsec_xform.mode =
538 					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
539 
540 			if (flags->tunnel_ipv6)
541 				td->ipsec_xform.tunnel.type =
542 						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
543 			else
544 				td->ipsec_xform.tunnel.type =
545 						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
546 		}
547 
548 		if (flags->stats_success)
549 			td->ipsec_xform.options.stats = 1;
550 
551 		if (flags->fragment) {
552 			struct rte_ipv4_hdr *ip;
553 			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
554 			ip->fragment_offset = rte_cpu_to_be_16(4);
555 			ip->hdr_checksum = rte_ipv4_cksum(ip);
556 		}
557 
558 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
559 		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
560 			td->ipsec_xform.options.copy_df = 1;
561 
562 		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
563 		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
564 			td->ipsec_xform.options.copy_dscp = 1;
565 
566 		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
567 		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
568 			td->ipsec_xform.options.copy_flabel = 1;
569 
570 		if (flags->dec_ttl_or_hop_limit)
571 			td->ipsec_xform.options.dec_ttl = 1;
572 
573 		if (flags->udp_encap && flags->udp_encap_custom_ports) {
574 			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
575 			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
576 		}
577 	}
578 }
579 
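/*
 * Adjust the inbound vectors after outbound processing: copy the original
 * plain text as the expected inbound output and apply any negative-test
 * tweaks requested by the flags (ICV corruption, hard expiry, UDP
 * encapsulation, tunnel header verification, checksum offloads).
 */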
580 void
581 test_ipsec_td_update(struct ipsec_test_data td_inb[],
582 		     const struct ipsec_test_data td_outb[],
583 		     int nb_td,
584 		     const struct ipsec_test_flags *flags)
585 {
586 	int i;
587 
588 	for (i = 0; i < nb_td; i++) {
589 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
590 		       td_outb[i].input_text.len);
591 		td_inb[i].output_text.len = td_outb[i].input_text.len;
592 
593 		if (flags->icv_corrupt) {
594 			int icv_pos = td_inb[i].input_text.len - 4;
595 			td_inb[i].input_text.data[icv_pos] += 1;
596 		}
597 
598 		if (flags->sa_expiry_pkts_hard)
599 			td_inb[i].ipsec_xform.life.packets_hard_limit =
600 					IPSEC_TEST_PACKETS_MAX - 1;
601 
602 		if (flags->udp_encap)
603 			td_inb[i].ipsec_xform.options.udp_encap = 1;
604 
605 		if (flags->udp_ports_verify)
606 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
607 
608 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
609 			flags->tunnel_hdr_verify;
610 
611 		if (flags->ip_csum)
612 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
613 
614 		if (flags->l4_csum)
615 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
616 
617 		/* Clear outbound specific flags */
618 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
619 	}
620 }
621 
622 void
623 test_ipsec_display_alg(const struct crypto_param *param1,
624 		       const struct crypto_param *param2)
625 {
626 	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
627 		printf("\t%s [%d]",
628 		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
629 		       param1->key_length * 8);
630 	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
631 		printf("\t%s",
632 		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
633 		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
634 			printf(" [%dB ICV]", param1->digest_length);
635 	} else {
636 		printf("\t%s",
637 		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
638 		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
639 			printf(" [%d]", param1->key_length * 8);
640 		printf(" %s",
641 		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
642 		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
643 			printf(" [%dB ICV]", param2->digest_length);
644 	}
645 	printf("\n");
646 }
647 
648 static int
649 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
650 {
651 	int len = 0;
652 
653 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
654 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
655 			if (td->ipsec_xform.tunnel.type ==
656 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
657 				len += sizeof(struct rte_ipv4_hdr);
658 			else
659 				len += sizeof(struct rte_ipv6_hdr);
660 		}
661 	}
662 
663 	return len;
664 }
665 
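/*
 * Verify that the IV generated for this packet has not been seen before and
 * record it. A static queue of up to IPSEC_TEST_PACKETS_MAX previous IVs is
 * kept, so the uniqueness check is only meaningful within one test run.
 */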
666 static int
667 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
668 {
669 	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
670 	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
671 	int i, iv_pos, iv_len;
672 	static int index;
673 
674 	if (td->aead)
675 		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
676 	else
677 		iv_len = td->xform.chain.cipher.cipher.iv.length;
678 
679 	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
680 	output_text += iv_pos;
681 
682 	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
683 
684 	/* Compare against previous values */
685 	for (i = 0; i < index; i++) {
686 		iv_tmp = &iv_queue[i * IV_LEN_MAX];
687 
688 		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
689 			printf("IV repeated\n");
690 			return TEST_FAILED;
691 		}
692 	}
693 
694 	/* Save IV for future comparisons */
695 
696 	iv_tmp = &iv_queue[index * IV_LEN_MAX];
697 	memcpy(iv_tmp, output_text, iv_len);
698 	index++;
699 
700 	if (index == IPSEC_TEST_PACKETS_MAX)
701 		index = 0;
702 
703 	return TEST_SUCCESS;
704 }
705 
706 static int
707 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
708 {
709 	uint16_t actual_cksum, expected_cksum;
710 	struct rte_ipv4_hdr *ip;
711 
712 	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
713 
714 	if (!is_ipv4((void *)ip))
715 		return TEST_SKIPPED;
716 
717 	actual_cksum = ip->hdr_checksum;
718 
719 	ip->hdr_checksum = 0;
720 
721 	expected_cksum = rte_ipv4_cksum(ip);
722 
723 	if (actual_cksum != expected_cksum)
724 		return TEST_FAILED;
725 
726 	return TEST_SUCCESS;
727 }
728 
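/*
 * Recompute the inner TCP/UDP checksum of the decrypted packet and compare
 * it with the checksum carried in the header. The packet is first read into
 * a linear buffer in case it spans multiple mbuf segments.
 */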
729 static int
730 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
731 {
732 	uint16_t actual_cksum = 0, expected_cksum = 0;
733 	uint32_t len = rte_pktmbuf_pkt_len(m);
734 	uint8_t data_arr[IPSEC_TEXT_MAX_LEN];
735 	struct rte_ipv4_hdr *ipv4;
736 	struct rte_ipv6_hdr *ipv6;
737 	uint8_t *data = data_arr;
738 	struct rte_tcp_hdr *tcp;
739 	struct rte_udp_hdr *udp;
740 	const uint8_t *ptr;
741 	void *ip, *l4;
742 
743 	ptr = rte_pktmbuf_read(m, 0, len, data_arr);
744 	if (!ptr)
745 		return -EINVAL;
746 	else if (ptr != data_arr)
747 		data = rte_pktmbuf_mtod_offset(m, uint8_t *, 0);
748 
749 	ip = (struct rte_ipv4_hdr *)data;
750 
751 	if (is_ipv4(ip)) {
752 		ipv4 = ip;
753 		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
754 
755 		switch (ipv4->next_proto_id) {
756 		case IPPROTO_TCP:
757 			tcp = (struct rte_tcp_hdr *)l4;
758 			actual_cksum = tcp->cksum;
759 			tcp->cksum = 0;
760 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
761 			break;
762 		case IPPROTO_UDP:
763 			udp = (struct rte_udp_hdr *)l4;
764 			actual_cksum = udp->dgram_cksum;
765 			udp->dgram_cksum = 0;
766 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
767 			break;
768 		default:
769 			break;
770 		}
771 	} else {
772 		ipv6 = ip;
773 		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
774 
775 		switch (ipv6->proto) {
776 		case IPPROTO_TCP:
777 			tcp = (struct rte_tcp_hdr *)l4;
778 			actual_cksum = tcp->cksum;
779 			tcp->cksum = 0;
780 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
781 			break;
782 		case IPPROTO_UDP:
783 			udp = (struct rte_udp_hdr *)l4;
784 			actual_cksum = udp->dgram_cksum;
785 			udp->dgram_cksum = 0;
786 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
787 			break;
788 		default:
789 			break;
790 		}
791 	}
792 
793 	if (actual_cksum != expected_cksum)
794 		return TEST_FAILED;
795 
796 	return TEST_SUCCESS;
797 }
798 
799 static int
800 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
801 {
802 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
803 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
804 
805 	if (is_ipv4(received) && is_ipv4(expected)) {
806 		iph4_ex = expected;
807 		iph4_re = received;
808 		iph4_ex->time_to_live -= 1;
809 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
810 			return TEST_FAILED;
811 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
812 		iph6_ex = expected;
813 		iph6_re = received;
814 		iph6_ex->hop_limits -= 1;
815 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
816 			return TEST_FAILED;
817 	} else {
818 		printf("IP header version mismatch\n");
819 		return TEST_FAILED;
820 	}
821 
822 	return TEST_SUCCESS;
823 }
824 
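/*
 * Compare the processed packet against the expected output of the test
 * vector, skipping any UDP encapsulation and outer tunnel header and taking
 * the checksum and TTL/hop limit related flags into account.
 */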
825 static int
826 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
827 		     bool silent, const struct ipsec_test_flags *flags)
828 {
829 	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
830 	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
831 	uint8_t data_arr[IPSEC_TEXT_MAX_LEN];
832 	uint8_t *output_text = data_arr;
833 	const uint8_t *ptr;
834 	int ret;
835 
836 	/* Skip output verification for tests where an error status is the expected outcome */
837 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
838 	    (flags->icv_corrupt ||
839 	     flags->sa_expiry_pkts_hard ||
840 	     flags->tunnel_hdr_verify ||
841 	     td->ar_packet))
842 		return TEST_SUCCESS;
843 
844 	ptr = rte_pktmbuf_read(m, 0, len, data_arr);
845 	if (!ptr)
846 		return -EINVAL;
847 	else if (ptr != data_arr)
848 		output_text = rte_pktmbuf_mtod_offset(m, uint8_t *, 0);
849 
850 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
851 	   flags->udp_encap) {
852 
853 		len -= sizeof(struct rte_udp_hdr);
854 		output_text += sizeof(struct rte_udp_hdr);
855 	}
856 
857 	if (len != td->output_text.len) {
858 		printf("Output length (%u) does not match expected length (%u)\n",
859 			len, td->output_text.len);
860 		return TEST_FAILED;
861 	}
862 
863 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
864 				flags->fragment) {
865 		const struct rte_ipv4_hdr *iph4;
866 		iph4 = (const struct rte_ipv4_hdr *)output_text;
867 		if (iph4->fragment_offset) {
868 			printf("Output packet is fragmented\n");
869 			return TEST_FAILED;
870 		}
871 	}
872 
873 	skip = test_ipsec_tunnel_hdr_len_get(td);
874 
875 	len -= skip;
876 	output_text += skip;
877 
878 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
879 				flags->ip_csum) {
880 		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
881 			ret = test_ipsec_l3_csum_verify(m);
882 		else
883 			ret = TEST_FAILED;
884 
885 		if (ret == TEST_FAILED)
886 			printf("Inner IP checksum test failed\n");
887 
888 		return ret;
889 	}
890 
891 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
892 				flags->l4_csum) {
893 		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
894 			ret = test_ipsec_l4_csum_verify(m);
895 		else
896 			ret = TEST_FAILED;
897 
898 		if (ret == TEST_FAILED)
899 			printf("Inner L4 checksum test failed\n");
900 
901 		return ret;
902 	}
903 
904 	memcpy(td_output_text, td->output_text.data + skip, len);
905 
906 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
907 				flags->dec_ttl_or_hop_limit) {
908 		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
909 			printf("Inner TTL/hop limit decrement test failed\n");
910 			return TEST_FAILED;
911 		}
912 	}
913 
914 	if (test_ipsec_pkt_update(td_output_text, flags)) {
915 		printf("Could not update expected vector\n");
916 		return TEST_FAILED;
917 	}
918 
919 	if (memcmp(output_text, td_output_text, len)) {
920 		if (silent)
921 			return TEST_FAILED;
922 
923 		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
924 			"output text not as expected");
925 
926 		rte_hexdump(stdout, "expected", td_output_text, len);
927 		rte_hexdump(stdout, "actual", output_text, len);
928 		return TEST_FAILED;
929 	}
930 
931 	return TEST_SUCCESS;
932 }
933 
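/*
 * For combined-mode tests, capture the outbound result (possibly spread
 * across chained mbuf segments) as the input of a new inbound test vector
 * and flip the direction and crypto operations to decrypt/verify.
 */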
934 static int
935 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
936 		   struct ipsec_test_data *res_d)
937 {
938 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
939 	uint32_t len = rte_pktmbuf_pkt_len(m);
940 	struct rte_mbuf *next = m;
941 	uint32_t off = 0;
942 
943 	memcpy(res_d, td, sizeof(*res_d));
944 
945 	while (next && off < len) {
946 		output_text = rte_pktmbuf_mtod(next, uint8_t *);
947 		if (off + next->data_len > sizeof(res_d->input_text.data))
948 			break;
949 		memcpy(&res_d->input_text.data[off], output_text, next->data_len);
950 		off += next->data_len;
951 		next = next->next;
952 	}
953 	res_d->input_text.len = off;
954 
955 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
956 	if (res_d->aead) {
957 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
958 	} else {
959 		res_d->xform.chain.cipher.cipher.op =
960 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
961 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
962 	}
963 
964 	return TEST_SUCCESS;
965 }
966 
967 static int
968 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
969 			     const struct ipsec_test_flags *flags)
970 {
971 	uint8_t tos, dscp;
972 	uint16_t f_off;
973 
974 	if (!is_valid_ipv4_pkt(iph4)) {
975 		printf("Tunnel outer header is not IPv4\n");
976 		return -1;
977 	}
978 
979 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
980 		printf("Tunnel outer header proto is not AH\n");
981 		return -1;
982 	}
983 
984 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
985 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
986 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
987 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
988 			printf("DF bit is not set\n");
989 			return -1;
990 		}
991 	} else {
992 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
993 			printf("DF bit is set\n");
994 			return -1;
995 		}
996 	}
997 
998 	tos = iph4->type_of_service;
999 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
1000 
1001 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1002 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
1003 		if (dscp != TEST_IPSEC_DSCP_VAL) {
1004 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
1005 			       TEST_IPSEC_DSCP_VAL, dscp);
1006 			return -1;
1007 		}
1008 	} else {
1009 		if (dscp != 0) {
1010 			printf("DSCP value is set [exp: 0, actual: %x]\n",
1011 			       dscp);
1012 			return -1;
1013 		}
1014 	}
1015 
1016 	return 0;
1017 }
1018 
1019 static int
1020 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
1021 			     const struct ipsec_test_flags *flags)
1022 {
1023 	uint32_t vtc_flow;
1024 	uint32_t flabel;
1025 	uint8_t dscp;
1026 
1027 	if (!is_valid_ipv6_pkt(iph6)) {
1028 		printf("Tunnel outer header is not IPv6\n");
1029 		return -1;
1030 	}
1031 
1032 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1033 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
1034 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
1035 
1036 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1037 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
1038 		if (dscp != TEST_IPSEC_DSCP_VAL) {
1039 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
1040 			       TEST_IPSEC_DSCP_VAL, dscp);
1041 			return -1;
1042 		}
1043 	} else {
1044 		if (dscp != 0) {
1045 			printf("DSCP value is set [exp: 0, actual: %x]\n",
1046 			       dscp);
1047 			return -1;
1048 		}
1049 	}
1050 
1051 	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
1052 
1053 	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1054 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1055 		if (flabel != TEST_IPSEC_FLABEL_VAL) {
1056 			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
1057 			       TEST_IPSEC_FLABEL_VAL, flabel);
1058 			return -1;
1059 		}
1060 	} else {
1061 		if (flabel != 0) {
1062 			printf("FLABEL value is set [exp: 0, actual: %x]\n",
1063 			       flabel);
1064 			return -1;
1065 		}
1066 	}
1067 
1068 	return 0;
1069 }
1070 
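/*
 * Validate the outer headers of an egress packet (generated IV, IP version,
 * DF/DSCP/flow label, UDP encapsulation) and then either verify the payload
 * against the known vector (res_d == NULL) or prepare res_d as input for a
 * follow-up inbound pass. A combined-mode caller (sketch, not taken from
 * this file) would look roughly like:
 *
 *	test_ipsec_post_process(m, &td_outb, &td_inb, silent, flags);
 *	(inbound processing of the packet described by td_inb goes here)
 *	test_ipsec_post_process(m, &td_inb, NULL, silent, flags);
 */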
1071 int
1072 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
1073 			struct ipsec_test_data *res_d, bool silent,
1074 			const struct ipsec_test_flags *flags)
1075 {
1076 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
1077 	int ret;
1078 
1079 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1080 		const struct rte_ipv4_hdr *iph4;
1081 		const struct rte_ipv6_hdr *iph6;
1082 
1083 		if (flags->iv_gen) {
1084 			ret = test_ipsec_iv_verify_push(m, td);
1085 			if (ret != TEST_SUCCESS)
1086 				return ret;
1087 		}
1088 
1089 		iph4 = (const struct rte_ipv4_hdr *)output_text;
1090 
1091 		if (td->ipsec_xform.mode ==
1092 				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
1093 			if (flags->ipv6) {
1094 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1095 				if (is_valid_ipv6_pkt(iph6) == false) {
1096 					printf("Transport packet is not IPv6\n");
1097 					return TEST_FAILED;
1098 				}
1099 			} else {
1100 				if (is_valid_ipv4_pkt(iph4) == false) {
1101 					printf("Transport packet is not IPv4\n");
1102 					return TEST_FAILED;
1103 				}
1104 
1105 				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
1106 					printf("Transport IPv4 header proto is not AH\n");
1107 					return -1;
1108 				}
1109 			}
1110 		} else {
1111 			if (td->ipsec_xform.tunnel.type ==
1112 					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1113 				if (test_ipsec_iph4_hdr_validate(iph4, flags))
1114 					return TEST_FAILED;
1115 			} else {
1116 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1117 				if (test_ipsec_iph6_hdr_validate(iph6, flags))
1118 					return TEST_FAILED;
1119 			}
1120 		}
1121 	}
1122 
1123 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
1124 	   flags->udp_encap) {
1125 		const struct rte_ipv4_hdr *iph4;
1126 		const struct rte_ipv6_hdr *iph6;
1127 
1128 		if (td->ipsec_xform.tunnel.type ==
1129 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1130 			iph4 = (const struct rte_ipv4_hdr *)output_text;
1131 
1132 			if (iph4->next_proto_id != IPPROTO_UDP) {
1133 				printf("UDP header is not found\n");
1134 				return TEST_FAILED;
1135 			}
1136 
1137 			if (flags->udp_encap_custom_ports) {
1138 				const struct rte_udp_hdr *udph;
1139 
1140 				udph = (const struct rte_udp_hdr *)(output_text +
1141 					sizeof(struct rte_ipv4_hdr));
1142 				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
1143 				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
1144 					printf("UDP custom ports not matching.\n");
1145 					return TEST_FAILED;
1146 				}
1147 			}
1148 		} else {
1149 			iph6 = (const struct rte_ipv6_hdr *)output_text;
1150 
1151 			if (iph6->proto != IPPROTO_UDP) {
1152 				printf("UDP header is not found\n");
1153 				return TEST_FAILED;
1154 			}
1155 
1156 			if (flags->udp_encap_custom_ports) {
1157 				const struct rte_udp_hdr *udph;
1158 
1159 				udph = (const struct rte_udp_hdr *)(output_text +
1160 					sizeof(struct rte_ipv6_hdr));
1161 				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
1162 				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
1163 					printf("UDP custom ports not matching.\n");
1164 					return TEST_FAILED;
1165 				}
1166 			}
1167 		}
1168 	}
1169 
1170 	/*
1171 	 * In case of known vector tests and all inbound tests, the res_d
1172 	 * provided would be NULL and the output data needs to be validated
1173 	 * against the expected data. For inbound, output_text would be the
1174 	 * plain packet and for outbound, output_text would be the IPsec
1175 	 * packet. Validate by comparing against the known vectors.
1176 	 *
1177 	 * In case of combined mode tests, the output_text from outbound
1178 	 * operation (i.e., the IPsec packet) would need to be inbound processed to
1179 	 * obtain the plain text. Copy output_text to result data, 'res_d', so
1180 	 * that inbound processing can be done.
1181 	 */
1182 
1183 	if (res_d == NULL)
1184 		return test_ipsec_td_verify(m, td, silent, flags);
1185 	else
1186 		return test_ipsec_res_d_prepare(m, td, res_d);
1187 }
1188 
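/*
 * Map the crypto op status onto a test verdict: negative tests (anti-replay,
 * hard packet expiry, tunnel header verification, ICV corruption) expect
 * RTE_CRYPTO_OP_STATUS_ERROR, all other cases expect success. Soft expiry is
 * additionally reported through the op's aux_flags.
 */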
1189 int
1190 test_ipsec_status_check(const struct ipsec_test_data *td,
1191 			struct rte_crypto_op *op,
1192 			const struct ipsec_test_flags *flags,
1193 			enum rte_security_ipsec_sa_direction dir,
1194 			int pkt_num)
1195 {
1196 	int ret = TEST_SUCCESS;
1197 
1198 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1199 	    td->ar_packet) {
1200 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1201 			printf("Anti replay test case failed\n");
1202 			return TEST_FAILED;
1203 		} else {
1204 			return TEST_SUCCESS;
1205 		}
1206 	}
1207 
1208 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1209 	    flags->sa_expiry_pkts_hard &&
1210 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
1211 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1212 			printf("SA hard expiry (pkts) test failed\n");
1213 			return TEST_FAILED;
1214 		} else {
1215 			return TEST_SUCCESS;
1216 		}
1217 	}
1218 
1219 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1220 	    flags->tunnel_hdr_verify) {
1221 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1222 			printf("Tunnel header verify test case failed\n");
1223 			return TEST_FAILED;
1224 		} else {
1225 			return TEST_SUCCESS;
1226 		}
1227 	}
1228 
1229 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1230 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1231 			printf("ICV corruption test case failed\n");
1232 			ret = TEST_FAILED;
1233 		}
1234 	} else {
1235 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1236 			printf("Security op processing failed [pkt_num: %d]\n",
1237 			       pkt_num);
1238 			ret = TEST_FAILED;
1239 		}
1240 	}
1241 
1242 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1243 		if (!(op->aux_flags &
1244 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1245 			printf("SA soft expiry (pkts) test failed\n");
1246 			ret = TEST_FAILED;
1247 		}
1248 	}
1249 
1250 	return ret;
1251 }
1252 
1253 int
1254 test_ipsec_stats_verify(struct rte_security_ctx *ctx,
1255 			void *sess,
1256 			const struct ipsec_test_flags *flags,
1257 			enum rte_security_ipsec_sa_direction dir)
1258 {
1259 	struct rte_security_stats stats = {0};
1260 	int ret = TEST_SUCCESS;
1261 
1262 	if (flags->stats_success) {
1263 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1264 			return TEST_FAILED;
1265 
1266 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1267 			if (stats.ipsec.opackets != 1 ||
1268 			    stats.ipsec.oerrors != 0)
1269 				ret = TEST_FAILED;
1270 		} else {
1271 			if (stats.ipsec.ipackets != 1 ||
1272 			    stats.ipsec.ierrors != 0)
1273 				ret = TEST_FAILED;
1274 		}
1275 	}
1276 
1277 	return ret;
1278 }
1279 
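/*
 * Apply the DF/DSCP/flow label expectations from the test flags to the
 * expected packet so that the comparison in test_ipsec_td_verify() matches
 * what the configured SA options should produce. The IPv4 header checksum
 * is recomputed if any header field was modified.
 */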
1280 int
1281 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1282 {
1283 	struct rte_ipv4_hdr *iph4;
1284 	struct rte_ipv6_hdr *iph6;
1285 	bool cksum_dirty = false;
1286 
1287 	iph4 = (struct rte_ipv4_hdr *)pkt;
1288 
1289 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1290 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1291 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1292 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1293 		uint16_t frag_off;
1294 
1295 		if (!is_ipv4(iph4)) {
1296 			printf("Invalid packet type\n");
1297 			return -1;
1298 		}
1299 
1300 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1301 
1302 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1303 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1304 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1305 		else
1306 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1307 
1308 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1309 		cksum_dirty = true;
1310 	}
1311 
1312 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1313 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1314 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1315 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1316 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1317 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1318 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1319 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1320 
1321 		if (is_ipv4(iph4)) {
1322 			uint8_t tos;
1323 
1324 			tos = iph4->type_of_service;
1325 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1326 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1327 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1328 					(TEST_IPSEC_DSCP_VAL << 2));
1329 			else
1330 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1331 
1332 			iph4->type_of_service = tos;
1333 			cksum_dirty = true;
1334 		} else {
1335 			uint32_t vtc_flow;
1336 
1337 			iph6 = (struct rte_ipv6_hdr *)pkt;
1338 
1339 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1340 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1341 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1342 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1343 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1344 			else
1345 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1346 
1347 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1348 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1349 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1350 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1351 			else
1352 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1353 
1354 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1355 		}
1356 	}
1357 
1358 	if (cksum_dirty && is_ipv4(iph4)) {
1359 		iph4->hdr_checksum = 0;
1360 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1361 	}
1362 
1363 	return 0;
1364 }
1365