xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision efb1a06bb3f8dbcce5e43b49d23d73aaf80b2c8f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #ifndef RTE_EXEC_ENV_WINDOWS
6 
7 #include <rte_common.h>
8 #include <rte_cryptodev.h>
9 #include <rte_esp.h>
10 #include <rte_ip.h>
11 #include <rte_security.h>
12 #include <rte_tcp.h>
13 #include <rte_udp.h>
14 
15 #include "test.h"
16 #include "test_cryptodev_security_ipsec.h"
17 
18 #define IV_LEN_MAX 16
19 
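/*
 * alg_list holds each AEAD algorithm as a standalone transform followed by
 * every cipher + auth combination. ah_alg_list holds auth-only transforms
 * (auth_list[0] is skipped) followed by NULL cipher (cipher_list[0]) + auth
 * combinations. Both arrays are filled by the populate routines below.
 */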
20 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
21 				  (RTE_DIM(cipher_list) *
22 				   RTE_DIM(auth_list))];
23 
24 struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
25 
26 static bool
27 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
28 {
29 	/* The IP version number must be 4 */
30 	if (((pkt->version_ihl) >> 4) != 4)
31 		return false;
32 	/*
33 	 * The IP header length field must be large enough to hold the
34 	 * minimum length legal IP datagram (20 bytes = 5 words).
35 	 */
36 	if ((pkt->version_ihl & 0xf) < 5)
37 		return false;
38 
39 	/*
40 	 * The IP total length field must be large enough to hold the IP
41 	 * datagram header, whose length is specified in the IP header length
42 	 * field.
43 	 */
44 	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
45 		return false;
46 
47 	return true;
48 }
49 
50 static bool
51 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
52 {
53 	/* The IP version number must be 6 */
54 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
55 		return false;
56 
57 	return true;
58 }
59 
60 void
61 test_ipsec_alg_list_populate(void)
62 {
63 	unsigned long i, j, index = 0;
64 
65 	for (i = 0; i < RTE_DIM(aead_list); i++) {
66 		alg_list[index].param1 = &aead_list[i];
67 		alg_list[index].param2 = NULL;
68 		index++;
69 	}
70 
71 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
72 		for (j = 0; j < RTE_DIM(auth_list); j++) {
73 			alg_list[index].param1 = &cipher_list[i];
74 			alg_list[index].param2 = &auth_list[j];
75 			index++;
76 		}
77 	}
78 }
79 
80 void
81 test_ipsec_ah_alg_list_populate(void)
82 {
83 	unsigned long i, index = 0;
84 
85 	for (i = 1; i < RTE_DIM(auth_list); i++) {
86 		ah_alg_list[index].param1 = &auth_list[i];
87 		ah_alg_list[index].param2 = NULL;
88 		index++;
89 	}
90 
91 	for (i = 1; i < RTE_DIM(auth_list); i++) {
92 		/* NULL cipher */
93 		ah_alg_list[index].param1 = &cipher_list[0];
94 
95 		ah_alg_list[index].param2 = &auth_list[i];
96 		index++;
97 	}
98 }
99 
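/*
 * Check that every IPsec option requested in the xform (ESN, UDP
 * encapsulation, header field copy options, checksum offloads, replay window
 * size, etc.) is advertised in the security capability. Returns -ENOTSUP on
 * the first unsupported option, logging the reason unless 'silent' is set.
 */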
100 int
101 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
102 			   const struct rte_security_capability *sec_cap,
103 			   bool silent)
104 {
105 	/* Verify security capabilities */
106 
107 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
108 		if (!silent)
109 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
110 		return -ENOTSUP;
111 	}
112 
113 	if (ipsec_xform->options.udp_encap == 1 &&
114 	    sec_cap->ipsec.options.udp_encap == 0) {
115 		if (!silent)
116 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
117 		return -ENOTSUP;
118 	}
119 
120 	if (ipsec_xform->options.udp_ports_verify == 1 &&
121 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
122 		if (!silent)
123 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
124 				"verification is not supported\n");
125 		return -ENOTSUP;
126 	}
127 
128 	if (ipsec_xform->options.copy_dscp == 1 &&
129 	    sec_cap->ipsec.options.copy_dscp == 0) {
130 		if (!silent)
131 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
132 		return -ENOTSUP;
133 	}
134 
135 	if (ipsec_xform->options.copy_flabel == 1 &&
136 	    sec_cap->ipsec.options.copy_flabel == 0) {
137 		if (!silent)
138 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
139 		return -ENOTSUP;
140 	}
141 
142 	if (ipsec_xform->options.copy_df == 1 &&
143 	    sec_cap->ipsec.options.copy_df == 0) {
144 		if (!silent)
145 			RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
146 		return -ENOTSUP;
147 	}
148 
149 	if (ipsec_xform->options.dec_ttl == 1 &&
150 	    sec_cap->ipsec.options.dec_ttl == 0) {
151 		if (!silent)
152 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
153 		return -ENOTSUP;
154 	}
155 
156 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
157 		if (!silent)
158 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
159 		return -ENOTSUP;
160 	}
161 
162 	if (ipsec_xform->options.stats == 1 &&
163 	    sec_cap->ipsec.options.stats == 0) {
164 		if (!silent)
165 			RTE_LOG(INFO, USER1, "Stats is not supported\n");
166 		return -ENOTSUP;
167 	}
168 
169 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
170 	    (ipsec_xform->options.iv_gen_disable == 1) &&
171 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
172 		if (!silent)
173 			RTE_LOG(INFO, USER1,
174 				"Application provided IV is not supported\n");
175 		return -ENOTSUP;
176 	}
177 
178 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
179 	    (ipsec_xform->options.tunnel_hdr_verify >
180 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
181 		if (!silent)
182 			RTE_LOG(INFO, USER1,
183 				"Tunnel header verify is not supported\n");
184 		return -ENOTSUP;
185 	}
186 
187 	if (ipsec_xform->options.ip_csum_enable == 1 &&
188 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
189 		if (!silent)
190 			RTE_LOG(INFO, USER1,
191 				"Inner IP checksum is not supported\n");
192 		return -ENOTSUP;
193 	}
194 
195 	if (ipsec_xform->options.l4_csum_enable == 1 &&
196 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
197 		if (!silent)
198 			RTE_LOG(INFO, USER1,
199 				"Inner L4 checksum is not supported\n");
200 		return -ENOTSUP;
201 	}
202 
203 	if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
204 		if (!silent)
205 			RTE_LOG(INFO, USER1,
206 				"Replay window size is not supported\n");
207 		return -ENOTSUP;
208 	}
209 
210 	return 0;
211 }
212 
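/*
 * Walk the crypto_capabilities array of the security capability (terminated
 * by an entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED) looking for a matching
 * AEAD algorithm, then verify key, digest, AAD and IV lengths against it.
 * The cipher and auth variants below follow the same pattern.
 */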
213 int
214 test_ipsec_crypto_caps_aead_verify(
215 		const struct rte_security_capability *sec_cap,
216 		struct rte_crypto_sym_xform *aead)
217 {
218 	const struct rte_cryptodev_symmetric_capability *sym_cap;
219 	const struct rte_cryptodev_capabilities *crypto_cap;
220 	int j = 0;
221 
222 	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
223 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
224 		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
225 				crypto_cap->sym.xform_type == aead->type &&
226 				crypto_cap->sym.aead.algo == aead->aead.algo) {
227 			sym_cap = &crypto_cap->sym;
228 			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
229 					aead->aead.key.length,
230 					aead->aead.digest_length,
231 					aead->aead.aad_length,
232 					aead->aead.iv.length) == 0)
233 				return 0;
234 		}
235 	}
236 
237 	return -ENOTSUP;
238 }
239 
240 int
241 test_ipsec_crypto_caps_cipher_verify(
242 		const struct rte_security_capability *sec_cap,
243 		struct rte_crypto_sym_xform *cipher)
244 {
245 	const struct rte_cryptodev_symmetric_capability *sym_cap;
246 	const struct rte_cryptodev_capabilities *cap;
247 	int j = 0;
248 
249 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
250 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
251 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
252 				cap->sym.xform_type == cipher->type &&
253 				cap->sym.cipher.algo == cipher->cipher.algo) {
254 			sym_cap = &cap->sym;
255 			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
256 					cipher->cipher.key.length,
257 					cipher->cipher.iv.length) == 0)
258 				return 0;
259 		}
260 	}
261 
262 	return -ENOTSUP;
263 }
264 
265 int
266 test_ipsec_crypto_caps_auth_verify(
267 		const struct rte_security_capability *sec_cap,
268 		struct rte_crypto_sym_xform *auth)
269 {
270 	const struct rte_cryptodev_symmetric_capability *sym_cap;
271 	const struct rte_cryptodev_capabilities *cap;
272 	int j = 0;
273 
274 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
275 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
276 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
277 				cap->sym.xform_type == auth->type &&
278 				cap->sym.auth.algo == auth->auth.algo) {
279 			sym_cap = &cap->sym;
280 			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
281 					auth->auth.key.length,
282 					auth->auth.digest_length,
283 					auth->auth.iv.length) == 0)
284 				return 0;
285 		}
286 	}
287 
288 	return -ENOTSUP;
289 }
290 
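/*
 * Derive an inbound test vector from an outbound one: swap input and output
 * text, flip the SA direction to ingress and switch the crypto operations to
 * decrypt/verify.
 */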
291 void
292 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
293 			  struct ipsec_test_data *td_in)
294 {
295 	memcpy(td_in, td_out, sizeof(*td_in));
296 
297 	/* Populate output text of td_in with input text of td_out */
298 	memcpy(td_in->output_text.data, td_out->input_text.data,
299 	       td_out->input_text.len);
300 	td_in->output_text.len = td_out->input_text.len;
301 
302 	/* Populate input text of td_in with output text of td_out */
303 	memcpy(td_in->input_text.data, td_out->output_text.data,
304 	       td_out->output_text.len);
305 	td_in->input_text.len = td_out->output_text.len;
306 
307 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
308 
309 	if (td_in->aead) {
310 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
311 	} else {
312 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
313 		td_in->xform.chain.cipher.cipher.op =
314 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
315 	}
316 }
317 
318 static bool
319 is_ipv4(void *ip)
320 {
321 	struct rte_ipv4_hdr *ipv4 = ip;
322 	uint8_t ip_ver;
323 
324 	ip_ver = (ipv4->version_ihl & 0xf0) >> 4;
325 	if (ip_ver == IPVERSION)
326 		return true;
327 	else
328 		return false;
329 }
330 
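/*
 * Zero the IPv4 header checksum and/or the inner TCP/UDP checksum of the
 * plain packet. Used when inner L3/L4 checksum offload is being exercised,
 * so that the PMD is expected to fill in the checksums.
 */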
331 static void
332 test_ipsec_csum_init(void *ip, bool l3, bool l4)
333 {
334 	struct rte_ipv4_hdr *ipv4;
335 	struct rte_tcp_hdr *tcp;
336 	struct rte_udp_hdr *udp;
337 	uint8_t next_proto;
338 	uint8_t size;
339 
340 	if (is_ipv4(ip)) {
341 		ipv4 = ip;
342 		size = sizeof(struct rte_ipv4_hdr);
343 		next_proto = ipv4->next_proto_id;
344 
345 		if (l3)
346 			ipv4->hdr_checksum = 0;
347 	} else {
348 		size = sizeof(struct rte_ipv6_hdr);
349 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
350 	}
351 
352 	if (l4) {
353 		switch (next_proto) {
354 		case IPPROTO_TCP:
355 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
356 			tcp->cksum = 0;
357 			break;
358 		case IPPROTO_UDP:
359 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
360 			udp->dgram_cksum = 0;
361 			break;
362 		default:
363 			return;
364 		}
365 	}
366 }
367 
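/*
 * Build 'nb_td' test vectors from the AES-256-GCM or AES-128-CBC + HMAC-SHA256
 * templates, override the algorithms and key/IV/digest lengths from the given
 * crypto parameters, and then apply the per-test flags (AH, IV generation,
 * soft expiry, inner checksum offloads, transport vs tunnel mode, stats,
 * fragmented input, DF/DSCP/flow label copy options, TTL decrement).
 */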
368 void
369 test_ipsec_td_prepare(const struct crypto_param *param1,
370 		      const struct crypto_param *param2,
371 		      const struct ipsec_test_flags *flags,
372 		      struct ipsec_test_data *td_array,
373 		      int nb_td)
374 
375 {
376 	struct ipsec_test_data *td;
377 	int i;
378 
379 	memset(td_array, 0, nb_td * sizeof(*td));
380 
381 	for (i = 0; i < nb_td; i++) {
382 		td = &td_array[i];
383 
384 		/* Prepare fields based on param */
385 
386 		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
387 			/* Copy template for packet & key fields */
388 			if (flags->ipv6)
389 				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
390 			else
391 				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
392 
393 			td->aead = true;
394 			td->xform.aead.aead.algo = param1->alg.aead;
395 			td->xform.aead.aead.key.length = param1->key_length;
396 		} else {
397 			/* Copy template for packet & key fields */
398 			if (flags->ipv6)
399 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
400 					sizeof(*td));
401 			else
402 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
403 					sizeof(*td));
404 
405 			td->aead = false;
406 
407 			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
408 				td->xform.chain.auth.auth.algo =
409 						param1->alg.auth;
410 				td->xform.chain.auth.auth.key.length =
411 						param1->key_length;
412 				td->xform.chain.auth.auth.digest_length =
413 						param1->digest_length;
414 				td->auth_only = true;
415 
416 				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
417 					td->xform.chain.auth.auth.iv.length =
418 						param1->iv_length;
419 					td->aes_gmac = true;
420 				}
421 			} else {
422 				td->xform.chain.cipher.cipher.algo =
423 						param1->alg.cipher;
424 				td->xform.chain.cipher.cipher.key.length =
425 						param1->key_length;
426 				td->xform.chain.cipher.cipher.iv.length =
427 						param1->iv_length;
428 				td->xform.chain.auth.auth.algo =
429 						param2->alg.auth;
430 				td->xform.chain.auth.auth.key.length =
431 						param2->key_length;
432 				td->xform.chain.auth.auth.digest_length =
433 						param2->digest_length;
434 
435 				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
436 					td->xform.chain.auth.auth.iv.length =
437 						param2->iv_length;
438 					td->aes_gmac = true;
439 				}
440 			}
441 		}
442 
443 		if (flags->ah) {
444 			td->ipsec_xform.proto =
445 					RTE_SECURITY_IPSEC_SA_PROTO_AH;
446 		}
447 
448 		if (flags->iv_gen)
449 			td->ipsec_xform.options.iv_gen_disable = 0;
450 
451 		if (flags->sa_expiry_pkts_soft)
452 			td->ipsec_xform.life.packets_soft_limit =
453 					IPSEC_TEST_PACKETS_MAX - 1;
454 
455 		if (flags->ip_csum) {
456 			td->ipsec_xform.options.ip_csum_enable = 1;
457 			test_ipsec_csum_init(&td->input_text.data, true, false);
458 		}
459 
460 		if (flags->l4_csum) {
461 			td->ipsec_xform.options.l4_csum_enable = 1;
462 			test_ipsec_csum_init(&td->input_text.data, false, true);
463 		}
464 
465 		if (flags->transport) {
466 			td->ipsec_xform.mode =
467 					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
468 		} else {
469 			td->ipsec_xform.mode =
470 					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
471 
472 			if (flags->tunnel_ipv6)
473 				td->ipsec_xform.tunnel.type =
474 						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
475 			else
476 				td->ipsec_xform.tunnel.type =
477 						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
478 		}
479 
480 		if (flags->stats_success)
481 			td->ipsec_xform.options.stats = 1;
482 
483 		if (flags->fragment) {
484 			struct rte_ipv4_hdr *ip;
485 			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
486 			ip->fragment_offset = 4;
487 			ip->hdr_checksum = rte_ipv4_cksum(ip);
488 		}
489 
490 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
491 		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
492 			td->ipsec_xform.options.copy_df = 1;
493 
494 		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
495 		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
496 			td->ipsec_xform.options.copy_dscp = 1;
497 
498 		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
499 		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
500 			td->ipsec_xform.options.copy_flabel = 1;
501 
502 		if (flags->dec_ttl_or_hop_limit)
503 			td->ipsec_xform.options.dec_ttl = 1;
504 	}
505 }
506 
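/*
 * Adjust the inbound vectors once the outbound vectors are known: the
 * outbound input text becomes the expected inbound output, and inbound-only
 * flags (ICV corruption, hard expiry, UDP encapsulation, port verification,
 * tunnel header verification, checksum offloads) are applied.
 */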
507 void
508 test_ipsec_td_update(struct ipsec_test_data td_inb[],
509 		     const struct ipsec_test_data td_outb[],
510 		     int nb_td,
511 		     const struct ipsec_test_flags *flags)
512 {
513 	int i;
514 
515 	for (i = 0; i < nb_td; i++) {
516 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
517 		       td_outb[i].input_text.len);
518 		td_inb[i].output_text.len = td_outb[i].input_text.len;
519 
520 		if (flags->icv_corrupt) {
521 			int icv_pos = td_inb[i].input_text.len - 4;
522 			td_inb[i].input_text.data[icv_pos] += 1;
523 		}
524 
525 		if (flags->sa_expiry_pkts_hard)
526 			td_inb[i].ipsec_xform.life.packets_hard_limit =
527 					IPSEC_TEST_PACKETS_MAX - 1;
528 
529 		if (flags->udp_encap)
530 			td_inb[i].ipsec_xform.options.udp_encap = 1;
531 
532 		if (flags->udp_ports_verify)
533 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
534 
535 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
536 			flags->tunnel_hdr_verify;
537 
538 		if (flags->ip_csum)
539 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
540 
541 		if (flags->l4_csum)
542 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
543 
544 		/* Clear outbound specific flags */
545 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
546 	}
547 }
548 
549 void
550 test_ipsec_display_alg(const struct crypto_param *param1,
551 		       const struct crypto_param *param2)
552 {
553 	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
554 		printf("\t%s [%d]",
555 		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
556 		       param1->key_length * 8);
557 	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
558 		printf("\t%s",
559 		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
560 		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
561 			printf(" [%dB ICV]", param1->digest_length);
562 	} else {
563 		printf("\t%s",
564 		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
565 		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
566 			printf(" [%d]", param1->key_length * 8);
567 		printf(" %s",
568 		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
569 		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
570 			printf(" [%dB ICV]", param2->digest_length);
571 	}
572 	printf("\n");
573 }
574 
575 static int
576 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
577 {
578 	int len = 0;
579 
580 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
581 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
582 			if (td->ipsec_xform.tunnel.type ==
583 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
584 				len += sizeof(struct rte_ipv4_hdr);
585 			else
586 				len += sizeof(struct rte_ipv6_hdr);
587 		}
588 	}
589 
590 	return len;
591 }
592 
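/*
 * Verify that the IV generated for an outbound packet has not been used
 * before: compare the IV that follows the ESP header against the IVs saved
 * from previous packets, then store it, wrapping the static queue after
 * IPSEC_TEST_PACKETS_MAX entries.
 */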
593 static int
594 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
595 {
596 	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
597 	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
598 	int i, iv_pos, iv_len;
599 	static int index;
600 
601 	if (td->aead)
602 		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
603 	else
604 		iv_len = td->xform.chain.cipher.cipher.iv.length;
605 
606 	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
607 	output_text += iv_pos;
608 
609 	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
610 
611 	/* Compare against previous values */
612 	for (i = 0; i < index; i++) {
613 		iv_tmp = &iv_queue[i * IV_LEN_MAX];
614 
615 		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
616 			printf("IV repeated");
617 			return TEST_FAILED;
618 		}
619 	}
620 
621 	/* Save IV for future comparisons */
622 
623 	iv_tmp = &iv_queue[index * IV_LEN_MAX];
624 	memcpy(iv_tmp, output_text, iv_len);
625 	index++;
626 
627 	if (index == IPSEC_TEST_PACKETS_MAX)
628 		index = 0;
629 
630 	return TEST_SUCCESS;
631 }
632 
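/*
 * The two helpers below recompute the inner IPv4 header checksum and the
 * inner TCP/UDP checksum of the decapsulated packet and compare them against
 * the checksums carried in the packet.
 */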
633 static int
634 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
635 {
636 	uint16_t actual_cksum, expected_cksum;
637 	struct rte_ipv4_hdr *ip;
638 
639 	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
640 
641 	if (!is_ipv4((void *)ip))
642 		return TEST_SKIPPED;
643 
644 	actual_cksum = ip->hdr_checksum;
645 
646 	ip->hdr_checksum = 0;
647 
648 	expected_cksum = rte_ipv4_cksum(ip);
649 
650 	if (actual_cksum != expected_cksum)
651 		return TEST_FAILED;
652 
653 	return TEST_SUCCESS;
654 }
655 
656 static int
657 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
658 {
659 	uint16_t actual_cksum = 0, expected_cksum = 0;
660 	struct rte_ipv4_hdr *ipv4;
661 	struct rte_ipv6_hdr *ipv6;
662 	struct rte_tcp_hdr *tcp;
663 	struct rte_udp_hdr *udp;
664 	void *ip, *l4;
665 
666 	ip = rte_pktmbuf_mtod(m, void *);
667 
668 	if (is_ipv4(ip)) {
669 		ipv4 = ip;
670 		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
671 
672 		switch (ipv4->next_proto_id) {
673 		case IPPROTO_TCP:
674 			tcp = (struct rte_tcp_hdr *)l4;
675 			actual_cksum = tcp->cksum;
676 			tcp->cksum = 0;
677 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
678 			break;
679 		case IPPROTO_UDP:
680 			udp = (struct rte_udp_hdr *)l4;
681 			actual_cksum = udp->dgram_cksum;
682 			udp->dgram_cksum = 0;
683 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
684 			break;
685 		default:
686 			break;
687 		}
688 	} else {
689 		ipv6 = ip;
690 		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
691 
692 		switch (ipv6->proto) {
693 		case IPPROTO_TCP:
694 			tcp = (struct rte_tcp_hdr *)l4;
695 			actual_cksum = tcp->cksum;
696 			tcp->cksum = 0;
697 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
698 			break;
699 		case IPPROTO_UDP:
700 			udp = (struct rte_udp_hdr *)l4;
701 			actual_cksum = udp->dgram_cksum;
702 			udp->dgram_cksum = 0;
703 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
704 			break;
705 		default:
706 			break;
707 		}
708 	}
709 
710 	if (actual_cksum != expected_cksum)
711 		return TEST_FAILED;
712 
713 	return TEST_SUCCESS;
714 }
715 
716 static int
717 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
718 {
719 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
720 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
721 
722 	if (is_ipv4(received) && is_ipv4(expected)) {
723 		iph4_ex = expected;
724 		iph4_re = received;
725 		iph4_ex->time_to_live -= 1;
726 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
727 			return TEST_FAILED;
728 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
729 		iph6_ex = expected;
730 		iph6_re = received;
731 		iph6_ex->hop_limits -= 1;
732 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
733 			return TEST_FAILED;
734 	} else {
735 		printf("IP header version miss match\n");
736 		return TEST_FAILED;
737 	}
738 
739 	return TEST_SUCCESS;
740 }
741 
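/*
 * Compare the processed packet against the expected vector: skip verification
 * for negative tests, strip the UDP encapsulation and tunnel headers where
 * applicable, check the inner checksum offload and TTL/hop limit results, and
 * finally compare the payload against the (flag-adjusted) expected text.
 */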
742 static int
743 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
744 		     bool silent, const struct ipsec_test_flags *flags)
745 {
746 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
747 	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
748 	uint8_t td_output_text[4096];
749 	int ret;
750 
751 	/* For tests where an error status indicates success, skip output verification */
752 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
753 	    (flags->icv_corrupt ||
754 	     flags->sa_expiry_pkts_hard ||
755 	     flags->tunnel_hdr_verify ||
756 	     td->ar_packet))
757 		return TEST_SUCCESS;
758 
759 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
760 	   flags->udp_encap) {
761 		const struct rte_ipv4_hdr *iph4;
762 		const struct rte_ipv6_hdr *iph6;
763 
764 		if (td->ipsec_xform.tunnel.type ==
765 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
766 			iph4 = (const struct rte_ipv4_hdr *)output_text;
767 			if (iph4->next_proto_id != IPPROTO_UDP) {
768 				printf("UDP header is not found\n");
769 				return TEST_FAILED;
770 			}
771 		} else {
772 			iph6 = (const struct rte_ipv6_hdr *)output_text;
773 			if (iph6->proto != IPPROTO_UDP) {
774 				printf("UDP header is not found\n");
775 				return TEST_FAILED;
776 			}
777 		}
778 
779 		len -= sizeof(struct rte_udp_hdr);
780 		output_text += sizeof(struct rte_udp_hdr);
781 	}
782 
783 	if (len != td->output_text.len) {
784 		printf("Output length (%d) not matching with expected (%d)\n",
785 			len, td->output_text.len);
786 		return TEST_FAILED;
787 	}
788 
789 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
790 				flags->fragment) {
791 		const struct rte_ipv4_hdr *iph4;
792 		iph4 = (const struct rte_ipv4_hdr *)output_text;
793 		if (iph4->fragment_offset) {
794 			printf("Output packet is fragmented");
795 			return TEST_FAILED;
796 		}
797 	}
798 
799 	skip = test_ipsec_tunnel_hdr_len_get(td);
800 
801 	len -= skip;
802 	output_text += skip;
803 
804 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
805 				flags->ip_csum) {
806 		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
807 			ret = test_ipsec_l3_csum_verify(m);
808 		else
809 			ret = TEST_FAILED;
810 
811 		if (ret == TEST_FAILED)
812 			printf("Inner IP checksum test failed\n");
813 
814 		return ret;
815 	}
816 
817 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
818 				flags->l4_csum) {
819 		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
820 			ret = test_ipsec_l4_csum_verify(m);
821 		else
822 			ret = TEST_FAILED;
823 
824 		if (ret == TEST_FAILED)
825 			printf("Inner L4 checksum test failed\n");
826 
827 		return ret;
828 	}
829 
830 	memcpy(td_output_text, td->output_text.data + skip, len);
831 
832 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
833 				flags->dec_ttl_or_hop_limit) {
834 		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
835 			printf("Inner TTL/hop limit decrement test failed\n");
836 			return TEST_FAILED;
837 		}
838 	}
839 
840 	if (test_ipsec_pkt_update(td_output_text, flags)) {
841 		printf("Could not update expected vector");
842 		return TEST_FAILED;
843 	}
844 
845 	if (memcmp(output_text, td_output_text, len)) {
846 		if (silent)
847 			return TEST_FAILED;
848 
849 		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
850 			"output text not as expected\n");
851 
852 		rte_hexdump(stdout, "expected", td_output_text, len);
853 		rte_hexdump(stdout, "actual", output_text, len);
854 		return TEST_FAILED;
855 	}
856 
857 	return TEST_SUCCESS;
858 }
859 
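/*
 * For combined mode tests, capture the outbound result as the input of the
 * follow-up inbound test: copy the vector, use the processed packet as input
 * text and flip the direction and crypto operations to ingress/decrypt.
 */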
860 static int
861 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
862 		   struct ipsec_test_data *res_d)
863 {
864 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
865 	uint32_t len = rte_pktmbuf_pkt_len(m);
866 
867 	memcpy(res_d, td, sizeof(*res_d));
868 	memcpy(res_d->input_text.data, output_text, len);
869 	res_d->input_text.len = len;
870 
871 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
872 	if (res_d->aead) {
873 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
874 	} else {
875 		res_d->xform.chain.cipher.cipher.op =
876 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
877 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
878 	}
879 
880 	return TEST_SUCCESS;
881 }
882 
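/*
 * Validate the outer IPv4 tunnel header produced on egress: basic sanity,
 * AH protocol when requested, and DF bit and DSCP values according to the
 * copy/set flags. The IPv6 variant below checks DSCP and flow label.
 */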
883 static int
884 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
885 			     const struct ipsec_test_flags *flags)
886 {
887 	uint8_t tos, dscp;
888 	uint16_t f_off;
889 
890 	if (!is_valid_ipv4_pkt(iph4)) {
891 		printf("Tunnel outer header is not IPv4\n");
892 		return -1;
893 	}
894 
895 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
896 		printf("Tunnel outer header proto is not AH\n");
897 		return -1;
898 	}
899 
900 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
901 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
902 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
903 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
904 			printf("DF bit is not set\n");
905 			return -1;
906 		}
907 	} else {
908 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
909 			printf("DF bit is set\n");
910 			return -1;
911 		}
912 	}
913 
914 	tos = iph4->type_of_service;
915 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
916 
917 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
918 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
919 		if (dscp != TEST_IPSEC_DSCP_VAL) {
920 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
921 			       TEST_IPSEC_DSCP_VAL, dscp);
922 			return -1;
923 		}
924 	} else {
925 		if (dscp != 0) {
926 			printf("DSCP value is set [exp: 0, actual: %x]\n",
927 			       dscp);
928 			return -1;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 static int
936 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
937 			     const struct ipsec_test_flags *flags)
938 {
939 	uint32_t vtc_flow;
940 	uint32_t flabel;
941 	uint8_t dscp;
942 
943 	if (!is_valid_ipv6_pkt(iph6)) {
944 		printf("Tunnel outer header is not IPv6\n");
945 		return -1;
946 	}
947 
948 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
949 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
950 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
951 
952 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
953 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
954 		if (dscp != TEST_IPSEC_DSCP_VAL) {
955 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
956 			       TEST_IPSEC_DSCP_VAL, dscp);
957 			return -1;
958 		}
959 	} else {
960 		if (dscp != 0) {
961 			printf("DSCP value is set [exp: 0, actual: %x]\n",
962 			       dscp);
963 			return -1;
964 		}
965 	}
966 
967 	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
968 
969 	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
970 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
971 		if (flabel != TEST_IPSEC_FLABEL_VAL) {
972 			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
973 			       TEST_IPSEC_FLABEL_VAL, flabel);
974 			return -1;
975 		}
976 	} else {
977 		if (flabel != 0) {
978 			printf("FLABEL value is set [exp: 0, actual: %x]\n",
979 			       flabel);
980 			return -1;
981 		}
982 	}
983 
984 	return 0;
985 }
986 
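/*
 * Top level result check. For egress, optionally verify IV uniqueness and
 * validate the outer (tunnel) or transport header. Then either verify the
 * packet against the expected vector (res_d == NULL) or prepare 'res_d' so
 * the packet can be fed back for inbound processing in combined mode tests.
 */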
987 int
988 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
989 			struct ipsec_test_data *res_d, bool silent,
990 			const struct ipsec_test_flags *flags)
991 {
992 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
993 	int ret;
994 
995 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
996 		const struct rte_ipv4_hdr *iph4;
997 		const struct rte_ipv6_hdr *iph6;
998 
999 		if (flags->iv_gen) {
1000 			ret = test_ipsec_iv_verify_push(m, td);
1001 			if (ret != TEST_SUCCESS)
1002 				return ret;
1003 		}
1004 
1005 		iph4 = (const struct rte_ipv4_hdr *)output_text;
1006 
1007 		if (td->ipsec_xform.mode ==
1008 				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
1009 			if (flags->ipv6) {
1010 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1011 				if (is_valid_ipv6_pkt(iph6) == false) {
1012 					printf("Transport packet is not IPv6\n");
1013 					return TEST_FAILED;
1014 				}
1015 			} else {
1016 				if (is_valid_ipv4_pkt(iph4) == false) {
1017 					printf("Transport packet is not IPv4\n");
1018 					return TEST_FAILED;
1019 				}
1020 
1021 				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
1022 					printf("Transport IPv4 header proto is not AH\n");
1023 					return -1;
1024 				}
1025 			}
1026 		} else {
1027 			if (td->ipsec_xform.tunnel.type ==
1028 					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1029 				if (test_ipsec_iph4_hdr_validate(iph4, flags))
1030 					return TEST_FAILED;
1031 			} else {
1032 				iph6 = (const struct rte_ipv6_hdr *)output_text;
1033 				if (test_ipsec_iph6_hdr_validate(iph6, flags))
1034 					return TEST_FAILED;
1035 			}
1036 		}
1037 	}
1038 
1039 	/*
1040 	 * In case of known vector tests & all inbound tests, res_d provided
1041 	 * would be NULL and the output data needs to be validated against the
1042 	 * expected vector. For inbound, output_text would be the plain packet
1043 	 * and for outbound, output_text would be the IPsec packet. Validate by
1044 	 * comparing against known vectors.
1045 	 *
1046 	 * In case of combined mode tests, the output_text from outbound
1047 	 * operation (ie, IPsec packet) would need to be inbound processed to
1048 	 * obtain the plain text. Copy output_text to result data, 'res_d', so
1049 	 * that inbound processing can be done.
1050 	 */
1051 
1052 	if (res_d == NULL)
1053 		return test_ipsec_td_verify(m, td, silent, flags);
1054 	else
1055 		return test_ipsec_res_d_prepare(m, td, res_d);
1056 }
1057 
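/*
 * Check the crypto op status against the expectation of the test case:
 * negative tests (anti replay, hard packet expiry, tunnel header
 * verification, ICV corruption) expect RTE_CRYPTO_OP_STATUS_ERROR, all
 * others expect success. Soft packet expiry is reported through aux_flags.
 */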
1058 int
1059 test_ipsec_status_check(const struct ipsec_test_data *td,
1060 			struct rte_crypto_op *op,
1061 			const struct ipsec_test_flags *flags,
1062 			enum rte_security_ipsec_sa_direction dir,
1063 			int pkt_num)
1064 {
1065 	int ret = TEST_SUCCESS;
1066 
1067 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1068 	    td->ar_packet) {
1069 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1070 			printf("Anti replay test case failed\n");
1071 			return TEST_FAILED;
1072 		} else {
1073 			return TEST_SUCCESS;
1074 		}
1075 	}
1076 
1077 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1078 	    flags->sa_expiry_pkts_hard &&
1079 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
1080 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1081 			printf("SA hard expiry (pkts) test failed\n");
1082 			return TEST_FAILED;
1083 		} else {
1084 			return TEST_SUCCESS;
1085 		}
1086 	}
1087 
1088 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1089 	    flags->tunnel_hdr_verify) {
1090 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1091 			printf("Tunnel header verify test case failed\n");
1092 			return TEST_FAILED;
1093 		} else {
1094 			return TEST_SUCCESS;
1095 		}
1096 	}
1097 
1098 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1099 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1100 			printf("ICV corruption test case failed\n");
1101 			ret = TEST_FAILED;
1102 		}
1103 	} else {
1104 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1105 			printf("Security op processing failed [pkt_num: %d]\n",
1106 			       pkt_num);
1107 			ret = TEST_FAILED;
1108 		}
1109 	}
1110 
1111 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1112 		if (!(op->aux_flags &
1113 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1114 			printf("SA soft expiry (pkts) test failed\n");
1115 			ret = TEST_FAILED;
1116 		}
1117 	}
1118 
1119 	return ret;
1120 }
1121 
1122 int
1123 test_ipsec_stats_verify(struct rte_security_ctx *ctx,
1124 			struct rte_security_session *sess,
1125 			const struct ipsec_test_flags *flags,
1126 			enum rte_security_ipsec_sa_direction dir)
1127 {
1128 	struct rte_security_stats stats = {0};
1129 	int ret = TEST_SUCCESS;
1130 
1131 	if (flags->stats_success) {
1132 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1133 			return TEST_FAILED;
1134 
1135 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1136 			if (stats.ipsec.opackets != 1 ||
1137 			    stats.ipsec.oerrors != 0)
1138 				ret = TEST_FAILED;
1139 		} else {
1140 			if (stats.ipsec.ipackets != 1 ||
1141 			    stats.ipsec.ierrors != 0)
1142 				ret = TEST_FAILED;
1143 		}
1144 	}
1145 
1146 	return ret;
1147 }
1148 
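/*
 * Apply the DF, DSCP and flow label expectations of the test flags to the
 * expected inner packet and refresh the IPv4 header checksum if the header
 * was modified.
 */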
1149 int
1150 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1151 {
1152 	struct rte_ipv4_hdr *iph4;
1153 	struct rte_ipv6_hdr *iph6;
1154 	bool cksum_dirty = false;
1155 
1156 	iph4 = (struct rte_ipv4_hdr *)pkt;
1157 
1158 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1159 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1160 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1161 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1162 		uint16_t frag_off;
1163 
1164 		if (!is_ipv4(iph4)) {
1165 			printf("Invalid packet type\n");
1166 			return -1;
1167 		}
1168 
1169 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1170 
1171 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1172 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1173 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1174 		else
1175 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1176 
1177 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1178 		cksum_dirty = true;
1179 	}
1180 
1181 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1182 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1183 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1184 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1185 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1186 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1187 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1188 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1189 
1190 		if (is_ipv4(iph4)) {
1191 			uint8_t tos;
1192 
1193 			tos = iph4->type_of_service;
1194 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1195 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1196 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1197 					(TEST_IPSEC_DSCP_VAL << 2));
1198 			else
1199 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1200 
1201 			iph4->type_of_service = tos;
1202 			cksum_dirty = true;
1203 		} else {
1204 			uint32_t vtc_flow;
1205 
1206 			iph6 = (struct rte_ipv6_hdr *)pkt;
1207 
1208 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1209 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1210 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1211 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1212 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1213 			else
1214 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1215 
1216 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1217 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1218 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1219 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1220 			else
1221 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1222 
1223 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1224 		}
1225 	}
1226 
1227 	if (cksum_dirty && is_ipv4(iph4)) {
1228 		iph4->hdr_checksum = 0;
1229 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 #endif /* !RTE_EXEC_ENV_WINDOWS */
1236