xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision 4677de0a4c2ba803d0e1adc26774f2c6c8b5b6df)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
7 #include <rte_esp.h>
8 #include <rte_ip.h>
9 #include <rte_security.h>
10 #include <rte_tcp.h>
11 #include <rte_udp.h>
12 
13 #include "test.h"
14 #include "test_cryptodev_security_ipsec.h"
15 
16 #define IV_LEN_MAX 16
17 #define UDP_CUSTOM_SPORT 4650
18 #define UDP_CUSTOM_DPORT 4660
19 
20 #ifndef IPVERSION
21 #define IPVERSION 4
22 #endif
23 
24 static bool
25 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
26 {
27 	/* The IP version number must be 4 */
28 	if (((pkt->version_ihl) >> 4) != 4)
29 		return false;
30 	/*
31 	 * The IP header length field must be large enough to hold the
32 	 * minimum length legal IP datagram (20 bytes = 5 words).
33 	 */
34 	if ((pkt->version_ihl & 0xf) < 5)
35 		return false;
36 
37 	/*
38 	 * The IP total length field must be large enough to hold the IP
39 	 * datagram header, whose length is specified in the IP header length
40 	 * field.
41 	 */
42 	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
43 		return false;
44 
45 	return true;
46 }
47 
48 static bool
49 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
50 {
51 	/* The IP version number must be 6 */
52 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
53 		return false;
54 
55 	return true;
56 }
57 
58 int
59 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
60 			   const struct rte_security_capability *sec_cap,
61 			   bool silent)
62 {
63 	/* Verify security capabilities */
64 
65 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
66 		if (!silent)
67 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
68 		return -ENOTSUP;
69 	}
70 
71 	if (ipsec_xform->options.udp_encap == 1 &&
72 	    sec_cap->ipsec.options.udp_encap == 0) {
73 		if (!silent)
74 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
75 		return -ENOTSUP;
76 	}
77 
78 	if (ipsec_xform->options.udp_ports_verify == 1 &&
79 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
80 		if (!silent)
81 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
82 				"verification is not supported\n");
83 		return -ENOTSUP;
84 	}
85 
86 	if (ipsec_xform->options.copy_dscp == 1 &&
87 	    sec_cap->ipsec.options.copy_dscp == 0) {
88 		if (!silent)
89 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
90 		return -ENOTSUP;
91 	}
92 
93 	if (ipsec_xform->options.copy_flabel == 1 &&
94 	    sec_cap->ipsec.options.copy_flabel == 0) {
95 		if (!silent)
96 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
97 		return -ENOTSUP;
98 	}
99 
100 	if (ipsec_xform->options.copy_df == 1 &&
101 	    sec_cap->ipsec.options.copy_df == 0) {
102 		if (!silent)
103 			RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
104 		return -ENOTSUP;
105 	}
106 
107 	if (ipsec_xform->options.dec_ttl == 1 &&
108 	    sec_cap->ipsec.options.dec_ttl == 0) {
109 		if (!silent)
110 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
111 		return -ENOTSUP;
112 	}
113 
114 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
115 		if (!silent)
116 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
117 		return -ENOTSUP;
118 	}
119 
120 	if (ipsec_xform->options.stats == 1 &&
121 	    sec_cap->ipsec.options.stats == 0) {
122 		if (!silent)
123 			RTE_LOG(INFO, USER1, "Stats is not supported\n");
124 		return -ENOTSUP;
125 	}
126 
127 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
128 	    (ipsec_xform->options.iv_gen_disable == 1) &&
129 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
130 		if (!silent)
131 			RTE_LOG(INFO, USER1,
132 				"Application provided IV is not supported\n");
133 		return -ENOTSUP;
134 	}
135 
136 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
137 	    (ipsec_xform->options.tunnel_hdr_verify >
138 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
139 		if (!silent)
140 			RTE_LOG(INFO, USER1,
141 				"Tunnel header verify is not supported\n");
142 		return -ENOTSUP;
143 	}
144 
145 	if (ipsec_xform->options.ip_csum_enable == 1 &&
146 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
147 		if (!silent)
148 			RTE_LOG(INFO, USER1,
149 				"Inner IP checksum is not supported\n");
150 		return -ENOTSUP;
151 	}
152 
153 	if (ipsec_xform->options.l4_csum_enable == 1 &&
154 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
155 		if (!silent)
156 			RTE_LOG(INFO, USER1,
157 				"Inner L4 checksum is not supported\n");
158 		return -ENOTSUP;
159 	}
160 
161 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
162 		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
163 			if (!silent)
164 				RTE_LOG(INFO, USER1,
165 					"Replay window size is not supported\n");
166 			return -ENOTSUP;
167 		}
168 	}
169 
170 	if (ipsec_xform->options.ingress_oop == 1 &&
171 	    sec_cap->ipsec.options.ingress_oop == 0) {
172 		if (!silent)
173 			RTE_LOG(INFO, USER1,
174 				"Inline Ingress OOP processing is not supported\n");
175 		return -ENOTSUP;
176 	}
177 
178 	return 0;
179 }
180 
181 void
182 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
183 			  struct ipsec_test_data *td_in)
184 {
185 	memcpy(td_in, td_out, sizeof(*td_in));
186 
187 	/* Populate output text of td_in with input text of td_out */
188 	memcpy(td_in->output_text.data, td_out->input_text.data,
189 	       td_out->input_text.len);
190 	td_in->output_text.len = td_out->input_text.len;
191 
192 	/* Populate input text of td_in with output text of td_out */
193 	memcpy(td_in->input_text.data, td_out->output_text.data,
194 	       td_out->output_text.len);
195 	td_in->input_text.len = td_out->output_text.len;
196 
197 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
198 
199 	if (td_in->aead) {
200 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
201 	} else {
202 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
203 		td_in->xform.chain.cipher.cipher.op =
204 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
205 	}
206 }
207 
208 static bool
209 is_ipv4(void *ip)
210 {
211 	struct rte_ipv4_hdr *ipv4 = ip;
212 	uint8_t ip_ver;
213 
214 	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
215 	if (ip_ver == IPVERSION)
216 		return true;
217 	else
218 		return false;
219 }
220 
221 static void
222 test_ipsec_csum_init(void *ip, bool l3, bool l4)
223 {
224 	struct rte_ipv4_hdr *ipv4;
225 	struct rte_tcp_hdr *tcp;
226 	struct rte_udp_hdr *udp;
227 	uint8_t next_proto;
228 	uint8_t size;
229 
230 	if (is_ipv4(ip)) {
231 		ipv4 = ip;
232 		size = sizeof(struct rte_ipv4_hdr);
233 		next_proto = ipv4->next_proto_id;
234 
235 		if (l3)
236 			ipv4->hdr_checksum = 0;
237 	} else {
238 		size = sizeof(struct rte_ipv6_hdr);
239 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
240 	}
241 
242 	if (l4) {
243 		switch (next_proto) {
244 		case IPPROTO_TCP:
245 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
246 			tcp->cksum = 0;
247 			break;
248 		case IPPROTO_UDP:
249 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
250 			udp->dgram_cksum = 0;
251 			break;
252 		default:
253 			return;
254 		}
255 	}
256 }
257 
/*
 * Build 'nb_td' IPsec test vectors in 'td_array' from a known-good
 * template, then override algorithm, key and IPsec-option fields according
 * to the crypto parameters and test flags.
 *
 * param1: AEAD parameters, or the auth (auth-only) / cipher (chained)
 *         parameters. param2: auth parameters for the chained case.
 * All entries receive the same configuration.
 */
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)

{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			/* AES-CCM uses a 3-byte salt (vs 4 for GCM). */
			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				/* Auth-only SA (e.g. AH or NULL cipher). */
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				/* AES-GMAC additionally needs an IV. */
				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				/* Cipher (param1) + auth (param2) chain. */
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		/* Adjust the data to requested length */
		if (flags->plaintext_len && flags->ipv6) {
			/*
			 * Rebuild the input as an IPv6/TCP packet with a
			 * synthetic payload of the requested size (clamped to
			 * the vector buffer, minimum 16 bytes of payload).
			 */
			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv6_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 16)
				payload_len = 16;

			/* IPv6 */
			ip6->proto = IPPROTO_TCP;
			ip6->payload_len = sizeof(*tcp) + payload_len;
			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
			data = (uint8_t *)(tcp + 1);
			/* Deterministic pattern so both ends can compare. */
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
				sizeof(struct rte_tcp_hdr);
		} else if (flags->plaintext_len) {
			/* Same as above for IPv4 (minimum 8 bytes payload). */
			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv4_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 8)
				payload_len = 8;

			/* IPv4 */
			ip->next_proto_id = IPPROTO_TCP;
			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
			ip->total_length = rte_cpu_to_be_16(ip->total_length);
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
				sizeof(struct rte_tcp_hdr);
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		/* iv_gen test: let the PMD generate IVs itself. */
		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit = TEST_SEC_PKTS_MAX - 1;

		/* Checksum offload tests start from zeroed checksum fields. */
		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			/*
			 * Mark the inner (IPv4) packet as a fragment and
			 * refresh its header checksum. Assumes the template
			 * input is IPv4 here — TODO confirm for IPv6 flags.
			 */
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		/* Non-default NAT-T ports for UDP encapsulation tests. */
		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}
462 
463 void
464 test_ipsec_td_update(struct ipsec_test_data td_inb[],
465 		     const struct ipsec_test_data td_outb[],
466 		     int nb_td,
467 		     const struct ipsec_test_flags *flags)
468 {
469 	int i;
470 
471 	for (i = 0; i < nb_td; i++) {
472 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
473 		       td_outb[i].input_text.len);
474 		td_inb[i].output_text.len = td_outb->input_text.len;
475 
476 		if (flags->icv_corrupt) {
477 			int icv_pos = td_inb[i].input_text.len - 4;
478 			td_inb[i].input_text.data[icv_pos] += 1;
479 		}
480 
481 		if (flags->sa_expiry_pkts_hard)
482 			td_inb[i].ipsec_xform.life.packets_hard_limit = TEST_SEC_PKTS_MAX - 1;
483 
484 		if (flags->udp_encap)
485 			td_inb[i].ipsec_xform.options.udp_encap = 1;
486 
487 		if (flags->udp_ports_verify)
488 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
489 
490 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
491 			flags->tunnel_hdr_verify;
492 
493 		if (flags->ip_csum)
494 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
495 
496 		if (flags->l4_csum)
497 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
498 
499 		/* Clear outbound specific flags */
500 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
501 	}
502 }
503 
504 static int
505 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
506 {
507 	int len = 0;
508 
509 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
510 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
511 			if (td->ipsec_xform.tunnel.type ==
512 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
513 				len += sizeof(struct rte_ipv4_hdr);
514 			else
515 				len += sizeof(struct rte_ipv6_hdr);
516 		}
517 	}
518 
519 	return len;
520 }
521 
522 static int
523 test_ipsec_iv_verify_push(const uint8_t *output_text, const struct ipsec_test_data *td)
524 {
525 	static uint8_t iv_queue[IV_LEN_MAX * TEST_SEC_PKTS_MAX];
526 	int i, iv_pos, iv_len;
527 	static int index;
528 	uint8_t *iv_tmp;
529 
530 	if (td->aead)
531 		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
532 	else
533 		iv_len = td->xform.chain.cipher.cipher.iv.length;
534 
535 	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
536 	output_text += iv_pos;
537 
538 	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
539 
540 	/* Compare against previous values */
541 	for (i = 0; i < index; i++) {
542 		iv_tmp = &iv_queue[i * IV_LEN_MAX];
543 
544 		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
545 			printf("IV repeated");
546 			return TEST_FAILED;
547 		}
548 	}
549 
550 	/* Save IV for future comparisons */
551 
552 	iv_tmp = &iv_queue[index * IV_LEN_MAX];
553 	memcpy(iv_tmp, output_text, iv_len);
554 	index++;
555 
556 	if (index == TEST_SEC_PKTS_MAX)
557 		index = 0;
558 
559 	return TEST_SUCCESS;
560 }
561 
562 static int
563 test_ipsec_l3_csum_verify(uint8_t *output_text)
564 {
565 	uint16_t actual_cksum, expected_cksum;
566 	struct rte_ipv4_hdr *ip;
567 
568 	ip = (struct rte_ipv4_hdr *)output_text;
569 
570 	if (!is_ipv4((void *)ip))
571 		return TEST_SKIPPED;
572 
573 	actual_cksum = ip->hdr_checksum;
574 
575 	ip->hdr_checksum = 0;
576 
577 	expected_cksum = rte_ipv4_cksum(ip);
578 
579 	if (actual_cksum != expected_cksum)
580 		return TEST_FAILED;
581 
582 	return TEST_SUCCESS;
583 }
584 
585 static int
586 test_ipsec_l4_csum_verify(uint8_t *output_text)
587 {
588 	uint16_t actual_cksum = 0, expected_cksum = 0;
589 	struct rte_ipv4_hdr *ipv4;
590 	struct rte_ipv6_hdr *ipv6;
591 	struct rte_tcp_hdr *tcp;
592 	struct rte_udp_hdr *udp;
593 	void *ip, *l4;
594 
595 	ip = output_text;
596 
597 	if (is_ipv4(ip)) {
598 		ipv4 = ip;
599 		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
600 
601 		switch (ipv4->next_proto_id) {
602 		case IPPROTO_TCP:
603 			tcp = (struct rte_tcp_hdr *)l4;
604 			actual_cksum = tcp->cksum;
605 			tcp->cksum = 0;
606 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
607 			break;
608 		case IPPROTO_UDP:
609 			udp = (struct rte_udp_hdr *)l4;
610 			actual_cksum = udp->dgram_cksum;
611 			udp->dgram_cksum = 0;
612 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
613 			break;
614 		default:
615 			break;
616 		}
617 	} else {
618 		ipv6 = ip;
619 		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
620 
621 		switch (ipv6->proto) {
622 		case IPPROTO_TCP:
623 			tcp = (struct rte_tcp_hdr *)l4;
624 			actual_cksum = tcp->cksum;
625 			tcp->cksum = 0;
626 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
627 			break;
628 		case IPPROTO_UDP:
629 			udp = (struct rte_udp_hdr *)l4;
630 			actual_cksum = udp->dgram_cksum;
631 			udp->dgram_cksum = 0;
632 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
633 			break;
634 		default:
635 			break;
636 		}
637 	}
638 
639 	if (actual_cksum != expected_cksum)
640 		return TEST_FAILED;
641 
642 	return TEST_SUCCESS;
643 }
644 
645 static int
646 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
647 {
648 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
649 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
650 
651 	if (is_ipv4(received) && is_ipv4(expected)) {
652 		iph4_ex = expected;
653 		iph4_re = received;
654 		iph4_ex->time_to_live -= 1;
655 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
656 			return TEST_FAILED;
657 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
658 		iph6_ex = expected;
659 		iph6_re = received;
660 		iph6_ex->hop_limits -= 1;
661 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
662 			return TEST_FAILED;
663 	} else {
664 		printf("IP header version miss match\n");
665 		return TEST_FAILED;
666 	}
667 
668 	return TEST_SUCCESS;
669 }
670 
/*
 * Compare the processed packet ('output_text', 'len') against the
 * expected output in 'td', honoring the test flags.
 *
 * Negative inbound tests (ICV corruption, hard expiry, tunnel header
 * verify, anti-replay) are expected to fail at op level, so data
 * verification is skipped. Checksum-offload tests return after checking
 * only the offload result. Returns TEST_SUCCESS/TEST_FAILED.
 */
static int
test_ipsec_td_verify(uint8_t *output_text, uint32_t len, uint32_t ol_flags,
		const struct ipsec_test_data *td, bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
	uint32_t skip;
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	/* Outbound UDP encap: skip over the NAT-T UDP header before comparing. */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
			len, td->output_text.len);
		return TEST_FAILED;
	}

	/* Egress must clear inner fragmentation info in the outer header. */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
				flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	/*
	 * Checksum offload tests: trust the mbuf offload flag, then
	 * double-check by software recomputation. These return early —
	 * no byte-wise payload comparison is done.
	 */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->ip_csum) {
		if (ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->l4_csum) {
		if (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	/* Compare past the outer tunnel header, which varies per device. */
	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	/* Work on a copy: the expected vector may be adjusted below. */
	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	/* Let flag-specific tweaks (DSCP/DF/...) update the expected copy. */
	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
			"output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
770 
771 static int
772 test_ipsec_res_d_prepare(const uint8_t *output_text, uint32_t len,
773 		const struct ipsec_test_data *td, struct ipsec_test_data *res_d)
774 {
775 	memcpy(res_d, td, sizeof(*res_d));
776 
777 	memcpy(&res_d->input_text.data, output_text, len);
778 	res_d->input_text.len = len;
779 
780 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
781 	if (res_d->aead) {
782 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
783 	} else {
784 		res_d->xform.chain.cipher.cipher.op =
785 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
786 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
787 	}
788 
789 	return TEST_SUCCESS;
790 }
791 
792 static int
793 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
794 			     const struct ipsec_test_flags *flags)
795 {
796 	uint8_t tos, dscp;
797 	uint16_t f_off;
798 
799 	if (!is_valid_ipv4_pkt(iph4)) {
800 		printf("Tunnel outer header is not IPv4\n");
801 		return -1;
802 	}
803 
804 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
805 		printf("Tunnel outer header proto is not AH\n");
806 		return -1;
807 	}
808 
809 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
810 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
811 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
812 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
813 			printf("DF bit is not set\n");
814 			return -1;
815 		}
816 	} else {
817 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
818 			printf("DF bit is set\n");
819 			return -1;
820 		}
821 	}
822 
823 	tos = iph4->type_of_service;
824 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
825 
826 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
827 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
828 		if (dscp != TEST_IPSEC_DSCP_VAL) {
829 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
830 			       TEST_IPSEC_DSCP_VAL, dscp);
831 			return -1;
832 		}
833 	} else {
834 		if (dscp != 0) {
835 			printf("DSCP value is set [exp: 0, actual: %x]\n",
836 			       dscp);
837 			return -1;
838 		}
839 	}
840 
841 	return 0;
842 }
843 
844 static int
845 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
846 			     const struct ipsec_test_flags *flags)
847 {
848 	uint32_t vtc_flow;
849 	uint32_t flabel;
850 	uint8_t dscp;
851 
852 	if (!is_valid_ipv6_pkt(iph6)) {
853 		printf("Tunnel outer header is not IPv6\n");
854 		return -1;
855 	}
856 
857 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
858 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
859 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
860 
861 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
862 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
863 		if (dscp != TEST_IPSEC_DSCP_VAL) {
864 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
865 			       TEST_IPSEC_DSCP_VAL, dscp);
866 			return -1;
867 		}
868 	} else {
869 		if (dscp != 0) {
870 			printf("DSCP value is set [exp: 0, actual: %x]\n",
871 			       dscp);
872 			return -1;
873 		}
874 	}
875 
876 	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
877 
878 	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
879 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
880 		if (flabel != TEST_IPSEC_FLABEL_VAL) {
881 			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
882 			       TEST_IPSEC_FLABEL_VAL, flabel);
883 			return -1;
884 		}
885 	} else {
886 		if (flabel != 0) {
887 			printf("FLABEL value is set [exp: 0, actual: %x]\n",
888 			       flabel);
889 			return -1;
890 		}
891 	}
892 
893 	return 0;
894 }
895 
/*
 * Post-process a packet returned by the security device.
 *
 * Flattens the mbuf into a linear buffer, validates direction-specific
 * properties (IV uniqueness, outer/transport header sanity, UDP
 * encapsulation), then either verifies the data against the expected
 * vector (res_d == NULL) or fills 'res_d' so the packet can be run back
 * through inbound processing (combined-mode tests).
 * Returns TEST_SUCCESS/TEST_FAILED (or -1 for a bad transport AH proto).
 */
int
test_ipsec_post_process(const struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint32_t len = rte_pktmbuf_pkt_len(m), data_len;
	uint8_t output_text[IPSEC_TEXT_MAX_LEN];
	const struct rte_mbuf *seg;
	const uint8_t *output;
	int ret;

	memset(output_text, 0, IPSEC_TEXT_MAX_LEN);
	/* Actual data in packet might be less in error cases,
	 * hence take minimum of pkt_len and sum of data_len.
	 * This is done to run through negative test cases.
	 */
	data_len = 0;
	seg = m;
	while (seg) {
		data_len += seg->data_len;
		seg = seg->next;
	}
	len = RTE_MIN(len, data_len);
	TEST_ASSERT(len <= IPSEC_TEXT_MAX_LEN, "Invalid packet length: %u", len);
	/* Copy mbuf payload to continuous buffer */
	output = rte_pktmbuf_read(m, 0, len, output_text);
	if (output != output_text)
		/* Single segment mbuf, copy manually */
		memcpy(output_text, output, len);

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		/* PMD-generated IVs must never repeat across packets. */
		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(output_text, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			/* Transport mode: first header is the original IP. */
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			/* Tunnel mode: validate the generated outer header. */
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/* UDP (NAT-T) encapsulation: a UDP header must follow the outer IP. */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;

			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			/* Custom NAT-T ports must round-trip to the packet. */
			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv4_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;

			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv6_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data need to be validated against expected.
	 * For inbound, output_text would be plain packet and for outbound
	 * output_text would IPsec packet. Validate by comparing against
	 * known vectors.
	 *
	 * In case of combined mode tests, the output_text from outbound
	 * operation (ie, IPsec packet) would need to be inbound processed to
	 * obtain the plain text. Copy output_text to result data, 'res_d', so
	 * that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(output_text, len, m->ol_flags, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(output_text, len, td, res_d);
}
1035 
1036 int
1037 test_ipsec_status_check(const struct ipsec_test_data *td,
1038 			struct rte_crypto_op *op,
1039 			const struct ipsec_test_flags *flags,
1040 			enum rte_security_ipsec_sa_direction dir,
1041 			int pkt_num)
1042 {
1043 	int ret = TEST_SUCCESS;
1044 
1045 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1046 	    td->ar_packet) {
1047 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1048 			printf("Anti replay test case failed\n");
1049 			return TEST_FAILED;
1050 		} else {
1051 			return TEST_SUCCESS;
1052 		}
1053 	}
1054 
1055 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1056 	    flags->sa_expiry_pkts_hard &&
1057 	    pkt_num == TEST_SEC_PKTS_MAX) {
1058 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1059 			printf("SA hard expiry (pkts) test failed\n");
1060 			return TEST_FAILED;
1061 		} else {
1062 			return TEST_SUCCESS;
1063 		}
1064 	}
1065 
1066 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1067 	    flags->tunnel_hdr_verify) {
1068 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1069 			printf("Tunnel header verify test case failed\n");
1070 			return TEST_FAILED;
1071 		} else {
1072 			return TEST_SUCCESS;
1073 		}
1074 	}
1075 
1076 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1077 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1078 			printf("ICV corruption test case failed\n");
1079 			ret = TEST_FAILED;
1080 		}
1081 	} else {
1082 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1083 			printf("Security op processing failed [pkt_num: %d]\n",
1084 			       pkt_num);
1085 			ret = TEST_FAILED;
1086 		}
1087 	}
1088 
1089 	if (flags->sa_expiry_pkts_soft && pkt_num == TEST_SEC_PKTS_MAX) {
1090 		if (!(op->aux_flags &
1091 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1092 			printf("SA soft expiry (pkts) test failed\n");
1093 			ret = TEST_FAILED;
1094 		}
1095 	}
1096 
1097 	return ret;
1098 }
1099 
1100 int
1101 test_ipsec_stats_verify(void *ctx,
1102 			void *sess,
1103 			const struct ipsec_test_flags *flags,
1104 			enum rte_security_ipsec_sa_direction dir)
1105 {
1106 	struct rte_security_stats stats = {0};
1107 	int retries = 0, ret = TEST_SUCCESS;
1108 
1109 	if (flags->stats_success) {
1110 stats_get:
1111 		ret = TEST_SUCCESS;
1112 
1113 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1114 			return TEST_FAILED;
1115 
1116 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1117 			if (stats.ipsec.opackets != 1 ||
1118 			    stats.ipsec.oerrors != 0)
1119 				ret = TEST_FAILED;
1120 		} else {
1121 			if (stats.ipsec.ipackets != 1 ||
1122 			    stats.ipsec.ierrors != 0)
1123 				ret = TEST_FAILED;
1124 		}
1125 
1126 		if (ret == TEST_FAILED && retries < TEST_STATS_RETRIES) {
1127 			retries++;
1128 			rte_delay_ms(1);
1129 			goto stats_get;
1130 		}
1131 	}
1132 
1133 	return ret;
1134 }
1135 
1136 int
1137 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1138 {
1139 	struct rte_ipv4_hdr *iph4;
1140 	struct rte_ipv6_hdr *iph6;
1141 	bool cksum_dirty = false;
1142 
1143 	iph4 = (struct rte_ipv4_hdr *)pkt;
1144 
1145 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1146 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1147 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1148 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1149 		uint16_t frag_off;
1150 
1151 		if (!is_ipv4(iph4)) {
1152 			printf("Invalid packet type\n");
1153 			return -1;
1154 		}
1155 
1156 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1157 
1158 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1159 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1160 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1161 		else
1162 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1163 
1164 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1165 		cksum_dirty = true;
1166 	}
1167 
1168 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1169 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1170 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1171 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1172 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1173 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1174 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1175 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1176 
1177 		if (is_ipv4(iph4)) {
1178 			uint8_t tos;
1179 
1180 			tos = iph4->type_of_service;
1181 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1182 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1183 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1184 					(TEST_IPSEC_DSCP_VAL << 2));
1185 			else
1186 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1187 
1188 			iph4->type_of_service = tos;
1189 			cksum_dirty = true;
1190 		} else {
1191 			uint32_t vtc_flow;
1192 
1193 			iph6 = (struct rte_ipv6_hdr *)pkt;
1194 
1195 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1196 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1197 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1198 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1199 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1200 			else
1201 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1202 
1203 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1204 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1205 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1206 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1207 			else
1208 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1209 
1210 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1211 		}
1212 	}
1213 
1214 	if (cksum_dirty && is_ipv4(iph4)) {
1215 		iph4->hdr_checksum = 0;
1216 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1217 	}
1218 
1219 	return 0;
1220 }
1221