xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision 3cf0c56ca5b1884c89c9a6bb4e5fdc978018d2b0)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
7 #include <rte_esp.h>
8 #include <rte_ip.h>
9 #include <rte_security.h>
10 #include <rte_tcp.h>
11 #include <rte_udp.h>
12 
13 #include "test.h"
14 #include "test_cryptodev_security_ipsec.h"
15 
16 #define IV_LEN_MAX 16
17 #define UDP_CUSTOM_SPORT 4650
18 #define UDP_CUSTOM_DPORT 4660
19 
20 #ifndef IPVERSION
21 #define IPVERSION 4
22 #endif
23 
/* All ESP algorithm combinations exercised by the tests: every AEAD
 * algorithm standalone, plus the cross product of cipher and auth
 * algorithms. Filled by test_ipsec_alg_list_populate().
 */
struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

/* AH algorithm combinations: each auth algorithm (skipping entry 0)
 * once standalone and once paired with the NULL cipher. Filled by
 * test_ipsec_ah_alg_list_populate().
 */
struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
29 
30 static bool
31 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
32 {
33 	/* The IP version number must be 4 */
34 	if (((pkt->version_ihl) >> 4) != 4)
35 		return false;
36 	/*
37 	 * The IP header length field must be large enough to hold the
38 	 * minimum length legal IP datagram (20 bytes = 5 words).
39 	 */
40 	if ((pkt->version_ihl & 0xf) < 5)
41 		return false;
42 
43 	/*
44 	 * The IP total length field must be large enough to hold the IP
45 	 * datagram header, whose length is specified in the IP header length
46 	 * field.
47 	 */
48 	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
49 		return false;
50 
51 	return true;
52 }
53 
54 static bool
55 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
56 {
57 	/* The IP version number must be 6 */
58 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
59 		return false;
60 
61 	return true;
62 }
63 
64 void
65 test_ipsec_alg_list_populate(void)
66 {
67 	unsigned long i, j, index = 0;
68 
69 	for (i = 0; i < RTE_DIM(aead_list); i++) {
70 		alg_list[index].param1 = &aead_list[i];
71 		alg_list[index].param2 = NULL;
72 		index++;
73 	}
74 
75 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
76 		for (j = 0; j < RTE_DIM(auth_list); j++) {
77 			alg_list[index].param1 = &cipher_list[i];
78 			alg_list[index].param2 = &auth_list[j];
79 			index++;
80 		}
81 	}
82 }
83 
84 void
85 test_ipsec_ah_alg_list_populate(void)
86 {
87 	unsigned long i, index = 0;
88 
89 	for (i = 1; i < RTE_DIM(auth_list); i++) {
90 		ah_alg_list[index].param1 = &auth_list[i];
91 		ah_alg_list[index].param2 = NULL;
92 		index++;
93 	}
94 
95 	for (i = 1; i < RTE_DIM(auth_list); i++) {
96 		/* NULL cipher */
97 		ah_alg_list[index].param1 = &cipher_list[0];
98 
99 		ah_alg_list[index].param2 = &auth_list[i];
100 		index++;
101 	}
102 }
103 
/*
 * Check every IPsec option requested in 'ipsec_xform' against the security
 * capabilities advertised by the device in 'sec_cap'.
 *
 * Returns 0 when all requested options are supported, -ENOTSUP on the
 * first unsupported option. Unless 'silent' is set, the unsupported
 * option is logged before returning.
 */
int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	/* Application-provided IV is only meaningful for egress SAs */
	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	/* tunnel_hdr_verify levels are ordered; the device must support at
	 * least the requested level.
	 */
	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	/* Anti-replay window is an ingress-only feature */
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			if (!silent)
				RTE_LOG(INFO, USER1,
					"Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	if (ipsec_xform->options.ingress_oop == 1 &&
	    sec_cap->ipsec.options.ingress_oop == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inline Ingress OOP processing is not supported\n");
		return -ENOTSUP;
	}

	return 0;
}
226 
227 void
228 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
229 			  struct ipsec_test_data *td_in)
230 {
231 	memcpy(td_in, td_out, sizeof(*td_in));
232 
233 	/* Populate output text of td_in with input text of td_out */
234 	memcpy(td_in->output_text.data, td_out->input_text.data,
235 	       td_out->input_text.len);
236 	td_in->output_text.len = td_out->input_text.len;
237 
238 	/* Populate input text of td_in with output text of td_out */
239 	memcpy(td_in->input_text.data, td_out->output_text.data,
240 	       td_out->output_text.len);
241 	td_in->input_text.len = td_out->output_text.len;
242 
243 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
244 
245 	if (td_in->aead) {
246 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
247 	} else {
248 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
249 		td_in->xform.chain.cipher.cipher.op =
250 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
251 	}
252 }
253 
254 static bool
255 is_ipv4(void *ip)
256 {
257 	struct rte_ipv4_hdr *ipv4 = ip;
258 	uint8_t ip_ver;
259 
260 	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
261 	if (ip_ver == IPVERSION)
262 		return true;
263 	else
264 		return false;
265 }
266 
267 static void
268 test_ipsec_csum_init(void *ip, bool l3, bool l4)
269 {
270 	struct rte_ipv4_hdr *ipv4;
271 	struct rte_tcp_hdr *tcp;
272 	struct rte_udp_hdr *udp;
273 	uint8_t next_proto;
274 	uint8_t size;
275 
276 	if (is_ipv4(ip)) {
277 		ipv4 = ip;
278 		size = sizeof(struct rte_ipv4_hdr);
279 		next_proto = ipv4->next_proto_id;
280 
281 		if (l3)
282 			ipv4->hdr_checksum = 0;
283 	} else {
284 		size = sizeof(struct rte_ipv6_hdr);
285 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
286 	}
287 
288 	if (l4) {
289 		switch (next_proto) {
290 		case IPPROTO_TCP:
291 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
292 			tcp->cksum = 0;
293 			break;
294 		case IPPROTO_UDP:
295 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
296 			udp->dgram_cksum = 0;
297 			break;
298 		default:
299 			return;
300 		}
301 	}
302 }
303 
/*
 * Build 'nb_td' IPsec test vectors in 'td_array' from the given crypto
 * parameters and test flags.
 *
 * param1 selects either an AEAD algorithm, an auth-only algorithm, or the
 * cipher of a cipher+auth chain; param2 supplies the auth algorithm for
 * chains (may be NULL otherwise). A known-good packet/key template is
 * copied in first and then adjusted per 'flags' (payload length, checksum
 * offloads, tunnel vs transport mode, UDP encapsulation ports, etc.).
 */
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)

{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			/* AES-CCM uses a 3 byte salt, unlike GCM's 4 */
			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				/* Auth-only transform (used for AH tests) */
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				/* Cipher (param1) + auth (param2) chain */
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		/* Adjust the data to requested length */
		if (flags->plaintext_len && flags->ipv6) {
			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;	/* NOTE: shadows the outer loop counter */

			/* TCP payload after IPv6 + TCP headers; at least 16 bytes */
			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv6_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 16)
				payload_len = 16;

			/* IPv6 */
			ip6->proto = IPPROTO_TCP;
			ip6->payload_len = sizeof(*tcp) + payload_len;
			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);

			/* TCP: fill payload with a 0,1,2,... pattern, then checksum */
			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
				sizeof(struct rte_tcp_hdr);
		} else if (flags->plaintext_len) {
			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;	/* NOTE: shadows the outer loop counter */

			/* TCP payload after IPv4 + TCP headers; at least 8 bytes */
			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv4_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 8)
				payload_len = 8;

			/* IPv4 */
			ip->next_proto_id = IPPROTO_TCP;
			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
			ip->total_length = rte_cpu_to_be_16(ip->total_length);
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);

			/* TCP: fill payload with a 0,1,2,... pattern, then checksum */
			tcp = (struct rte_tcp_hdr *)(ip + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
				sizeof(struct rte_tcp_hdr);
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		/* Let the device generate the IV instead of the application */
		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		/* Mark the input packet fragmented (and re-checksum) so the
		 * outbound test can verify the output is NOT fragmented.
		 */
		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}
509 
510 void
511 test_ipsec_td_update(struct ipsec_test_data td_inb[],
512 		     const struct ipsec_test_data td_outb[],
513 		     int nb_td,
514 		     const struct ipsec_test_flags *flags)
515 {
516 	int i;
517 
518 	for (i = 0; i < nb_td; i++) {
519 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
520 		       td_outb[i].input_text.len);
521 		td_inb[i].output_text.len = td_outb->input_text.len;
522 
523 		if (flags->icv_corrupt) {
524 			int icv_pos = td_inb[i].input_text.len - 4;
525 			td_inb[i].input_text.data[icv_pos] += 1;
526 		}
527 
528 		if (flags->sa_expiry_pkts_hard)
529 			td_inb[i].ipsec_xform.life.packets_hard_limit =
530 					IPSEC_TEST_PACKETS_MAX - 1;
531 
532 		if (flags->udp_encap)
533 			td_inb[i].ipsec_xform.options.udp_encap = 1;
534 
535 		if (flags->udp_ports_verify)
536 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
537 
538 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
539 			flags->tunnel_hdr_verify;
540 
541 		if (flags->ip_csum)
542 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
543 
544 		if (flags->l4_csum)
545 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
546 
547 		/* Clear outbound specific flags */
548 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
549 	}
550 }
551 
552 static int
553 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
554 {
555 	int len = 0;
556 
557 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
558 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
559 			if (td->ipsec_xform.tunnel.type ==
560 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
561 				len += sizeof(struct rte_ipv4_hdr);
562 			else
563 				len += sizeof(struct rte_ipv6_hdr);
564 		}
565 	}
566 
567 	return len;
568 }
569 
/*
 * Verify that the IV of an egress ESP packet has not been seen before and
 * save it for future comparisons.
 *
 * Uniqueness is tracked across calls via static storage, holding up to
 * IPSEC_TEST_PACKETS_MAX previously seen IVs; the queue index wraps after
 * that. Returns TEST_SUCCESS, or TEST_FAILED on a repeated IV (TEST_ASSERT
 * may also return early on an oversized IV).
 */
static int
test_ipsec_iv_verify_push(const uint8_t *output_text, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	int i, iv_pos, iv_len;
	static int index;
	uint8_t *iv_tmp;

	/* For AEAD the configured IV length includes the salt, which is not
	 * carried in the packet; subtract it to get the on-wire IV length.
	 */
	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	/* The IV follows the (optional) outer IP header and the ESP header */
	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */

	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}
609 
610 static int
611 test_ipsec_l3_csum_verify(uint8_t *output_text)
612 {
613 	uint16_t actual_cksum, expected_cksum;
614 	struct rte_ipv4_hdr *ip;
615 
616 	ip = (struct rte_ipv4_hdr *)output_text;
617 
618 	if (!is_ipv4((void *)ip))
619 		return TEST_SKIPPED;
620 
621 	actual_cksum = ip->hdr_checksum;
622 
623 	ip->hdr_checksum = 0;
624 
625 	expected_cksum = rte_ipv4_cksum(ip);
626 
627 	if (actual_cksum != expected_cksum)
628 		return TEST_FAILED;
629 
630 	return TEST_SUCCESS;
631 }
632 
/*
 * Recompute the inner TCP/UDP checksum (IPv4 or IPv6) and compare it with
 * the received value. For unknown L4 protocols both values stay 0, so the
 * check passes. Note: the L4 checksum field is zeroed in place.
 */
static int
test_ipsec_l4_csum_verify(uint8_t *output_text)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = output_text;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
692 
693 static int
694 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
695 {
696 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
697 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
698 
699 	if (is_ipv4(received) && is_ipv4(expected)) {
700 		iph4_ex = expected;
701 		iph4_re = received;
702 		iph4_ex->time_to_live -= 1;
703 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
704 			return TEST_FAILED;
705 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
706 		iph6_ex = expected;
707 		iph6_re = received;
708 		iph6_ex->hop_limits -= 1;
709 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
710 			return TEST_FAILED;
711 	} else {
712 		printf("IP header version miss match\n");
713 		return TEST_FAILED;
714 	}
715 
716 	return TEST_SUCCESS;
717 }
718 
/*
 * Compare the processed packet ('output_text'/'len') against the expected
 * output of test vector 'td', honoring the test flags.
 *
 * Negative tests (ICV corruption, hard expiry, tunnel header verify,
 * anti-replay) skip the comparison since the operation is expected to fail.
 * Checksum-offload tests delegate to the dedicated verifiers and return
 * their result directly. Unless 'silent', mismatches are dumped as hex.
 */
static int
test_ipsec_td_verify(uint8_t *output_text, uint32_t len, uint32_t ol_flags,
		const struct ipsec_test_data *td, bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
	uint32_t skip;
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	/* Strip the UDP encapsulation header before comparing egress output */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
			len, td->output_text.len);
		return TEST_FAILED;
	}

	/* The input was marked fragmented; the IPsec output must not be */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
				flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	/* Inner IP checksum test: rely on the mbuf offload flag + recompute */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->ip_csum) {
		if (ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	/* Inner L4 checksum test: rely on the mbuf offload flag + recompute */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->l4_csum) {
		if (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	/* Skip the variable outer tunnel header before byte comparison */
	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	memcpy(td_output_text, td->output_text.data + skip, len);

	/* NOTE: decrements the TTL in td_output_text so the final memcmp
	 * below also matches.
	 */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
			"output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
818 
819 static int
820 test_ipsec_res_d_prepare(const uint8_t *output_text, uint32_t len,
821 		const struct ipsec_test_data *td, struct ipsec_test_data *res_d)
822 {
823 	memcpy(res_d, td, sizeof(*res_d));
824 
825 	memcpy(&res_d->input_text.data, output_text, len);
826 	res_d->input_text.len = len;
827 
828 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
829 	if (res_d->aead) {
830 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
831 	} else {
832 		res_d->xform.chain.cipher.cipher.op =
833 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
834 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
835 	}
836 
837 	return TEST_SUCCESS;
838 }
839 
840 static int
841 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
842 			     const struct ipsec_test_flags *flags)
843 {
844 	uint8_t tos, dscp;
845 	uint16_t f_off;
846 
847 	if (!is_valid_ipv4_pkt(iph4)) {
848 		printf("Tunnel outer header is not IPv4\n");
849 		return -1;
850 	}
851 
852 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
853 		printf("Tunnel outer header proto is not AH\n");
854 		return -1;
855 	}
856 
857 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
858 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
859 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
860 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
861 			printf("DF bit is not set\n");
862 			return -1;
863 		}
864 	} else {
865 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
866 			printf("DF bit is set\n");
867 			return -1;
868 		}
869 	}
870 
871 	tos = iph4->type_of_service;
872 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
873 
874 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
875 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
876 		if (dscp != TEST_IPSEC_DSCP_VAL) {
877 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
878 			       TEST_IPSEC_DSCP_VAL, dscp);
879 			return -1;
880 		}
881 	} else {
882 		if (dscp != 0) {
883 			printf("DSCP value is set [exp: 0, actual: %x]\n",
884 			       dscp);
885 			return -1;
886 		}
887 	}
888 
889 	return 0;
890 }
891 
/*
 * Validate the outer IPv6 header produced by egress tunnel processing:
 * basic header sanity plus DSCP and flow label handling according to the
 * test flags. Returns 0 on success, -1 on any mismatch (with a diagnostic
 * printed).
 */
static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint32_t flabel;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	/* DSCP is the upper six bits of the traffic class field */
	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	/* Flow label is the low 20 bits of vtc_flow */
	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;

	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
		if (flabel != TEST_IPSEC_FLABEL_VAL) {
			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_FLABEL_VAL, flabel);
			return -1;
		}
	} else {
		if (flabel != 0) {
			printf("FLABEL value is set [exp: 0, actual: %x]\n",
			       flabel);
			return -1;
		}
	}

	return 0;
}
943 
/*
 * Post-process a packet returned by IPsec processing.
 *
 * Copies the mbuf payload into a linear buffer, then:
 *  - for egress: checks IV uniqueness (when iv_gen), validates the
 *    transport/tunnel outer header, and verifies UDP encapsulation
 *    (including custom ports when requested);
 *  - finally either verifies the data against the known vector 'td'
 *    (when res_d == NULL) or prepares 'res_d' for the inbound half of a
 *    combined-mode test.
 *
 * Returns TEST_SUCCESS/TEST_FAILED (or the verifier's result).
 */
int
test_ipsec_post_process(const struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint32_t len = rte_pktmbuf_pkt_len(m), data_len;
	uint8_t output_text[IPSEC_TEXT_MAX_LEN];
	const struct rte_mbuf *seg;
	const uint8_t *output;
	int ret;

	memset(output_text, 0, IPSEC_TEXT_MAX_LEN);
	/* Actual data in packet might be less in error cases,
	 * hence take minimum of pkt_len and sum of data_len.
	 * This is done to run through negative test cases.
	 */
	data_len = 0;
	seg = m;
	while (seg) {
		data_len += seg->data_len;
		seg = seg->next;
	}
	len = RTE_MIN(len, data_len);
	/* Copy mbuf payload to continuous buffer */
	output = rte_pktmbuf_read(m, 0, len, output_text);
	if (output != output_text)
		/* Single segment mbuf, copy manually */
		memcpy(output_text, output, len);

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		/* Device-generated IVs must be unique across packets */
		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(output_text, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			/* Transport mode keeps the original IP header */
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			/* Tunnel mode: validate the newly added outer header */
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/* UDP encapsulation: the outer header must carry UDP, optionally
	 * with the custom source/destination ports configured in the SA.
	 */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;

			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv4_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;

			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv6_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data need to be validated against expected.
	 * For inbound, output_text would be plain packet and for outbound
	 * output_text would IPsec packet. Validate by comparing against
	 * known vectors.
	 *
	 * In case of combined mode tests, the output_text from outbound
	 * operation (ie, IPsec packet) would need to be inbound processed to
	 * obtain the plain text. Copy output_text to result data, 'res_d', so
	 * that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(output_text, len, m->ol_flags, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(output_text, len, td, res_d);
}
1082 
1083 int
1084 test_ipsec_status_check(const struct ipsec_test_data *td,
1085 			struct rte_crypto_op *op,
1086 			const struct ipsec_test_flags *flags,
1087 			enum rte_security_ipsec_sa_direction dir,
1088 			int pkt_num)
1089 {
1090 	int ret = TEST_SUCCESS;
1091 
1092 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1093 	    td->ar_packet) {
1094 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1095 			printf("Anti replay test case failed\n");
1096 			return TEST_FAILED;
1097 		} else {
1098 			return TEST_SUCCESS;
1099 		}
1100 	}
1101 
1102 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1103 	    flags->sa_expiry_pkts_hard &&
1104 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
1105 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1106 			printf("SA hard expiry (pkts) test failed\n");
1107 			return TEST_FAILED;
1108 		} else {
1109 			return TEST_SUCCESS;
1110 		}
1111 	}
1112 
1113 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1114 	    flags->tunnel_hdr_verify) {
1115 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1116 			printf("Tunnel header verify test case failed\n");
1117 			return TEST_FAILED;
1118 		} else {
1119 			return TEST_SUCCESS;
1120 		}
1121 	}
1122 
1123 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1124 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1125 			printf("ICV corruption test case failed\n");
1126 			ret = TEST_FAILED;
1127 		}
1128 	} else {
1129 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1130 			printf("Security op processing failed [pkt_num: %d]\n",
1131 			       pkt_num);
1132 			ret = TEST_FAILED;
1133 		}
1134 	}
1135 
1136 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1137 		if (!(op->aux_flags &
1138 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1139 			printf("SA soft expiry (pkts) test failed\n");
1140 			ret = TEST_FAILED;
1141 		}
1142 	}
1143 
1144 	return ret;
1145 }
1146 
1147 int
1148 test_ipsec_stats_verify(void *ctx,
1149 			void *sess,
1150 			const struct ipsec_test_flags *flags,
1151 			enum rte_security_ipsec_sa_direction dir)
1152 {
1153 	struct rte_security_stats stats = {0};
1154 	int ret = TEST_SUCCESS;
1155 
1156 	if (flags->stats_success) {
1157 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1158 			return TEST_FAILED;
1159 
1160 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1161 			if (stats.ipsec.opackets != 1 ||
1162 			    stats.ipsec.oerrors != 0)
1163 				ret = TEST_FAILED;
1164 		} else {
1165 			if (stats.ipsec.ipackets != 1 ||
1166 			    stats.ipsec.ierrors != 0)
1167 				ret = TEST_FAILED;
1168 		}
1169 	}
1170 
1171 	return ret;
1172 }
1173 
1174 int
1175 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1176 {
1177 	struct rte_ipv4_hdr *iph4;
1178 	struct rte_ipv6_hdr *iph6;
1179 	bool cksum_dirty = false;
1180 
1181 	iph4 = (struct rte_ipv4_hdr *)pkt;
1182 
1183 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1184 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1185 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1186 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1187 		uint16_t frag_off;
1188 
1189 		if (!is_ipv4(iph4)) {
1190 			printf("Invalid packet type\n");
1191 			return -1;
1192 		}
1193 
1194 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1195 
1196 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1197 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1198 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1199 		else
1200 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1201 
1202 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1203 		cksum_dirty = true;
1204 	}
1205 
1206 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1207 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1208 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1209 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1210 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1211 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1212 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1213 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1214 
1215 		if (is_ipv4(iph4)) {
1216 			uint8_t tos;
1217 
1218 			tos = iph4->type_of_service;
1219 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1220 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1221 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1222 					(TEST_IPSEC_DSCP_VAL << 2));
1223 			else
1224 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1225 
1226 			iph4->type_of_service = tos;
1227 			cksum_dirty = true;
1228 		} else {
1229 			uint32_t vtc_flow;
1230 
1231 			iph6 = (struct rte_ipv6_hdr *)pkt;
1232 
1233 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1234 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1235 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1236 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1237 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1238 			else
1239 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1240 
1241 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1242 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1243 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1244 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1245 			else
1246 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1247 
1248 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1249 		}
1250 	}
1251 
1252 	if (cksum_dirty && is_ipv4(iph4)) {
1253 		iph4->hdr_checksum = 0;
1254 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1255 	}
1256 
1257 	return 0;
1258 }
1259