xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision 1d3a3e1875ca0fc6286fc29f650c4e0310c8b0b0)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
7 #include <rte_esp.h>
8 #include <rte_ip.h>
9 #include <rte_security.h>
10 #include <rte_tcp.h>
11 #include <rte_udp.h>
12 
13 #include "test.h"
14 #include "test_cryptodev_security_ipsec.h"
15 
16 #define IV_LEN_MAX 16
17 #define UDP_CUSTOM_SPORT 4650
18 #define UDP_CUSTOM_DPORT 4660
19 
20 #ifndef IPVERSION
21 #define IPVERSION 4
22 #endif
23 
/* Combined-algorithm list for ESP tests: every AEAD algorithm standalone,
 * followed by every cipher+auth pairing. Filled by
 * test_ipsec_alg_list_populate().
 */
struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

/* Algorithm list for AH tests: each auth algorithm (entry 0 skipped) used
 * alone and once more paired with the NULL cipher. Filled by
 * test_ipsec_ah_alg_list_populate().
 */
struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
29 
30 static bool
31 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
32 {
33 	/* The IP version number must be 4 */
34 	if (((pkt->version_ihl) >> 4) != 4)
35 		return false;
36 	/*
37 	 * The IP header length field must be large enough to hold the
38 	 * minimum length legal IP datagram (20 bytes = 5 words).
39 	 */
40 	if ((pkt->version_ihl & 0xf) < 5)
41 		return false;
42 
43 	/*
44 	 * The IP total length field must be large enough to hold the IP
45 	 * datagram header, whose length is specified in the IP header length
46 	 * field.
47 	 */
48 	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
49 		return false;
50 
51 	return true;
52 }
53 
54 static bool
55 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
56 {
57 	/* The IP version number must be 6 */
58 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
59 		return false;
60 
61 	return true;
62 }
63 
64 void
65 test_ipsec_alg_list_populate(void)
66 {
67 	unsigned long i, j, index = 0;
68 
69 	for (i = 0; i < RTE_DIM(aead_list); i++) {
70 		alg_list[index].param1 = &aead_list[i];
71 		alg_list[index].param2 = NULL;
72 		index++;
73 	}
74 
75 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
76 		for (j = 0; j < RTE_DIM(auth_list); j++) {
77 			alg_list[index].param1 = &cipher_list[i];
78 			alg_list[index].param2 = &auth_list[j];
79 			index++;
80 		}
81 	}
82 }
83 
84 void
85 test_ipsec_ah_alg_list_populate(void)
86 {
87 	unsigned long i, index = 0;
88 
89 	for (i = 1; i < RTE_DIM(auth_list); i++) {
90 		ah_alg_list[index].param1 = &auth_list[i];
91 		ah_alg_list[index].param2 = NULL;
92 		index++;
93 	}
94 
95 	for (i = 1; i < RTE_DIM(auth_list); i++) {
96 		/* NULL cipher */
97 		ah_alg_list[index].param1 = &cipher_list[0];
98 
99 		ah_alg_list[index].param2 = &auth_list[i];
100 		index++;
101 	}
102 }
103 
104 int
105 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
106 			   const struct rte_security_capability *sec_cap,
107 			   bool silent)
108 {
109 	/* Verify security capabilities */
110 
111 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
112 		if (!silent)
113 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
114 		return -ENOTSUP;
115 	}
116 
117 	if (ipsec_xform->options.udp_encap == 1 &&
118 	    sec_cap->ipsec.options.udp_encap == 0) {
119 		if (!silent)
120 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
121 		return -ENOTSUP;
122 	}
123 
124 	if (ipsec_xform->options.udp_ports_verify == 1 &&
125 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
126 		if (!silent)
127 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
128 				"verification is not supported\n");
129 		return -ENOTSUP;
130 	}
131 
132 	if (ipsec_xform->options.copy_dscp == 1 &&
133 	    sec_cap->ipsec.options.copy_dscp == 0) {
134 		if (!silent)
135 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
136 		return -ENOTSUP;
137 	}
138 
139 	if (ipsec_xform->options.copy_flabel == 1 &&
140 	    sec_cap->ipsec.options.copy_flabel == 0) {
141 		if (!silent)
142 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
143 		return -ENOTSUP;
144 	}
145 
146 	if (ipsec_xform->options.copy_df == 1 &&
147 	    sec_cap->ipsec.options.copy_df == 0) {
148 		if (!silent)
149 			RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
150 		return -ENOTSUP;
151 	}
152 
153 	if (ipsec_xform->options.dec_ttl == 1 &&
154 	    sec_cap->ipsec.options.dec_ttl == 0) {
155 		if (!silent)
156 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
157 		return -ENOTSUP;
158 	}
159 
160 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
161 		if (!silent)
162 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
163 		return -ENOTSUP;
164 	}
165 
166 	if (ipsec_xform->options.stats == 1 &&
167 	    sec_cap->ipsec.options.stats == 0) {
168 		if (!silent)
169 			RTE_LOG(INFO, USER1, "Stats is not supported\n");
170 		return -ENOTSUP;
171 	}
172 
173 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
174 	    (ipsec_xform->options.iv_gen_disable == 1) &&
175 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
176 		if (!silent)
177 			RTE_LOG(INFO, USER1,
178 				"Application provided IV is not supported\n");
179 		return -ENOTSUP;
180 	}
181 
182 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
183 	    (ipsec_xform->options.tunnel_hdr_verify >
184 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
185 		if (!silent)
186 			RTE_LOG(INFO, USER1,
187 				"Tunnel header verify is not supported\n");
188 		return -ENOTSUP;
189 	}
190 
191 	if (ipsec_xform->options.ip_csum_enable == 1 &&
192 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
193 		if (!silent)
194 			RTE_LOG(INFO, USER1,
195 				"Inner IP checksum is not supported\n");
196 		return -ENOTSUP;
197 	}
198 
199 	if (ipsec_xform->options.l4_csum_enable == 1 &&
200 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
201 		if (!silent)
202 			RTE_LOG(INFO, USER1,
203 				"Inner L4 checksum is not supported\n");
204 		return -ENOTSUP;
205 	}
206 
207 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
208 		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
209 			if (!silent)
210 				RTE_LOG(INFO, USER1,
211 					"Replay window size is not supported\n");
212 			return -ENOTSUP;
213 		}
214 	}
215 
216 	if (ipsec_xform->options.ingress_oop == 1 &&
217 	    sec_cap->ipsec.options.ingress_oop == 0) {
218 		if (!silent)
219 			RTE_LOG(INFO, USER1,
220 				"Inline Ingress OOP processing is not supported\n");
221 		return -ENOTSUP;
222 	}
223 
224 	return 0;
225 }
226 
227 void
228 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
229 			  struct ipsec_test_data *td_in)
230 {
231 	memcpy(td_in, td_out, sizeof(*td_in));
232 
233 	/* Populate output text of td_in with input text of td_out */
234 	memcpy(td_in->output_text.data, td_out->input_text.data,
235 	       td_out->input_text.len);
236 	td_in->output_text.len = td_out->input_text.len;
237 
238 	/* Populate input text of td_in with output text of td_out */
239 	memcpy(td_in->input_text.data, td_out->output_text.data,
240 	       td_out->output_text.len);
241 	td_in->input_text.len = td_out->output_text.len;
242 
243 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
244 
245 	if (td_in->aead) {
246 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
247 	} else {
248 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
249 		td_in->xform.chain.cipher.cipher.op =
250 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
251 	}
252 }
253 
254 static bool
255 is_ipv4(void *ip)
256 {
257 	struct rte_ipv4_hdr *ipv4 = ip;
258 	uint8_t ip_ver;
259 
260 	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
261 	if (ip_ver == IPVERSION)
262 		return true;
263 	else
264 		return false;
265 }
266 
267 static void
268 test_ipsec_csum_init(void *ip, bool l3, bool l4)
269 {
270 	struct rte_ipv4_hdr *ipv4;
271 	struct rte_tcp_hdr *tcp;
272 	struct rte_udp_hdr *udp;
273 	uint8_t next_proto;
274 	uint8_t size;
275 
276 	if (is_ipv4(ip)) {
277 		ipv4 = ip;
278 		size = sizeof(struct rte_ipv4_hdr);
279 		next_proto = ipv4->next_proto_id;
280 
281 		if (l3)
282 			ipv4->hdr_checksum = 0;
283 	} else {
284 		size = sizeof(struct rte_ipv6_hdr);
285 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
286 	}
287 
288 	if (l4) {
289 		switch (next_proto) {
290 		case IPPROTO_TCP:
291 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
292 			tcp->cksum = 0;
293 			break;
294 		case IPPROTO_UDP:
295 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
296 			udp->dgram_cksum = 0;
297 			break;
298 		default:
299 			return;
300 		}
301 	}
302 }
303 
/*
 * Populate an array of 'nb_td' test vectors from the built-in templates,
 * then adjust each one according to the algorithm parameters and test
 * flags.
 *
 * @param1:   primary algorithm (AEAD, auth-only, or cipher of a chain)
 * @param2:   auth algorithm of a cipher+auth chain (unused otherwise)
 * @flags:    test behavior switches (IPv6, checksum offload, tunnel, ...)
 * @td_array: output array of vectors, fully overwritten
 * @nb_td:    number of vectors to prepare
 */
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)

{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			/* CCM uses a 3-byte salt (vs 4 for GCM). */
			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				/* Auth-only transform (e.g. for AH). */
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				/* AES-GMAC additionally needs an IV. */
				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				/* Cipher (param1) + auth (param2) chain. */
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
						param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		/* Adjust the data to requested length */
		if (flags->plaintext_len && flags->ipv6) {
			struct rte_ipv6_hdr *ip6 = (struct rte_ipv6_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			/* Rebuild the input as IPv6+TCP with a pattern
			 * payload; payload is capped to the buffer size
			 * and floored at 16 bytes.
			 */
			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv6_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 16)
				payload_len = 16;

			/* IPv6 */
			ip6->proto = IPPROTO_TCP;
			ip6->payload_len = sizeof(*tcp) + payload_len;
			ip6->payload_len = rte_cpu_to_be_16(ip6->payload_len);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip6 + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv6_hdr) +
				sizeof(struct rte_tcp_hdr);
		} else if (flags->plaintext_len) {
			struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)td->input_text.data;
			struct rte_tcp_hdr *tcp;
			int64_t payload_len;
			uint8_t *data;
			int64_t i;

			/* Same rebuild for IPv4; payload floor is 8 bytes. */
			payload_len = RTE_MIN(flags->plaintext_len, IPSEC_TEXT_MAX_LEN);
			payload_len -= sizeof(struct rte_ipv4_hdr);
			payload_len -= sizeof(struct rte_tcp_hdr);
			if (payload_len <= 8)
				payload_len = 8;

			/* IPv4 */
			ip->next_proto_id = IPPROTO_TCP;
			ip->total_length = sizeof(*ip) + sizeof(*tcp) + payload_len;
			ip->total_length = rte_cpu_to_be_16(ip->total_length);
			ip->hdr_checksum = 0;
			ip->hdr_checksum = rte_ipv4_cksum(ip);

			/* TCP */
			tcp = (struct rte_tcp_hdr *)(ip + 1);
			data = (uint8_t *)(tcp + 1);
			for (i = 0; i < payload_len; i++)
				data[i] = i;
			tcp->cksum = 0;
			tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
			td->input_text.len = payload_len + sizeof(struct rte_ipv4_hdr) +
				sizeof(struct rte_tcp_hdr);
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		/* Let the device generate the IV when requested. */
		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			/* NOTE(review): assumes the input vector is IPv4
			 * here — confirm callers never combine 'fragment'
			 * with 'ipv6'.
			 */
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}
509 
510 void
511 test_ipsec_td_update(struct ipsec_test_data td_inb[],
512 		     const struct ipsec_test_data td_outb[],
513 		     int nb_td,
514 		     const struct ipsec_test_flags *flags)
515 {
516 	int i;
517 
518 	for (i = 0; i < nb_td; i++) {
519 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
520 		       td_outb[i].input_text.len);
521 		td_inb[i].output_text.len = td_outb->input_text.len;
522 
523 		if (flags->icv_corrupt) {
524 			int icv_pos = td_inb[i].input_text.len - 4;
525 			td_inb[i].input_text.data[icv_pos] += 1;
526 		}
527 
528 		if (flags->sa_expiry_pkts_hard)
529 			td_inb[i].ipsec_xform.life.packets_hard_limit =
530 					IPSEC_TEST_PACKETS_MAX - 1;
531 
532 		if (flags->udp_encap)
533 			td_inb[i].ipsec_xform.options.udp_encap = 1;
534 
535 		if (flags->udp_ports_verify)
536 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
537 
538 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
539 			flags->tunnel_hdr_verify;
540 
541 		if (flags->ip_csum)
542 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
543 
544 		if (flags->l4_csum)
545 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
546 
547 		/* Clear outbound specific flags */
548 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
549 	}
550 }
551 
552 void
553 test_ipsec_display_alg(const struct crypto_param *param1,
554 		       const struct crypto_param *param2)
555 {
556 	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
557 		printf("\t%s [%d]",
558 		       rte_cryptodev_get_aead_algo_string(param1->alg.aead),
559 		       param1->key_length * 8);
560 	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
561 		printf("\t%s",
562 		       rte_cryptodev_get_auth_algo_string(param1->alg.auth));
563 		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
564 			printf(" [%dB ICV]", param1->digest_length);
565 	} else {
566 		printf("\t%s",
567 		       rte_cryptodev_get_cipher_algo_string(param1->alg.cipher));
568 		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
569 			printf(" [%d]", param1->key_length * 8);
570 		printf(" %s",
571 		       rte_cryptodev_get_auth_algo_string(param2->alg.auth));
572 		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
573 			printf(" [%dB ICV]", param2->digest_length);
574 	}
575 	printf("\n");
576 }
577 
578 static int
579 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
580 {
581 	int len = 0;
582 
583 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
584 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
585 			if (td->ipsec_xform.tunnel.type ==
586 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
587 				len += sizeof(struct rte_ipv4_hdr);
588 			else
589 				len += sizeof(struct rte_ipv6_hdr);
590 		}
591 	}
592 
593 	return len;
594 }
595 
596 static int
597 test_ipsec_iv_verify_push(const uint8_t *output_text, const struct ipsec_test_data *td)
598 {
599 	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
600 	int i, iv_pos, iv_len;
601 	static int index;
602 	uint8_t *iv_tmp;
603 
604 	if (td->aead)
605 		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
606 	else
607 		iv_len = td->xform.chain.cipher.cipher.iv.length;
608 
609 	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
610 	output_text += iv_pos;
611 
612 	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
613 
614 	/* Compare against previous values */
615 	for (i = 0; i < index; i++) {
616 		iv_tmp = &iv_queue[i * IV_LEN_MAX];
617 
618 		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
619 			printf("IV repeated");
620 			return TEST_FAILED;
621 		}
622 	}
623 
624 	/* Save IV for future comparisons */
625 
626 	iv_tmp = &iv_queue[index * IV_LEN_MAX];
627 	memcpy(iv_tmp, output_text, iv_len);
628 	index++;
629 
630 	if (index == IPSEC_TEST_PACKETS_MAX)
631 		index = 0;
632 
633 	return TEST_SUCCESS;
634 }
635 
636 static int
637 test_ipsec_l3_csum_verify(uint8_t *output_text)
638 {
639 	uint16_t actual_cksum, expected_cksum;
640 	struct rte_ipv4_hdr *ip;
641 
642 	ip = (struct rte_ipv4_hdr *)output_text;
643 
644 	if (!is_ipv4((void *)ip))
645 		return TEST_SKIPPED;
646 
647 	actual_cksum = ip->hdr_checksum;
648 
649 	ip->hdr_checksum = 0;
650 
651 	expected_cksum = rte_ipv4_cksum(ip);
652 
653 	if (actual_cksum != expected_cksum)
654 		return TEST_FAILED;
655 
656 	return TEST_SUCCESS;
657 }
658 
659 static int
660 test_ipsec_l4_csum_verify(uint8_t *output_text)
661 {
662 	uint16_t actual_cksum = 0, expected_cksum = 0;
663 	struct rte_ipv4_hdr *ipv4;
664 	struct rte_ipv6_hdr *ipv6;
665 	struct rte_tcp_hdr *tcp;
666 	struct rte_udp_hdr *udp;
667 	void *ip, *l4;
668 
669 	ip = output_text;
670 
671 	if (is_ipv4(ip)) {
672 		ipv4 = ip;
673 		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
674 
675 		switch (ipv4->next_proto_id) {
676 		case IPPROTO_TCP:
677 			tcp = (struct rte_tcp_hdr *)l4;
678 			actual_cksum = tcp->cksum;
679 			tcp->cksum = 0;
680 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
681 			break;
682 		case IPPROTO_UDP:
683 			udp = (struct rte_udp_hdr *)l4;
684 			actual_cksum = udp->dgram_cksum;
685 			udp->dgram_cksum = 0;
686 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
687 			break;
688 		default:
689 			break;
690 		}
691 	} else {
692 		ipv6 = ip;
693 		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
694 
695 		switch (ipv6->proto) {
696 		case IPPROTO_TCP:
697 			tcp = (struct rte_tcp_hdr *)l4;
698 			actual_cksum = tcp->cksum;
699 			tcp->cksum = 0;
700 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
701 			break;
702 		case IPPROTO_UDP:
703 			udp = (struct rte_udp_hdr *)l4;
704 			actual_cksum = udp->dgram_cksum;
705 			udp->dgram_cksum = 0;
706 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
707 			break;
708 		default:
709 			break;
710 		}
711 	}
712 
713 	if (actual_cksum != expected_cksum)
714 		return TEST_FAILED;
715 
716 	return TEST_SUCCESS;
717 }
718 
719 static int
720 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
721 {
722 	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
723 	struct rte_ipv6_hdr *iph6_ex, *iph6_re;
724 
725 	if (is_ipv4(received) && is_ipv4(expected)) {
726 		iph4_ex = expected;
727 		iph4_re = received;
728 		iph4_ex->time_to_live -= 1;
729 		if (iph4_re->time_to_live != iph4_ex->time_to_live)
730 			return TEST_FAILED;
731 	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
732 		iph6_ex = expected;
733 		iph6_re = received;
734 		iph6_ex->hop_limits -= 1;
735 		if (iph6_re->hop_limits != iph6_ex->hop_limits)
736 			return TEST_FAILED;
737 	} else {
738 		printf("IP header version miss match\n");
739 		return TEST_FAILED;
740 	}
741 
742 	return TEST_SUCCESS;
743 }
744 
/*
 * Compare the processed packet ('output_text', 'len') against the expected
 * output in the test vector 'td', honoring the test flags (UDP encap
 * stripping on egress, checksum offload verification on ingress, TTL
 * decrement, fragmentation check, ...).
 *
 * Returns TEST_SUCCESS / TEST_FAILED. When 'silent' is set, mismatches are
 * not printed or hexdumped.
 */
static int
test_ipsec_td_verify(uint8_t *output_text, uint32_t len, uint32_t ol_flags,
		const struct ipsec_test_data *td, bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t td_output_text[IPSEC_TEXT_MAX_LEN];
	uint32_t skip;
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	/* Egress UDP encapsulation adds a UDP header the expected vector
	 * does not contain; skip past it before comparing.
	 */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
			len, td->output_text.len);
		return TEST_FAILED;
	}

	/* The test expects outbound packets to fit in the MTU unfragmented. */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
				flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	/* Inner IP checksum offload: rely on the mbuf offload flag plus a
	 * software recomputation; this replaces the byte comparison below.
	 */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->ip_csum) {
		if (ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	/* Same for inner L4 checksum offload. */
	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->l4_csum) {
		if (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(output_text);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	/* Skip the outer tunnel header (egress tunnel mode only) — its
	 * mutable fields (DSCP, TTL, ...) are validated elsewhere.
	 */
	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	/* Work on a local copy of the expected data, since the helpers
	 * below may modify it (TTL decrement, vector updates).
	 */
	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
				flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
			"output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
844 
845 static int
846 test_ipsec_res_d_prepare(const uint8_t *output_text, uint32_t len,
847 		const struct ipsec_test_data *td, struct ipsec_test_data *res_d)
848 {
849 	memcpy(res_d, td, sizeof(*res_d));
850 
851 	memcpy(&res_d->input_text.data, output_text, len);
852 	res_d->input_text.len = len;
853 
854 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
855 	if (res_d->aead) {
856 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
857 	} else {
858 		res_d->xform.chain.cipher.cipher.op =
859 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
860 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
861 	}
862 
863 	return TEST_SUCCESS;
864 }
865 
866 static int
867 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
868 			     const struct ipsec_test_flags *flags)
869 {
870 	uint8_t tos, dscp;
871 	uint16_t f_off;
872 
873 	if (!is_valid_ipv4_pkt(iph4)) {
874 		printf("Tunnel outer header is not IPv4\n");
875 		return -1;
876 	}
877 
878 	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
879 		printf("Tunnel outer header proto is not AH\n");
880 		return -1;
881 	}
882 
883 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
884 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
885 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
886 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
887 			printf("DF bit is not set\n");
888 			return -1;
889 		}
890 	} else {
891 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
892 			printf("DF bit is set\n");
893 			return -1;
894 		}
895 	}
896 
897 	tos = iph4->type_of_service;
898 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
899 
900 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
901 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
902 		if (dscp != TEST_IPSEC_DSCP_VAL) {
903 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
904 			       TEST_IPSEC_DSCP_VAL, dscp);
905 			return -1;
906 		}
907 	} else {
908 		if (dscp != 0) {
909 			printf("DSCP value is set [exp: 0, actual: %x]\n",
910 			       dscp);
911 			return -1;
912 		}
913 	}
914 
915 	return 0;
916 }
917 
918 static int
919 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
920 			     const struct ipsec_test_flags *flags)
921 {
922 	uint32_t vtc_flow;
923 	uint32_t flabel;
924 	uint8_t dscp;
925 
926 	if (!is_valid_ipv6_pkt(iph6)) {
927 		printf("Tunnel outer header is not IPv6\n");
928 		return -1;
929 	}
930 
931 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
932 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
933 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
934 
935 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
936 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
937 		if (dscp != TEST_IPSEC_DSCP_VAL) {
938 			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
939 			       TEST_IPSEC_DSCP_VAL, dscp);
940 			return -1;
941 		}
942 	} else {
943 		if (dscp != 0) {
944 			printf("DSCP value is set [exp: 0, actual: %x]\n",
945 			       dscp);
946 			return -1;
947 		}
948 	}
949 
950 	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
951 
952 	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
953 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
954 		if (flabel != TEST_IPSEC_FLABEL_VAL) {
955 			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
956 			       TEST_IPSEC_FLABEL_VAL, flabel);
957 			return -1;
958 		}
959 	} else {
960 		if (flabel != 0) {
961 			printf("FLABEL value is set [exp: 0, actual: %x]\n",
962 			       flabel);
963 			return -1;
964 		}
965 	}
966 
967 	return 0;
968 }
969 
/*
 * Post-process the mbuf produced by an IPsec operation: flatten it into a
 * contiguous buffer, validate outer headers / IV / UDP encapsulation on
 * egress, and then either verify against the expected vector (res_d ==
 * NULL) or capture the output into 'res_d' for a subsequent inbound pass
 * (combined-mode tests).
 *
 * Returns TEST_SUCCESS / TEST_FAILED.
 */
int
test_ipsec_post_process(const struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint32_t len = rte_pktmbuf_pkt_len(m), data_len;
	uint8_t output_text[IPSEC_TEXT_MAX_LEN];
	const struct rte_mbuf *seg;
	const uint8_t *output;
	int ret;

	memset(output_text, 0, IPSEC_TEXT_MAX_LEN);
	/* Actual data in packet might be less in error cases,
	 * hence take minimum of pkt_len and sum of data_len.
	 * This is done to run through negative test cases.
	 */
	data_len = 0;
	seg = m;
	while (seg) {
		data_len += seg->data_len;
		seg = seg->next;
	}
	len = RTE_MIN(len, data_len);
	/* Copy mbuf payload to continuous buffer */
	output = rte_pktmbuf_read(m, 0, len, output_text);
	if (output != output_text)
		/* Single segment mbuf, copy manually */
		memcpy(output_text, output, len);

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		/* When the device generates the IV, ensure it never repeats
		 * across packets.
		 */
		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(output_text, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			/* Transport mode: the original IP header is kept;
			 * only its version/sanity is checked here.
			 */
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			/* Tunnel mode: validate outer header fields (DF,
			 * DSCP, flow label, protocol).
			 */
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/* UDP encapsulation (NAT-T): the outer header must carry UDP, and
	 * when custom ports were configured they must appear in the packet.
	 */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	   flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;

			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv4_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;

			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv6_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data need to be validated against expected.
	 * For inbound, output_text would be plain packet and for outbound
	 * output_text would IPsec packet. Validate by comparing against
	 * known vectors.
	 *
	 * In case of combined mode tests, the output_text from outbound
	 * operation (ie, IPsec packet) would need to be inbound processed to
	 * obtain the plain text. Copy output_text to result data, 'res_d', so
	 * that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(output_text, len, m->ol_flags, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(output_text, len, td, res_d);
}
1108 
1109 int
1110 test_ipsec_status_check(const struct ipsec_test_data *td,
1111 			struct rte_crypto_op *op,
1112 			const struct ipsec_test_flags *flags,
1113 			enum rte_security_ipsec_sa_direction dir,
1114 			int pkt_num)
1115 {
1116 	int ret = TEST_SUCCESS;
1117 
1118 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1119 	    td->ar_packet) {
1120 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1121 			printf("Anti replay test case failed\n");
1122 			return TEST_FAILED;
1123 		} else {
1124 			return TEST_SUCCESS;
1125 		}
1126 	}
1127 
1128 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1129 	    flags->sa_expiry_pkts_hard &&
1130 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
1131 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1132 			printf("SA hard expiry (pkts) test failed\n");
1133 			return TEST_FAILED;
1134 		} else {
1135 			return TEST_SUCCESS;
1136 		}
1137 	}
1138 
1139 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1140 	    flags->tunnel_hdr_verify) {
1141 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1142 			printf("Tunnel header verify test case failed\n");
1143 			return TEST_FAILED;
1144 		} else {
1145 			return TEST_SUCCESS;
1146 		}
1147 	}
1148 
1149 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1150 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1151 			printf("ICV corruption test case failed\n");
1152 			ret = TEST_FAILED;
1153 		}
1154 	} else {
1155 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1156 			printf("Security op processing failed [pkt_num: %d]\n",
1157 			       pkt_num);
1158 			ret = TEST_FAILED;
1159 		}
1160 	}
1161 
1162 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1163 		if (!(op->aux_flags &
1164 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1165 			printf("SA soft expiry (pkts) test failed\n");
1166 			ret = TEST_FAILED;
1167 		}
1168 	}
1169 
1170 	return ret;
1171 }
1172 
1173 int
1174 test_ipsec_stats_verify(void *ctx,
1175 			void *sess,
1176 			const struct ipsec_test_flags *flags,
1177 			enum rte_security_ipsec_sa_direction dir)
1178 {
1179 	struct rte_security_stats stats = {0};
1180 	int ret = TEST_SUCCESS;
1181 
1182 	if (flags->stats_success) {
1183 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1184 			return TEST_FAILED;
1185 
1186 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1187 			if (stats.ipsec.opackets != 1 ||
1188 			    stats.ipsec.oerrors != 0)
1189 				ret = TEST_FAILED;
1190 		} else {
1191 			if (stats.ipsec.ipackets != 1 ||
1192 			    stats.ipsec.ierrors != 0)
1193 				ret = TEST_FAILED;
1194 		}
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 int
1201 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1202 {
1203 	struct rte_ipv4_hdr *iph4;
1204 	struct rte_ipv6_hdr *iph6;
1205 	bool cksum_dirty = false;
1206 
1207 	iph4 = (struct rte_ipv4_hdr *)pkt;
1208 
1209 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1210 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1211 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1212 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1213 		uint16_t frag_off;
1214 
1215 		if (!is_ipv4(iph4)) {
1216 			printf("Invalid packet type\n");
1217 			return -1;
1218 		}
1219 
1220 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1221 
1222 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1223 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1224 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1225 		else
1226 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1227 
1228 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1229 		cksum_dirty = true;
1230 	}
1231 
1232 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1233 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1234 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1235 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1236 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1237 	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1238 	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1239 	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1240 
1241 		if (is_ipv4(iph4)) {
1242 			uint8_t tos;
1243 
1244 			tos = iph4->type_of_service;
1245 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1246 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1247 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1248 					(TEST_IPSEC_DSCP_VAL << 2));
1249 			else
1250 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1251 
1252 			iph4->type_of_service = tos;
1253 			cksum_dirty = true;
1254 		} else {
1255 			uint32_t vtc_flow;
1256 
1257 			iph6 = (struct rte_ipv6_hdr *)pkt;
1258 
1259 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1260 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1261 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1262 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1263 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1264 			else
1265 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1266 
1267 			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1268 			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1269 				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1270 					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1271 			else
1272 				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1273 
1274 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1275 		}
1276 	}
1277 
1278 	if (cksum_dirty && is_ipv4(iph4)) {
1279 		iph4->hdr_checksum = 0;
1280 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1281 	}
1282 
1283 	return 0;
1284 }
1285