xref: /dpdk/app/test/test_cryptodev_security_ipsec.c (revision d02c6bfcb99a7d5a3595e89a6a05c841767904c1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #ifndef RTE_EXEC_ENV_WINDOWS
6 
7 #include <rte_common.h>
8 #include <rte_cryptodev.h>
9 #include <rte_esp.h>
10 #include <rte_ip.h>
11 #include <rte_security.h>
12 #include <rte_tcp.h>
13 #include <rte_udp.h>
14 
15 #include "test.h"
16 #include "test_cryptodev_security_ipsec.h"
17 
18 #define IV_LEN_MAX 16
19 
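/*
 * Full list of algorithm combinations to be exercised: every AEAD algorithm
 * as a standalone entry, followed by every cipher and auth algorithm pairing.
 * Filled at runtime by test_ipsec_alg_list_populate().
 */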
20 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
21 				  (RTE_DIM(cipher_list) *
22 				   RTE_DIM(auth_list))];
23 
24 static bool
25 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
26 {
27 	/* The IP version number must be 4 */
28 	if (((pkt->version_ihl) >> 4) != 4)
29 		return false;
30 	/*
31 	 * The IP header length field must be large enough to hold the
32 	 * minimum length legal IP datagram (20 bytes = 5 words).
33 	 */
34 	if ((pkt->version_ihl & 0xf) < 5)
35 		return false;
36 
37 	/*
38 	 * The IP total length field must be large enough to hold the IP
39 	 * datagram header, whose length is specified in the IP header length
40 	 * field.
41 	 */
42 	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
43 		return false;
44 
45 	return true;
46 }
47 
48 static bool
49 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
50 {
51 	/* The IP version number must be 6 */
52 	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
53 		return false;
54 
55 	return true;
56 }
57 
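/*
 * Populate alg_list: AEAD entries first (param2 left NULL), then one entry
 * per cipher/auth combination. A test suite would typically call this once
 * and then iterate the list, along the lines of this illustrative
 * (hypothetical) caller:
 *
 *	test_ipsec_alg_list_populate();
 *	for (i = 0; i < RTE_DIM(alg_list); i++)
 *		run_case(alg_list[i].param1, alg_list[i].param2);
 */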
58 void
59 test_ipsec_alg_list_populate(void)
60 {
61 	unsigned long i, j, index = 0;
62 
63 	for (i = 0; i < RTE_DIM(aead_list); i++) {
64 		alg_list[index].param1 = &aead_list[i];
65 		alg_list[index].param2 = NULL;
66 		index++;
67 	}
68 
69 	for (i = 0; i < RTE_DIM(cipher_list); i++) {
70 		for (j = 0; j < RTE_DIM(auth_list); j++) {
71 			alg_list[index].param1 = &cipher_list[i];
72 			alg_list[index].param2 = &auth_list[j];
73 			index++;
74 		}
75 	}
76 }
77 
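/*
 * Check that every IPsec option requested in 'ipsec_xform' is advertised in
 * the security capability 'sec_cap'. Returns -ENOTSUP on the first missing
 * option (logging the reason unless 'silent' is set), 0 otherwise.
 */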
78 int
79 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
80 			   const struct rte_security_capability *sec_cap,
81 			   bool silent)
82 {
83 	/* Verify security capabilities */
84 
85 	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
86 		if (!silent)
87 			RTE_LOG(INFO, USER1, "ESN is not supported\n");
88 		return -ENOTSUP;
89 	}
90 
91 	if (ipsec_xform->options.udp_encap == 1 &&
92 	    sec_cap->ipsec.options.udp_encap == 0) {
93 		if (!silent)
94 			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
95 		return -ENOTSUP;
96 	}
97 
98 	if (ipsec_xform->options.udp_ports_verify == 1 &&
99 	    sec_cap->ipsec.options.udp_ports_verify == 0) {
100 		if (!silent)
101 			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
102 				"verification is not supported\n");
103 		return -ENOTSUP;
104 	}
105 
106 	if (ipsec_xform->options.copy_dscp == 1 &&
107 	    sec_cap->ipsec.options.copy_dscp == 0) {
108 		if (!silent)
109 			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
110 		return -ENOTSUP;
111 	}
112 
113 	if (ipsec_xform->options.copy_flabel == 1 &&
114 	    sec_cap->ipsec.options.copy_flabel == 0) {
115 		if (!silent)
116 			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
117 		return -ENOTSUP;
118 	}
119 
120 	if (ipsec_xform->options.copy_df == 1 &&
121 	    sec_cap->ipsec.options.copy_df == 0) {
122 		if (!silent)
123 			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
124 		return -ENOTSUP;
125 	}
126 
127 	if (ipsec_xform->options.dec_ttl == 1 &&
128 	    sec_cap->ipsec.options.dec_ttl == 0) {
129 		if (!silent)
130 			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
131 		return -ENOTSUP;
132 	}
133 
134 	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
135 		if (!silent)
136 			RTE_LOG(INFO, USER1, "ECN is not supported\n");
137 		return -ENOTSUP;
138 	}
139 
140 	if (ipsec_xform->options.stats == 1 &&
141 	    sec_cap->ipsec.options.stats == 0) {
142 		if (!silent)
143 			RTE_LOG(INFO, USER1, "Stats are not supported\n");
144 		return -ENOTSUP;
145 	}
146 
147 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
148 	    (ipsec_xform->options.iv_gen_disable == 1) &&
149 	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
150 		if (!silent)
151 			RTE_LOG(INFO, USER1,
152 				"Application provided IV is not supported\n");
153 		return -ENOTSUP;
154 	}
155 
156 	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
157 	    (ipsec_xform->options.tunnel_hdr_verify >
158 	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
159 		if (!silent)
160 			RTE_LOG(INFO, USER1,
161 				"Tunnel header verify is not supported\n");
162 		return -ENOTSUP;
163 	}
164 
165 	if (ipsec_xform->options.ip_csum_enable == 1 &&
166 	    sec_cap->ipsec.options.ip_csum_enable == 0) {
167 		if (!silent)
168 			RTE_LOG(INFO, USER1,
169 				"Inner IP checksum is not supported\n");
170 		return -ENOTSUP;
171 	}
172 
173 	if (ipsec_xform->options.l4_csum_enable == 1 &&
174 	    sec_cap->ipsec.options.l4_csum_enable == 0) {
175 		if (!silent)
176 			RTE_LOG(INFO, USER1,
177 				"Inner L4 checksum is not supported\n");
178 		return -ENOTSUP;
179 	}
180 
181 	if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
182 		if (!silent)
183 			RTE_LOG(INFO, USER1,
184 				"Replay window size is not supported\n");
185 		return -ENOTSUP;
186 	}
187 
188 	return 0;
189 }
190 
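/*
 * Walk the crypto capabilities attached to the security capability and check
 * that the AEAD transform (algorithm, key, digest, AAD and IV lengths) is
 * supported. The cipher and auth variants below follow the same pattern.
 */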
191 int
192 test_ipsec_crypto_caps_aead_verify(
193 		const struct rte_security_capability *sec_cap,
194 		struct rte_crypto_sym_xform *aead)
195 {
196 	const struct rte_cryptodev_symmetric_capability *sym_cap;
197 	const struct rte_cryptodev_capabilities *crypto_cap;
198 	int j = 0;
199 
200 	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
201 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
202 		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
203 				crypto_cap->sym.xform_type == aead->type &&
204 				crypto_cap->sym.aead.algo == aead->aead.algo) {
205 			sym_cap = &crypto_cap->sym;
206 			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
207 					aead->aead.key.length,
208 					aead->aead.digest_length,
209 					aead->aead.aad_length,
210 					aead->aead.iv.length) == 0)
211 				return 0;
212 		}
213 	}
214 
215 	return -ENOTSUP;
216 }
217 
218 int
219 test_ipsec_crypto_caps_cipher_verify(
220 		const struct rte_security_capability *sec_cap,
221 		struct rte_crypto_sym_xform *cipher)
222 {
223 	const struct rte_cryptodev_symmetric_capability *sym_cap;
224 	const struct rte_cryptodev_capabilities *cap;
225 	int j = 0;
226 
227 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
228 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
229 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
230 				cap->sym.xform_type == cipher->type &&
231 				cap->sym.cipher.algo == cipher->cipher.algo) {
232 			sym_cap = &cap->sym;
233 			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
234 					cipher->cipher.key.length,
235 					cipher->cipher.iv.length) == 0)
236 				return 0;
237 		}
238 	}
239 
240 	return -ENOTSUP;
241 }
242 
243 int
244 test_ipsec_crypto_caps_auth_verify(
245 		const struct rte_security_capability *sec_cap,
246 		struct rte_crypto_sym_xform *auth)
247 {
248 	const struct rte_cryptodev_symmetric_capability *sym_cap;
249 	const struct rte_cryptodev_capabilities *cap;
250 	int j = 0;
251 
252 	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
253 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
254 		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
255 				cap->sym.xform_type == auth->type &&
256 				cap->sym.auth.algo == auth->auth.algo) {
257 			sym_cap = &cap->sym;
258 			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
259 					auth->auth.key.length,
260 					auth->auth.digest_length,
261 					auth->auth.iv.length) == 0)
262 				return 0;
263 		}
264 	}
265 
266 	return -ENOTSUP;
267 }
268 
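/*
 * Derive an inbound test vector from an outbound one: swap input/output
 * text, flip the SA direction to ingress and switch the crypto operations
 * to decrypt/verify.
 */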
269 void
270 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
271 			  struct ipsec_test_data *td_in)
272 {
273 	memcpy(td_in, td_out, sizeof(*td_in));
274 
275 	/* Populate output text of td_in with input text of td_out */
276 	memcpy(td_in->output_text.data, td_out->input_text.data,
277 	       td_out->input_text.len);
278 	td_in->output_text.len = td_out->input_text.len;
279 
280 	/* Populate input text of td_in with output text of td_out */
281 	memcpy(td_in->input_text.data, td_out->output_text.data,
282 	       td_out->output_text.len);
283 	td_in->input_text.len = td_out->output_text.len;
284 
285 	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
286 
287 	if (td_in->aead) {
288 		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
289 	} else {
290 		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
291 		td_in->xform.chain.cipher.cipher.op =
292 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
293 	}
294 }
295 
296 static bool
297 is_ipv4(void *ip)
298 {
299 	struct rte_ipv4_hdr *ipv4 = ip;
300 	uint8_t ip_ver;
301 
302 	ip_ver = (ipv4->version_ihl & 0xf0) >> 4; /* version is the upper nibble */
303 	if (ip_ver == IPVERSION)
304 		return true;
305 	else
306 		return false;
307 }
308 
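/*
 * Clear the IPv4 header checksum and/or the inner TCP/UDP checksum so that
 * the checksums computed during IPsec processing can later be verified.
 */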
309 static void
310 test_ipsec_csum_init(void *ip, bool l3, bool l4)
311 {
312 	struct rte_ipv4_hdr *ipv4;
313 	struct rte_tcp_hdr *tcp;
314 	struct rte_udp_hdr *udp;
315 	uint8_t next_proto;
316 	uint8_t size;
317 
318 	if (is_ipv4(ip)) {
319 		ipv4 = ip;
320 		size = sizeof(struct rte_ipv4_hdr);
321 		next_proto = ipv4->next_proto_id;
322 
323 		if (l3)
324 			ipv4->hdr_checksum = 0;
325 	} else {
326 		size = sizeof(struct rte_ipv6_hdr);
327 		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
328 	}
329 
330 	if (l4) {
331 		switch (next_proto) {
332 		case IPPROTO_TCP:
333 			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
334 			tcp->cksum = 0;
335 			break;
336 		case IPPROTO_UDP:
337 			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
338 			udp->dgram_cksum = 0;
339 			break;
340 		default:
341 			return;
342 		}
343 	}
344 }
345 
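/*
 * Build 'nb_td' test vectors from the templates, selecting the AEAD or
 * cipher+auth template based on 'param1'/'param2' and then applying the
 * requested test flags (IV generation, lifetime, checksum offloads, mode,
 * tunnel type, DF/DSCP copy options, etc.).
 */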
346 void
347 test_ipsec_td_prepare(const struct crypto_param *param1,
348 		      const struct crypto_param *param2,
349 		      const struct ipsec_test_flags *flags,
350 		      struct ipsec_test_data *td_array,
351 		      int nb_td)
352 
353 {
354 	struct ipsec_test_data *td;
355 	int i;
356 
357 	memset(td_array, 0, nb_td * sizeof(*td));
358 
359 	for (i = 0; i < nb_td; i++) {
360 		td = &td_array[i];
361 
362 		/* Prepare fields based on param */
363 
364 		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
365 			/* Copy template for packet & key fields */
366 			if (flags->ipv6)
367 				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
368 			else
369 				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
370 
371 			td->aead = true;
372 			td->xform.aead.aead.algo = param1->alg.aead;
373 			td->xform.aead.aead.key.length = param1->key_length;
374 		} else {
375 			/* Copy template for packet & key fields */
376 			if (flags->ipv6)
377 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
378 					sizeof(*td));
379 			else
380 				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
381 					sizeof(*td));
382 
383 			td->aead = false;
384 			td->xform.chain.cipher.cipher.algo = param1->alg.cipher;
385 			td->xform.chain.cipher.cipher.key.length =
386 					param1->key_length;
387 			td->xform.chain.cipher.cipher.iv.length =
388 					param1->iv_length;
389 			td->xform.chain.auth.auth.algo = param2->alg.auth;
390 			td->xform.chain.auth.auth.key.length =
391 					param2->key_length;
392 			td->xform.chain.auth.auth.digest_length =
393 					param2->digest_length;
394 
395 		}
396 
397 		if (flags->iv_gen)
398 			td->ipsec_xform.options.iv_gen_disable = 0;
399 
400 		if (flags->sa_expiry_pkts_soft)
401 			td->ipsec_xform.life.packets_soft_limit =
402 					IPSEC_TEST_PACKETS_MAX - 1;
403 
404 		if (flags->ip_csum) {
405 			td->ipsec_xform.options.ip_csum_enable = 1;
406 			test_ipsec_csum_init(&td->input_text.data, true, false);
407 		}
408 
409 		if (flags->l4_csum) {
410 			td->ipsec_xform.options.l4_csum_enable = 1;
411 			test_ipsec_csum_init(&td->input_text.data, false, true);
412 		}
413 
414 		if (flags->transport) {
415 			td->ipsec_xform.mode =
416 					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
417 		} else {
418 			td->ipsec_xform.mode =
419 					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
420 
421 			if (flags->tunnel_ipv6)
422 				td->ipsec_xform.tunnel.type =
423 						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
424 			else
425 				td->ipsec_xform.tunnel.type =
426 						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
427 		}
428 
429 		if (flags->stats_success)
430 			td->ipsec_xform.options.stats = 1;
431 
432 		if (flags->fragment) {
433 			struct rte_ipv4_hdr *ip;
434 			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
435 			ip->fragment_offset = rte_cpu_to_be_16(4);
436 			ip->hdr_checksum = rte_ipv4_cksum(ip);
437 		}
438 
439 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
440 		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
441 			td->ipsec_xform.options.copy_df = 1;
442 
443 		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
444 		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
445 			td->ipsec_xform.options.copy_dscp = 1;
446 	}
447 }
448 
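/*
 * Adjust the inbound vectors generated from outbound processing: copy the
 * expected plain text, optionally corrupt the ICV, and set the SA options
 * (UDP encap, header verification, checksum offloads) under test.
 */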
449 void
450 test_ipsec_td_update(struct ipsec_test_data td_inb[],
451 		     const struct ipsec_test_data td_outb[],
452 		     int nb_td,
453 		     const struct ipsec_test_flags *flags)
454 {
455 	int i;
456 
457 	for (i = 0; i < nb_td; i++) {
458 		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
459 		       td_outb[i].input_text.len);
460 		td_inb[i].output_text.len = td_outb[i].input_text.len;
461 
462 		if (flags->icv_corrupt) {
463 			int icv_pos = td_inb[i].input_text.len - 4;
464 			td_inb[i].input_text.data[icv_pos] += 1;
465 		}
466 
467 		if (flags->sa_expiry_pkts_hard)
468 			td_inb[i].ipsec_xform.life.packets_hard_limit =
469 					IPSEC_TEST_PACKETS_MAX - 1;
470 
471 		if (flags->udp_encap)
472 			td_inb[i].ipsec_xform.options.udp_encap = 1;
473 
474 		if (flags->udp_ports_verify)
475 			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
476 
477 		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
478 			flags->tunnel_hdr_verify;
479 
480 		if (flags->ip_csum)
481 			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
482 
483 		if (flags->l4_csum)
484 			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
485 
486 		/* Clear outbound specific flags */
487 		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
488 	}
489 }
490 
491 void
492 test_ipsec_display_alg(const struct crypto_param *param1,
493 		       const struct crypto_param *param2)
494 {
495 	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
496 		printf("\t%s [%d]",
497 		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
498 		       param1->key_length * 8);
499 	} else {
500 		printf("\t%s",
501 		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
502 		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
503 			printf(" [%d]", param1->key_length * 8);
504 		printf(" %s",
505 		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
506 		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
507 			printf(" [%dB ICV]", param2->digest_length);
508 	}
509 	printf("\n");
510 }
511 
512 static int
513 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
514 {
515 	int len = 0;
516 
517 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
518 		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
519 			if (td->ipsec_xform.tunnel.type ==
520 					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
521 				len += sizeof(struct rte_ipv4_hdr);
522 			else
523 				len += sizeof(struct rte_ipv6_hdr);
524 		}
525 	}
526 
527 	return len;
528 }
529 
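/*
 * Verify that the IV in the ESP header of an outbound packet has not been
 * generated before. Previously seen IVs are kept in a static queue sized
 * for IPSEC_TEST_PACKETS_MAX packets.
 */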
530 static int
531 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
532 {
533 	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
534 	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
535 	int i, iv_pos, iv_len;
536 	static int index;
537 
538 	if (td->aead)
539 		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
540 	else
541 		iv_len = td->xform.chain.cipher.cipher.iv.length;
542 
543 	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
544 	output_text += iv_pos;
545 
546 	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
547 
548 	/* Compare against previous values */
549 	for (i = 0; i < index; i++) {
550 		iv_tmp = &iv_queue[i * IV_LEN_MAX];
551 
552 		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
553 			printf("IV repeated\n");
554 			return TEST_FAILED;
555 		}
556 	}
557 
558 	/* Save IV for future comparisons */
559 
560 	iv_tmp = &iv_queue[index * IV_LEN_MAX];
561 	memcpy(iv_tmp, output_text, iv_len);
562 	index++;
563 
564 	if (index == IPSEC_TEST_PACKETS_MAX)
565 		index = 0;
566 
567 	return TEST_SUCCESS;
568 }
569 
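/*
 * Recompute the IPv4 header checksum of the decrypted inner packet and
 * compare it against the value carried in the header (IPv4 only). The
 * function below does the equivalent check for the inner TCP/UDP checksum.
 */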
570 static int
571 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
572 {
573 	uint16_t actual_cksum, expected_cksum;
574 	struct rte_ipv4_hdr *ip;
575 
576 	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
577 
578 	if (!is_ipv4((void *)ip))
579 		return TEST_SKIPPED;
580 
581 	actual_cksum = ip->hdr_checksum;
582 
583 	ip->hdr_checksum = 0;
584 
585 	expected_cksum = rte_ipv4_cksum(ip);
586 
587 	if (actual_cksum != expected_cksum)
588 		return TEST_FAILED;
589 
590 	return TEST_SUCCESS;
591 }
592 
593 static int
594 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
595 {
596 	uint16_t actual_cksum = 0, expected_cksum = 0;
597 	struct rte_ipv4_hdr *ipv4;
598 	struct rte_ipv6_hdr *ipv6;
599 	struct rte_tcp_hdr *tcp;
600 	struct rte_udp_hdr *udp;
601 	void *ip, *l4;
602 
603 	ip = rte_pktmbuf_mtod(m, void *);
604 
605 	if (is_ipv4(ip)) {
606 		ipv4 = ip;
607 		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
608 
609 		switch (ipv4->next_proto_id) {
610 		case IPPROTO_TCP:
611 			tcp = (struct rte_tcp_hdr *)l4;
612 			actual_cksum = tcp->cksum;
613 			tcp->cksum = 0;
614 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
615 			break;
616 		case IPPROTO_UDP:
617 			udp = (struct rte_udp_hdr *)l4;
618 			actual_cksum = udp->dgram_cksum;
619 			udp->dgram_cksum = 0;
620 			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
621 			break;
622 		default:
623 			break;
624 		}
625 	} else {
626 		ipv6 = ip;
627 		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
628 
629 		switch (ipv6->proto) {
630 		case IPPROTO_TCP:
631 			tcp = (struct rte_tcp_hdr *)l4;
632 			actual_cksum = tcp->cksum;
633 			tcp->cksum = 0;
634 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
635 			break;
636 		case IPPROTO_UDP:
637 			udp = (struct rte_udp_hdr *)l4;
638 			actual_cksum = udp->dgram_cksum;
639 			udp->dgram_cksum = 0;
640 			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
641 			break;
642 		default:
643 			break;
644 		}
645 	}
646 
647 	if (actual_cksum != expected_cksum)
648 		return TEST_FAILED;
649 
650 	return TEST_SUCCESS;
651 }
652 
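/*
 * Compare the processed packet against the expected vector: skip UDP
 * encapsulation and tunnel headers where applicable, and for checksum test
 * flags validate the mbuf ol_flags and recomputed checksums instead of the
 * payload. Otherwise byte-compare the payload against the known vector.
 */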
653 static int
654 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
655 		     bool silent, const struct ipsec_test_flags *flags)
656 {
657 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
658 	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
659 	uint8_t td_output_text[4096];
660 	int ret;
661 
662 	/* For tests where an error status indicates success, skip output verification */
663 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
664 	    (flags->icv_corrupt ||
665 	     flags->sa_expiry_pkts_hard ||
666 	     flags->tunnel_hdr_verify ||
667 	     td->ar_packet))
668 		return TEST_SUCCESS;
669 
670 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
671 	   flags->udp_encap) {
672 		const struct rte_ipv4_hdr *iph4;
673 		const struct rte_ipv6_hdr *iph6;
674 
675 		if (td->ipsec_xform.tunnel.type ==
676 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
677 			iph4 = (const struct rte_ipv4_hdr *)output_text;
678 			if (iph4->next_proto_id != IPPROTO_UDP) {
679 				printf("UDP header is not found\n");
680 				return TEST_FAILED;
681 			}
682 		} else {
683 			iph6 = (const struct rte_ipv6_hdr *)output_text;
684 			if (iph6->proto != IPPROTO_UDP) {
685 				printf("UDP header is not found\n");
686 				return TEST_FAILED;
687 			}
688 		}
689 
690 		len -= sizeof(struct rte_udp_hdr);
691 		output_text += sizeof(struct rte_udp_hdr);
692 	}
693 
694 	if (len != td->output_text.len) {
695 		printf("Output length (%d) does not match expected length (%d)\n",
696 			len, td->output_text.len);
697 		return TEST_FAILED;
698 	}
699 
700 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
701 				flags->fragment) {
702 		const struct rte_ipv4_hdr *iph4;
703 		iph4 = (const struct rte_ipv4_hdr *)output_text;
704 		if (iph4->fragment_offset) {
705 			printf("Output packet is fragmented\n");
706 			return TEST_FAILED;
707 		}
708 	}
709 
710 	skip = test_ipsec_tunnel_hdr_len_get(td);
711 
712 	len -= skip;
713 	output_text += skip;
714 
715 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
716 				flags->ip_csum) {
717 		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
718 			ret = test_ipsec_l3_csum_verify(m);
719 		else
720 			ret = TEST_FAILED;
721 
722 		if (ret == TEST_FAILED)
723 			printf("Inner IP checksum test failed\n");
724 
725 		return ret;
726 	}
727 
728 	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
729 				flags->l4_csum) {
730 		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
731 			ret = test_ipsec_l4_csum_verify(m);
732 		else
733 			ret = TEST_FAILED;
734 
735 		if (ret == TEST_FAILED)
736 			printf("Inner L4 checksum test failed\n");
737 
738 		return ret;
739 	}
740 
741 	memcpy(td_output_text, td->output_text.data + skip, len);
742 
743 	if (test_ipsec_pkt_update(td_output_text, flags)) {
744 		printf("Could not update expected vector\n");
745 		return TEST_FAILED;
746 	}
747 
748 	if (memcmp(output_text, td_output_text, len)) {
749 		if (silent)
750 			return TEST_FAILED;
751 
752 		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
753 			"output text not as expected");
754 
755 		rte_hexdump(stdout, "expected", td_output_text, len);
756 		rte_hexdump(stdout, "actual", output_text, len);
757 		return TEST_FAILED;
758 	}
759 
760 	return TEST_SUCCESS;
761 }
762 
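/*
 * Capture the outbound result into 'res_d' so that it can be fed back as an
 * inbound test vector in combined (encap + decap) mode tests.
 */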
763 static int
764 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
765 		   struct ipsec_test_data *res_d)
766 {
767 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
768 	uint32_t len = rte_pktmbuf_pkt_len(m);
769 
770 	memcpy(res_d, td, sizeof(*res_d));
771 	memcpy(res_d->input_text.data, output_text, len);
772 	res_d->input_text.len = len;
773 
774 	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
775 	if (res_d->aead) {
776 		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
777 	} else {
778 		res_d->xform.chain.cipher.cipher.op =
779 				RTE_CRYPTO_CIPHER_OP_DECRYPT;
780 		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
781 	}
782 
783 	return TEST_SUCCESS;
784 }
785 
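/*
 * Validate the outer IPv4 header of a tunnel mode packet: basic sanity of
 * the header plus the expected DF bit and DSCP value for the flags under
 * test. An IPv6 counterpart follows below.
 */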
786 static int
787 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
788 			     const struct ipsec_test_flags *flags)
789 {
790 	uint8_t tos, dscp;
791 	uint16_t f_off;
792 
793 	if (!is_valid_ipv4_pkt(iph4)) {
794 		printf("Tunnel outer header is not IPv4\n");
795 		return -1;
796 	}
797 
798 	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
799 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
800 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
801 		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
802 			printf("DF bit is not set\n");
803 			return -1;
804 		}
805 	} else {
806 		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
807 			printf("DF bit is set\n");
808 			return -1;
809 		}
810 	}
811 
812 	tos = iph4->type_of_service;
813 	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
814 
815 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
816 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
817 		if (dscp != TEST_IPSEC_DSCP_VAL) {
818 			printf("DSCP value does not match [exp: %x, actual: %x]\n",
819 			       TEST_IPSEC_DSCP_VAL, dscp);
820 			return -1;
821 		}
822 	} else {
823 		if (dscp != 0) {
824 			printf("DSCP value is set [exp: 0, actual: %x]\n",
825 			       dscp);
826 			return -1;
827 		}
828 	}
829 
830 	return 0;
831 }
832 
833 static int
834 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
835 			     const struct ipsec_test_flags *flags)
836 {
837 	uint32_t vtc_flow;
838 	uint8_t dscp;
839 
840 	if (!is_valid_ipv6_pkt(iph6)) {
841 		printf("Tunnel outer header is not IPv6\n");
842 		return -1;
843 	}
844 
845 	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
846 	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
847 	       (RTE_IPV6_HDR_TC_SHIFT + 2);
848 
849 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
850 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
851 		if (dscp != TEST_IPSEC_DSCP_VAL) {
852 			printf("DSCP value does not match [exp: %x, actual: %x]\n",
853 			       TEST_IPSEC_DSCP_VAL, dscp);
854 			return -1;
855 		}
856 	} else {
857 		if (dscp != 0) {
858 			printf("DSCP value is set [exp: 0, actual: %x]\n",
859 			       dscp);
860 			return -1;
861 		}
862 	}
863 
864 	return 0;
865 }
866 
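/*
 * Main result handler: for egress, check IV uniqueness and the outer (or
 * transport) header; then either verify the packet against the known vector
 * (res_d == NULL) or prepare 'res_d' for a subsequent inbound run.
 */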
867 int
868 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
869 			struct ipsec_test_data *res_d, bool silent,
870 			const struct ipsec_test_flags *flags)
871 {
872 	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
873 	int ret;
874 
875 	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
876 		const struct rte_ipv4_hdr *iph4;
877 		const struct rte_ipv6_hdr *iph6;
878 
879 		if (flags->iv_gen) {
880 			ret = test_ipsec_iv_verify_push(m, td);
881 			if (ret != TEST_SUCCESS)
882 				return ret;
883 		}
884 
885 		iph4 = (const struct rte_ipv4_hdr *)output_text;
886 
887 		if (td->ipsec_xform.mode ==
888 				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
889 			if (flags->ipv6) {
890 				iph6 = (const struct rte_ipv6_hdr *)output_text;
891 				if (is_valid_ipv6_pkt(iph6) == false) {
892 					printf("Transport packet is not IPv6\n");
893 					return TEST_FAILED;
894 				}
895 			} else {
896 				if (is_valid_ipv4_pkt(iph4) == false) {
897 					printf("Transport packet is not IPv4\n");
898 					return TEST_FAILED;
899 				}
900 			}
901 		} else {
902 			if (td->ipsec_xform.tunnel.type ==
903 					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
904 				if (test_ipsec_iph4_hdr_validate(iph4, flags))
905 					return TEST_FAILED;
906 			} else {
907 				iph6 = (const struct rte_ipv6_hdr *)output_text;
908 				if (test_ipsec_iph6_hdr_validate(iph6, flags))
909 					return TEST_FAILED;
910 			}
911 		}
912 	}
913 
914 	/*
915 	 * In case of known vector tests and all inbound tests, the res_d provided
916 	 * would be NULL and the output data needs to be validated against the
917 	 * expected vector. For inbound, output_text would be the plain packet and
918 	 * for outbound, output_text would be the IPsec packet. Validate by
919 	 * comparing against known vectors.
920 	 *
921 	 * In case of combined mode tests, the output_text from the outbound
922 	 * operation (i.e., the IPsec packet) would need to be inbound processed to
923 	 * obtain the plain text. Copy output_text to result data, 'res_d', so
924 	 * that inbound processing can be done.
925 	 */
926 
927 	if (res_d == NULL)
928 		return test_ipsec_td_verify(m, td, silent, flags);
929 	else
930 		return test_ipsec_res_d_prepare(m, td, res_d);
931 }
932 
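/*
 * Check the crypto op status against the expectation for the test case:
 * negative tests (anti-replay, hard expiry, tunnel header verify, ICV
 * corruption) expect RTE_CRYPTO_OP_STATUS_ERROR, everything else expects
 * success. Soft expiry is reported via aux_flags.
 */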
933 int
934 test_ipsec_status_check(const struct ipsec_test_data *td,
935 			struct rte_crypto_op *op,
936 			const struct ipsec_test_flags *flags,
937 			enum rte_security_ipsec_sa_direction dir,
938 			int pkt_num)
939 {
940 	int ret = TEST_SUCCESS;
941 
942 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
943 	    td->ar_packet) {
944 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
945 			printf("Anti replay test case failed\n");
946 			return TEST_FAILED;
947 		} else {
948 			return TEST_SUCCESS;
949 		}
950 	}
951 
952 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
953 	    flags->sa_expiry_pkts_hard &&
954 	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
955 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
956 			printf("SA hard expiry (pkts) test failed\n");
957 			return TEST_FAILED;
958 		} else {
959 			return TEST_SUCCESS;
960 		}
961 	}
962 
963 	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
964 	    flags->tunnel_hdr_verify) {
965 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
966 			printf("Tunnel header verify test case failed\n");
967 			return TEST_FAILED;
968 		} else {
969 			return TEST_SUCCESS;
970 		}
971 	}
972 
973 	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
974 		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
975 			printf("ICV corruption test case failed\n");
976 			ret = TEST_FAILED;
977 		}
978 	} else {
979 		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
980 			printf("Security op processing failed [pkt_num: %d]\n",
981 			       pkt_num);
982 			ret = TEST_FAILED;
983 		}
984 	}
985 
986 	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
987 		if (!(op->aux_flags &
988 		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
989 			printf("SA soft expiry (pkts) test failed\n");
990 			ret = TEST_FAILED;
991 		}
992 	}
993 
994 	return ret;
995 }
996 
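/*
 * When stats verification is requested, fetch the security session stats
 * and expect exactly one packet and no errors in the direction under test.
 */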
997 int
998 test_ipsec_stats_verify(struct rte_security_ctx *ctx,
999 			struct rte_security_session *sess,
1000 			const struct ipsec_test_flags *flags,
1001 			enum rte_security_ipsec_sa_direction dir)
1002 {
1003 	struct rte_security_stats stats = {0};
1004 	int ret = TEST_SUCCESS;
1005 
1006 	if (flags->stats_success) {
1007 		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1008 			return TEST_FAILED;
1009 
1010 		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1011 			if (stats.ipsec.opackets != 1 ||
1012 			    stats.ipsec.oerrors != 0)
1013 				ret = TEST_FAILED;
1014 		} else {
1015 			if (stats.ipsec.ipackets != 1 ||
1016 			    stats.ipsec.ierrors != 0)
1017 				ret = TEST_FAILED;
1018 		}
1019 	}
1020 
1021 	return ret;
1022 }
1023 
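/*
 * Update the expected inner/outer packet according to the DF and DSCP test
 * flags and refresh the IPv4 header checksum when the header was modified.
 */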
1024 int
1025 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1026 {
1027 	struct rte_ipv4_hdr *iph4;
1028 	struct rte_ipv6_hdr *iph6;
1029 	bool cksum_dirty = false;
1030 
1031 	iph4 = (struct rte_ipv4_hdr *)pkt;
1032 
1033 	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1034 	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1035 	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1036 	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1037 		uint16_t frag_off;
1038 
1039 		if (!is_ipv4(iph4)) {
1040 			printf("Invalid packet type\n");
1041 			return -1;
1042 		}
1043 
1044 		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1045 
1046 		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1047 		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1048 			frag_off |= RTE_IPV4_HDR_DF_FLAG;
1049 		else
1050 			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1051 
1052 		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1053 		cksum_dirty = true;
1054 	}
1055 
1056 	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1057 	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1058 	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1059 	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
1060 
1061 		if (is_ipv4(iph4)) {
1062 			uint8_t tos;
1063 
1064 			tos = iph4->type_of_service;
1065 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1066 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1067 				tos |= (RTE_IPV4_HDR_DSCP_MASK &
1068 					(TEST_IPSEC_DSCP_VAL << 2));
1069 			else
1070 				tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1071 
1072 			iph4->type_of_service = tos;
1073 			cksum_dirty = true;
1074 		} else {
1075 			uint32_t vtc_flow;
1076 
1077 			iph6 = (struct rte_ipv6_hdr *)pkt;
1078 
1079 			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1080 			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1081 			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1082 				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1083 					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1084 			else
1085 				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1086 
1087 			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
1088 		}
1089 	}
1090 
1091 	if (cksum_dirty && is_ipv4(iph4)) {
1092 		iph4->hdr_checksum = 0;
1093 		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1094 	}
1095 
1096 	return 0;
1097 }
1098 
1099 #endif /* !RTE_EXEC_ENV_WINDOWS */
1100