xref: /dpdk/app/test/test_ipfrag.c (revision 8f1d23ece06adff5eae9f1b4365bdbbd3abee2b2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Red Hat, Inc.
3  */
4 
5 #include "test.h"
6 
7 #include <time.h>
8 
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_hexdump.h>
12 #include <rte_ip.h>
13 
14 #ifdef RTE_EXEC_ENV_WINDOWS
15 static int
16 test_ipfrag(void)
17 {
18 	printf("ipfrag not supported on Windows, skipping test\n");
19 	return TEST_SKIPPED;
20 }
21 
22 #else
23 
24 #include <rte_ip_frag.h>
25 #include <rte_mbuf.h>
26 #include <rte_random.h>
27 
28 #define NUM_MBUFS 128
29 #define BURST 32
30 
/*
 * Canned IPv4 option blocks that fragments are compared against.  The
 * bytes look like a Record Route option (type 0x07, copied bit clear)
 * followed by a source-route style option (type 0x83, copied bit set)
 * per the RFC 791 type encoding — assumption from the byte values;
 * confirm against the generator in v4_allocate_packet_of.
 */

/* First fragment when copied options are preserved: both options. */
uint8_t expected_first_frag_ipv4_opts_copied[] = {
	0x07, 0x0b, 0x04, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x83,
	0x07, 0x04, 0xc0, 0xa8,
	0xe3, 0x96, 0x00, 0x00,
};

/* Non-first fragments keep only the option whose copied bit is set. */
uint8_t expected_sub_frag_ipv4_opts_copied[] = {
	0x83, 0x07, 0x04, 0xc0,
	0xa8, 0xe3, 0x96, 0x00,
};

/* With option copying disabled the first fragment carries only the
 * zero-padded non-copied option. */
uint8_t expected_first_frag_ipv4_opts_nocopied[] = {
	0x07, 0x0b, 0x04, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
};

/* ...and subsequent fragments carry no options at all (length 0). */
uint8_t expected_sub_frag_ipv4_opts_nocopied[0];
51 
/* Per-fragment IPv4 option state, used both for the expected values and
 * for what is actually read back from a generated fragment. */
struct test_opt_data {
	bool is_first_frag;		 /**< offset is 0 */
	bool opt_copied;		 /**< ip option copied flag */
	uint16_t len;			 /**< option data len */
	uint8_t data[RTE_IPV4_HDR_OPT_MAX_LEN]; /**< option data */
};
58 
/* Mempools created once per suite (see setup_buf_pool): source packets,
 * direct fragment mbufs, and indirect (attach/zero-copy) fragment mbufs. */
static struct rte_mempool *pkt_pool,
			  *direct_pool,
			  *indirect_pool;
62 
/* Render len bytes of hex as a lowercase NUL-terminated hex string.
 * str must provide at least 2 * len + 1 bytes. */
static inline void
hex_to_str(uint8_t *hex, uint16_t len, char *str)
{
	uint16_t idx;

	for (idx = 0; idx < len; idx++)
		str += sprintf(str, "%02x", hex[idx]);
	*str = '\0';
}
74 
75 static int
76 setup_buf_pool(void)
77 {
78 	pkt_pool = rte_pktmbuf_pool_create("FRAG_MBUF_POOL",
79 					   NUM_MBUFS, BURST, 0,
80 					   RTE_MBUF_DEFAULT_BUF_SIZE,
81 					   SOCKET_ID_ANY);
82 	if (pkt_pool == NULL) {
83 		printf("%s: Error creating pkt mempool\n", __func__);
84 		goto bad_setup;
85 	}
86 
87 	direct_pool = rte_pktmbuf_pool_create("FRAG_D_MBUF_POOL",
88 					      NUM_MBUFS, BURST, 0,
89 					      RTE_MBUF_DEFAULT_BUF_SIZE,
90 					      SOCKET_ID_ANY);
91 	if (direct_pool == NULL) {
92 		printf("%s: Error creating direct mempool\n", __func__);
93 		goto bad_setup;
94 	}
95 
96 	indirect_pool = rte_pktmbuf_pool_create("FRAG_I_MBUF_POOL",
97 						NUM_MBUFS, BURST, 0,
98 						0, SOCKET_ID_ANY);
99 	if (indirect_pool == NULL) {
100 		printf("%s: Error creating indirect mempool\n", __func__);
101 		goto bad_setup;
102 	}
103 
104 	return TEST_SUCCESS;
105 
106 bad_setup:
107 	rte_mempool_free(pkt_pool);
108 	pkt_pool = NULL;
109 
110 	rte_mempool_free(direct_pool);
111 	direct_pool = NULL;
112 
113 	return TEST_FAILED;
114 }
115 
/* Suite-level setup: build the shared mbuf pools once for all cases. */
static int testsuite_setup(void)
{
	return setup_buf_pool();
}
120 
121 static void testsuite_teardown(void)
122 {
123 	rte_mempool_free(pkt_pool);
124 	rte_mempool_free(direct_pool);
125 	rte_mempool_free(indirect_pool);
126 
127 	pkt_pool = NULL;
128 	direct_pool = NULL;
129 	indirect_pool = NULL;
130 }
131 
/* Per-case setup: nothing to do, the pools persist for the whole suite. */
static int ut_setup(void)
{
	return TEST_SUCCESS;
}
136 
/* Per-case teardown: intentionally empty. */
static void ut_teardown(void)
{
}
140 
141 static inline void
142 test_get_ipv4_opt(bool is_first_frag, bool opt_copied,
143 	struct test_opt_data *expected_opt)
144 {
145 	if (is_first_frag) {
146 		if (opt_copied) {
147 			expected_opt->len =
148 				sizeof(expected_first_frag_ipv4_opts_copied);
149 			memcpy(expected_opt->data,
150 				expected_first_frag_ipv4_opts_copied,
151 				sizeof(expected_first_frag_ipv4_opts_copied));
152 		} else {
153 			expected_opt->len =
154 				sizeof(expected_first_frag_ipv4_opts_nocopied);
155 			memcpy(expected_opt->data,
156 				expected_first_frag_ipv4_opts_nocopied,
157 				sizeof(expected_first_frag_ipv4_opts_nocopied));
158 		}
159 	} else {
160 		if (opt_copied) {
161 			expected_opt->len =
162 				sizeof(expected_sub_frag_ipv4_opts_copied);
163 			memcpy(expected_opt->data,
164 				expected_sub_frag_ipv4_opts_copied,
165 				sizeof(expected_sub_frag_ipv4_opts_copied));
166 		} else {
167 			expected_opt->len =
168 				sizeof(expected_sub_frag_ipv4_opts_nocopied);
169 			memcpy(expected_opt->data,
170 				expected_sub_frag_ipv4_opts_nocopied,
171 				sizeof(expected_sub_frag_ipv4_opts_nocopied));
172 		}
173 	}
174 }
175 
/*
 * Build a complete IPv4 packet in mbuf b (single segment).
 *
 * fill   - byte value the payload is patterned with
 * s      - payload length in bytes; the IPv4 header (plus any options)
 *          is added on top of this
 * df/mf  - set the Don't Fragment (0x4000) / More Fragments (0x2000)
 *          bits in the fragment_offset field
 * off    - fragment offset value OR'd into the field (8-byte units)
 * ttl    - time to live; 0 selects the default of 64
 * proto  - L4 protocol id; 0 selects ICMP (1)
 * pktid  - value for the IPv4 identification field
 * have_opt / is_first_frag / opt_copied - select which canned option
 *          block (if any) is appended after the base header
 *
 * The header checksum is left at zero; src/dst are fixed test
 * addresses (8.8.8.8 -> 8.8.4.4).
 */
static void
v4_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s,
	int df, uint8_t mf, uint16_t off, uint8_t ttl, uint8_t proto,
	uint16_t pktid, bool have_opt, bool is_first_frag, bool opt_copied)
{
	b->data_off = 0;
	char *data = rte_pktmbuf_mtod(b, char *);
	rte_be16_t fragment_offset = 0;	/* fragmentation offset */
	uint16_t iph_len;
	struct test_opt_data opt;

	opt.len = 0;

	if (have_opt)
		test_get_ipv4_opt(is_first_frag, opt_copied, &opt);

	/* Header length = fixed 20 bytes plus selected option bytes. */
	iph_len = sizeof(struct rte_ipv4_hdr) + opt.len;
	memset(data, fill, iph_len + s);

	struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)data;

	/* version 4 in the high nibble, IHL in 32-bit words in the low. */
	hdr->version_ihl = 0x40; /* ipv4 */
	hdr->version_ihl += (iph_len / 4);
	hdr->type_of_service = 0;
	b->pkt_len = s + iph_len;
	b->data_len = b->pkt_len;
	hdr->total_length = rte_cpu_to_be_16(b->pkt_len);
	hdr->packet_id = rte_cpu_to_be_16(pktid);

	if (df)
		fragment_offset |= 0x4000;

	if (mf)
		fragment_offset |= 0x2000;

	if (off)
		fragment_offset |= off;

	hdr->fragment_offset = rte_cpu_to_be_16(fragment_offset);

	if (!ttl)
		ttl = 64; /* default to 64 */

	if (!proto)
		proto = 1; /* icmp */

	hdr->time_to_live = ttl;
	hdr->next_proto_id = proto;
	hdr->hdr_checksum = 0;
	hdr->src_addr = rte_cpu_to_be_32(0x8080808);
	hdr->dst_addr = rte_cpu_to_be_32(0x8080404);

	/* Append the option bytes immediately after the base header. */
	memcpy(hdr + 1, opt.data, opt.len);
}
231 
232 static void
233 v6_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s, uint8_t ttl,
234 		      uint8_t proto, uint16_t pktid)
235 {
236 	/* Create a packet, 2k bytes long */
237 	b->data_off = 0;
238 	char *data = rte_pktmbuf_mtod(b, char *);
239 
240 	memset(data, fill, sizeof(struct rte_ipv6_hdr) + s);
241 
242 	struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)data;
243 	b->pkt_len = s + sizeof(struct rte_ipv6_hdr);
244 	b->data_len = b->pkt_len;
245 
246 	/* basic v6 header */
247 	hdr->vtc_flow = rte_cpu_to_be_32(0x60 << 24 | pktid);
248 	hdr->payload_len = rte_cpu_to_be_16(b->pkt_len);
249 	hdr->proto = proto;
250 	hdr->hop_limits = ttl;
251 
252 	memset(hdr->src_addr, 0x08, sizeof(hdr->src_addr));
253 	memset(hdr->dst_addr, 0x04, sizeof(hdr->src_addr));
254 }
255 
/* Release the num fragment mbufs produced by a fragmentation call. */
static inline void
test_free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t idx;

	for (idx = 0; idx != num; idx++)
		rte_pktmbuf_free(mb[idx]);
}
263 
264 static inline void
265 test_get_offset(struct rte_mbuf **mb, int32_t len,
266 	uint16_t *offset, int ipv)
267 {
268 	int32_t i;
269 
270 	for (i = 0; i < len; i++) {
271 		if (ipv == 4) {
272 			struct rte_ipv4_hdr *iph =
273 			    rte_pktmbuf_mtod(mb[i], struct rte_ipv4_hdr *);
274 			offset[i] = iph->fragment_offset;
275 		} else if (ipv == 6) {
276 			struct ipv6_extension_fragment *fh =
277 			    rte_pktmbuf_mtod_offset(
278 					mb[i],
279 					struct ipv6_extension_fragment *,
280 					sizeof(struct rte_ipv6_hdr));
281 			offset[i] = fh->frag_data;
282 		}
283 	}
284 }
285 
/*
 * Read back the IPv4 option bytes of each of the num fragments in mb[]
 * into the opt[] array (one test_opt_data per fragment).  Only ipv == 4
 * is handled; other values leave opt[] untouched.
 */
static inline void
test_get_frag_opt(struct rte_mbuf **mb, int32_t num,
	struct test_opt_data *opt, int ipv, bool opt_copied)
{
	int32_t i;

	for (i = 0; i < num; i++) {
		if (ipv == 4) {
			struct rte_ipv4_hdr *iph =
			    rte_pktmbuf_mtod(mb[i], struct rte_ipv4_hdr *);
			/* IHL is in 32-bit words; options are whatever
			 * exceeds the fixed 20-byte header. */
			uint16_t header_len = (iph->version_ihl &
				RTE_IPV4_HDR_IHL_MASK) *
				RTE_IPV4_IHL_MULTIPLIER;
			uint16_t opt_len = header_len -
				sizeof(struct rte_ipv4_hdr);

			opt->opt_copied = opt_copied;

			/* Fragment offset 0 marks the first fragment. */
			if ((rte_be_to_cpu_16(iph->fragment_offset) &
				    RTE_IPV4_HDR_OFFSET_MASK) == 0)
				opt->is_first_frag = true;
			else
				opt->is_first_frag = false;

			if (likely(opt_len <= RTE_IPV4_HDR_OPT_MAX_LEN)) {
				char *iph_opt = rte_pktmbuf_mtod_offset(mb[i],
				    char *, sizeof(struct rte_ipv4_hdr));
				opt->len = opt_len;
				memcpy(opt->data, iph_opt, opt_len);
			} else {
				/* Oversized header: record EOL filler so
				 * the later comparison clearly fails. */
				opt->len = RTE_IPV4_HDR_OPT_MAX_LEN;
				memset(opt->data, RTE_IPV4_HDR_OPT_EOL,
				    sizeof(opt->data));
			}
			opt++;
		}
	}
}
324 
325 static int
326 test_ip_frag(void)
327 {
328 	static const uint16_t RND_ID = UINT16_MAX;
329 	int result = TEST_SUCCESS;
330 	size_t i, j;
331 
332 	struct test_ip_frags {
333 		int      ipv;
334 		size_t   mtu_size;
335 		size_t   pkt_size;
336 		int      set_df;
337 		uint8_t  set_mf;
338 		uint16_t set_of;
339 		uint8_t  ttl;
340 		uint8_t  proto;
341 		uint16_t pkt_id;
342 		int      expected_frags;
343 		uint16_t expected_fragment_offset[BURST];
344 		bool have_opt;
345 		bool is_first_frag;
346 		bool opt_copied;
347 	} tests[] = {
348 		 {4, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
349 		  {0x2000, 0x009D}, false},
350 		 {4, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, 0,            2,
351 		  {0x2000, 0x009D}, false},
352 		 {4,  600, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       3,
353 		  {0x2000, 0x2048, 0x0090}, false},
354 		 {4, 4, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,    -EINVAL},
355 		 {4, 600, 1400, 1, 0, 0, 64, IPPROTO_ICMP, RND_ID, -ENOTSUP},
356 		 {4, 600, 1400, 0, 0, 0, 0, IPPROTO_ICMP, RND_ID,         3,
357 		  {0x2000, 0x2046, 0x008C}, true, true, true},
358 		 /* The first fragment */
359 		 {4, 68, 104, 0, 1, 0, 0, IPPROTO_ICMP, RND_ID,           5,
360 		  {0x2000, 0x2003, 0x2006, 0x2009, 0x200C}, true, true, true},
361 		 /* The middle fragment */
362 		 {4, 68, 104, 0, 1, 13, 0, IPPROTO_ICMP, RND_ID,          3,
363 		  {0x200D, 0x2012, 0x2017}, true, false, true},
364 		 /* The last fragment */
365 		 {4, 68, 104, 0, 0, 26, 0, IPPROTO_ICMP, RND_ID,          3,
366 		  {0x201A, 0x201F, 0x0024}, true, false, true},
367 		 /* The first fragment */
368 		 {4, 68, 104, 0, 1, 0, 0, IPPROTO_ICMP, RND_ID,           4,
369 		  {0x2000, 0x2004, 0x2008, 0x200C}, true, true, false},
370 		 /* The middle fragment */
371 		 {4, 68, 104, 0, 1, 13, 0, IPPROTO_ICMP, RND_ID,          3,
372 		  {0x200D, 0x2013, 0x2019}, true, false, false},
373 		 /* The last fragment */
374 		 {4, 68, 104, 0, 0, 26, 0, IPPROTO_ICMP, RND_ID,          3,
375 		  {0x201A, 0x2020, 0x0026}, true, false, false},
376 		 {6, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
377 		  {0x0001, 0x04D0}, false},
378 		 {6, 1300, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
379 		  {0x0001, 0x04E0}, false},
380 		 {6, 4, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,    -EINVAL},
381 		 {6, 1300, 1400, 0, 0, 0, 0, IPPROTO_ICMP, RND_ID,        2,
382 		  {0x0001, 0x04E0}, false},
383 	};
384 
385 	for (i = 0; i < RTE_DIM(tests); i++) {
386 		int32_t len = 0;
387 		uint16_t fragment_offset[BURST];
388 		struct test_opt_data opt_res[BURST];
389 		struct test_opt_data opt_exp;
390 		uint16_t pktid = tests[i].pkt_id;
391 		struct rte_mbuf *pkts_out[BURST];
392 		struct rte_mbuf *b = rte_pktmbuf_alloc(pkt_pool);
393 
394 		RTE_TEST_ASSERT_NOT_EQUAL(b, NULL,
395 					  "Failed to allocate pkt.");
396 
397 		if (tests[i].pkt_id == RND_ID)
398 			pktid = rte_rand_max(UINT16_MAX);
399 
400 		if (tests[i].ipv == 4) {
401 			v4_allocate_packet_of(b, 0x41414141,
402 					      tests[i].pkt_size,
403 					      tests[i].set_df,
404 					      tests[i].set_mf,
405 					      tests[i].set_of,
406 					      tests[i].ttl,
407 					      tests[i].proto,
408 					      pktid,
409 					      tests[i].have_opt,
410 					      tests[i].is_first_frag,
411 					      tests[i].opt_copied);
412 		} else if (tests[i].ipv == 6) {
413 			v6_allocate_packet_of(b, 0x41414141,
414 					      tests[i].pkt_size,
415 					      tests[i].ttl,
416 					      tests[i].proto,
417 					      pktid);
418 		}
419 
420 		if (tests[i].ipv == 4)
421 			if (i % 2)
422 				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
423 						       tests[i].mtu_size,
424 						       direct_pool,
425 						       indirect_pool);
426 			else
427 				len = rte_ipv4_fragment_copy_nonseg_packet(b,
428 						       pkts_out,
429 						       BURST,
430 						       tests[i].mtu_size,
431 						       direct_pool);
432 		else if (tests[i].ipv == 6)
433 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
434 						       tests[i].mtu_size,
435 						       direct_pool,
436 						       indirect_pool);
437 
438 		rte_pktmbuf_free(b);
439 
440 		if (len > 0) {
441 			test_get_offset(pkts_out, len,
442 			    fragment_offset, tests[i].ipv);
443 			if (tests[i].have_opt)
444 				test_get_frag_opt(pkts_out, len, opt_res,
445 					tests[i].ipv, tests[i].opt_copied);
446 			test_free_fragments(pkts_out, len);
447 		}
448 
449 		printf("[check frag number]%zd: checking %d with %d\n", i, len,
450 		       tests[i].expected_frags);
451 		RTE_TEST_ASSERT_EQUAL(len, tests[i].expected_frags,
452 				      "Failed case %zd.\n", i);
453 
454 		if (len > 0) {
455 			for (j = 0; j < (size_t)len; j++) {
456 				printf("[check offset]%zd-%zd: checking %d with %d\n",
457 				    i, j, fragment_offset[j],
458 				    rte_cpu_to_be_16(
459 					tests[i].expected_fragment_offset[j]));
460 				RTE_TEST_ASSERT_EQUAL(fragment_offset[j],
461 				    rte_cpu_to_be_16(
462 					tests[i].expected_fragment_offset[j]),
463 				    "Failed case %zd.\n", i);
464 			}
465 
466 			if (tests[i].have_opt && (tests[i].ipv == 4)) {
467 				for (j = 0; j < (size_t)len; j++) {
468 					char opt_res_str[2 *
469 						RTE_IPV4_HDR_OPT_MAX_LEN + 1];
470 					char opt_exp_str[2 *
471 						RTE_IPV4_HDR_OPT_MAX_LEN + 1];
472 
473 					test_get_ipv4_opt(
474 						opt_res[j].is_first_frag,
475 						opt_res[j].opt_copied,
476 						&opt_exp);
477 					hex_to_str(opt_res[j].data,
478 						opt_res[j].len,
479 						opt_res_str);
480 					hex_to_str(opt_exp.data,
481 						opt_exp.len,
482 						opt_exp_str);
483 
484 					printf(
485 						"[check ipv4 option]%zd-%zd: checking (len:%u)%s with (len:%u)%s\n",
486 						i, j,
487 						opt_res[j].len, opt_res_str,
488 						opt_exp.len, opt_exp_str);
489 						RTE_TEST_ASSERT_SUCCESS(
490 							strcmp(opt_res_str,
491 								opt_exp_str),
492 						"Failed case %zd.\n", i);
493 				}
494 			}
495 		}
496 
497 	}
498 
499 	return result;
500 }
501 
/* Suite definition: one test case; pools are created/destroyed by the
 * suite-level setup/teardown, not per case. */
static struct unit_test_suite ipfrag_testsuite  = {
	.suite_name = "IP Frag Unit Test Suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown,
			     test_ip_frag),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
513 
/* Test entry point: raise log verbosity for easier debugging, then run
 * the suite. */
static int
test_ipfrag(void)
{
	rte_log_set_global_level(RTE_LOG_DEBUG);
	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);

	return unit_test_suite_runner(&ipfrag_testsuite);
}
522 
523 #endif /* !RTE_EXEC_ENV_WINDOWS */
524 
525 REGISTER_TEST_COMMAND(ipfrag_autotest, test_ipfrag);
526