/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat, Inc.
 */

#include "test.h"

#include <time.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_hexdump.h>
#include <rte_ip.h>

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_ipfrag(void)
{
	printf("ipfrag not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_ip_frag.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_random.h>

#define NUM_MBUFS 128
#define BURST 32

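/*
 * IPv4 option bytes used both to build the input packets and as the
 * expected options in the produced fragments.  The *_copied arrays
 * carry an option whose copy bit is set (type 0x83), which is expected
 * in every fragment; the *_nocopied arrays carry only a non-copyable
 * option (type 0x07), which is expected in the first fragment only.
 */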
uint8_t expected_first_frag_ipv4_opts_copied[] = {
	0x07, 0x0b, 0x04, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x83,
	0x07, 0x04, 0xc0, 0xa8,
	0xe3, 0x96, 0x00, 0x00,
};

uint8_t expected_sub_frag_ipv4_opts_copied[] = {
	0x83, 0x07, 0x04, 0xc0,
	0xa8, 0xe3, 0x96, 0x00,
};

uint8_t expected_first_frag_ipv4_opts_nocopied[] = {
	0x07, 0x0b, 0x04, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
};

uint8_t expected_sub_frag_ipv4_opts_nocopied[0];

struct test_opt_data {
	bool is_first_frag;		 /**< offset is 0 */
	bool opt_copied;		 /**< ip option copied flag */
	uint16_t len;			 /**< option data len */
	uint8_t data[RTE_IPV4_HDR_OPT_MAX_LEN]; /**< option data */
};

static struct rte_mempool *pkt_pool,
			  *direct_pool,
			  *indirect_pool;

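/* Render a byte array as a lowercase hex string; str must hold 2 * len + 1 bytes. */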
static inline void
hex_to_str(uint8_t *hex, uint16_t len, char *str)
{
	int i;

	for (i = 0; i < len; i++) {
		sprintf(str, "%02x", hex[i]);
		str += 2;
	}
	*str = 0;
}

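/*
 * Create the mempools used by the fragmentation tests: one pool for the
 * input packets, plus direct and indirect pools for the output fragments.
 */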
static int
setup_buf_pool(void)
{
	pkt_pool = rte_pktmbuf_pool_create("FRAG_MBUF_POOL",
					   NUM_MBUFS, BURST, 0,
					   RTE_MBUF_DEFAULT_BUF_SIZE,
					   SOCKET_ID_ANY);
	if (pkt_pool == NULL) {
		printf("%s: Error creating pkt mempool\n", __func__);
		goto bad_setup;
	}

	direct_pool = rte_pktmbuf_pool_create("FRAG_D_MBUF_POOL",
					      NUM_MBUFS, BURST, 0,
					      RTE_MBUF_DEFAULT_BUF_SIZE,
					      SOCKET_ID_ANY);
	if (direct_pool == NULL) {
		printf("%s: Error creating direct mempool\n", __func__);
		goto bad_setup;
	}

	indirect_pool = rte_pktmbuf_pool_create("FRAG_I_MBUF_POOL",
						NUM_MBUFS, BURST, 0,
						0, SOCKET_ID_ANY);
	if (indirect_pool == NULL) {
		printf("%s: Error creating indirect mempool\n", __func__);
		goto bad_setup;
	}

	return TEST_SUCCESS;

bad_setup:
	rte_mempool_free(pkt_pool);
	pkt_pool = NULL;

	rte_mempool_free(direct_pool);
	direct_pool = NULL;

	return TEST_FAILED;
}

static int testsuite_setup(void)
{
	return setup_buf_pool();
}

static void testsuite_teardown(void)
{
	rte_mempool_free(pkt_pool);
	rte_mempool_free(direct_pool);
	rte_mempool_free(indirect_pool);

	pkt_pool = NULL;
	direct_pool = NULL;
	indirect_pool = NULL;
}

static int ut_setup(void)
{
	return TEST_SUCCESS;
}

static void ut_teardown(void)
{
}

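/*
 * Select the IPv4 option bytes for the given fragment position and copy
 * mode; used both to build the input packets and as the expected output.
 */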
static inline void
test_get_ipv4_opt(bool is_first_frag, bool opt_copied,
	struct test_opt_data *expected_opt)
{
	if (is_first_frag) {
		if (opt_copied) {
			expected_opt->len =
				sizeof(expected_first_frag_ipv4_opts_copied);
			rte_memcpy(expected_opt->data,
				expected_first_frag_ipv4_opts_copied,
				sizeof(expected_first_frag_ipv4_opts_copied));
		} else {
			expected_opt->len =
				sizeof(expected_first_frag_ipv4_opts_nocopied);
			rte_memcpy(expected_opt->data,
				expected_first_frag_ipv4_opts_nocopied,
				sizeof(expected_first_frag_ipv4_opts_nocopied));
		}
	} else {
		if (opt_copied) {
			expected_opt->len =
				sizeof(expected_sub_frag_ipv4_opts_copied);
			rte_memcpy(expected_opt->data,
				expected_sub_frag_ipv4_opts_copied,
				sizeof(expected_sub_frag_ipv4_opts_copied));
		} else {
			expected_opt->len =
				sizeof(expected_sub_frag_ipv4_opts_nocopied);
			rte_memcpy(expected_opt->data,
				expected_sub_frag_ipv4_opts_nocopied,
				sizeof(expected_sub_frag_ipv4_opts_nocopied));
		}
	}
}

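/*
 * Build an IPv4 packet in mbuf 'b' with the requested DF/MF flags, fragment
 * offset, TTL, protocol, packet id and (optionally) IPv4 header options,
 * followed by 's' payload bytes set to 'fill'.
 */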
static void
v4_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s,
	int df, uint8_t mf, uint16_t off, uint8_t ttl, uint8_t proto,
	uint16_t pktid, bool have_opt, bool is_first_frag, bool opt_copied)
{
	/* Build an IPv4 header (plus options) followed by s payload bytes */
	b->data_off = 0;
	char *data = rte_pktmbuf_mtod(b, char *);
	rte_be16_t fragment_offset = 0;	/* fragmentation offset */
	uint16_t iph_len;
	struct test_opt_data opt;

	opt.len = 0;

	if (have_opt)
		test_get_ipv4_opt(is_first_frag, opt_copied, &opt);

	iph_len = sizeof(struct rte_ipv4_hdr) + opt.len;
	memset(data, fill, iph_len + s);

	struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)data;

	hdr->version_ihl = 0x40; /* ipv4 */
	hdr->version_ihl += (iph_len / 4);
	hdr->type_of_service = 0;
	b->pkt_len = s + iph_len;
	b->data_len = b->pkt_len;
	hdr->total_length = rte_cpu_to_be_16(b->pkt_len);
	hdr->packet_id = rte_cpu_to_be_16(pktid);

	if (df)
		fragment_offset |= 0x4000;

	if (mf)
		fragment_offset |= 0x2000;

	if (off)
		fragment_offset |= off;

	hdr->fragment_offset = rte_cpu_to_be_16(fragment_offset);

	if (!ttl)
		ttl = 64; /* default to 64 */

	if (!proto)
		proto = 1; /* icmp */

	hdr->time_to_live = ttl;
	hdr->next_proto_id = proto;
	hdr->hdr_checksum = 0;
	hdr->src_addr = rte_cpu_to_be_32(0x8080808);
	hdr->dst_addr = rte_cpu_to_be_32(0x8080404);

	rte_memcpy(hdr + 1, opt.data, opt.len);
}

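/*
 * Build an IPv6 packet in mbuf 'b' with the given hop limit, next header
 * and packet id, followed by 's' payload bytes set to 'fill'.
 */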
static void
v6_allocate_packet_of(struct rte_mbuf *b, int fill, size_t s, uint8_t ttl,
		      uint8_t proto, uint16_t pktid)
{
	/* Build an IPv6 header followed by s payload bytes */
	b->data_off = 0;
	char *data = rte_pktmbuf_mtod(b, char *);

	memset(data, fill, sizeof(struct rte_ipv6_hdr) + s);

	struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)data;
	b->pkt_len = s + sizeof(struct rte_ipv6_hdr);
	b->data_len = b->pkt_len;

	/* basic v6 header */
	hdr->vtc_flow = rte_cpu_to_be_32(0x60 << 24 | pktid);
	hdr->payload_len = rte_cpu_to_be_16(b->pkt_len);
	hdr->proto = proto;
	hdr->hop_limits = ttl;

	memset(hdr->src_addr, 0x08, sizeof(hdr->src_addr));
	memset(hdr->dst_addr, 0x04, sizeof(hdr->dst_addr));
}

static inline void
test_free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i;
	for (i = 0; i < num; i++)
		rte_pktmbuf_free(mb[i]);
}

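/*
 * Collect the (big-endian) fragment offset field from each output fragment:
 * the IPv4 header field for IPv4, the fragment extension header for IPv6.
 */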
static inline void
test_get_offset(struct rte_mbuf **mb, int32_t len,
	uint16_t *offset, int ipv)
{
	int32_t i;

	for (i = 0; i < len; i++) {
		if (ipv == 4) {
			struct rte_ipv4_hdr *iph =
			    rte_pktmbuf_mtod(mb[i], struct rte_ipv4_hdr *);
			offset[i] = iph->fragment_offset;
		} else if (ipv == 6) {
			struct ipv6_extension_fragment *fh =
			    rte_pktmbuf_mtod_offset(
					mb[i],
					struct ipv6_extension_fragment *,
					sizeof(struct rte_ipv6_hdr));
			offset[i] = fh->frag_data;
		}
	}
}

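/* Extract the IPv4 options carried by each output fragment into 'opt'. */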
static inline void
test_get_frag_opt(struct rte_mbuf **mb, int32_t num,
	struct test_opt_data *opt, int ipv, bool opt_copied)
{
	int32_t i;

	for (i = 0; i < num; i++) {
		if (ipv == 4) {
			struct rte_ipv4_hdr *iph =
			    rte_pktmbuf_mtod(mb[i], struct rte_ipv4_hdr *);
			uint16_t header_len = (iph->version_ihl &
				RTE_IPV4_HDR_IHL_MASK) *
				RTE_IPV4_IHL_MULTIPLIER;
			uint16_t opt_len = header_len -
				sizeof(struct rte_ipv4_hdr);

			opt->opt_copied = opt_copied;

			if ((rte_be_to_cpu_16(iph->fragment_offset) &
				    RTE_IPV4_HDR_OFFSET_MASK) == 0)
				opt->is_first_frag = true;
			else
				opt->is_first_frag = false;

			if (likely(opt_len <= RTE_IPV4_HDR_OPT_MAX_LEN)) {
				char *iph_opt = rte_pktmbuf_mtod_offset(mb[i],
				    char *, sizeof(struct rte_ipv4_hdr));
				opt->len = opt_len;
				rte_memcpy(opt->data, iph_opt, opt_len);
			} else {
				opt->len = RTE_IPV4_HDR_OPT_MAX_LEN;
				memset(opt->data, RTE_IPV4_HDR_OPT_EOL,
				    sizeof(opt->data));
			}
			opt++;
		}
	}
}

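/*
 * Fragment a set of synthetic IPv4/IPv6 packets and check the number of
 * fragments produced, their fragment offsets and, where applicable, the
 * IPv4 options carried by each fragment.
 */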
static int
test_ip_frag(void)
{
	static const uint16_t RND_ID = UINT16_MAX;
	int result = TEST_SUCCESS;
	size_t i, j;

	struct test_ip_frags {
		int      ipv;
		size_t   mtu_size;
		size_t   pkt_size;
		int      set_df;
		uint8_t  set_mf;
		uint16_t set_of;
		uint8_t  ttl;
		uint8_t  proto;
		uint16_t pkt_id;
		int      expected_frags;
		uint16_t expected_fragment_offset[BURST];
		bool have_opt;
		bool is_first_frag;
		bool opt_copied;
	} tests[] = {
		 {4, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
		  {0x2000, 0x009D}, false},
		 {4, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, 0,            2,
		  {0x2000, 0x009D}, false},
		 {4,  600, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       3,
		  {0x2000, 0x2048, 0x0090}, false},
		 {4, 4, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,    -EINVAL},
		 {4, 600, 1400, 1, 0, 0, 64, IPPROTO_ICMP, RND_ID, -ENOTSUP},
		 {4, 600, 1400, 0, 0, 0, 0, IPPROTO_ICMP, RND_ID,         3,
		  {0x2000, 0x2046, 0x008C}, true, true, true},
		 /* The first fragment */
		 {4, 68, 104, 0, 1, 0, 0, IPPROTO_ICMP, RND_ID,           5,
		  {0x2000, 0x2003, 0x2006, 0x2009, 0x200C}, true, true, true},
		 /* The middle fragment */
		 {4, 68, 104, 0, 1, 13, 0, IPPROTO_ICMP, RND_ID,          3,
		  {0x200D, 0x2012, 0x2017}, true, false, true},
		 /* The last fragment */
		 {4, 68, 104, 0, 0, 26, 0, IPPROTO_ICMP, RND_ID,          3,
		  {0x201A, 0x201F, 0x0024}, true, false, true},
		 /* The first fragment */
		 {4, 68, 104, 0, 1, 0, 0, IPPROTO_ICMP, RND_ID,           4,
		  {0x2000, 0x2004, 0x2008, 0x200C}, true, true, false},
		 /* The middle fragment */
		 {4, 68, 104, 0, 1, 13, 0, IPPROTO_ICMP, RND_ID,          3,
		  {0x200D, 0x2013, 0x2019}, true, false, false},
		 /* The last fragment */
		 {4, 68, 104, 0, 0, 26, 0, IPPROTO_ICMP, RND_ID,          3,
		  {0x201A, 0x2020, 0x0026}, true, false, false},
		 {6, 1280, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
		  {0x0001, 0x04D0}, false},
		 {6, 1300, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,       2,
		  {0x0001, 0x04E0}, false},
		 {6, 4, 1400, 0, 0, 0, 64, IPPROTO_ICMP, RND_ID,    -EINVAL},
		 {6, 1300, 1400, 0, 0, 0, 0, IPPROTO_ICMP, RND_ID,        2,
		  {0x0001, 0x04E0}, false},
	};

	for (i = 0; i < RTE_DIM(tests); i++) {
		int32_t len = 0;
		uint16_t fragment_offset[BURST];
		struct test_opt_data opt_res[BURST];
		struct test_opt_data opt_exp;
		uint16_t pktid = tests[i].pkt_id;
		struct rte_mbuf *pkts_out[BURST];
		struct rte_mbuf *b = rte_pktmbuf_alloc(pkt_pool);

		RTE_TEST_ASSERT_NOT_EQUAL(b, NULL,
					  "Failed to allocate pkt.");

		if (tests[i].pkt_id == RND_ID)
			pktid = rte_rand_max(UINT16_MAX);

		if (tests[i].ipv == 4) {
			v4_allocate_packet_of(b, 0x41414141,
					      tests[i].pkt_size,
					      tests[i].set_df,
					      tests[i].set_mf,
					      tests[i].set_of,
					      tests[i].ttl,
					      tests[i].proto,
					      pktid,
					      tests[i].have_opt,
					      tests[i].is_first_frag,
					      tests[i].opt_copied);
		} else if (tests[i].ipv == 6) {
			v6_allocate_packet_of(b, 0x41414141,
					      tests[i].pkt_size,
					      tests[i].ttl,
					      tests[i].proto,
					      pktid);
		}

		if (tests[i].ipv == 4)
			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
						       tests[i].mtu_size,
						       direct_pool,
						       indirect_pool);
		else if (tests[i].ipv == 6)
			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
						       tests[i].mtu_size,
						       direct_pool,
						       indirect_pool);

		rte_pktmbuf_free(b);

		if (len > 0) {
			test_get_offset(pkts_out, len,
			    fragment_offset, tests[i].ipv);
			if (tests[i].have_opt)
				test_get_frag_opt(pkts_out, len, opt_res,
					tests[i].ipv, tests[i].opt_copied);
			test_free_fragments(pkts_out, len);
		}

		printf("[check frag number]%zd: checking %d with %d\n", i, len,
		       tests[i].expected_frags);
		RTE_TEST_ASSERT_EQUAL(len, tests[i].expected_frags,
				      "Failed case %zd.\n", i);

		if (len > 0) {
			for (j = 0; j < (size_t)len; j++) {
				printf("[check offset]%zd-%zd: checking %d with %d\n",
				    i, j, fragment_offset[j],
				    rte_cpu_to_be_16(
					tests[i].expected_fragment_offset[j]));
				RTE_TEST_ASSERT_EQUAL(fragment_offset[j],
				    rte_cpu_to_be_16(
					tests[i].expected_fragment_offset[j]),
				    "Failed case %zd.\n", i);
			}

			if (tests[i].have_opt && (tests[i].ipv == 4)) {
				for (j = 0; j < (size_t)len; j++) {
					char opt_res_str[2 *
						RTE_IPV4_HDR_OPT_MAX_LEN + 1];
					char opt_exp_str[2 *
						RTE_IPV4_HDR_OPT_MAX_LEN + 1];

					test_get_ipv4_opt(
						opt_res[j].is_first_frag,
						opt_res[j].opt_copied,
						&opt_exp);
					hex_to_str(opt_res[j].data,
						opt_res[j].len,
						opt_res_str);
					hex_to_str(opt_exp.data,
						opt_exp.len,
						opt_exp_str);

					printf("[check ipv4 option]%zd-%zd: checking (len:%u)%s with (len:%u)%s\n",
						i, j,
						opt_res[j].len, opt_res_str,
						opt_exp.len, opt_exp_str);
					RTE_TEST_ASSERT_SUCCESS(
						strcmp(opt_res_str,
							opt_exp_str),
						"Failed case %zd.\n", i);
				}
			}
		}
	}

	return result;
}

static struct unit_test_suite ipfrag_testsuite = {
	.suite_name = "IP Frag Unit Test Suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown,
			     test_ip_frag),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

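/* Test entry point: raise log verbosity and run the ipfrag test suite. */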
static int
test_ipfrag(void)
{
	rte_log_set_global_level(RTE_LOG_DEBUG);
	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);

	return unit_test_suite_runner(&ipfrag_testsuite);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(ipfrag_autotest, test_ipfrag);