1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include "test.h"
6
7 #include <string.h>
8 #include <stdalign.h>
9 #include <stdarg.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <stdint.h>
13 #include <inttypes.h>
14 #include <errno.h>
15 #include <sys/queue.h>
16
17 #include <rte_common.h>
18 #include <rte_errno.h>
19 #include <rte_debug.h>
20 #include <rte_log.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_launch.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_ring.h>
29 #include <rte_mempool.h>
30 #include <rte_mbuf.h>
31 #include <rte_random.h>
32 #include <rte_cycles.h>
33 #include <rte_malloc.h>
34 #include <rte_ether.h>
35 #include <rte_ip.h>
36 #include <rte_tcp.h>
37 #include <rte_mbuf_dyn.h>
38
39 #define MEMPOOL_CACHE_SIZE 32
40 #define MBUF_DATA_SIZE 2048
41 #define NB_MBUF 128
42 #define MBUF_TEST_DATA_LEN 1464
43 #define MBUF_TEST_DATA_LEN2 50
44 #define MBUF_TEST_DATA_LEN3 256
45 #define MBUF_TEST_HDR1_LEN 20
46 #define MBUF_TEST_HDR2_LEN 30
47 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
48 #define MBUF_TEST_SEG_SIZE 64
49 #define MBUF_TEST_BURST 8
50 #define EXT_BUF_TEST_DATA_LEN 1024
51 #define MBUF_MAX_SEG 16
52 #define MBUF_NO_HEADER 0
53 #define MBUF_HEADER 1
54 #define MBUF_NEG_TEST_READ 2
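/*
 * Expands to a { value, "name" } initializer, used below to build the
 * flag/name lookup tables for the RX/TX offload flag-name tests.
 */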
55 #define VAL_NAME(flag) { flag, #flag }
56
57 /* chain length in bulk test */
58 #define CHAIN_LEN 16
59
60 /* size of private data for mbuf in pktmbuf_pool2 */
61 #define MBUF2_PRIV_SIZE 128
62
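/* Parameters for the atomic refcnt stress test (see test_refcnt_mbuf() below). */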
63 #define REFCNT_MAX_ITER 64
64 #define REFCNT_MAX_TIMEOUT 10
65 #define REFCNT_MAX_REF (RTE_MAX_LCORE)
66 #define REFCNT_MBUF_NUM 64
67 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
68
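/* Pattern written into mbuf data and verified after clone/copy operations. */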
69 #define MAGIC_DATA 0x42424242
70
71 #define MAKE_STRING(x) # x
72
73 #ifdef RTE_MBUF_REFCNT_ATOMIC
74
75 static volatile uint32_t refcnt_stop_workers;
76 static unsigned refcnt_lcore[RTE_MAX_LCORE];
77
78 #endif
79
80 /*
81 * MBUF
82 * ====
83 *
84 * #. Allocate a mbuf pool.
85 *
86  *    - The pool contains NB_MBUF elements, where each mbuf data buffer is
87  *      MBUF_DATA_SIZE bytes long.
88 *
89 * #. Test multiple allocations of mbufs from this pool.
90 *
91 * - Allocate NB_MBUF and store pointers in a table.
92 * - If an allocation fails, return an error.
93 * - Free all these mbufs.
94 * - Repeat the same test to check that mbufs were freed correctly.
95 *
96 * #. Test data manipulation in pktmbuf.
97 *
98 * - Alloc an mbuf.
99 * - Append data using rte_pktmbuf_append().
100 * - Test for error in rte_pktmbuf_append() when len is too large.
101 * - Trim data at the end of mbuf using rte_pktmbuf_trim().
102 * - Test for error in rte_pktmbuf_trim() when len is too large.
103 * - Prepend a header using rte_pktmbuf_prepend().
104 * - Test for error in rte_pktmbuf_prepend() when len is too large.
105 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
106 * - Test for error in rte_pktmbuf_adj() when len is too large.
107 * - Check that appended data is not corrupt.
108 * - Free the mbuf.
109 * - Between all these tests, check data_len and pkt_len, and
110 * that the mbuf is contiguous.
111 * - Repeat the test to check that allocation operations
112 * reinitialize the mbuf correctly.
113 *
114 * #. Test packet cloning
115 * - Clone a mbuf and verify the data
116 * - Clone the cloned mbuf and verify the data
117 * - Attach a mbuf to another that does not have the same priv_size.
118 */
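/*
 * Illustrative outline (not a test in itself) of the pktmbuf data
 * manipulation sequence exercised by test_one_pktmbuf() below:
 *
 *	m = rte_pktmbuf_alloc(pool);
 *	data = rte_pktmbuf_append(m, len);	(grow at tail)
 *	rte_pktmbuf_trim(m, n);			(shrink at tail)
 *	hdr = rte_pktmbuf_prepend(m, hlen);	(grow at head)
 *	rte_pktmbuf_adj(m, hlen);		(shrink at head)
 *	rte_pktmbuf_free(m);
 */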
119
120 #define GOTO_FAIL(str, ...) do { \
121 printf("mbuf test FAILED (l.%d): <" str ">\n", \
122 __LINE__, ##__VA_ARGS__); \
123 goto fail; \
124 } while(0)
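/*
 * Note: GOTO_FAIL() expects a local "fail:" cleanup label in every
 * function that uses it, as done throughout this file.
 */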
125
126 /*
127 * test data manipulation in mbuf with non-ascii data
128 */
129 static int
130 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
131 {
132 struct rte_mbuf *m = NULL;
133 char *data;
134
135 m = rte_pktmbuf_alloc(pktmbuf_pool);
136 if (m == NULL)
137 GOTO_FAIL("Cannot allocate mbuf");
138 if (rte_pktmbuf_pkt_len(m) != 0)
139 GOTO_FAIL("Bad length");
140
141 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
142 if (data == NULL)
143 GOTO_FAIL("Cannot append data");
144 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
145 GOTO_FAIL("Bad pkt length");
146 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
147 GOTO_FAIL("Bad data length");
148 memset(data, 0xff, rte_pktmbuf_pkt_len(m));
149 if (!rte_pktmbuf_is_contiguous(m))
150 		GOTO_FAIL("Buffer should be contiguous");
151 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
152
153 rte_pktmbuf_free(m);
154
155 return 0;
156
157 fail:
158 if(m) {
159 rte_pktmbuf_free(m);
160 }
161 return -1;
162 }
163
164 /*
165 * test data manipulation in mbuf
166 */
167 static int
168 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
169 {
170 struct rte_mbuf *m = NULL;
171 char *data, *data2, *hdr;
172 unsigned i;
173
174 printf("Test pktmbuf API\n");
175
176 /* alloc a mbuf */
177
178 m = rte_pktmbuf_alloc(pktmbuf_pool);
179 if (m == NULL)
180 GOTO_FAIL("Cannot allocate mbuf");
181 if (rte_pktmbuf_pkt_len(m) != 0)
182 GOTO_FAIL("Bad length");
183
184 rte_pktmbuf_dump(stdout, m, 0);
185
186 /* append data */
187
188 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
189 if (data == NULL)
190 GOTO_FAIL("Cannot append data");
191 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
192 GOTO_FAIL("Bad pkt length");
193 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
194 GOTO_FAIL("Bad data length");
195 memset(data, 0x66, rte_pktmbuf_pkt_len(m));
196 if (!rte_pktmbuf_is_contiguous(m))
197 		GOTO_FAIL("Buffer should be contiguous");
198 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN);
199 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN);
200
201 /* this append should fail */
202
203 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
204 if (data2 != NULL)
205 GOTO_FAIL("Append should not succeed");
206
207 /* append some more data */
208
209 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
210 if (data2 == NULL)
211 GOTO_FAIL("Cannot append data");
212 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
213 GOTO_FAIL("Bad pkt length");
214 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
215 GOTO_FAIL("Bad data length");
216 if (!rte_pktmbuf_is_contiguous(m))
217 		GOTO_FAIL("Buffer should be contiguous");
218
219 /* trim data at the end of mbuf */
220
221 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
222 GOTO_FAIL("Cannot trim data");
223 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
224 GOTO_FAIL("Bad pkt length");
225 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
226 GOTO_FAIL("Bad data length");
227 if (!rte_pktmbuf_is_contiguous(m))
228 		GOTO_FAIL("Buffer should be contiguous");
229
230 /* this trim should fail */
231
232 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
233 GOTO_FAIL("trim should not succeed");
234
235 /* prepend one header */
236
237 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
238 if (hdr == NULL)
239 GOTO_FAIL("Cannot prepend");
240 if (data - hdr != MBUF_TEST_HDR1_LEN)
241 GOTO_FAIL("Prepend failed");
242 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
243 GOTO_FAIL("Bad pkt length");
244 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
245 GOTO_FAIL("Bad data length");
246 if (!rte_pktmbuf_is_contiguous(m))
247 		GOTO_FAIL("Buffer should be contiguous");
248 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
249
250 /* prepend another header */
251
252 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
253 if (hdr == NULL)
254 GOTO_FAIL("Cannot prepend");
255 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
256 GOTO_FAIL("Prepend failed");
257 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
258 GOTO_FAIL("Bad pkt length");
259 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
260 GOTO_FAIL("Bad data length");
261 if (!rte_pktmbuf_is_contiguous(m))
262 		GOTO_FAIL("Buffer should be contiguous");
263 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
264
265 rte_mbuf_sanity_check(m, 1);
266 rte_mbuf_sanity_check(m, 0);
267 rte_pktmbuf_dump(stdout, m, 0);
268
269 /* this prepend should fail */
270
271 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
272 if (hdr != NULL)
273 GOTO_FAIL("prepend should not succeed");
274
275 /* remove data at beginning of mbuf (adj) */
276
277 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
278 GOTO_FAIL("rte_pktmbuf_adj failed");
279 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
280 GOTO_FAIL("Bad pkt length");
281 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
282 GOTO_FAIL("Bad data length");
283 if (!rte_pktmbuf_is_contiguous(m))
284 		GOTO_FAIL("Buffer should be contiguous");
285
286 /* this adj should fail */
287
288 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
289 GOTO_FAIL("rte_pktmbuf_adj should not succeed");
290
291 /* check data */
292
293 if (!rte_pktmbuf_is_contiguous(m))
294 		GOTO_FAIL("Buffer should be contiguous");
295
296 for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
297 if (data[i] != 0x66)
298 GOTO_FAIL("Data corrupted at offset %u", i);
299 }
300
301 /* free mbuf */
302
303 rte_pktmbuf_free(m);
304 m = NULL;
305 return 0;
306
307 fail:
308 rte_pktmbuf_free(m);
309 return -1;
310 }
311
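/*
 * With a pinned external buffer pool, cloning bumps the shared info
 * refcnt instead of the mbuf's own refcnt, so read whichever applies.
 */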
312 static uint16_t
313 testclone_refcnt_read(struct rte_mbuf *m)
314 {
315 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ?
316 rte_mbuf_ext_refcnt_read(m->shinfo) :
317 rte_mbuf_refcnt_read(m);
318 }
319
320 static int
321 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
322 struct rte_mempool *clone_pool)
323 {
324 struct rte_mbuf *m = NULL;
325 struct rte_mbuf *clone = NULL;
326 struct rte_mbuf *clone2 = NULL;
327 unaligned_uint32_t *data;
328
329 /* alloc a mbuf */
330 m = rte_pktmbuf_alloc(pktmbuf_pool);
331 if (m == NULL)
332 GOTO_FAIL("ooops not allocating mbuf");
333
334 if (rte_pktmbuf_pkt_len(m) != 0)
335 GOTO_FAIL("Bad length");
336
337 rte_pktmbuf_append(m, sizeof(uint32_t));
338 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
339 *data = MAGIC_DATA;
340
341 /* clone the allocated mbuf */
342 clone = rte_pktmbuf_clone(m, clone_pool);
343 if (clone == NULL)
344 GOTO_FAIL("cannot clone data\n");
345
346 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
347 if (*data != MAGIC_DATA)
348 GOTO_FAIL("invalid data in clone\n");
349
350 if (testclone_refcnt_read(m) != 2)
351 GOTO_FAIL("invalid refcnt in m\n");
352
353 /* free the clone */
354 rte_pktmbuf_free(clone);
355 clone = NULL;
356
357 /* same test with a chained mbuf */
358 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
359 if (m->next == NULL)
360 GOTO_FAIL("Next Pkt Null\n");
361 m->nb_segs = 2;
362
363 rte_pktmbuf_append(m->next, sizeof(uint32_t));
364 m->pkt_len = 2 * sizeof(uint32_t);
365
366 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
367 *data = MAGIC_DATA;
368
369 clone = rte_pktmbuf_clone(m, clone_pool);
370 if (clone == NULL)
371 GOTO_FAIL("cannot clone data\n");
372
373 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *);
374 if (*data != MAGIC_DATA)
375 GOTO_FAIL("invalid data in clone\n");
376
377 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *);
378 if (*data != MAGIC_DATA)
379 GOTO_FAIL("invalid data in clone->next\n");
380
381 if (testclone_refcnt_read(m) != 2)
382 GOTO_FAIL("invalid refcnt in m\n");
383
384 if (testclone_refcnt_read(m->next) != 2)
385 GOTO_FAIL("invalid refcnt in m->next\n");
386
387 /* try to clone the clone */
388
389 clone2 = rte_pktmbuf_clone(clone, clone_pool);
390 if (clone2 == NULL)
391 GOTO_FAIL("cannot clone the clone\n");
392
393 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *);
394 if (*data != MAGIC_DATA)
395 GOTO_FAIL("invalid data in clone2\n");
396
397 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *);
398 if (*data != MAGIC_DATA)
399 GOTO_FAIL("invalid data in clone2->next\n");
400
401 if (testclone_refcnt_read(m) != 3)
402 GOTO_FAIL("invalid refcnt in m\n");
403
404 if (testclone_refcnt_read(m->next) != 3)
405 GOTO_FAIL("invalid refcnt in m->next\n");
406
407 /* free mbuf */
408 rte_pktmbuf_free(m);
409 rte_pktmbuf_free(clone);
410 rte_pktmbuf_free(clone2);
411
412 m = NULL;
413 clone = NULL;
414 clone2 = NULL;
415 printf("%s ok\n", __func__);
416 return 0;
417
418 fail:
419 rte_pktmbuf_free(m);
420 rte_pktmbuf_free(clone);
421 rte_pktmbuf_free(clone2);
422 return -1;
423 }
424
425 static int
426 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
427 struct rte_mempool *clone_pool)
428 {
429 struct rte_mbuf *m = NULL;
430 struct rte_mbuf *copy = NULL;
431 struct rte_mbuf *copy2 = NULL;
432 struct rte_mbuf *clone = NULL;
433 unaligned_uint32_t *data;
434
435 /* alloc a mbuf */
436 m = rte_pktmbuf_alloc(pktmbuf_pool);
437 if (m == NULL)
438 GOTO_FAIL("ooops not allocating mbuf");
439
440 if (rte_pktmbuf_pkt_len(m) != 0)
441 GOTO_FAIL("Bad length");
442
443 rte_pktmbuf_append(m, sizeof(uint32_t));
444 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *);
445 *data = MAGIC_DATA;
446
447 /* copy the allocated mbuf */
448 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
449 if (copy == NULL)
450 GOTO_FAIL("cannot copy data\n");
451
452 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
453 GOTO_FAIL("copy length incorrect\n");
454
455 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
456 GOTO_FAIL("copy data length incorrect\n");
457
458 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
459 if (*data != MAGIC_DATA)
460 GOTO_FAIL("invalid data in copy\n");
461
462 /* free the copy */
463 rte_pktmbuf_free(copy);
464 copy = NULL;
465
466 /* same test with a cloned mbuf */
467 clone = rte_pktmbuf_clone(m, clone_pool);
468 if (clone == NULL)
469 GOTO_FAIL("cannot clone data\n");
470
471 if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
472 !RTE_MBUF_CLONED(clone)) ||
473 (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
474 !RTE_MBUF_HAS_EXTBUF(clone)))
475 GOTO_FAIL("clone did not give a cloned mbuf\n");
476
477 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
478 if (copy == NULL)
479 GOTO_FAIL("cannot copy cloned mbuf\n");
480
481 if (RTE_MBUF_CLONED(copy))
482 GOTO_FAIL("copy of clone is cloned?\n");
483
484 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t))
485 GOTO_FAIL("copy clone length incorrect\n");
486
487 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t))
488 GOTO_FAIL("copy clone data length incorrect\n");
489
490 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
491 if (*data != MAGIC_DATA)
492 GOTO_FAIL("invalid data in clone copy\n");
493 rte_pktmbuf_free(clone);
494 rte_pktmbuf_free(copy);
495 copy = NULL;
496 clone = NULL;
497
498
499 /* same test with a chained mbuf */
500 m->next = rte_pktmbuf_alloc(pktmbuf_pool);
501 if (m->next == NULL)
502 GOTO_FAIL("Next Pkt Null\n");
503 m->nb_segs = 2;
504
505 rte_pktmbuf_append(m->next, sizeof(uint32_t));
506 m->pkt_len = 2 * sizeof(uint32_t);
507 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
508 *data = MAGIC_DATA + 1;
509
510 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX);
511 if (copy == NULL)
512 GOTO_FAIL("cannot copy data\n");
513
514 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t))
515 GOTO_FAIL("chain copy length incorrect\n");
516
517 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t))
518 GOTO_FAIL("chain copy data length incorrect\n");
519
520 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *);
521 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1)
522 GOTO_FAIL("invalid data in copy\n");
523
524 rte_pktmbuf_free(copy2);
525
526 /* test offset copy */
527 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
528 sizeof(uint32_t), UINT32_MAX);
529 if (copy2 == NULL)
530 GOTO_FAIL("cannot copy the copy\n");
531
532 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
533 GOTO_FAIL("copy with offset, length incorrect\n");
534
535 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
536 GOTO_FAIL("copy with offset, data length incorrect\n");
537
538 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
539 if (data[0] != MAGIC_DATA + 1)
540 GOTO_FAIL("copy with offset, invalid data\n");
541
542 rte_pktmbuf_free(copy2);
543
544 /* test truncation copy */
545 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool,
546 0, sizeof(uint32_t));
547 if (copy2 == NULL)
548 GOTO_FAIL("cannot copy the copy\n");
549
550 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t))
551 GOTO_FAIL("copy with truncate, length incorrect\n");
552
553 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t))
554 GOTO_FAIL("copy with truncate, data length incorrect\n");
555
556 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *);
557 if (data[0] != MAGIC_DATA)
558 GOTO_FAIL("copy with truncate, invalid data\n");
559
560 /* free mbuf */
561 rte_pktmbuf_free(m);
562 rte_pktmbuf_free(copy);
563 rte_pktmbuf_free(copy2);
564
565 m = NULL;
566 copy = NULL;
567 copy2 = NULL;
568 printf("%s ok\n", __func__);
569 return 0;
570
571 fail:
572 rte_pktmbuf_free(m);
573 rte_pktmbuf_free(copy);
574 rte_pktmbuf_free(copy2);
575 return -1;
576 }
577
578 static int
579 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
580 struct rte_mempool *pktmbuf_pool2)
581 {
582 struct rte_mbuf *m = NULL;
583 struct rte_mbuf *clone = NULL;
584 struct rte_mbuf *clone2 = NULL;
585 char *data, *c_data, *c_data2;
586
587 /* alloc a mbuf */
588 m = rte_pktmbuf_alloc(pktmbuf_pool);
589 if (m == NULL)
590 GOTO_FAIL("cannot allocate mbuf");
591
592 if (rte_pktmbuf_pkt_len(m) != 0)
593 GOTO_FAIL("Bad length");
594
595 data = rte_pktmbuf_mtod(m, char *);
596
597 /* allocate a new mbuf from the second pool, and attach it to the first
598 * mbuf */
599 clone = rte_pktmbuf_alloc(pktmbuf_pool2);
600 if (clone == NULL)
601 GOTO_FAIL("cannot allocate mbuf from second pool\n");
602
603 /* check data room size and priv size, and erase priv */
604 if (rte_pktmbuf_data_room_size(clone->pool) != 0)
605 GOTO_FAIL("data room size should be 0\n");
606 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE)
607 		GOTO_FAIL("priv size should be %d\n", MBUF2_PRIV_SIZE);
608 memset(clone + 1, 0, MBUF2_PRIV_SIZE);
609
610 /* save data pointer to compare it after detach() */
611 c_data = rte_pktmbuf_mtod(clone, char *);
612 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE)
613 GOTO_FAIL("bad data pointer in clone");
614 if (rte_pktmbuf_headroom(clone) != 0)
615 GOTO_FAIL("bad headroom in clone");
616
617 rte_pktmbuf_attach(clone, m);
618
619 if (rte_pktmbuf_mtod(clone, char *) != data)
620 GOTO_FAIL("clone was not attached properly\n");
621 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM)
622 GOTO_FAIL("bad headroom in clone after attach");
623 if (rte_mbuf_refcnt_read(m) != 2)
624 GOTO_FAIL("invalid refcnt in m\n");
625
626 /* allocate a new mbuf from the second pool, and attach it to the first
627 * cloned mbuf */
628 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
629 if (clone2 == NULL)
630 GOTO_FAIL("cannot allocate clone2 from second pool\n");
631
632 /* check data room size and priv size, and erase priv */
633 if (rte_pktmbuf_data_room_size(clone2->pool) != 0)
634 GOTO_FAIL("data room size should be 0\n");
635 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE)
636 		GOTO_FAIL("priv size should be %d\n", MBUF2_PRIV_SIZE);
637 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE);
638
639 /* save data pointer to compare it after detach() */
640 c_data2 = rte_pktmbuf_mtod(clone2, char *);
641 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE)
642 GOTO_FAIL("bad data pointer in clone2");
643 if (rte_pktmbuf_headroom(clone2) != 0)
644 GOTO_FAIL("bad headroom in clone2");
645
646 rte_pktmbuf_attach(clone2, clone);
647
648 if (rte_pktmbuf_mtod(clone2, char *) != data)
649 GOTO_FAIL("clone2 was not attached properly\n");
650 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM)
651 GOTO_FAIL("bad headroom in clone2 after attach");
652 if (rte_mbuf_refcnt_read(m) != 3)
653 GOTO_FAIL("invalid refcnt in m\n");
654
655 /* detach the clones */
656 rte_pktmbuf_detach(clone);
657 if (c_data != rte_pktmbuf_mtod(clone, char *))
658 GOTO_FAIL("clone was not detached properly\n");
659 if (rte_mbuf_refcnt_read(m) != 2)
660 GOTO_FAIL("invalid refcnt in m\n");
661
662 rte_pktmbuf_detach(clone2);
663 if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
664 GOTO_FAIL("clone2 was not detached properly\n");
665 if (rte_mbuf_refcnt_read(m) != 1)
666 GOTO_FAIL("invalid refcnt in m\n");
667
668 /* free the clones and the initial mbuf */
669 rte_pktmbuf_free(clone2);
670 rte_pktmbuf_free(clone);
671 rte_pktmbuf_free(m);
672 printf("%s ok\n", __func__);
673 return 0;
674
675 fail:
676 rte_pktmbuf_free(m);
677 rte_pktmbuf_free(clone);
678 rte_pktmbuf_free(clone2);
679 return -1;
680 }
681
682 /*
683 * test allocation and free of mbufs
684 */
685 static int
686 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
687 {
688 unsigned i;
689 struct rte_mbuf *m[NB_MBUF];
690 int ret = 0;
691
692 for (i=0; i<NB_MBUF; i++)
693 m[i] = NULL;
694
695 /* alloc NB_MBUF mbufs */
696 for (i=0; i<NB_MBUF; i++) {
697 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
698 if (m[i] == NULL) {
699 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
700 ret = -1;
701 }
702 }
703 struct rte_mbuf *extra = NULL;
704 extra = rte_pktmbuf_alloc(pktmbuf_pool);
705 if(extra != NULL) {
706 printf("Error pool not empty");
707 ret = -1;
708 }
709 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
710 if(extra != NULL) {
711 printf("Error pool not empty");
712 ret = -1;
713 }
714 /* free them */
715 for (i=0; i<NB_MBUF; i++) {
716 rte_pktmbuf_free(m[i]);
717 }
718
719 return ret;
720 }
721
722 /*
723 * test bulk allocation and bulk free of mbufs
724 */
725 static int
726 test_pktmbuf_pool_bulk(void)
727 {
728 struct rte_mempool *pool = NULL;
729 struct rte_mempool *pool2 = NULL;
730 unsigned int i;
731 struct rte_mbuf *m;
732 struct rte_mbuf *mbufs[NB_MBUF];
733 int ret = 0;
734
735 /* We cannot use the preallocated mbuf pools because their caches
736 * prevent us from bulk allocating all objects in them.
737 * So we create our own mbuf pools without caches.
738 */
739 printf("Create mbuf pools for bulk allocation.\n");
740 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk",
741 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
742 if (pool == NULL) {
743 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
744 rte_errno);
745 goto err;
746 }
747 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2",
748 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
749 if (pool2 == NULL) {
750 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n",
751 rte_errno);
752 goto err;
753 }
754
755 /* Preconditions: Mempools must be full. */
756 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
757 printf("Test precondition failed: mempools not full\n");
758 goto err;
759 }
760 if (!(rte_mempool_avail_count(pool) == NB_MBUF &&
761 rte_mempool_avail_count(pool2) == NB_MBUF)) {
762 printf("Test precondition failed: mempools: %u+%u != %u+%u",
763 rte_mempool_avail_count(pool),
764 rte_mempool_avail_count(pool2),
765 NB_MBUF, NB_MBUF);
766 goto err;
767 }
768
769 printf("Test single bulk alloc, followed by multiple bulk free.\n");
770
771 /* Bulk allocate all mbufs in the pool, in one go. */
772 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
773 if (ret != 0) {
774 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
775 goto err;
776 }
777 /* Test that they have been removed from the pool. */
778 if (!rte_mempool_empty(pool)) {
779 printf("mempool not empty\n");
780 goto err;
781 }
782 /* Bulk free all mbufs, in four steps. */
783 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0);
784 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
785 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4);
786 /* Test that they have been returned to the pool. */
787 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) {
788 printf("mempool avail count incorrect\n");
789 goto err;
790 }
791 }
792
793 printf("Test multiple bulk alloc, followed by single bulk free.\n");
794
795 /* Bulk allocate all mbufs in the pool, in four steps. */
796 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) {
797 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4);
798 if (ret != 0) {
799 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
800 goto err;
801 }
802 }
803 /* Test that they have been removed from the pool. */
804 if (!rte_mempool_empty(pool)) {
805 printf("mempool not empty\n");
806 goto err;
807 }
808 /* Bulk free all mbufs, in one go. */
809 rte_pktmbuf_free_bulk(mbufs, NB_MBUF);
810 /* Test that they have been returned to the pool. */
811 if (!rte_mempool_full(pool)) {
812 printf("mempool not full\n");
813 goto err;
814 }
815
816 printf("Test bulk free of single long chain.\n");
817
818 /* Bulk allocate all mbufs in the pool, in one go. */
819 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF);
820 if (ret != 0) {
821 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret);
822 goto err;
823 }
824 /* Create a long mbuf chain. */
825 for (i = 1; i < NB_MBUF; i++) {
826 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]);
827 if (ret != 0) {
828 printf("rte_pktmbuf_chain() failed: %d\n", ret);
829 goto err;
830 }
831 mbufs[i] = NULL;
832 }
833 /* Free the mbuf chain containing all the mbufs. */
834 rte_pktmbuf_free_bulk(mbufs, 1);
835 /* Test that they have been returned to the pool. */
836 if (!rte_mempool_full(pool)) {
837 printf("mempool not full\n");
838 goto err;
839 }
840
841 printf("Test bulk free of multiple chains using multiple pools.\n");
842
843 /* Create mbuf chains containing mbufs from different pools. */
844 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0);
845 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0);
846 for (i = 0; i < NB_MBUF * 2; i++) {
847 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool);
848 if (m == NULL) {
849 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
850 goto err;
851 }
852 if ((i % CHAIN_LEN) == 0)
853 mbufs[i / CHAIN_LEN] = m;
854 else
855 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m);
856 }
857 /* Test that both pools have been emptied. */
858 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) {
859 printf("mempools not empty\n");
860 goto err;
861 }
862 /* Free one mbuf chain. */
863 rte_pktmbuf_free_bulk(mbufs, 1);
864 /* Test that the segments have been returned to the pools. */
865 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 &&
866 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) {
867 printf("all segments of first mbuf have not been returned\n");
868 goto err;
869 }
870 /* Free the remaining mbuf chains. */
871 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1);
872 /* Test that they have been returned to the pools. */
873 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) {
874 printf("mempools not full\n");
875 goto err;
876 }
877
878 ret = 0;
879 goto done;
880
881 err:
882 ret = -1;
883
884 done:
885 printf("Free mbuf pools for bulk allocation.\n");
886 rte_mempool_free(pool);
887 rte_mempool_free(pool2);
888 return ret;
889 }
890
891 /*
892 * test that the pointer to the data on a packet mbuf is set properly
893 */
894 static int
895 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
896 {
897 unsigned i;
898 struct rte_mbuf *m[NB_MBUF];
899 int ret = 0;
900
901 for (i=0; i<NB_MBUF; i++)
902 m[i] = NULL;
903
904 /* alloc NB_MBUF mbufs */
905 for (i=0; i<NB_MBUF; i++) {
906 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
907 if (m[i] == NULL) {
908 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
909 ret = -1;
910 break;
911 }
912 m[i]->data_off += 64;
913 }
914
915 /* free them */
916 for (i=0; i<NB_MBUF; i++) {
917 rte_pktmbuf_free(m[i]);
918 }
919
920 for (i=0; i<NB_MBUF; i++)
921 m[i] = NULL;
922
923 /* alloc NB_MBUF mbufs */
924 for (i=0; i<NB_MBUF; i++) {
925 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
926 if (m[i] == NULL) {
927 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
928 ret = -1;
929 break;
930 }
931 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) {
932 printf("invalid data_off\n");
933 ret = -1;
934 }
935 }
936
937 /* free them */
938 for (i=0; i<NB_MBUF; i++) {
939 rte_pktmbuf_free(m[i]);
940 }
941
942 return ret;
943 }
944
945 static int
946 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
947 {
948 unsigned i;
949 struct rte_mbuf *m[NB_MBUF];
950 int ret = 0;
951
952 for (i=0; i<NB_MBUF; i++)
953 m[i] = NULL;
954
955 /* alloc NB_MBUF mbufs */
956 for (i=0; i<NB_MBUF; i++) {
957 m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
958 if (m[i] == NULL) {
959 printf("rte_pktmbuf_alloc() failed (%u)\n", i);
960 ret = -1;
961 }
962 }
963
964 /* free them */
965 for (i=0; i<NB_MBUF; i++) {
966 if (m[i] != NULL) {
967 struct rte_mbuf *mb, *mt;
968
969 mb = m[i];
970 while(mb != NULL) {
971 mt = mb;
972 mb = mb->next;
973 rte_pktmbuf_free_seg(mt);
974 }
975 }
976 }
977
978 return ret;
979 }
980
981 /*
982 * Stress test for rte_mbuf atomic refcnt.
983 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
984  * For more thorough checking, it is recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
985 */
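/*
 * Outline: the main lcore allocates each mbuf, raises its refcnt and
 * enqueues one reference per increment into a ring; worker lcores
 * dequeue and free those references concurrently. At the end, the total
 * number of references produced must equal the number freed by workers.
 */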
986
987 #ifdef RTE_MBUF_REFCNT_ATOMIC
988
989 static int
990 test_refcnt_worker(void *arg)
991 {
992 unsigned lcore, free;
993 void *mp = 0;
994 struct rte_ring *refcnt_mbuf_ring = arg;
995
996 lcore = rte_lcore_id();
997 printf("%s started at lcore %u\n", __func__, lcore);
998
999 free = 0;
1000 while (refcnt_stop_workers == 0) {
1001 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
1002 free++;
1003 rte_pktmbuf_free(mp);
1004 }
1005 }
1006
1007 refcnt_lcore[lcore] += free;
1008 printf("%s finished at lcore %u, "
1009 "number of freed mbufs: %u\n",
1010 __func__, lcore, free);
1011 return 0;
1012 }
1013
1014 static void
1015 test_refcnt_iter(unsigned int lcore, unsigned int iter,
1016 struct rte_mempool *refcnt_pool,
1017 struct rte_ring *refcnt_mbuf_ring)
1018 {
1019 uint16_t ref;
1020 unsigned i, n, tref, wn;
1021 struct rte_mbuf *m;
1022
1023 tref = 0;
1024
1025 /* For each mbuf in the pool:
1026 * - allocate mbuf,
1027 	 * - increment its reference count up to N+1,
1028 * - enqueue it N times into the ring for worker cores to free.
1029 */
1030 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
1031 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
1032 i++) {
1033 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
1034 tref += ref;
1035 if ((ref & 1) != 0) {
1036 rte_pktmbuf_refcnt_update(m, ref);
1037 while (ref-- != 0)
1038 rte_ring_enqueue(refcnt_mbuf_ring, m);
1039 } else {
1040 while (ref-- != 0) {
1041 rte_pktmbuf_refcnt_update(m, 1);
1042 rte_ring_enqueue(refcnt_mbuf_ring, m);
1043 }
1044 }
1045 rte_pktmbuf_free(m);
1046 }
1047
1048 if (i != n)
1049 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
1050 "%u from %u mbufs\n", lcore, iter, i, n);
1051
1052 	/* wait until worker lcores have consumed all mbufs */
1053 while (!rte_ring_empty(refcnt_mbuf_ring))
1054 ;
1055
1056 /* check that all mbufs are back into mempool by now */
1057 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
1058 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
1059 refcnt_lcore[lcore] += tref;
1060 printf("%s(lcore=%u, iter=%u) completed, "
1061 "%u references processed\n",
1062 __func__, lcore, iter, tref);
1063 return;
1064 }
1065 rte_delay_ms(100);
1066 }
1067
1068 rte_panic("(lcore=%u, iter=%u): after %us only "
1069 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
1070 }
1071
1072 static int
1073 test_refcnt_main(struct rte_mempool *refcnt_pool,
1074 struct rte_ring *refcnt_mbuf_ring)
1075 {
1076 unsigned i, lcore;
1077
1078 lcore = rte_lcore_id();
1079 printf("%s started at lcore %u\n", __func__, lcore);
1080
1081 for (i = 0; i != REFCNT_MAX_ITER; i++)
1082 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
1083
1084 refcnt_stop_workers = 1;
1085 rte_wmb();
1086
1087 printf("%s finished at lcore %u\n", __func__, lcore);
1088 return 0;
1089 }
1090
1091 #endif
1092
1093 static int
1094 test_refcnt_mbuf(void)
1095 {
1096 #ifdef RTE_MBUF_REFCNT_ATOMIC
1097 unsigned int main_lcore, worker, tref;
1098 int ret = -1;
1099 struct rte_mempool *refcnt_pool = NULL;
1100 struct rte_ring *refcnt_mbuf_ring = NULL;
1101
1102 if (rte_lcore_count() < 2) {
1103 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n");
1104 return TEST_SKIPPED;
1105 }
1106
1107 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count());
1108
1109 	/* create refcnt pool & ring */
1110
1111 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
1112 REFCNT_MBUF_NUM, 0, 0, 0,
1113 SOCKET_ID_ANY);
1114 if (refcnt_pool == NULL) {
1115 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
1116 __func__);
1117 return -1;
1118 }
1119
1120 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
1121 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
1122 RING_F_SP_ENQ);
1123 if (refcnt_mbuf_ring == NULL) {
1124 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
1125 "\n", __func__);
1126 goto err;
1127 }
1128
1129 refcnt_stop_workers = 0;
1130 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
1131
1132 rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
1133
1134 test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
1135
1136 rte_eal_mp_wait_lcore();
1137
1138 /* check that we processed all references */
1139 tref = 0;
1140 main_lcore = rte_get_main_lcore();
1141
1142 RTE_LCORE_FOREACH_WORKER(worker)
1143 tref += refcnt_lcore[worker];
1144
1145 if (tref != refcnt_lcore[main_lcore])
1146 rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
1147 tref, refcnt_lcore[main_lcore]);
1148
1149 rte_mempool_dump(stdout, refcnt_pool);
1150 rte_ring_dump(stdout, refcnt_mbuf_ring);
1151
1152 ret = 0;
1153
1154 err:
1155 rte_mempool_free(refcnt_pool);
1156 rte_ring_free(refcnt_mbuf_ring);
1157 return ret;
1158 #else
1159 return 0;
1160 #endif
1161 }
1162
1163 #ifdef RTE_EXEC_ENV_WINDOWS
1164 static int
1165 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1166 {
1167 RTE_SET_USED(pktmbuf_pool);
1168 return TEST_SKIPPED;
1169 }
1170 #else
1171 /* Verify that the mbuf passes rte_mbuf_check() */
1172 static bool
1173 mbuf_check_pass(struct rte_mbuf *buf)
1174 {
1175 const char *reason;
1176
1177 if (rte_mbuf_check(buf, 1, &reason) == 0)
1178 return true;
1179
1180 return false;
1181 }
1182
1183 static int
1184 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
1185 {
1186 struct rte_mbuf *buf;
1187 struct rte_mbuf badbuf;
1188
1189 printf("Checking rte_mbuf_sanity_check for failure conditions\n");
1190
1191 /* get a good mbuf to use to make copies */
1192 buf = rte_pktmbuf_alloc(pktmbuf_pool);
1193 if (buf == NULL)
1194 return -1;
1195
1196 printf("Checking good mbuf initially\n");
1197 if (!mbuf_check_pass(buf))
1198 return -1;
1199
1200 printf("Now checking for error conditions\n");
1201
1202 if (mbuf_check_pass(NULL)) {
1203 printf("Error with NULL mbuf test\n");
1204 return -1;
1205 }
1206
1207 badbuf = *buf;
1208 badbuf.pool = NULL;
1209 if (mbuf_check_pass(&badbuf)) {
1210 printf("Error with bad-pool mbuf test\n");
1211 return -1;
1212 }
1213
1214 if (RTE_IOVA_IN_MBUF) {
1215 badbuf = *buf;
1216 rte_mbuf_iova_set(&badbuf, 0);
1217 if (mbuf_check_pass(&badbuf)) {
1218 printf("Error with bad-physaddr mbuf test\n");
1219 return -1;
1220 }
1221 }
1222
1223 badbuf = *buf;
1224 badbuf.buf_addr = NULL;
1225 if (mbuf_check_pass(&badbuf)) {
1226 printf("Error with bad-addr mbuf test\n");
1227 return -1;
1228 }
1229
1230 badbuf = *buf;
1231 badbuf.refcnt = 0;
1232 if (mbuf_check_pass(&badbuf)) {
1233 printf("Error with bad-refcnt(0) mbuf test\n");
1234 return -1;
1235 }
1236
1237 badbuf = *buf;
1238 badbuf.refcnt = UINT16_MAX;
1239 if (mbuf_check_pass(&badbuf)) {
1240 printf("Error with bad-refcnt(MAX) mbuf test\n");
1241 return -1;
1242 }
1243
1244 return 0;
1245 }
1246
1247 #endif /* !RTE_EXEC_ENV_WINDOWS */
1248
1249 static int
1250 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
1251 int nb_segs)
1252 {
1253
1254 struct rte_mbuf *m = NULL, *mbuf = NULL;
1255 uint8_t *data;
1256 int data_len = 0;
1257 int remain;
1258 int seg, seg_len;
1259 int i;
1260
1261 if (pkt_len < 1) {
1262 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
1263 return -1;
1264 }
1265
1266 if (nb_segs < 1) {
1267 printf("Number of segments must be 1 or more (is %d)\n",
1268 nb_segs);
1269 return -1;
1270 }
1271
1272 seg_len = pkt_len / nb_segs;
1273 if (seg_len == 0)
1274 seg_len = 1;
1275
1276 remain = pkt_len;
1277
1278 /* Create chained mbuf_src and fill it generated data */
1279 for (seg = 0; remain > 0; seg++) {
1280
1281 m = rte_pktmbuf_alloc(pktmbuf_pool);
1282 if (m == NULL) {
1283 printf("Cannot create segment for source mbuf");
1284 goto fail;
1285 }
1286
1287 		/* Make sure the tailroom is zeroed */
1288 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
1289 rte_pktmbuf_tailroom(m));
1290
1291 data_len = remain;
1292 if (data_len > seg_len)
1293 data_len = seg_len;
1294
1295 data = (uint8_t *)rte_pktmbuf_append(m, data_len);
1296 if (data == NULL) {
1297 printf("Cannot append %d bytes to the mbuf\n",
1298 data_len);
1299 goto fail;
1300 }
1301
1302 for (i = 0; i < data_len; i++)
1303 data[i] = (seg * seg_len + i) % 0x0ff;
1304
1305 if (seg == 0)
1306 mbuf = m;
1307 else
1308 rte_pktmbuf_chain(mbuf, m);
1309
1310 remain -= data_len;
1311 }
1312
1313 	/* Coalesce the chained mbuf into a single contiguous segment */
1314 if (rte_pktmbuf_linearize(mbuf)) {
1315 printf("Mbuf linearization failed\n");
1316 goto fail;
1317 }
1318
1319 if (!rte_pktmbuf_is_contiguous(mbuf)) {
1320 printf("Source buffer should be contiguous after "
1321 "linearization\n");
1322 goto fail;
1323 }
1324
1325 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
1326
1327 for (i = 0; i < pkt_len; i++)
1328 if (data[i] != (i % 0x0ff)) {
1329 printf("Incorrect data in linearized mbuf\n");
1330 goto fail;
1331 }
1332
1333 rte_pktmbuf_free(mbuf);
1334 return 0;
1335
1336 fail:
1337 rte_pktmbuf_free(mbuf);
1338 return -1;
1339 }
1340
1341 static int
1342 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
1343 {
1344 struct test_mbuf_array {
1345 int size;
1346 int nb_segs;
1347 } mbuf_array[] = {
1348 { 128, 1 },
1349 { 64, 64 },
1350 { 512, 10 },
1351 { 250, 11 },
1352 { 123, 8 },
1353 };
1354 unsigned int i;
1355
1356 printf("Test mbuf linearize API\n");
1357
1358 for (i = 0; i < RTE_DIM(mbuf_array); i++)
1359 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
1360 mbuf_array[i].nb_segs)) {
1361 printf("Test failed for %d, %d\n", mbuf_array[i].size,
1362 mbuf_array[i].nb_segs);
1363 return -1;
1364 }
1365
1366 return 0;
1367 }
1368
1369 /*
1370  * Helper function for test_tx_offload
1371 */
1372 static inline void
1373 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4,
1374 uint64_t tso, uint64_t ol3, uint64_t ol2)
1375 {
1376 mb->l2_len = il2;
1377 mb->l3_len = il3;
1378 mb->l4_len = il4;
1379 mb->tso_segsz = tso;
1380 mb->outer_l3_len = ol3;
1381 mb->outer_l2_len = ol2;
1382 }
1383
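/*
 * Fills tx_offload for a large array of mbufs twice: via the individual
 * bit-fields (set_tx_offload above) and via rte_mbuf_tx_offload(), timing
 * both and checking that the resulting raw tx_offload values match.
 */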
1384 static int
1385 test_tx_offload(void)
1386 {
1387 struct rte_mbuf *mb;
1388 uint64_t tm, v1, v2;
1389 size_t sz;
1390 uint32_t i;
1391
1392 static volatile struct {
1393 uint16_t l2;
1394 uint16_t l3;
1395 uint16_t l4;
1396 uint16_t tso;
1397 } txof;
1398
1399 const uint32_t num = 0x10000;
1400
1401 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS);
1402 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS);
1403 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS);
1404 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS);
1405
1406 printf("%s started, tx_offload = {\n"
1407 "\tl2_len=%#hx,\n"
1408 "\tl3_len=%#hx,\n"
1409 "\tl4_len=%#hx,\n"
1410 "\ttso_segsz=%#hx,\n"
1411 "\touter_l3_len=%#x,\n"
1412 "\touter_l2_len=%#x,\n"
1413 "};\n",
1414 __func__,
1415 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2);
1416
1417 sz = sizeof(*mb) * num;
1418 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
1419 if (mb == NULL) {
1420 printf("%s failed, out of memory\n", __func__);
1421 return -ENOMEM;
1422 }
1423
1424 memset(mb, 0, sz);
1425 tm = rte_rdtsc_precise();
1426
1427 for (i = 0; i != num; i++)
1428 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4,
1429 txof.tso, txof.l3, txof.l2);
1430
1431 tm = rte_rdtsc_precise() - tm;
1432 printf("%s set tx_offload by bit-fields: %u iterations, %"
1433 PRIu64 " cycles, %#Lf cycles/iter\n",
1434 __func__, num, tm, (long double)tm / num);
1435
1436 v1 = mb[rte_rand() % num].tx_offload;
1437
1438 memset(mb, 0, sz);
1439 tm = rte_rdtsc_precise();
1440
1441 for (i = 0; i != num; i++)
1442 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3,
1443 txof.l4, txof.tso, txof.l3, txof.l2, 0);
1444
1445 tm = rte_rdtsc_precise() - tm;
1446 printf("%s set raw tx_offload: %u iterations, %"
1447 PRIu64 " cycles, %#Lf cycles/iter\n",
1448 __func__, num, tm, (long double)tm / num);
1449
1450 v2 = mb[rte_rand() % num].tx_offload;
1451
1452 rte_free(mb);
1453
1454 printf("%s finished\n"
1455 "expected tx_offload value: 0x%" PRIx64 ";\n"
1456 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n",
1457 __func__, v1, v2);
1458
1459 return (v1 == v2) ? 0 : -EINVAL;
1460 }
1461
1462 static int
1463 test_get_rx_ol_flag_list(void)
1464 {
1465 int len = 6, ret = 0;
1466 char buf[256] = "";
1467 int buflen = 0;
1468
1469 /* Test case to check with null buffer */
1470 ret = rte_get_rx_ol_flag_list(0, NULL, 0);
1471 if (ret != -1)
1472 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1473
1474 /* Test case to check with zero buffer len */
1475 ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
1476 if (ret != -1)
1477 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1478
1479 buflen = strlen(buf);
1480 if (buflen != 0)
1481 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1482 __func__, buflen);
1483
1484 /* Test case to check with reduced buffer len */
1485 ret = rte_get_rx_ol_flag_list(0, buf, len);
1486 if (ret != -1)
1487 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1488
1489 buflen = strlen(buf);
1490 if (buflen != (len - 1))
1491 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1492 "received = %d\n", __func__,
1493 (len - 1), buflen);
1494
1495 /* Test case to check with zero mask value */
1496 ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf));
1497 if (ret != 0)
1498 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1499
1500 buflen = strlen(buf);
1501 if (buflen == 0)
1502 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1503 "non-zero, buffer should not be empty");
1504
1505 /* Test case to check with valid mask value */
1506 ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
1507 sizeof(buf));
1508 if (ret != 0)
1509 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1510
1511 buflen = strlen(buf);
1512 if (buflen == 0)
1513 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1514 "non-zero, buffer should not be empty");
1515
1516 return 0;
1517 fail:
1518 return -1;
1519 }
1520
1521 static int
1522 test_get_tx_ol_flag_list(void)
1523 {
1524 int len = 6, ret = 0;
1525 char buf[256] = "";
1526 int buflen = 0;
1527
1528 /* Test case to check with null buffer */
1529 ret = rte_get_tx_ol_flag_list(0, NULL, 0);
1530 if (ret != -1)
1531 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1532
1533 /* Test case to check with zero buffer len */
1534 ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
1535 if (ret != -1)
1536 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1537
1538 buflen = strlen(buf);
1539 if (buflen != 0) {
1540 GOTO_FAIL("%s buffer should be empty, received = %d\n",
1541 __func__, buflen);
1542 }
1543
1544 /* Test case to check with reduced buffer len */
1545 ret = rte_get_tx_ol_flag_list(0, buf, len);
1546 if (ret != -1)
1547 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
1548
1549 buflen = strlen(buf);
1550 if (buflen != (len - 1))
1551 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d,"
1552 "received = %d\n", __func__,
1553 (len - 1), buflen);
1554
1555 /* Test case to check with zero mask value */
1556 ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf));
1557 if (ret != 0)
1558 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1559
1560 buflen = strlen(buf);
1561 if (buflen == 0)
1562 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1563 "non-zero, buffer should not be empty");
1564
1565 /* Test case to check with valid mask value */
1566 ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
1567 sizeof(buf));
1568 if (ret != 0)
1569 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
1570
1571 buflen = strlen(buf);
1572 if (buflen == 0)
1573 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__,
1574 "non-zero, buffer should not be empty");
1575
1576 return 0;
1577 fail:
1578 return -1;
1579
1580 }
1581
1582 struct flag_name {
1583 uint64_t flag;
1584 const char *name;
1585 };
1586
1587 static int
1588 test_get_rx_ol_flag_name(void)
1589 {
1590 uint16_t i;
1591 const char *flag_str = NULL;
1592 const struct flag_name rx_flags[] = {
1593 VAL_NAME(RTE_MBUF_F_RX_VLAN),
1594 VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
1595 VAL_NAME(RTE_MBUF_F_RX_FDIR),
1596 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
1597 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
1598 VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
1599 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
1600 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
1601 VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
1602 VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
1603 VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
1604 VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
1605 VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
1606 VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
1607 VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
1608 VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
1609 VAL_NAME(RTE_MBUF_F_RX_LRO),
1610 VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
1611 VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
1612 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
1613 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
1614 VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
1615 };
1616
1617 /* Test case to check with valid flag */
1618 for (i = 0; i < RTE_DIM(rx_flags); i++) {
1619 flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag);
1620 if (flag_str == NULL)
1621 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1622 __func__, rx_flags[i].name);
1623 if (strcmp(flag_str, rx_flags[i].name) != 0)
1624 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1625 __func__, rx_flags[i].name, flag_str);
1626 }
1627 /* Test case to check with invalid flag */
1628 flag_str = rte_get_rx_ol_flag_name(0);
1629 if (flag_str != NULL) {
1630 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1631 __func__, flag_str);
1632 }
1633
1634 return 0;
1635 fail:
1636 return -1;
1637 }
1638
1639 static int
1640 test_get_tx_ol_flag_name(void)
1641 {
1642 uint16_t i;
1643 const char *flag_str = NULL;
1644 const struct flag_name tx_flags[] = {
1645 VAL_NAME(RTE_MBUF_F_TX_VLAN),
1646 VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
1647 VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
1648 VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
1649 VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
1650 VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
1651 VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
1652 VAL_NAME(RTE_MBUF_F_TX_IPV4),
1653 VAL_NAME(RTE_MBUF_F_TX_IPV6),
1654 VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
1655 VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
1656 VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
1657 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
1658 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
1659 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
1660 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
1661 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
1662 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
1663 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
1664 VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
1665 VAL_NAME(RTE_MBUF_F_TX_QINQ),
1666 VAL_NAME(RTE_MBUF_F_TX_MACSEC),
1667 VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
1668 VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
1669 VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
1670 };
1671
1672 /* Test case to check with valid flag */
1673 for (i = 0; i < RTE_DIM(tx_flags); i++) {
1674 flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag);
1675 if (flag_str == NULL)
1676 GOTO_FAIL("%s: Expected flagname = %s; received null\n",
1677 __func__, tx_flags[i].name);
1678 if (strcmp(flag_str, tx_flags[i].name) != 0)
1679 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n",
1680 __func__, tx_flags[i].name, flag_str);
1681 }
1682 /* Test case to check with invalid flag */
1683 flag_str = rte_get_tx_ol_flag_name(0);
1684 if (flag_str != NULL) {
1685 GOTO_FAIL("%s: Expected flag name = null; received = %s\n",
1686 __func__, flag_str);
1687 }
1688
1689 return 0;
1690 fail:
1691 return -1;
1692
1693 }
1694
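/*
 * Helper: allocates one mbuf, applies the given ol_flags and tso_segsz,
 * and checks that rte_validate_tx_offload() returns the expected value.
 */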
1695 static int
1696 test_mbuf_validate_tx_offload(const char *test_name,
1697 struct rte_mempool *pktmbuf_pool,
1698 uint64_t ol_flags,
1699 uint16_t segsize,
1700 int expected_retval)
1701 {
1702 struct rte_mbuf *m = NULL;
1703 int ret = 0;
1704
1705 /* alloc a mbuf and do sanity check */
1706 m = rte_pktmbuf_alloc(pktmbuf_pool);
1707 if (m == NULL)
1708 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1709 if (rte_pktmbuf_pkt_len(m) != 0)
1710 GOTO_FAIL("%s: Bad packet length\n", __func__);
1711 rte_mbuf_sanity_check(m, 0);
1712 m->ol_flags = ol_flags;
1713 m->tso_segsz = segsize;
1714 ret = rte_validate_tx_offload(m);
1715 if (ret != expected_retval)
1716 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n",
1717 __func__, test_name, expected_retval, ret);
1718 rte_pktmbuf_free(m);
1719 m = NULL;
1720 return 0;
1721 fail:
1722 if (m) {
1723 rte_pktmbuf_free(m);
1724 m = NULL;
1725 }
1726 return -1;
1727 }
1728
1729 static int
1730 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
1731 {
1732 /* test to validate tx offload flags */
1733 uint64_t ol_flags = 0;
1734
1735 /* test to validate if IP checksum is counted only for IPV4 packet */
1736 /* set both IP checksum and IPV6 flags */
1737 ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1738 ol_flags |= RTE_MBUF_F_TX_IPV6;
1739 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
1740 pktmbuf_pool,
1741 ol_flags, 0, -EINVAL) < 0)
1742 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__);
1743 /* resetting ol_flags for next testcase */
1744 ol_flags = 0;
1745
1746 /* test to validate if IP type is set when required */
1747 ol_flags |= RTE_MBUF_F_TX_L4_MASK;
1748 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1749 pktmbuf_pool,
1750 ol_flags, 0, -EINVAL) < 0)
1751 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1752
1753 /* test if IP type is set when TCP SEG is on */
1754 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1755 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
1756 pktmbuf_pool,
1757 ol_flags, 0, -EINVAL) < 0)
1758 GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
1759
1760 ol_flags = 0;
1761 /* test to confirm IP type (IPV4/IPV6) is set */
1762 ol_flags = RTE_MBUF_F_TX_L4_MASK;
1763 ol_flags |= RTE_MBUF_F_TX_IPV6;
1764 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
1765 pktmbuf_pool,
1766 ol_flags, 0, 0) < 0)
1767 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1768
1769 ol_flags = 0;
1770 /* test to check TSO segment size is non-zero */
1771 ol_flags |= RTE_MBUF_F_TX_IPV4;
1772 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1773 /* set 0 tso segment size */
1774 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
1775 pktmbuf_pool,
1776 ol_flags, 0, -EINVAL) < 0)
1777 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
1778
1779 /* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
1780 /* set valid tso segment size but IP CKSUM not set */
1781 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
1782 pktmbuf_pool,
1783 ol_flags, 512, -EINVAL) < 0)
1784 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__);
1785
1786 /* test to validate if IP checksum is set for TSO capability */
1787 /* retain IPV4, TCP_SEG, tso_seg size */
1788 ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1789 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
1790 pktmbuf_pool,
1791 ol_flags, 512, 0) < 0)
1792 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1793
1794 /* test to confirm TSO for IPV6 type */
1795 ol_flags = 0;
1796 ol_flags |= RTE_MBUF_F_TX_IPV6;
1797 ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1798 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
1799 pktmbuf_pool,
1800 ol_flags, 512, 0) < 0)
1801 GOTO_FAIL("%s failed: TSO req not met.\n", __func__);
1802
1803 ol_flags = 0;
1804 /* test if outer IP checksum set for non outer IPv4 packet */
1805 ol_flags |= RTE_MBUF_F_TX_IPV6;
1806 ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1807 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
1808 pktmbuf_pool,
1809 ol_flags, 512, -EINVAL) < 0)
1810 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__);
1811
1812 ol_flags = 0;
1813 /* test to confirm outer IP checksum is set for outer IPV4 packet */
1814 ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
1815 ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
1816 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
1817 pktmbuf_pool,
1818 ol_flags, 512, 0) < 0)
1819 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1820
1821 ol_flags = 0;
1822 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */
1823 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET",
1824 pktmbuf_pool,
1825 ol_flags, 512, 0) < 0)
1826 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__);
1827 return 0;
1828 fail:
1829 return -1;
1830 }
1831
1832 /*
1833  * Test for allocating a bulk of mbufs,
1834  * using an array of valid (positive) allocation counts.
1835 */
1836 static int
1837 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1838 {
1839 int ret = 0;
1840 unsigned int idx, loop;
1841 unsigned int alloc_counts[] = {
1842 0,
1843 MEMPOOL_CACHE_SIZE - 1,
1844 MEMPOOL_CACHE_SIZE + 1,
1845 MEMPOOL_CACHE_SIZE * 1.5,
1846 MEMPOOL_CACHE_SIZE * 2,
1847 MEMPOOL_CACHE_SIZE * 2 - 1,
1848 MEMPOOL_CACHE_SIZE * 2 + 1,
1849 MEMPOOL_CACHE_SIZE,
1850 };
1851
1852 /* allocate a large array of mbuf pointers */
1853 struct rte_mbuf *mbufs[NB_MBUF] = { 0 };
1854 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
1855 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1856 alloc_counts[idx]);
1857 if (ret == 0) {
1858 for (loop = 0; loop < alloc_counts[idx] &&
1859 mbufs[loop] != NULL; loop++)
1860 rte_pktmbuf_free(mbufs[loop]);
1861 } else if (ret != 0) {
1862 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
1863 __func__, alloc_counts[idx], ret);
1864 return -1;
1865 }
1866 }
1867 return 0;
1868 }
1869
1870 /*
1871 * Negative testing for allocating a bulk of mbufs
1872 */
1873 static int
1874 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
1875 {
1876 int ret = 0;
1877 unsigned int idx, loop;
1878 unsigned int neg_alloc_counts[] = {
1879 MEMPOOL_CACHE_SIZE - NB_MBUF,
1880 NB_MBUF + 1,
1881 NB_MBUF * 8,
1882 UINT_MAX
1883 };
1884 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 };
1885
1886 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
1887 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
1888 neg_alloc_counts[idx]);
1889 if (ret == 0) {
1890 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
1891 __func__, neg_alloc_counts[idx], ret);
1892 for (loop = 0; loop < neg_alloc_counts[idx] &&
1893 mbufs[loop] != NULL; loop++)
1894 rte_pktmbuf_free(mbufs[loop]);
1895 return -1;
1896 }
1897 }
1898 return 0;
1899 }
1900
1901 /*
1902 * Test to read mbuf packet using rte_pktmbuf_read
1903 */
1904 static int
1905 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool)
1906 {
1907 struct rte_mbuf *m = NULL;
1908 char *data = NULL;
1909 const char *data_copy = NULL;
1910 int off;
1911
1912 /* alloc a mbuf */
1913 m = rte_pktmbuf_alloc(pktmbuf_pool);
1914 if (m == NULL)
1915 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1916 if (rte_pktmbuf_pkt_len(m) != 0)
1917 GOTO_FAIL("%s: Bad packet length\n", __func__);
1918 rte_mbuf_sanity_check(m, 0);
1919
1920 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1921 if (data == NULL)
1922 GOTO_FAIL("%s: Cannot append data\n", __func__);
1923 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2)
1924 GOTO_FAIL("%s: Bad packet length\n", __func__);
1925 memset(data, 0xfe, MBUF_TEST_DATA_LEN2);
1926
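/*
 * rte_pktmbuf_read() returns a pointer directly into the mbuf when the
 * requested region is contiguous in the first segment and only copies
 * into the caller-supplied buffer otherwise, so passing NULL for the
 * buffer is safe with this single-segment mbuf.
 */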
1927 /* read the data from mbuf */
1928 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL);
1929 if (data_copy == NULL)
1930 GOTO_FAIL("%s: Error in reading data!\n", __func__);
1931 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
1932 if (data_copy[off] != (char)0xfe)
1933 GOTO_FAIL("Data corrupted at offset %u", off);
1934 }
1935 rte_pktmbuf_free(m);
1936 m = NULL;
1937
1938 return 0;
1939 fail:
1940 if (m) {
1941 rte_pktmbuf_free(m);
1942 m = NULL;
1943 }
1944 return -1;
1945 }
1946
1947 /*
1948 * Test to read mbuf packet data from offset
1949 */
1950 static int
1951 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
1952 {
1953 struct rte_mbuf *m = NULL;
1954 struct rte_ether_hdr *hdr = NULL;
1955 char *data = NULL;
1956 const char *data_copy = NULL;
1957 unsigned int off;
1958 unsigned int hdr_len = sizeof(struct rte_ether_hdr);
1959
1960 /* alloc a mbuf */
1961 m = rte_pktmbuf_alloc(pktmbuf_pool);
1962 if (m == NULL)
1963 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
1964
1965 if (rte_pktmbuf_pkt_len(m) != 0)
1966 GOTO_FAIL("%s: Bad packet length\n", __func__);
1967 rte_mbuf_sanity_check(m, 0);
1968
1969 /* prepend an ethernet header */
1970 hdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
1971 if (hdr == NULL)
1972 GOTO_FAIL("%s: Cannot prepend header\n", __func__);
1973 if (rte_pktmbuf_pkt_len(m) != hdr_len)
1974 GOTO_FAIL("%s: Bad pkt length", __func__);
1975 if (rte_pktmbuf_data_len(m) != hdr_len)
1976 GOTO_FAIL("%s: Bad data length", __func__);
1977 memset(hdr, 0xde, hdr_len);
1978
1979 /* read mbuf header info from 0 offset */
1980 data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
1981 if (data_copy == NULL)
1982 GOTO_FAIL("%s: Error in reading header!\n", __func__);
1983 for (off = 0; off < hdr_len; off++) {
1984 if (data_copy[off] != (char)0xde)
1985 GOTO_FAIL("Header info corrupted at offset %u", off);
1986 }
1987
1988 /* append sample data after ethernet header */
1989 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
1990 if (data == NULL)
1991 GOTO_FAIL("%s: Cannot append data\n", __func__);
1992 if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1993 GOTO_FAIL("%s: Bad packet length\n", __func__);
1994 if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
1995 GOTO_FAIL("%s: Bad data length\n", __func__);
1996 memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
1997
1998 /* read mbuf data after header info */
1999 data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
2000 if (data_copy == NULL)
2001 GOTO_FAIL("%s: Error in reading header data!\n", __func__);
2002 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2003 if (data_copy[off] != (char)0xcc)
2004 GOTO_FAIL("Data corrupted at offset %u", off);
2005 }
2006
2007 /* partial reading of mbuf data */
2008 data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
2009 NULL);
2010 if (data_copy == NULL)
2011 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2012 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
2013 if (data_copy[off] != (char)0xcc)
2014 GOTO_FAIL("Data corrupted at offset %u", off);
2015 }
2016
2017 /* read length greater than mbuf data_len */
2018 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
2019 NULL) != NULL)
2020 GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
2021 __func__);
2022
2023 /* read length greater than mbuf pkt_len */
2024 if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
2025 NULL) != NULL)
2026 GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
2027 __func__);
2028
2029 /* read data of zero len from valid offset */
2030 data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
2031 if (data_copy == NULL)
2032 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2033 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
2034 if (data_copy[off] != (char)0xcc)
2035 GOTO_FAIL("Data corrupted at offset %u", off);
2036 }
2037
2038 /* read data of zero length from zero offset */
2039 data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
2040 if (data_copy == NULL)
2041 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2042 /* check if the received address is the beginning of header info */
2043 if (hdr != (const struct rte_ether_hdr *)data_copy)
2044 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2045
2046 /* read data of max length from valid offset */
2047 data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
2048 if (data_copy == NULL)
2049 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2050 /* check if the received address is the beginning of data segment */
2051 if (data_copy != data)
2052 GOTO_FAIL("%s: Corrupted data address!\n", __func__);
2053
2054 /* try to read from mbuf with max size offset */
2055 data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
2056 if (data_copy != NULL)
2057 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2058
2059 /* try to read from mbuf with max size offset and len */
2060 data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
2061 if (data_copy != NULL)
2062 GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
2063
2064 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2065
2066 rte_pktmbuf_free(m);
2067 m = NULL;
2068
2069 return 0;
2070 fail:
2071 if (m) {
2072 rte_pktmbuf_free(m);
2073 m = NULL;
2074 }
2075 return -1;
2076 }
2077
2078 struct test_case {
2079 unsigned int seg_count;
2080 unsigned int flags;
2081 uint32_t read_off;
2082 uint32_t read_len;
2083 unsigned int seg_lengths[MBUF_MAX_SEG];
2084 };
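/*
 * seg_count/seg_lengths describe the chain built by create_packet(),
 * flags selects MBUF_NO_HEADER, MBUF_HEADER or MBUF_NEG_TEST_READ, and
 * read_off/read_len are the arguments later passed to rte_pktmbuf_read().
 */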
2085
2086 /* create a mbuf with different sized segments
2087 * and fill with data [0x00 0x01 0x02 ...]
2088 */
2089 static struct rte_mbuf *
2090 create_packet(struct rte_mempool *pktmbuf_pool,
2091 struct test_case *test_data)
2092 {
2093 uint16_t i, ret, seg, seg_len = 0;
2094 uint32_t last_index = 0;
2095 unsigned int seg_lengths[MBUF_MAX_SEG];
2096 unsigned int hdr_len;
2097 struct rte_mbuf *pkt = NULL;
2098 struct rte_mbuf *pkt_seg = NULL;
2099 char *hdr = NULL;
2100 char *data = NULL;
2101
2102 memcpy(seg_lengths, test_data->seg_lengths,
2103 sizeof(unsigned int)*test_data->seg_count);
2104 for (seg = 0; seg < test_data->seg_count; seg++) {
2105 hdr_len = 0;
2106 seg_len = seg_lengths[seg];
2107 pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
2108 if (pkt_seg == NULL)
2109 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2110 if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
2111 GOTO_FAIL("%s: Bad packet length\n", __func__);
2112 rte_mbuf_sanity_check(pkt_seg, 0);
2113 /* Add header only for the first segment */
2114 if (test_data->flags == MBUF_HEADER && seg == 0) {
2115 hdr_len = sizeof(struct rte_ether_hdr);
2116 /* prepend a header and fill with dummy data */
2117 hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
2118 if (hdr == NULL)
2119 GOTO_FAIL("%s: Cannot prepend header\n",
2120 __func__);
2121 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
2122 GOTO_FAIL("%s: Bad pkt length", __func__);
2123 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
2124 GOTO_FAIL("%s: Bad data length", __func__);
2125 for (i = 0; i < hdr_len; i++)
2126 hdr[i] = (last_index + i) % 0xffff;
2127 last_index += hdr_len;
2128 }
2129 /* skip appending a segment with 0 length; free the unused mbuf so it is not leaked */
2130 if (seg_len == 0) {
rte_pktmbuf_free(pkt_seg);
2131 continue;
}
2132 data = rte_pktmbuf_append(pkt_seg, seg_len);
2133 if (data == NULL)
2134 GOTO_FAIL("%s: Cannot append data segment\n", __func__);
2135 if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
2136 GOTO_FAIL("%s: Bad packet segment length: %d\n",
2137 __func__, rte_pktmbuf_pkt_len(pkt_seg));
2138 if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
2139 GOTO_FAIL("%s: Bad data length\n", __func__);
2140 for (i = 0; i < seg_len; i++)
2141 data[i] = (last_index + i) % 0xffff;
2142 /* keep the data pattern continuous across segments */
2143 last_index += i;
2144 /* create chained mbufs */
2145 if (seg == 0)
2146 pkt = pkt_seg;
2147 else {
2148 ret = rte_pktmbuf_chain(pkt, pkt_seg);
2149 if (ret != 0)
2150 GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
2151 __func__, ret);
2152 }
2153
2154 pkt_seg = pkt_seg->next;
2155 }
2156 return pkt;
2157 fail:
2158 if (pkt != NULL) {
2159 rte_pktmbuf_free(pkt);
2160 pkt = NULL;
2161 }
2162 if (pkt_seg != NULL) {
2163 rte_pktmbuf_free(pkt_seg);
2164 pkt_seg = NULL;
2165 }
2166 return NULL;
2167 }
2168
2169 static int
2170 test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
2171 {
2172 struct rte_mbuf *m;
2173 struct test_case test_cases[] = {
2174 {
2175 .seg_lengths = { 100, 100, 100 },
2176 .seg_count = 3,
2177 .flags = MBUF_NO_HEADER,
2178 .read_off = 0,
2179 .read_len = 300
2180 },
2181 {
2182 .seg_lengths = { 100, 125, 150 },
2183 .seg_count = 3,
2184 .flags = MBUF_NO_HEADER,
2185 .read_off = 99,
2186 .read_len = 201
2187 },
2188 {
2189 .seg_lengths = { 100, 100 },
2190 .seg_count = 2,
2191 .flags = MBUF_NO_HEADER,
2192 .read_off = 0,
2193 .read_len = 100
2194 },
2195 {
2196 .seg_lengths = { 100, 200 },
2197 .seg_count = 2,
2198 .flags = MBUF_HEADER,
2199 .read_off = sizeof(struct rte_ether_hdr),
2200 .read_len = 150
2201 },
2202 {
2203 .seg_lengths = { 1000, 100 },
2204 .seg_count = 2,
2205 .flags = MBUF_NO_HEADER,
2206 .read_off = 0,
2207 .read_len = 1000
2208 },
2209 {
2210 .seg_lengths = { 1024, 0, 100 },
2211 .seg_count = 3,
2212 .flags = MBUF_NO_HEADER,
2213 .read_off = 100,
2214 .read_len = 1001
2215 },
2216 {
2217 .seg_lengths = { 1000, 1, 1000 },
2218 .seg_count = 3,
2219 .flags = MBUF_NO_HEADER,
2220 .read_off = 1000,
2221 .read_len = 2
2222 },
2223 {
2224 .seg_lengths = { MBUF_TEST_DATA_LEN,
2225 MBUF_TEST_DATA_LEN2,
2226 MBUF_TEST_DATA_LEN3, 800, 10 },
2227 .seg_count = 5,
2228 .flags = MBUF_NEG_TEST_READ,
2229 .read_off = 1000,
2230 .read_len = MBUF_DATA_SIZE
2231 },
2232 };
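/*
 * The cases above cover reads within one segment, reads crossing segment
 * boundaries, a segment list containing a zero-length entry, and one
 * negative case (MBUF_NEG_TEST_READ) where read_off + read_len exceeds
 * the total packet length, so rte_pktmbuf_read() must return NULL.
 */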
2233
2234 uint32_t i, pos;
2235 const char *data_copy = NULL;
2236 char data_buf[MBUF_DATA_SIZE];
2237
2238 memset(data_buf, 0, MBUF_DATA_SIZE);
2239
2240 for (i = 0; i < RTE_DIM(test_cases); i++) {
2241 m = create_packet(pktmbuf_pool, &test_cases[i]);
2242 if (m == NULL)
2243 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2244
2245 data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
2246 test_cases[i].read_len, data_buf);
2247 if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
2248 if (data_copy != NULL)
2249 GOTO_FAIL("%s: mbuf data read should fail!\n",
2250 __func__);
2251 else {
2252 rte_pktmbuf_free(m);
2253 m = NULL;
2254 continue;
2255 }
2256 }
2257 if (data_copy == NULL)
2258 GOTO_FAIL("%s: Error in reading packet data!\n",
2259 __func__);
2260 for (pos = 0; pos < test_cases[i].read_len; pos++) {
2261 if (data_copy[pos] !=
2262 (char)((test_cases[i].read_off + pos)
2263 % 0xffff))
2264 GOTO_FAIL("Data corrupted at offset %u is %2X",
2265 pos, data_copy[pos]);
2266 }
2267 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
2268 rte_pktmbuf_free(m);
2269 m = NULL;
2270 }
2271 return 0;
2272
2273 fail:
2274 if (m != NULL) {
2275 rte_pktmbuf_free(m);
2276 m = NULL;
2277 }
2278 return -1;
2279 }
2280
2281 /* Define a free call back function to be used for external buffer */
2282 static void
2283 ext_buf_free_callback_fn(void *addr, void *opaque)
2284 {
2285 bool *freed = opaque;
2286
2287 if (addr == NULL) {
2288 printf("External buffer address is invalid\n");
2289 return;
2290 }
2291 rte_free(addr);
2292 *freed = true;
2293 printf("External buffer freed via callback\n");
2294 }
2295
2296 /*
2297 * Test to initialize shared data in external buffer before attaching to mbuf
2298 * - Allocate mbuf with no data.
2299  * - Allocate an external buffer whose size is large enough to also
2300  *   accommodate rte_mbuf_ext_shared_info.
2301 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
2302 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
2303 * - Clone another mbuf and attach the same external buffer to it.
2304 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
2305 */
2306 static int
2307 test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
2308 {
2309 struct rte_mbuf *m = NULL;
2310 struct rte_mbuf *clone = NULL;
2311 struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
2312 rte_iova_t buf_iova;
2313 void *ext_buf_addr = NULL;
2314 uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
2315 sizeof(struct rte_mbuf_ext_shared_info);
2316 bool freed = false;
2317
2318 /* alloc a mbuf */
2319 m = rte_pktmbuf_alloc(pktmbuf_pool);
2320 if (m == NULL)
2321 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
2322 if (rte_pktmbuf_pkt_len(m) != 0)
2323 GOTO_FAIL("%s: Bad packet length\n", __func__);
2324 rte_mbuf_sanity_check(m, 0);
2325
2326 ext_buf_addr = rte_malloc("External buffer", buf_len,
2327 RTE_CACHE_LINE_SIZE);
2328 if (ext_buf_addr == NULL)
2329 GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
2330
2331 ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
2332 ext_buf_free_callback_fn, &freed);
2333 if (ret_shinfo == NULL)
2334 GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
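/*
 * The helper places the rte_mbuf_ext_shared_info at the tail of the
 * external buffer, shrinks buf_len accordingly and initializes the
 * shared refcount to 1 with the given free callback and opaque pointer.
 */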
2335
2336 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2337 GOTO_FAIL("%s: External refcount is not 1\n", __func__);
2338
2339 if (rte_mbuf_refcnt_read(m) != 1)
2340 GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
2341
2342 buf_iova = rte_mem_virt2iova(ext_buf_addr);
2343 rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
2344 ret_shinfo);
2345 if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
2346 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2347 __func__);
2348
2349 /* clone the mbuf; the clone is attached to the same external buffer */
2350 clone = rte_pktmbuf_clone(m, pktmbuf_pool);
2351 if (clone == NULL)
2352 GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
2353 if (rte_pktmbuf_pkt_len(clone) != 0)
2354 GOTO_FAIL("%s: Bad packet length\n", __func__);
2355
2356 if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
2357 GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
2358 __func__);
2359
2360 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2361 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2362 if (freed)
2363 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2364
2365 /* test to manually update ext_buf_ref_cnt from 2 to 3 */
2366 rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
2367 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
2368 GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
2369 if (freed)
2370 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2371
2372 /* reset the ext_refcnt before freeing the external buffer */
2373 rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
2374 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
2375 GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
2376 if (freed)
2377 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2378
2379 /* detach the external buffer from mbufs */
2380 rte_pktmbuf_detach_extbuf(m);
2381 /* check if ref cnt is decremented */
2382 if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
2383 GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
2384 if (freed)
2385 GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
2386
2387 rte_pktmbuf_detach_extbuf(clone);
2388 if (!freed)
2389 GOTO_FAIL("%s: extbuf should be freed\n", __func__);
2390 freed = false;
2391
2392 rte_pktmbuf_free(m);
2393 m = NULL;
2394 rte_pktmbuf_free(clone);
2395 clone = NULL;
2396
2397 return 0;
2398
2399 fail:
2400 if (m) {
2401 rte_pktmbuf_free(m);
2402 m = NULL;
2403 }
2404 if (clone) {
2405 rte_pktmbuf_free(clone);
2406 clone = NULL;
2407 }
2408 if (ext_buf_addr != NULL) {
2409 rte_free(ext_buf_addr);
2410 ext_buf_addr = NULL;
2411 }
2412 return -1;
2413 }
2414
2415 /*
2416 * Test the mbuf pool with pinned external data buffers
2417 * - Allocate memory zone for external buffer
2418 * - Create the mbuf pool with pinned external buffer
2419 * - Check the created pool with relevant mbuf pool unit tests
2420 */
2421 static int
2422 test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
2423 {
2424
2425 struct rte_pktmbuf_extmem ext_mem;
2426 struct rte_mempool *pinned_pool = NULL;
2427 const struct rte_memzone *mz = NULL;
2428
2429 printf("Test mbuf pool with external pinned data buffers\n");
2430
2431 /* Allocate memzone for the external data buffer */
2432 mz = rte_memzone_reserve("pinned_pool",
2433 NB_MBUF * MBUF_DATA_SIZE,
2434 SOCKET_ID_ANY,
2435 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
2436 if (mz == NULL)
2437 GOTO_FAIL("%s: Memzone allocation failed\n", __func__);
2438
2439 /* Create the mbuf pool with pinned external data buffer */
2440 ext_mem.buf_ptr = mz->addr;
2441 ext_mem.buf_iova = mz->iova;
2442 ext_mem.buf_len = mz->len;
2443 ext_mem.elt_size = MBUF_DATA_SIZE;
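/*
 * In a pinned external buffer pool every mbuf is permanently attached to
 * its own elt_size slice of this memzone: RTE_MBUF_F_EXTERNAL stays set
 * for the mbuf's whole lifetime and freeing the mbuf does not detach the
 * buffer.
 */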
2444
2445 pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
2446 NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
2447 MBUF_DATA_SIZE, SOCKET_ID_ANY,
2448 &ext_mem, 1);
2449 if (pinned_pool == NULL)
2450 GOTO_FAIL("%s: Mbuf pool with pinned external"
2451 " buffer creation failed\n", __func__);
2452 /* test multiple mbuf alloc */
2453 if (test_pktmbuf_pool(pinned_pool) < 0)
2454 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
2455 __func__);
2456
2457 /* do it another time to check that all mbufs were freed */
2458 if (test_pktmbuf_pool(pinned_pool) < 0)
2459 GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
2460 __func__);
2461
2462 /* test that the data pointer on a packet mbuf is set properly */
2463 if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
2464 GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
2465 __func__);
2466
2467 /* test data manipulation in mbuf with non-ascii data */
2468 if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
2469 GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
2470 " failed\n", __func__);
2471
2472 /* test free pktmbuf segment one by one */
2473 if (test_pktmbuf_free_segment(pinned_pool) < 0)
2474 GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
2475 __func__);
2476
2477 if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
2478 GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
2479 __func__);
2480
2481 if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
2482 GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
2483 __func__);
2484
2485 if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
2486 GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
2487 " failed\n", __func__);
2488
2489 if (test_mbuf_linearize_check(pinned_pool) < 0)
2490 GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
2491 __func__);
2492
2493 /* test for allocating a bulk of mbufs with various sizes */
2494 if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
2495 GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
2496 __func__);
2497
2498 /* negative test: bulk mbuf allocation with out-of-range counts */
2499 if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
2500 GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
2501 " failed\n", __func__);
2502
2503 /* test to read mbuf packet */
2504 if (test_pktmbuf_read(pinned_pool) < 0)
2505 GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
2506 __func__);
2507
2508 /* test to read mbuf packet from offset */
2509 if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
2510 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
2511 " failed\n", __func__);
2512
2513 /* test to read data from chain of mbufs with data segments */
2514 if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
2515 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
2516 " failed\n", __func__);
2517
2518 RTE_SET_USED(std_pool);
2519 rte_mempool_free(pinned_pool);
2520 rte_memzone_free(mz);
2521 return 0;
2522
2523 fail:
2524 rte_mempool_free(pinned_pool);
2525 rte_memzone_free(mz);
2526 return -1;
2527 }
2528
2529 static int
2530 test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
2531 {
2532 const struct rte_mbuf_dynfield dynfield = {
2533 .name = "test-dynfield",
2534 .size = sizeof(uint8_t),
2535 .align = alignof(uint8_t),
2536 .flags = 0,
2537 };
2538 const struct rte_mbuf_dynfield dynfield2 = {
2539 .name = "test-dynfield2",
2540 .size = sizeof(uint16_t),
2541 .align = alignof(uint16_t),
2542 .flags = 0,
2543 };
2544 const struct rte_mbuf_dynfield dynfield3 = {
2545 .name = "test-dynfield3",
2546 .size = sizeof(uint8_t),
2547 .align = alignof(uint8_t),
2548 .flags = 0,
2549 };
2550 const struct rte_mbuf_dynfield dynfield_fail_big = {
2551 .name = "test-dynfield-fail-big",
2552 .size = 256,
2553 .align = 1,
2554 .flags = 0,
2555 };
2556 const struct rte_mbuf_dynfield dynfield_fail_align = {
2557 .name = "test-dynfield-fail-align",
2558 .size = 1,
2559 .align = 3,
2560 .flags = 0,
2561 };
2562 const struct rte_mbuf_dynfield dynfield_fail_flag = {
2563 .name = "test-dynfield",
2564 .size = sizeof(uint8_t),
2565 .align = alignof(uint8_t),
2566 .flags = 1,
2567 };
2568 const struct rte_mbuf_dynflag dynflag_fail_flag = {
2569 .name = "test-dynflag",
2570 .flags = 1,
2571 };
2572 const struct rte_mbuf_dynflag dynflag = {
2573 .name = "test-dynflag",
2574 .flags = 0,
2575 };
2576 const struct rte_mbuf_dynflag dynflag2 = {
2577 .name = "test-dynflag2",
2578 .flags = 0,
2579 };
2580 const struct rte_mbuf_dynflag dynflag3 = {
2581 .name = "test-dynflag3",
2582 .flags = 0,
2583 };
2584 struct rte_mbuf *m = NULL;
2585 int offset, offset2, offset3;
2586 int flag, flag2, flag3;
2587 int ret;
2588
2589 printf("Test mbuf dynamic fields and flags\n");
2590 rte_mbuf_dyn_dump(stdout);
2591
2592 offset = rte_mbuf_dynfield_register(&dynfield);
2593 if (offset == -1)
2594 GOTO_FAIL("failed to register dynamic field, offset=%d: %s",
2595 offset, strerror(errno));
2596
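/* registering a dynfield with the same name and layout must return the
 * offset allocated by the first registration
 */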
2597 ret = rte_mbuf_dynfield_register(&dynfield);
2598 if (ret != offset)
2599 GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s",
2600 ret, strerror(errno));
2601
2602 offset2 = rte_mbuf_dynfield_register(&dynfield2);
2603 if (offset2 == -1 || offset2 == offset || (offset2 & 1))
2604 GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s",
2605 offset2, strerror(errno));
2606
2607 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
2608 offsetof(struct rte_mbuf, dynfield1[1]));
2609 if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
2610 if (rte_errno == EBUSY)
2611 printf("mbuf test error skipped: dynfield is busy\n");
2612 else
2613 GOTO_FAIL("failed to register dynamic field 3, offset="
2614 "%d: %s", offset3, strerror(errno));
2615 }
2616
2617 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
2618 offset, offset2, offset3);
2619
2620 ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
2621 if (ret != -1)
2622 GOTO_FAIL("dynamic field creation should fail (too big)");
2623
2624 ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
2625 if (ret != -1)
2626 GOTO_FAIL("dynamic field creation should fail (bad alignment)");
2627
2628 ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
2629 offsetof(struct rte_mbuf, ol_flags));
2630 if (ret != -1)
2631 GOTO_FAIL("dynamic field creation should fail (not avail)");
2632
2633 ret = rte_mbuf_dynfield_register(&dynfield_fail_flag);
2634 if (ret != -1)
2635 GOTO_FAIL("dynamic field creation should fail (invalid flag)");
2636
2637 ret = rte_mbuf_dynflag_register(&dynflag_fail_flag);
2638 if (ret != -1)
2639 GOTO_FAIL("dynamic flag creation should fail (invalid flag)");
2640
2641 flag = rte_mbuf_dynflag_register(&dynflag);
2642 if (flag == -1)
2643 GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
2644 flag, strerror(errno));
2645
2646 ret = rte_mbuf_dynflag_register(&dynflag);
2647 if (ret != flag)
2648 GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
2649 ret, strerror(errno));
2650
2651 flag2 = rte_mbuf_dynflag_register(&dynflag2);
2652 if (flag2 == -1 || flag2 == flag)
2653 GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
2654 flag2, strerror(errno));
2655
2656 flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
2657 rte_bsf64(RTE_MBUF_F_LAST_FREE));
2658 if ((uint32_t)flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
2659 GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
2660 flag3, strerror(errno));
2661
2662 printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
2663
2664 /* set, get dynamic field */
2665 m = rte_pktmbuf_alloc(pktmbuf_pool);
2666 if (m == NULL)
2667 GOTO_FAIL("Cannot allocate mbuf");
2668
2669 *RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
2670 if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
2671 GOTO_FAIL("failed to read dynamic field");
2672 *RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
2673 if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
2674 GOTO_FAIL("failed to read dynamic field");
2675
2676 /* set a dynamic flag */
2677 m->ol_flags |= (1ULL << flag);
2678
2679 rte_mbuf_dyn_dump(stdout);
2680 rte_pktmbuf_free(m);
2681 return 0;
2682 fail:
2683 rte_pktmbuf_free(m);
2684 return -1;
2685 }
2686
2687 /* check that m->nb_segs and m->next are reset on mbuf free */
2688 static int
2689 test_nb_segs_and_next_reset(void)
2690 {
2691 struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
2692 struct rte_mempool *pool = NULL;
2693
2694 pool = rte_pktmbuf_pool_create("test_mbuf_reset",
2695 3, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
2696 if (pool == NULL)
2697 GOTO_FAIL("Failed to create mbuf pool");
2698
2699 /* alloc mbufs */
2700 m0 = rte_pktmbuf_alloc(pool);
2701 m1 = rte_pktmbuf_alloc(pool);
2702 m2 = rte_pktmbuf_alloc(pool);
2703 if (m0 == NULL || m1 == NULL || m2 == NULL)
2704 GOTO_FAIL("Failed to allocate mbuf");
2705
2706 /* append data in all of them */
2707 if (rte_pktmbuf_append(m0, 500) == NULL ||
2708 rte_pktmbuf_append(m1, 500) == NULL ||
2709 rte_pktmbuf_append(m2, 500) == NULL)
2710 GOTO_FAIL("Failed to append data in mbuf");
2711
2712 /* chain them in one mbuf m0 */
2713 rte_pktmbuf_chain(m1, m2);
2714 rte_pktmbuf_chain(m0, m1);
2715 if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
2716 m2->next != NULL) {
2717 m1 = m2 = NULL;
2718 GOTO_FAIL("Failed to chain mbufs");
2719 }
2720
2721 /* split m0 chain in two, between m1 and m2 */
2722 m0->nb_segs = 2;
2723 m0->pkt_len -= m2->data_len;
2724 m1->next = NULL;
2725 m2->nb_segs = 1;
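/*
 * The chain is split by hand: m0 now heads a two-segment chain {m0, m1}
 * and m2 is standalone again, so the frees below exercise both the
 * chained and the single-segment free paths before the reset checks.
 */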
2726
2727 /* free the 2 mbuf chains m0 and m2 */
2728 rte_pktmbuf_free(m0);
2729 rte_pktmbuf_free(m2);
2730
2731 /* realloc the 3 mbufs */
2732 m0 = rte_mbuf_raw_alloc(pool);
2733 m1 = rte_mbuf_raw_alloc(pool);
2734 m2 = rte_mbuf_raw_alloc(pool);
2735 if (m0 == NULL || m1 == NULL || m2 == NULL)
2736 GOTO_FAIL("Failed to reallocate mbuf");
2737
2738 /* ensure that m->next and m->nb_segs are reset on newly allocated mbufs */
2739 if (m0->nb_segs != 1 || m0->next != NULL ||
2740 m1->nb_segs != 1 || m1->next != NULL ||
2741 m2->nb_segs != 1 || m2->next != NULL)
2742 GOTO_FAIL("nb_segs or next was not reset properly");
2743
2744 rte_mempool_free(pool);
2745 return 0;
2746
2747 fail:
2748 rte_mempool_free(pool);
2749 return -1;
2750 }
2751
2752 static int
2753 test_mbuf(void)
2754 {
2755 int ret = -1;
2756 struct rte_mempool *pktmbuf_pool = NULL;
2757 struct rte_mempool *pktmbuf_pool2 = NULL;
2758
2759
2760 RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
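/* the build-time check above ensures struct rte_mbuf keeps fitting in
 * exactly two minimum-size cache lines
 */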
2761
2762 /* create pktmbuf pool if it does not exist */
2763 pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
2764 NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
2765 SOCKET_ID_ANY);
2766
2767 if (pktmbuf_pool == NULL) {
2768 printf("cannot allocate mbuf pool\n");
2769 goto err;
2770 }
2771
2772 /* test registration of dynamic fields and flags */
2773 if (test_mbuf_dyn(pktmbuf_pool) < 0) {
2774 printf("mbuf dynflag test failed\n");
2775 goto err;
2776 }
2777
2778 /* create a specific pktmbuf pool with a priv_size != 0 and no data
2779 * room size */
2780 pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
2781 NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
2782 SOCKET_ID_ANY);
2783
2784 if (pktmbuf_pool2 == NULL) {
2785 printf("cannot allocate mbuf pool\n");
2786 goto err;
2787 }
2788
2789 /* test multiple mbuf alloc */
2790 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2791 printf("test_mbuf_pool() failed\n");
2792 goto err;
2793 }
2794
2795 /* do it another time to check that all mbufs were freed */
2796 if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
2797 printf("test_mbuf_pool() failed (2)\n");
2798 goto err;
2799 }
2800
2801 /* test bulk mbuf alloc and free */
2802 if (test_pktmbuf_pool_bulk() < 0) {
2803 printf("test_pktmbuf_pool_bulk() failed\n");
2804 goto err;
2805 }
2806
2807 /* test that the pointer to the data on a packet mbuf is set properly */
2808 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
2809 printf("test_pktmbuf_pool_ptr() failed\n");
2810 goto err;
2811 }
2812
2813 /* test data manipulation in mbuf */
2814 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2815 printf("test_one_mbuf() failed\n");
2816 goto err;
2817 }
2818
2819
2820 /*
2821 * do it another time, to check that allocation reinitialize
2822 * the mbuf correctly
2823 */
2824 if (test_one_pktmbuf(pktmbuf_pool) < 0) {
2825 printf("test_one_mbuf() failed (2)\n");
2826 goto err;
2827 }
2828
2829 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
2830 printf("test_pktmbuf_with_non_ascii_data() failed\n");
2831 goto err;
2832 }
2833
2834 /* test free pktmbuf segment one by one */
2835 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
2836 printf("test_pktmbuf_free_segment() failed.\n");
2837 goto err;
2838 }
2839
2840 if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
2841 printf("testclone_and_testupdate() failed \n");
2842 goto err;
2843 }
2844
2845 if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
2846 printf("test_pktmbuf_copy() failed\n");
2847 goto err;
2848 }
2849
2850 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
2851 printf("test_attach_from_different_pool() failed\n");
2852 goto err;
2853 }
2854
2855 if (test_refcnt_mbuf() < 0) {
2856 printf("test_refcnt_mbuf() failed \n");
2857 goto err;
2858 }
2859
2860 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
2861 printf("test_failing_mbuf_sanity_check() failed\n");
2862 goto err;
2863 }
2864
2865 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
2866 printf("test_mbuf_linearize_check() failed\n");
2867 goto err;
2868 }
2869
2870 if (test_tx_offload() < 0) {
2871 printf("test_tx_offload() failed\n");
2872 goto err;
2873 }
2874
2875 if (test_get_rx_ol_flag_list() < 0) {
2876 printf("test_rte_get_rx_ol_flag_list() failed\n");
2877 goto err;
2878 }
2879
2880 if (test_get_tx_ol_flag_list() < 0) {
2881 printf("test_rte_get_tx_ol_flag_list() failed\n");
2882 goto err;
2883 }
2884
2885 if (test_get_rx_ol_flag_name() < 0) {
2886 printf("test_rte_get_rx_ol_flag_name() failed\n");
2887 goto err;
2888 }
2889
2890 if (test_get_tx_ol_flag_name() < 0) {
2891 printf("test_rte_get_tx_ol_flag_name() failed\n");
2892 goto err;
2893 }
2894
2895 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) {
2896 printf("test_mbuf_validate_tx_offload_one() failed\n");
2897 goto err;
2898 }
2899
2900 /* test for allocating a bulk of mbufs with various sizes */
2901 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2902 printf("test_rte_pktmbuf_alloc_bulk() failed\n");
2903 goto err;
2904 }
2905
2906 /* negative test: bulk mbuf allocation with out-of-range counts */
2907 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
2908 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
2909 goto err;
2910 }
2911
2912 /* test to read mbuf packet */
2913 if (test_pktmbuf_read(pktmbuf_pool) < 0) {
2914 printf("test_rte_pktmbuf_read() failed\n");
2915 goto err;
2916 }
2917
2918 /* test to read mbuf packet from offset */
2919 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
2920 printf("test_rte_pktmbuf_read_from_offset() failed\n");
2921 goto err;
2922 }
2923
2924 /* test to read data from chain of mbufs with data segments */
2925 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
2926 printf("test_rte_pktmbuf_read_from_chain() failed\n");
2927 goto err;
2928 }
2929
2930 /* test to initialize shared info at the end of the external buffer */
2931 if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
2932 printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
2933 goto err;
2934 }
2935
2936 /* test the mbuf pool with pinned external data buffers */
2937 if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
2938 printf("test_pktmbuf_ext_pinned_buffer() failed\n");
2939 goto err;
2940 }
2941
2942 /* test reset of m->nb_segs and m->next on mbuf free */
2943 if (test_nb_segs_and_next_reset() < 0) {
2944 printf("test_nb_segs_and_next_reset() failed\n");
2945 goto err;
2946 }
2947
2948 ret = 0;
2949 err:
2950 rte_mempool_free(pktmbuf_pool);
2951 rte_mempool_free(pktmbuf_pool2);
2952 return ret;
2953 }
2954 #undef GOTO_FAIL
2955
2956 REGISTER_FAST_TEST(mbuf_autotest, false, true, test_mbuf);
2957