1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <string.h> 6 #include <stdarg.h> 7 #include <stdio.h> 8 #include <stdlib.h> 9 #include <stdint.h> 10 #include <inttypes.h> 11 #include <errno.h> 12 #include <sys/queue.h> 13 14 #include <rte_common.h> 15 #include <rte_errno.h> 16 #include <rte_debug.h> 17 #include <rte_log.h> 18 #include <rte_memory.h> 19 #include <rte_memcpy.h> 20 #include <rte_launch.h> 21 #include <rte_eal.h> 22 #include <rte_per_lcore.h> 23 #include <rte_lcore.h> 24 #include <rte_atomic.h> 25 #include <rte_branch_prediction.h> 26 #include <rte_ring.h> 27 #include <rte_mempool.h> 28 #include <rte_mbuf.h> 29 #include <rte_random.h> 30 #include <rte_cycles.h> 31 #include <rte_malloc.h> 32 #include <rte_ether.h> 33 #include <rte_ip.h> 34 #include <rte_tcp.h> 35 #include <rte_mbuf_dyn.h> 36 37 #include "test.h" 38 39 #define MEMPOOL_CACHE_SIZE 32 40 #define MBUF_DATA_SIZE 2048 41 #define NB_MBUF 128 42 #define MBUF_TEST_DATA_LEN 1464 43 #define MBUF_TEST_DATA_LEN2 50 44 #define MBUF_TEST_DATA_LEN3 256 45 #define MBUF_TEST_HDR1_LEN 20 46 #define MBUF_TEST_HDR2_LEN 30 47 #define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN) 48 #define MBUF_TEST_SEG_SIZE 64 49 #define MBUF_TEST_BURST 8 50 #define EXT_BUF_TEST_DATA_LEN 1024 51 #define MBUF_MAX_SEG 16 52 #define MBUF_NO_HEADER 0 53 #define MBUF_HEADER 1 54 #define MBUF_NEG_TEST_READ 2 55 #define VAL_NAME(flag) { flag, #flag } 56 57 /* chain length in bulk test */ 58 #define CHAIN_LEN 16 59 60 /* size of private data for mbuf in pktmbuf_pool2 */ 61 #define MBUF2_PRIV_SIZE 128 62 63 #define REFCNT_MAX_ITER 64 64 #define REFCNT_MAX_TIMEOUT 10 65 #define REFCNT_MAX_REF (RTE_MAX_LCORE) 66 #define REFCNT_MBUF_NUM 64 67 #define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF) 68 69 #define MAGIC_DATA 0x42424242 70 71 #define MAKE_STRING(x) # x 72 73 #ifdef RTE_MBUF_REFCNT_ATOMIC 74 75 static volatile uint32_t refcnt_stop_workers; 76 static unsigned refcnt_lcore[RTE_MAX_LCORE]; 77 78 #endif 79 80 /* 81 * MBUF 82 * ==== 83 * 84 * #. Allocate a mbuf pool. 85 * 86 * - The pool contains NB_MBUF elements, where each mbuf has a data room of 87 * MBUF_DATA_SIZE bytes. 88 * 89 * #. Test multiple allocations of mbufs from this pool. 90 * 91 * - Allocate NB_MBUF and store pointers in a table. 92 * - If an allocation fails, return an error. 93 * - Free all these mbufs. 94 * - Repeat the same test to check that mbufs were freed correctly. 95 * 96 * #. Test data manipulation in pktmbuf. 97 * 98 * - Alloc an mbuf. 99 * - Append data using rte_pktmbuf_append(). 100 * - Test for error in rte_pktmbuf_append() when len is too large. 101 * - Trim data at the end of mbuf using rte_pktmbuf_trim(). 102 * - Test for error in rte_pktmbuf_trim() when len is too large. 103 * - Prepend a header using rte_pktmbuf_prepend(). 104 * - Test for error in rte_pktmbuf_prepend() when len is too large. 105 * - Remove data at the beginning of mbuf using rte_pktmbuf_adj(). 106 * - Test for error in rte_pktmbuf_adj() when len is too large. 107 * - Check that appended data is not corrupt. 108 * - Free the mbuf. 109 * - Between all these tests, check data_len and pkt_len, and 110 * that the mbuf is contiguous. 111 * - Repeat the test to check that allocation operations 112 * reinitialize the mbuf correctly. 113 * 114 * #. Test packet cloning 115 * - Clone a mbuf and verify the data 116 * - Clone the cloned mbuf and verify the data 117 * - Attach a mbuf to another that does not have the same priv_size. 
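 *
 * As an illustrative sketch only (not an additional test case), the basic
 * append/prepend/adj/trim sequence exercised above boils down to the
 * following, where "pool" stands for the pktmbuf pool passed to the tests:
 *
 *      struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
 *      char *data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
 *      char *hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
 *      rte_pktmbuf_adj(m, MBUF_TEST_HDR1_LEN);
 *      rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN);
 *      rte_pktmbuf_free(m);
 *
 * rte_pktmbuf_append(), _prepend() and _adj() return NULL, and
 * rte_pktmbuf_trim() returns -1, when the requested length does not fit;
 * the error-path checks in the tests below rely on exactly that behaviour.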
118 */ 119 120 #define GOTO_FAIL(str, ...) do { \ 121 printf("mbuf test FAILED (l.%d): <" str ">\n", \ 122 __LINE__, ##__VA_ARGS__); \ 123 goto fail; \ 124 } while(0) 125 126 /* 127 * test data manipulation in mbuf with non-ascii data 128 */ 129 static int 130 test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool) 131 { 132 struct rte_mbuf *m = NULL; 133 char *data; 134 135 m = rte_pktmbuf_alloc(pktmbuf_pool); 136 if (m == NULL) 137 GOTO_FAIL("Cannot allocate mbuf"); 138 if (rte_pktmbuf_pkt_len(m) != 0) 139 GOTO_FAIL("Bad length"); 140 141 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN); 142 if (data == NULL) 143 GOTO_FAIL("Cannot append data"); 144 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) 145 GOTO_FAIL("Bad pkt length"); 146 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) 147 GOTO_FAIL("Bad data length"); 148 memset(data, 0xff, rte_pktmbuf_pkt_len(m)); 149 if (!rte_pktmbuf_is_contiguous(m)) 150 GOTO_FAIL("Buffer should be continuous"); 151 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN); 152 153 rte_pktmbuf_free(m); 154 155 return 0; 156 157 fail: 158 if(m) { 159 rte_pktmbuf_free(m); 160 } 161 return -1; 162 } 163 164 /* 165 * test data manipulation in mbuf 166 */ 167 static int 168 test_one_pktmbuf(struct rte_mempool *pktmbuf_pool) 169 { 170 struct rte_mbuf *m = NULL; 171 char *data, *data2, *hdr; 172 unsigned i; 173 174 printf("Test pktmbuf API\n"); 175 176 /* alloc a mbuf */ 177 178 m = rte_pktmbuf_alloc(pktmbuf_pool); 179 if (m == NULL) 180 GOTO_FAIL("Cannot allocate mbuf"); 181 if (rte_pktmbuf_pkt_len(m) != 0) 182 GOTO_FAIL("Bad length"); 183 184 rte_pktmbuf_dump(stdout, m, 0); 185 186 /* append data */ 187 188 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN); 189 if (data == NULL) 190 GOTO_FAIL("Cannot append data"); 191 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) 192 GOTO_FAIL("Bad pkt length"); 193 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) 194 GOTO_FAIL("Bad data length"); 195 memset(data, 0x66, rte_pktmbuf_pkt_len(m)); 196 if (!rte_pktmbuf_is_contiguous(m)) 197 GOTO_FAIL("Buffer should be continuous"); 198 rte_pktmbuf_dump(stdout, m, MBUF_TEST_DATA_LEN); 199 rte_pktmbuf_dump(stdout, m, 2*MBUF_TEST_DATA_LEN); 200 201 /* this append should fail */ 202 203 data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1)); 204 if (data2 != NULL) 205 GOTO_FAIL("Append should not succeed"); 206 207 /* append some more data */ 208 209 data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2); 210 if (data2 == NULL) 211 GOTO_FAIL("Cannot append data"); 212 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2) 213 GOTO_FAIL("Bad pkt length"); 214 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2) 215 GOTO_FAIL("Bad data length"); 216 if (!rte_pktmbuf_is_contiguous(m)) 217 GOTO_FAIL("Buffer should be continuous"); 218 219 /* trim data at the end of mbuf */ 220 221 if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0) 222 GOTO_FAIL("Cannot trim data"); 223 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) 224 GOTO_FAIL("Bad pkt length"); 225 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) 226 GOTO_FAIL("Bad data length"); 227 if (!rte_pktmbuf_is_contiguous(m)) 228 GOTO_FAIL("Buffer should be continuous"); 229 230 /* this trim should fail */ 231 232 if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0) 233 GOTO_FAIL("trim should not succeed"); 234 235 /* prepend one header */ 236 237 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN); 238 if (hdr == NULL) 239 GOTO_FAIL("Cannot prepend"); 240 if (data 
- hdr != MBUF_TEST_HDR1_LEN) 241 GOTO_FAIL("Prepend failed"); 242 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) 243 GOTO_FAIL("Bad pkt length"); 244 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) 245 GOTO_FAIL("Bad data length"); 246 if (!rte_pktmbuf_is_contiguous(m)) 247 GOTO_FAIL("Buffer should be continuous"); 248 memset(hdr, 0x55, MBUF_TEST_HDR1_LEN); 249 250 /* prepend another header */ 251 252 hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN); 253 if (hdr == NULL) 254 GOTO_FAIL("Cannot prepend"); 255 if (data - hdr != MBUF_TEST_ALL_HDRS_LEN) 256 GOTO_FAIL("Prepend failed"); 257 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) 258 GOTO_FAIL("Bad pkt length"); 259 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) 260 GOTO_FAIL("Bad data length"); 261 if (!rte_pktmbuf_is_contiguous(m)) 262 GOTO_FAIL("Buffer should be continuous"); 263 memset(hdr, 0x55, MBUF_TEST_HDR2_LEN); 264 265 rte_mbuf_sanity_check(m, 1); 266 rte_mbuf_sanity_check(m, 0); 267 rte_pktmbuf_dump(stdout, m, 0); 268 269 /* this prepend should fail */ 270 271 hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1)); 272 if (hdr != NULL) 273 GOTO_FAIL("prepend should not succeed"); 274 275 /* remove data at beginning of mbuf (adj) */ 276 277 if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN)) 278 GOTO_FAIL("rte_pktmbuf_adj failed"); 279 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) 280 GOTO_FAIL("Bad pkt length"); 281 if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) 282 GOTO_FAIL("Bad data length"); 283 if (!rte_pktmbuf_is_contiguous(m)) 284 GOTO_FAIL("Buffer should be continuous"); 285 286 /* this adj should fail */ 287 288 if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL) 289 GOTO_FAIL("rte_pktmbuf_adj should not succeed"); 290 291 /* check data */ 292 293 if (!rte_pktmbuf_is_contiguous(m)) 294 GOTO_FAIL("Buffer should be continuous"); 295 296 for (i=0; i<MBUF_TEST_DATA_LEN; i++) { 297 if (data[i] != 0x66) 298 GOTO_FAIL("Data corrupted at offset %u", i); 299 } 300 301 /* free mbuf */ 302 303 rte_pktmbuf_free(m); 304 m = NULL; 305 return 0; 306 307 fail: 308 if (m) 309 rte_pktmbuf_free(m); 310 return -1; 311 } 312 313 static uint16_t 314 testclone_refcnt_read(struct rte_mbuf *m) 315 { 316 return RTE_MBUF_HAS_PINNED_EXTBUF(m) ? 
317 rte_mbuf_ext_refcnt_read(m->shinfo) : 318 rte_mbuf_refcnt_read(m); 319 } 320 321 static int 322 testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool, 323 struct rte_mempool *clone_pool) 324 { 325 struct rte_mbuf *m = NULL; 326 struct rte_mbuf *clone = NULL; 327 struct rte_mbuf *clone2 = NULL; 328 unaligned_uint32_t *data; 329 330 /* alloc a mbuf */ 331 m = rte_pktmbuf_alloc(pktmbuf_pool); 332 if (m == NULL) 333 GOTO_FAIL("ooops not allocating mbuf"); 334 335 if (rte_pktmbuf_pkt_len(m) != 0) 336 GOTO_FAIL("Bad length"); 337 338 rte_pktmbuf_append(m, sizeof(uint32_t)); 339 data = rte_pktmbuf_mtod(m, unaligned_uint32_t *); 340 *data = MAGIC_DATA; 341 342 /* clone the allocated mbuf */ 343 clone = rte_pktmbuf_clone(m, clone_pool); 344 if (clone == NULL) 345 GOTO_FAIL("cannot clone data\n"); 346 347 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *); 348 if (*data != MAGIC_DATA) 349 GOTO_FAIL("invalid data in clone\n"); 350 351 if (testclone_refcnt_read(m) != 2) 352 GOTO_FAIL("invalid refcnt in m\n"); 353 354 /* free the clone */ 355 rte_pktmbuf_free(clone); 356 clone = NULL; 357 358 /* same test with a chained mbuf */ 359 m->next = rte_pktmbuf_alloc(pktmbuf_pool); 360 if (m->next == NULL) 361 GOTO_FAIL("Next Pkt Null\n"); 362 m->nb_segs = 2; 363 364 rte_pktmbuf_append(m->next, sizeof(uint32_t)); 365 m->pkt_len = 2 * sizeof(uint32_t); 366 367 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *); 368 *data = MAGIC_DATA; 369 370 clone = rte_pktmbuf_clone(m, clone_pool); 371 if (clone == NULL) 372 GOTO_FAIL("cannot clone data\n"); 373 374 data = rte_pktmbuf_mtod(clone, unaligned_uint32_t *); 375 if (*data != MAGIC_DATA) 376 GOTO_FAIL("invalid data in clone\n"); 377 378 data = rte_pktmbuf_mtod(clone->next, unaligned_uint32_t *); 379 if (*data != MAGIC_DATA) 380 GOTO_FAIL("invalid data in clone->next\n"); 381 382 if (testclone_refcnt_read(m) != 2) 383 GOTO_FAIL("invalid refcnt in m\n"); 384 385 if (testclone_refcnt_read(m->next) != 2) 386 GOTO_FAIL("invalid refcnt in m->next\n"); 387 388 /* try to clone the clone */ 389 390 clone2 = rte_pktmbuf_clone(clone, clone_pool); 391 if (clone2 == NULL) 392 GOTO_FAIL("cannot clone the clone\n"); 393 394 data = rte_pktmbuf_mtod(clone2, unaligned_uint32_t *); 395 if (*data != MAGIC_DATA) 396 GOTO_FAIL("invalid data in clone2\n"); 397 398 data = rte_pktmbuf_mtod(clone2->next, unaligned_uint32_t *); 399 if (*data != MAGIC_DATA) 400 GOTO_FAIL("invalid data in clone2->next\n"); 401 402 if (testclone_refcnt_read(m) != 3) 403 GOTO_FAIL("invalid refcnt in m\n"); 404 405 if (testclone_refcnt_read(m->next) != 3) 406 GOTO_FAIL("invalid refcnt in m->next\n"); 407 408 /* free mbuf */ 409 rte_pktmbuf_free(m); 410 rte_pktmbuf_free(clone); 411 rte_pktmbuf_free(clone2); 412 413 m = NULL; 414 clone = NULL; 415 clone2 = NULL; 416 printf("%s ok\n", __func__); 417 return 0; 418 419 fail: 420 if (m) 421 rte_pktmbuf_free(m); 422 if (clone) 423 rte_pktmbuf_free(clone); 424 if (clone2) 425 rte_pktmbuf_free(clone2); 426 return -1; 427 } 428 429 static int 430 test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool, 431 struct rte_mempool *clone_pool) 432 { 433 struct rte_mbuf *m = NULL; 434 struct rte_mbuf *copy = NULL; 435 struct rte_mbuf *copy2 = NULL; 436 struct rte_mbuf *clone = NULL; 437 unaligned_uint32_t *data; 438 439 /* alloc a mbuf */ 440 m = rte_pktmbuf_alloc(pktmbuf_pool); 441 if (m == NULL) 442 GOTO_FAIL("ooops not allocating mbuf"); 443 444 if (rte_pktmbuf_pkt_len(m) != 0) 445 GOTO_FAIL("Bad length"); 446 447 rte_pktmbuf_append(m, sizeof(uint32_t)); 448 
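/* write a known pattern (MAGIC_DATA) so the copies made below can be verified */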
data = rte_pktmbuf_mtod(m, unaligned_uint32_t *); 449 *data = MAGIC_DATA; 450 451 /* copy the allocated mbuf */ 452 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX); 453 if (copy == NULL) 454 GOTO_FAIL("cannot copy data\n"); 455 456 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t)) 457 GOTO_FAIL("copy length incorrect\n"); 458 459 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t)) 460 GOTO_FAIL("copy data length incorrect\n"); 461 462 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *); 463 if (*data != MAGIC_DATA) 464 GOTO_FAIL("invalid data in copy\n"); 465 466 /* free the copy */ 467 rte_pktmbuf_free(copy); 468 copy = NULL; 469 470 /* same test with a cloned mbuf */ 471 clone = rte_pktmbuf_clone(m, clone_pool); 472 if (clone == NULL) 473 GOTO_FAIL("cannot clone data\n"); 474 475 if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) && 476 !RTE_MBUF_CLONED(clone)) || 477 (RTE_MBUF_HAS_PINNED_EXTBUF(m) && 478 !RTE_MBUF_HAS_EXTBUF(clone))) 479 GOTO_FAIL("clone did not give a cloned mbuf\n"); 480 481 copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX); 482 if (copy == NULL) 483 GOTO_FAIL("cannot copy cloned mbuf\n"); 484 485 if (RTE_MBUF_CLONED(copy)) 486 GOTO_FAIL("copy of clone is cloned?\n"); 487 488 if (rte_pktmbuf_pkt_len(copy) != sizeof(uint32_t)) 489 GOTO_FAIL("copy clone length incorrect\n"); 490 491 if (rte_pktmbuf_data_len(copy) != sizeof(uint32_t)) 492 GOTO_FAIL("copy clone data length incorrect\n"); 493 494 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *); 495 if (*data != MAGIC_DATA) 496 GOTO_FAIL("invalid data in clone copy\n"); 497 rte_pktmbuf_free(clone); 498 rte_pktmbuf_free(copy); 499 copy = NULL; 500 clone = NULL; 501 502 503 /* same test with a chained mbuf */ 504 m->next = rte_pktmbuf_alloc(pktmbuf_pool); 505 if (m->next == NULL) 506 GOTO_FAIL("Next Pkt Null\n"); 507 m->nb_segs = 2; 508 509 rte_pktmbuf_append(m->next, sizeof(uint32_t)); 510 m->pkt_len = 2 * sizeof(uint32_t); 511 data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *); 512 *data = MAGIC_DATA + 1; 513 514 copy = rte_pktmbuf_copy(m, pktmbuf_pool, 0, UINT32_MAX); 515 if (copy == NULL) 516 GOTO_FAIL("cannot copy data\n"); 517 518 if (rte_pktmbuf_pkt_len(copy) != 2 * sizeof(uint32_t)) 519 GOTO_FAIL("chain copy length incorrect\n"); 520 521 if (rte_pktmbuf_data_len(copy) != 2 * sizeof(uint32_t)) 522 GOTO_FAIL("chain copy data length incorrect\n"); 523 524 data = rte_pktmbuf_mtod(copy, unaligned_uint32_t *); 525 if (data[0] != MAGIC_DATA || data[1] != MAGIC_DATA + 1) 526 GOTO_FAIL("invalid data in copy\n"); 527 528 rte_pktmbuf_free(copy2); 529 530 /* test offset copy */ 531 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool, 532 sizeof(uint32_t), UINT32_MAX); 533 if (copy2 == NULL) 534 GOTO_FAIL("cannot copy the copy\n"); 535 536 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t)) 537 GOTO_FAIL("copy with offset, length incorrect\n"); 538 539 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t)) 540 GOTO_FAIL("copy with offset, data length incorrect\n"); 541 542 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *); 543 if (data[0] != MAGIC_DATA + 1) 544 GOTO_FAIL("copy with offset, invalid data\n"); 545 546 rte_pktmbuf_free(copy2); 547 548 /* test truncation copy */ 549 copy2 = rte_pktmbuf_copy(copy, pktmbuf_pool, 550 0, sizeof(uint32_t)); 551 if (copy2 == NULL) 552 GOTO_FAIL("cannot copy the copy\n"); 553 554 if (rte_pktmbuf_pkt_len(copy2) != sizeof(uint32_t)) 555 GOTO_FAIL("copy with truncate, length incorrect\n"); 556 557 if (rte_pktmbuf_data_len(copy2) != sizeof(uint32_t)) 558 GOTO_FAIL("copy with truncate, data 
length incorrect\n"); 559 560 data = rte_pktmbuf_mtod(copy2, unaligned_uint32_t *); 561 if (data[0] != MAGIC_DATA) 562 GOTO_FAIL("copy with truncate, invalid data\n"); 563 564 /* free mbuf */ 565 rte_pktmbuf_free(m); 566 rte_pktmbuf_free(copy); 567 rte_pktmbuf_free(copy2); 568 569 m = NULL; 570 copy = NULL; 571 copy2 = NULL; 572 printf("%s ok\n", __func__); 573 return 0; 574 575 fail: 576 if (m) 577 rte_pktmbuf_free(m); 578 if (copy) 579 rte_pktmbuf_free(copy); 580 if (copy2) 581 rte_pktmbuf_free(copy2); 582 return -1; 583 } 584 585 static int 586 test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool, 587 struct rte_mempool *pktmbuf_pool2) 588 { 589 struct rte_mbuf *m = NULL; 590 struct rte_mbuf *clone = NULL; 591 struct rte_mbuf *clone2 = NULL; 592 char *data, *c_data, *c_data2; 593 594 /* alloc a mbuf */ 595 m = rte_pktmbuf_alloc(pktmbuf_pool); 596 if (m == NULL) 597 GOTO_FAIL("cannot allocate mbuf"); 598 599 if (rte_pktmbuf_pkt_len(m) != 0) 600 GOTO_FAIL("Bad length"); 601 602 data = rte_pktmbuf_mtod(m, char *); 603 604 /* allocate a new mbuf from the second pool, and attach it to the first 605 * mbuf */ 606 clone = rte_pktmbuf_alloc(pktmbuf_pool2); 607 if (clone == NULL) 608 GOTO_FAIL("cannot allocate mbuf from second pool\n"); 609 610 /* check data room size and priv size, and erase priv */ 611 if (rte_pktmbuf_data_room_size(clone->pool) != 0) 612 GOTO_FAIL("data room size should be 0\n"); 613 if (rte_pktmbuf_priv_size(clone->pool) != MBUF2_PRIV_SIZE) 614 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE); 615 memset(clone + 1, 0, MBUF2_PRIV_SIZE); 616 617 /* save data pointer to compare it after detach() */ 618 c_data = rte_pktmbuf_mtod(clone, char *); 619 if (c_data != (char *)clone + sizeof(*clone) + MBUF2_PRIV_SIZE) 620 GOTO_FAIL("bad data pointer in clone"); 621 if (rte_pktmbuf_headroom(clone) != 0) 622 GOTO_FAIL("bad headroom in clone"); 623 624 rte_pktmbuf_attach(clone, m); 625 626 if (rte_pktmbuf_mtod(clone, char *) != data) 627 GOTO_FAIL("clone was not attached properly\n"); 628 if (rte_pktmbuf_headroom(clone) != RTE_PKTMBUF_HEADROOM) 629 GOTO_FAIL("bad headroom in clone after attach"); 630 if (rte_mbuf_refcnt_read(m) != 2) 631 GOTO_FAIL("invalid refcnt in m\n"); 632 633 /* allocate a new mbuf from the second pool, and attach it to the first 634 * cloned mbuf */ 635 clone2 = rte_pktmbuf_alloc(pktmbuf_pool2); 636 if (clone2 == NULL) 637 GOTO_FAIL("cannot allocate clone2 from second pool\n"); 638 639 /* check data room size and priv size, and erase priv */ 640 if (rte_pktmbuf_data_room_size(clone2->pool) != 0) 641 GOTO_FAIL("data room size should be 0\n"); 642 if (rte_pktmbuf_priv_size(clone2->pool) != MBUF2_PRIV_SIZE) 643 GOTO_FAIL("data room size should be %d\n", MBUF2_PRIV_SIZE); 644 memset(clone2 + 1, 0, MBUF2_PRIV_SIZE); 645 646 /* save data pointer to compare it after detach() */ 647 c_data2 = rte_pktmbuf_mtod(clone2, char *); 648 if (c_data2 != (char *)clone2 + sizeof(*clone2) + MBUF2_PRIV_SIZE) 649 GOTO_FAIL("bad data pointer in clone2"); 650 if (rte_pktmbuf_headroom(clone2) != 0) 651 GOTO_FAIL("bad headroom in clone2"); 652 653 rte_pktmbuf_attach(clone2, clone); 654 655 if (rte_pktmbuf_mtod(clone2, char *) != data) 656 GOTO_FAIL("clone2 was not attached properly\n"); 657 if (rte_pktmbuf_headroom(clone2) != RTE_PKTMBUF_HEADROOM) 658 GOTO_FAIL("bad headroom in clone2 after attach"); 659 if (rte_mbuf_refcnt_read(m) != 3) 660 GOTO_FAIL("invalid refcnt in m\n"); 661 662 /* detach the clones */ 663 rte_pktmbuf_detach(clone); 664 if (c_data != 
rte_pktmbuf_mtod(clone, char *)) 665 GOTO_FAIL("clone was not detached properly\n"); 666 if (rte_mbuf_refcnt_read(m) != 2) 667 GOTO_FAIL("invalid refcnt in m\n"); 668 669 rte_pktmbuf_detach(clone2); 670 if (c_data2 != rte_pktmbuf_mtod(clone2, char *)) 671 GOTO_FAIL("clone2 was not detached properly\n"); 672 if (rte_mbuf_refcnt_read(m) != 1) 673 GOTO_FAIL("invalid refcnt in m\n"); 674 675 /* free the clones and the initial mbuf */ 676 rte_pktmbuf_free(clone2); 677 rte_pktmbuf_free(clone); 678 rte_pktmbuf_free(m); 679 printf("%s ok\n", __func__); 680 return 0; 681 682 fail: 683 if (m) 684 rte_pktmbuf_free(m); 685 if (clone) 686 rte_pktmbuf_free(clone); 687 if (clone2) 688 rte_pktmbuf_free(clone2); 689 return -1; 690 } 691 692 /* 693 * test allocation and free of mbufs 694 */ 695 static int 696 test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool) 697 { 698 unsigned i; 699 struct rte_mbuf *m[NB_MBUF]; 700 int ret = 0; 701 702 for (i=0; i<NB_MBUF; i++) 703 m[i] = NULL; 704 705 /* alloc NB_MBUF mbufs */ 706 for (i=0; i<NB_MBUF; i++) { 707 m[i] = rte_pktmbuf_alloc(pktmbuf_pool); 708 if (m[i] == NULL) { 709 printf("rte_pktmbuf_alloc() failed (%u)\n", i); 710 ret = -1; 711 } 712 } 713 struct rte_mbuf *extra = NULL; 714 extra = rte_pktmbuf_alloc(pktmbuf_pool); 715 if(extra != NULL) { 716 printf("Error pool not empty"); 717 ret = -1; 718 } 719 extra = rte_pktmbuf_clone(m[0], pktmbuf_pool); 720 if(extra != NULL) { 721 printf("Error pool not empty"); 722 ret = -1; 723 } 724 /* free them */ 725 for (i=0; i<NB_MBUF; i++) { 726 if (m[i] != NULL) 727 rte_pktmbuf_free(m[i]); 728 } 729 730 return ret; 731 } 732 733 /* 734 * test bulk allocation and bulk free of mbufs 735 */ 736 static int 737 test_pktmbuf_pool_bulk(void) 738 { 739 struct rte_mempool *pool = NULL; 740 struct rte_mempool *pool2 = NULL; 741 unsigned int i; 742 struct rte_mbuf *m; 743 struct rte_mbuf *mbufs[NB_MBUF]; 744 int ret = 0; 745 746 /* We cannot use the preallocated mbuf pools because their caches 747 * prevent us from bulk allocating all objects in them. 748 * So we create our own mbuf pools without caches. 749 */ 750 printf("Create mbuf pools for bulk allocation.\n"); 751 pool = rte_pktmbuf_pool_create("test_pktmbuf_bulk", 752 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY); 753 if (pool == NULL) { 754 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n", 755 rte_errno); 756 goto err; 757 } 758 pool2 = rte_pktmbuf_pool_create("test_pktmbuf_bulk2", 759 NB_MBUF, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY); 760 if (pool2 == NULL) { 761 printf("rte_pktmbuf_pool_create() failed. rte_errno %d\n", 762 rte_errno); 763 goto err; 764 } 765 766 /* Preconditions: Mempools must be full. */ 767 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) { 768 printf("Test precondition failed: mempools not full\n"); 769 goto err; 770 } 771 if (!(rte_mempool_avail_count(pool) == NB_MBUF && 772 rte_mempool_avail_count(pool2) == NB_MBUF)) { 773 printf("Test precondition failed: mempools: %u+%u != %u+%u", 774 rte_mempool_avail_count(pool), 775 rte_mempool_avail_count(pool2), 776 NB_MBUF, NB_MBUF); 777 goto err; 778 } 779 780 printf("Test single bulk alloc, followed by multiple bulk free.\n"); 781 782 /* Bulk allocate all mbufs in the pool, in one go. */ 783 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF); 784 if (ret != 0) { 785 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret); 786 goto err; 787 } 788 /* Test that they have been removed from the pool. 
*/ 789 if (!rte_mempool_empty(pool)) { 790 printf("mempool not empty\n"); 791 goto err; 792 } 793 /* Bulk free all mbufs, in four steps. */ 794 RTE_BUILD_BUG_ON(NB_MBUF % 4 != 0); 795 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) { 796 rte_pktmbuf_free_bulk(&mbufs[i], NB_MBUF / 4); 797 /* Test that they have been returned to the pool. */ 798 if (rte_mempool_avail_count(pool) != i + NB_MBUF / 4) { 799 printf("mempool avail count incorrect\n"); 800 goto err; 801 } 802 } 803 804 printf("Test multiple bulk alloc, followed by single bulk free.\n"); 805 806 /* Bulk allocate all mbufs in the pool, in four steps. */ 807 for (i = 0; i < NB_MBUF; i += NB_MBUF / 4) { 808 ret = rte_pktmbuf_alloc_bulk(pool, &mbufs[i], NB_MBUF / 4); 809 if (ret != 0) { 810 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret); 811 goto err; 812 } 813 } 814 /* Test that they have been removed from the pool. */ 815 if (!rte_mempool_empty(pool)) { 816 printf("mempool not empty\n"); 817 goto err; 818 } 819 /* Bulk free all mbufs, in one go. */ 820 rte_pktmbuf_free_bulk(mbufs, NB_MBUF); 821 /* Test that they have been returned to the pool. */ 822 if (!rte_mempool_full(pool)) { 823 printf("mempool not full\n"); 824 goto err; 825 } 826 827 printf("Test bulk free of single long chain.\n"); 828 829 /* Bulk allocate all mbufs in the pool, in one go. */ 830 ret = rte_pktmbuf_alloc_bulk(pool, mbufs, NB_MBUF); 831 if (ret != 0) { 832 printf("rte_pktmbuf_alloc_bulk() failed: %d\n", ret); 833 goto err; 834 } 835 /* Create a long mbuf chain. */ 836 for (i = 1; i < NB_MBUF; i++) { 837 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]); 838 if (ret != 0) { 839 printf("rte_pktmbuf_chain() failed: %d\n", ret); 840 goto err; 841 } 842 mbufs[i] = NULL; 843 } 844 /* Free the mbuf chain containing all the mbufs. */ 845 rte_pktmbuf_free_bulk(mbufs, 1); 846 /* Test that they have been returned to the pool. */ 847 if (!rte_mempool_full(pool)) { 848 printf("mempool not full\n"); 849 goto err; 850 } 851 852 printf("Test bulk free of multiple chains using multiple pools.\n"); 853 854 /* Create mbuf chains containing mbufs from different pools. */ 855 RTE_BUILD_BUG_ON(CHAIN_LEN % 2 != 0); 856 RTE_BUILD_BUG_ON(NB_MBUF % (CHAIN_LEN / 2) != 0); 857 for (i = 0; i < NB_MBUF * 2; i++) { 858 m = rte_pktmbuf_alloc((i & 4) ? pool2 : pool); 859 if (m == NULL) { 860 printf("rte_pktmbuf_alloc() failed (%u)\n", i); 861 goto err; 862 } 863 if ((i % CHAIN_LEN) == 0) 864 mbufs[i / CHAIN_LEN] = m; 865 else 866 rte_pktmbuf_chain(mbufs[i / CHAIN_LEN], m); 867 } 868 /* Test that both pools have been emptied. */ 869 if (!(rte_mempool_empty(pool) && rte_mempool_empty(pool2))) { 870 printf("mempools not empty\n"); 871 goto err; 872 } 873 /* Free one mbuf chain. */ 874 rte_pktmbuf_free_bulk(mbufs, 1); 875 /* Test that the segments have been returned to the pools. */ 876 if (!(rte_mempool_avail_count(pool) == CHAIN_LEN / 2 && 877 rte_mempool_avail_count(pool2) == CHAIN_LEN / 2)) { 878 printf("all segments of first mbuf have not been returned\n"); 879 goto err; 880 } 881 /* Free the remaining mbuf chains. */ 882 rte_pktmbuf_free_bulk(&mbufs[1], NB_MBUF * 2 / CHAIN_LEN - 1); 883 /* Test that they have been returned to the pools. 
*/ 884 if (!(rte_mempool_full(pool) && rte_mempool_full(pool2))) { 885 printf("mempools not full\n"); 886 goto err; 887 } 888 889 ret = 0; 890 goto done; 891 892 err: 893 ret = -1; 894 895 done: 896 printf("Free mbuf pools for bulk allocation.\n"); 897 rte_mempool_free(pool); 898 rte_mempool_free(pool2); 899 return ret; 900 } 901 902 /* 903 * test that the pointer to the data on a packet mbuf is set properly 904 */ 905 static int 906 test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool) 907 { 908 unsigned i; 909 struct rte_mbuf *m[NB_MBUF]; 910 int ret = 0; 911 912 for (i=0; i<NB_MBUF; i++) 913 m[i] = NULL; 914 915 /* alloc NB_MBUF mbufs */ 916 for (i=0; i<NB_MBUF; i++) { 917 m[i] = rte_pktmbuf_alloc(pktmbuf_pool); 918 if (m[i] == NULL) { 919 printf("rte_pktmbuf_alloc() failed (%u)\n", i); 920 ret = -1; 921 break; 922 } 923 m[i]->data_off += 64; 924 } 925 926 /* free them */ 927 for (i=0; i<NB_MBUF; i++) { 928 if (m[i] != NULL) 929 rte_pktmbuf_free(m[i]); 930 } 931 932 for (i=0; i<NB_MBUF; i++) 933 m[i] = NULL; 934 935 /* alloc NB_MBUF mbufs */ 936 for (i=0; i<NB_MBUF; i++) { 937 m[i] = rte_pktmbuf_alloc(pktmbuf_pool); 938 if (m[i] == NULL) { 939 printf("rte_pktmbuf_alloc() failed (%u)\n", i); 940 ret = -1; 941 break; 942 } 943 if (m[i]->data_off != RTE_PKTMBUF_HEADROOM) { 944 printf("invalid data_off\n"); 945 ret = -1; 946 } 947 } 948 949 /* free them */ 950 for (i=0; i<NB_MBUF; i++) { 951 if (m[i] != NULL) 952 rte_pktmbuf_free(m[i]); 953 } 954 955 return ret; 956 } 957 958 static int 959 test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool) 960 { 961 unsigned i; 962 struct rte_mbuf *m[NB_MBUF]; 963 int ret = 0; 964 965 for (i=0; i<NB_MBUF; i++) 966 m[i] = NULL; 967 968 /* alloc NB_MBUF mbufs */ 969 for (i=0; i<NB_MBUF; i++) { 970 m[i] = rte_pktmbuf_alloc(pktmbuf_pool); 971 if (m[i] == NULL) { 972 printf("rte_pktmbuf_alloc() failed (%u)\n", i); 973 ret = -1; 974 } 975 } 976 977 /* free them */ 978 for (i=0; i<NB_MBUF; i++) { 979 if (m[i] != NULL) { 980 struct rte_mbuf *mb, *mt; 981 982 mb = m[i]; 983 while(mb != NULL) { 984 mt = mb; 985 mb = mb->next; 986 rte_pktmbuf_free_seg(mt); 987 } 988 } 989 } 990 991 return ret; 992 } 993 994 /* 995 * Stress test for rte_mbuf atomic refcnt. 996 * Implies that RTE_MBUF_REFCNT_ATOMIC is defined. 997 * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined. 998 */ 999 1000 #ifdef RTE_MBUF_REFCNT_ATOMIC 1001 1002 static int 1003 test_refcnt_worker(void *arg) 1004 { 1005 unsigned lcore, free; 1006 void *mp = 0; 1007 struct rte_ring *refcnt_mbuf_ring = arg; 1008 1009 lcore = rte_lcore_id(); 1010 printf("%s started at lcore %u\n", __func__, lcore); 1011 1012 free = 0; 1013 while (refcnt_stop_workers == 0) { 1014 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) { 1015 free++; 1016 rte_pktmbuf_free(mp); 1017 } 1018 } 1019 1020 refcnt_lcore[lcore] += free; 1021 printf("%s finished at lcore %u, " 1022 "number of freed mbufs: %u\n", 1023 __func__, lcore, free); 1024 return 0; 1025 } 1026 1027 static void 1028 test_refcnt_iter(unsigned int lcore, unsigned int iter, 1029 struct rte_mempool *refcnt_pool, 1030 struct rte_ring *refcnt_mbuf_ring) 1031 { 1032 uint16_t ref; 1033 unsigned i, n, tref, wn; 1034 struct rte_mbuf *m; 1035 1036 tref = 0; 1037 1038 /* For each mbuf in the pool: 1039 * - allocate mbuf, 1040 * - increment it's reference up to N+1, 1041 * - enqueue it N times into the ring for worker cores to free. 
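 * The reference count N for each mbuf is chosen at random in
 * [1, REFCNT_MAX_REF - 1]; the running total of references (tref) is
 * credited to the calling lcore and later compared, in test_refcnt_mbuf(),
 * against the number of mbufs freed by the worker lcores.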
1042 */ 1043 for (i = 0, n = rte_mempool_avail_count(refcnt_pool); 1044 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL; 1045 i++) { 1046 ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL); 1047 tref += ref; 1048 if ((ref & 1) != 0) { 1049 rte_pktmbuf_refcnt_update(m, ref); 1050 while (ref-- != 0) 1051 rte_ring_enqueue(refcnt_mbuf_ring, m); 1052 } else { 1053 while (ref-- != 0) { 1054 rte_pktmbuf_refcnt_update(m, 1); 1055 rte_ring_enqueue(refcnt_mbuf_ring, m); 1056 } 1057 } 1058 rte_pktmbuf_free(m); 1059 } 1060 1061 if (i != n) 1062 rte_panic("(lcore=%u, iter=%u): was able to allocate only " 1063 "%u from %u mbufs\n", lcore, iter, i, n); 1064 1065 /* wait until worker lcores have consumed all mbufs */ 1066 while (!rte_ring_empty(refcnt_mbuf_ring)) 1067 ; 1068 1069 /* check that all mbufs are back into mempool by now */ 1070 for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) { 1071 if ((i = rte_mempool_avail_count(refcnt_pool)) == n) { 1072 refcnt_lcore[lcore] += tref; 1073 printf("%s(lcore=%u, iter=%u) completed, " 1074 "%u references processed\n", 1075 __func__, lcore, iter, tref); 1076 return; 1077 } 1078 rte_delay_ms(100); 1079 } 1080 1081 rte_panic("(lcore=%u, iter=%u): after %us only " 1082 "%u of %u mbufs left free\n", lcore, iter, wn, i, n); 1083 } 1084 1085 static int 1086 test_refcnt_main(struct rte_mempool *refcnt_pool, 1087 struct rte_ring *refcnt_mbuf_ring) 1088 { 1089 unsigned i, lcore; 1090 1091 lcore = rte_lcore_id(); 1092 printf("%s started at lcore %u\n", __func__, lcore); 1093 1094 for (i = 0; i != REFCNT_MAX_ITER; i++) 1095 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring); 1096 1097 refcnt_stop_workers = 1; 1098 rte_wmb(); 1099 1100 printf("%s finished at lcore %u\n", __func__, lcore); 1101 return 0; 1102 } 1103 1104 #endif 1105 1106 static int 1107 test_refcnt_mbuf(void) 1108 { 1109 #ifdef RTE_MBUF_REFCNT_ATOMIC 1110 unsigned int main_lcore, worker, tref; 1111 int ret = -1; 1112 struct rte_mempool *refcnt_pool = NULL; 1113 struct rte_ring *refcnt_mbuf_ring = NULL; 1114 1115 if (rte_lcore_count() < 2) { 1116 printf("Not enough cores for test_refcnt_mbuf, expecting at least 2\n"); 1117 return TEST_SKIPPED; 1118 } 1119 1120 printf("starting %s, at %u lcores\n", __func__, rte_lcore_count()); 1121 1122 /* create refcnt pool & ring if they don't exist */ 1123 1124 refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool), 1125 REFCNT_MBUF_NUM, 0, 0, 0, 1126 SOCKET_ID_ANY); 1127 if (refcnt_pool == NULL) { 1128 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n", 1129 __func__); 1130 return -1; 1131 } 1132 1133 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring", 1134 rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY, 1135 RING_F_SP_ENQ); 1136 if (refcnt_mbuf_ring == NULL) { 1137 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring) 1138 "\n", __func__); 1139 goto err; 1140 } 1141 1142 refcnt_stop_workers = 0; 1143 memset(refcnt_lcore, 0, sizeof (refcnt_lcore)); 1144 1145 rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN); 1146 1147 test_refcnt_main(refcnt_pool, refcnt_mbuf_ring); 1148 1149 rte_eal_mp_wait_lcore(); 1150 1151 /* check that we processed all references */ 1152 tref = 0; 1153 main_lcore = rte_get_main_lcore(); 1154 1155 RTE_LCORE_FOREACH_WORKER(worker) 1156 tref += refcnt_lcore[worker]; 1157 1158 if (tref != refcnt_lcore[main_lcore]) 1159 rte_panic("referenced mbufs: %u, freed mbufs: %u\n", 1160 tref, refcnt_lcore[main_lcore]); 1161 1162 rte_mempool_dump(stdout, refcnt_pool); 1163 rte_ring_dump(stdout, 
refcnt_mbuf_ring); 1164 1165 ret = 0; 1166 1167 err: 1168 rte_mempool_free(refcnt_pool); 1169 rte_ring_free(refcnt_mbuf_ring); 1170 return ret; 1171 #else 1172 return 0; 1173 #endif 1174 } 1175 1176 #include <unistd.h> 1177 #include <sys/resource.h> 1178 #include <sys/time.h> 1179 #include <sys/wait.h> 1180 1181 /* use fork() to test that mbuf errors cause a panic */ 1182 static int 1183 verify_mbuf_check_panics(struct rte_mbuf *buf) 1184 { 1185 int pid; 1186 int status; 1187 1188 pid = fork(); 1189 1190 if (pid == 0) { 1191 struct rlimit rl; 1192 1193 /* No need to generate a coredump when panicking. */ 1194 rl.rlim_cur = rl.rlim_max = 0; 1195 setrlimit(RLIMIT_CORE, &rl); 1196 rte_mbuf_sanity_check(buf, 1); /* should panic */ 1197 exit(0); /* return normally if it doesn't panic */ 1198 } else if (pid < 0) { 1199 printf("Fork Failed\n"); 1200 return -1; 1201 } 1202 wait(&status); 1203 if(status == 0) 1204 return -1; 1205 1206 return 0; 1207 } 1208 1209 static int 1210 test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) 1211 { 1212 struct rte_mbuf *buf; 1213 struct rte_mbuf badbuf; 1214 1215 printf("Checking rte_mbuf_sanity_check for failure conditions\n"); 1216 1217 /* get a good mbuf to use to make copies */ 1218 buf = rte_pktmbuf_alloc(pktmbuf_pool); 1219 if (buf == NULL) 1220 return -1; 1221 1222 printf("Checking good mbuf initially\n"); 1223 if (verify_mbuf_check_panics(buf) != -1) 1224 return -1; 1225 1226 printf("Now checking for error conditions\n"); 1227 1228 if (verify_mbuf_check_panics(NULL)) { 1229 printf("Error with NULL mbuf test\n"); 1230 return -1; 1231 } 1232 1233 badbuf = *buf; 1234 badbuf.pool = NULL; 1235 if (verify_mbuf_check_panics(&badbuf)) { 1236 printf("Error with bad-pool mbuf test\n"); 1237 return -1; 1238 } 1239 1240 badbuf = *buf; 1241 badbuf.buf_iova = 0; 1242 if (verify_mbuf_check_panics(&badbuf)) { 1243 printf("Error with bad-physaddr mbuf test\n"); 1244 return -1; 1245 } 1246 1247 badbuf = *buf; 1248 badbuf.buf_addr = NULL; 1249 if (verify_mbuf_check_panics(&badbuf)) { 1250 printf("Error with bad-addr mbuf test\n"); 1251 return -1; 1252 } 1253 1254 badbuf = *buf; 1255 badbuf.refcnt = 0; 1256 if (verify_mbuf_check_panics(&badbuf)) { 1257 printf("Error with bad-refcnt(0) mbuf test\n"); 1258 return -1; 1259 } 1260 1261 badbuf = *buf; 1262 badbuf.refcnt = UINT16_MAX; 1263 if (verify_mbuf_check_panics(&badbuf)) { 1264 printf("Error with bad-refcnt(MAX) mbuf test\n"); 1265 return -1; 1266 } 1267 1268 return 0; 1269 } 1270 1271 static int 1272 test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len, 1273 int nb_segs) 1274 { 1275 1276 struct rte_mbuf *m = NULL, *mbuf = NULL; 1277 uint8_t *data; 1278 int data_len = 0; 1279 int remain; 1280 int seg, seg_len; 1281 int i; 1282 1283 if (pkt_len < 1) { 1284 printf("Packet size must be 1 or more (is %d)\n", pkt_len); 1285 return -1; 1286 } 1287 1288 if (nb_segs < 1) { 1289 printf("Number of segments must be 1 or more (is %d)\n", 1290 nb_segs); 1291 return -1; 1292 } 1293 1294 seg_len = pkt_len / nb_segs; 1295 if (seg_len == 0) 1296 seg_len = 1; 1297 1298 remain = pkt_len; 1299 1300 /* Create a chained source mbuf and fill it with generated data */ 1301 for (seg = 0; remain > 0; seg++) { 1302 1303 m = rte_pktmbuf_alloc(pktmbuf_pool); 1304 if (m == NULL) { 1305 printf("Cannot create segment for source mbuf"); 1306 goto fail; 1307 } 1308 1309 /* Make sure the tailroom is zeroed */ 1310 memset(rte_pktmbuf_mtod(m, uint8_t *), 0, 1311 rte_pktmbuf_tailroom(m)); 1312 1313 data_len = remain; 1314 if (data_len > seg_len) 1315 
data_len = seg_len; 1316 1317 data = (uint8_t *)rte_pktmbuf_append(m, data_len); 1318 if (data == NULL) { 1319 printf("Cannot append %d bytes to the mbuf\n", 1320 data_len); 1321 goto fail; 1322 } 1323 1324 for (i = 0; i < data_len; i++) 1325 data[i] = (seg * seg_len + i) % 0x0ff; 1326 1327 if (seg == 0) 1328 mbuf = m; 1329 else 1330 rte_pktmbuf_chain(mbuf, m); 1331 1332 remain -= data_len; 1333 } 1334 1335 /* Linearize the chain so all data is coalesced into the first segment */ 1336 if (rte_pktmbuf_linearize(mbuf)) { 1337 printf("Mbuf linearization failed\n"); 1338 goto fail; 1339 } 1340 1341 if (!rte_pktmbuf_is_contiguous(mbuf)) { 1342 printf("Source buffer should be contiguous after " 1343 "linearization\n"); 1344 goto fail; 1345 } 1346 1347 data = rte_pktmbuf_mtod(mbuf, uint8_t *); 1348 1349 for (i = 0; i < pkt_len; i++) 1350 if (data[i] != (i % 0x0ff)) { 1351 printf("Incorrect data in linearized mbuf\n"); 1352 goto fail; 1353 } 1354 1355 rte_pktmbuf_free(mbuf); 1356 return 0; 1357 1358 fail: 1359 if (mbuf) 1360 rte_pktmbuf_free(mbuf); 1361 return -1; 1362 } 1363 1364 static int 1365 test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool) 1366 { 1367 struct test_mbuf_array { 1368 int size; 1369 int nb_segs; 1370 } mbuf_array[] = { 1371 { 128, 1 }, 1372 { 64, 64 }, 1373 { 512, 10 }, 1374 { 250, 11 }, 1375 { 123, 8 }, 1376 }; 1377 unsigned int i; 1378 1379 printf("Test mbuf linearize API\n"); 1380 1381 for (i = 0; i < RTE_DIM(mbuf_array); i++) 1382 if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size, 1383 mbuf_array[i].nb_segs)) { 1384 printf("Test failed for %d, %d\n", mbuf_array[i].size, 1385 mbuf_array[i].nb_segs); 1386 return -1; 1387 } 1388 1389 return 0; 1390 } 1391 1392 /* 1393 * Helper function for test_tx_offload 1394 */ 1395 static inline void 1396 set_tx_offload(struct rte_mbuf *mb, uint64_t il2, uint64_t il3, uint64_t il4, 1397 uint64_t tso, uint64_t ol3, uint64_t ol2) 1398 { 1399 mb->l2_len = il2; 1400 mb->l3_len = il3; 1401 mb->l4_len = il4; 1402 mb->tso_segsz = tso; 1403 mb->outer_l3_len = ol3; 1404 mb->outer_l2_len = ol2; 1405 } 1406 1407 static int 1408 test_tx_offload(void) 1409 { 1410 struct rte_mbuf *mb; 1411 uint64_t tm, v1, v2; 1412 size_t sz; 1413 uint32_t i; 1414 1415 static volatile struct { 1416 uint16_t l2; 1417 uint16_t l3; 1418 uint16_t l4; 1419 uint16_t tso; 1420 } txof; 1421 1422 const uint32_t num = 0x10000; 1423 1424 txof.l2 = rte_rand() % (1 << RTE_MBUF_L2_LEN_BITS); 1425 txof.l3 = rte_rand() % (1 << RTE_MBUF_L3_LEN_BITS); 1426 txof.l4 = rte_rand() % (1 << RTE_MBUF_L4_LEN_BITS); 1427 txof.tso = rte_rand() % (1 << RTE_MBUF_TSO_SEGSZ_BITS); 1428 1429 printf("%s started, tx_offload = {\n" 1430 "\tl2_len=%#hx,\n" 1431 "\tl3_len=%#hx,\n" 1432 "\tl4_len=%#hx,\n" 1433 "\ttso_segsz=%#hx,\n" 1434 "\touter_l3_len=%#x,\n" 1435 "\touter_l2_len=%#x,\n" 1436 "};\n", 1437 __func__, 1438 txof.l2, txof.l3, txof.l4, txof.tso, txof.l3, txof.l2); 1439 1440 sz = sizeof(*mb) * num; 1441 mb = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE); 1442 if (mb == NULL) { 1443 printf("%s failed, out of memory\n", __func__); 1444 return -ENOMEM; 1445 } 1446 1447 memset(mb, 0, sz); 1448 tm = rte_rdtsc_precise(); 1449 1450 for (i = 0; i != num; i++) 1451 set_tx_offload(mb + i, txof.l2, txof.l3, txof.l4, 1452 txof.tso, txof.l3, txof.l2); 1453 1454 tm = rte_rdtsc_precise() - tm; 1455 printf("%s set tx_offload by bit-fields: %u iterations, %" 1456 PRIu64 " cycles, %#Lf cycles/iter\n", 1457 __func__, num, tm, (long double)tm / num); 1458 1459 v1 = mb[rte_rand() % num].tx_offload; 1460 1461 memset(mb, 0, 
sz); 1462 tm = rte_rdtsc_precise(); 1463 1464 for (i = 0; i != num; i++) 1465 mb[i].tx_offload = rte_mbuf_tx_offload(txof.l2, txof.l3, 1466 txof.l4, txof.tso, txof.l3, txof.l2, 0); 1467 1468 tm = rte_rdtsc_precise() - tm; 1469 printf("%s set raw tx_offload: %u iterations, %" 1470 PRIu64 " cycles, %#Lf cycles/iter\n", 1471 __func__, num, tm, (long double)tm / num); 1472 1473 v2 = mb[rte_rand() % num].tx_offload; 1474 1475 rte_free(mb); 1476 1477 printf("%s finished\n" 1478 "expected tx_offload value: 0x%" PRIx64 ";\n" 1479 "rte_mbuf_tx_offload value: 0x%" PRIx64 ";\n", 1480 __func__, v1, v2); 1481 1482 return (v1 == v2) ? 0 : -EINVAL; 1483 } 1484 1485 static int 1486 test_get_rx_ol_flag_list(void) 1487 { 1488 int len = 6, ret = 0; 1489 char buf[256] = ""; 1490 int buflen = 0; 1491 1492 /* Test case to check with null buffer */ 1493 ret = rte_get_rx_ol_flag_list(0, NULL, 0); 1494 if (ret != -1) 1495 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1496 1497 /* Test case to check with zero buffer len */ 1498 ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0); 1499 if (ret != -1) 1500 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1501 1502 buflen = strlen(buf); 1503 if (buflen != 0) 1504 GOTO_FAIL("%s buffer should be empty, received = %d\n", 1505 __func__, buflen); 1506 1507 /* Test case to check with reduced buffer len */ 1508 ret = rte_get_rx_ol_flag_list(0, buf, len); 1509 if (ret != -1) 1510 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1511 1512 buflen = strlen(buf); 1513 if (buflen != (len - 1)) 1514 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d," 1515 "received = %d\n", __func__, 1516 (len - 1), buflen); 1517 1518 /* Test case to check with zero mask value */ 1519 ret = rte_get_rx_ol_flag_list(0, buf, sizeof(buf)); 1520 if (ret != 0) 1521 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret); 1522 1523 buflen = strlen(buf); 1524 if (buflen == 0) 1525 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__, 1526 "non-zero, buffer should not be empty"); 1527 1528 /* Test case to check with valid mask value */ 1529 ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf)); 1530 if (ret != 0) 1531 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret); 1532 1533 buflen = strlen(buf); 1534 if (buflen == 0) 1535 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__, 1536 "non-zero, buffer should not be empty"); 1537 1538 return 0; 1539 fail: 1540 return -1; 1541 } 1542 1543 static int 1544 test_get_tx_ol_flag_list(void) 1545 { 1546 int len = 6, ret = 0; 1547 char buf[256] = ""; 1548 int buflen = 0; 1549 1550 /* Test case to check with null buffer */ 1551 ret = rte_get_tx_ol_flag_list(0, NULL, 0); 1552 if (ret != -1) 1553 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1554 1555 /* Test case to check with zero buffer len */ 1556 ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0); 1557 if (ret != -1) 1558 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1559 1560 buflen = strlen(buf); 1561 if (buflen != 0) { 1562 GOTO_FAIL("%s buffer should be empty, received = %d\n", 1563 __func__, buflen); 1564 } 1565 1566 /* Test case to check with reduced buffer len */ 1567 ret = rte_get_tx_ol_flag_list(0, buf, len); 1568 if (ret != -1) 1569 GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret); 1570 1571 buflen = strlen(buf); 1572 if (buflen != (len - 1)) 1573 GOTO_FAIL("%s invalid buffer length retrieved, expected: %d," 1574 "received = %d\n", __func__, 1575 (len - 
1), buflen); 1576 1577 /* Test case to check with zero mask value */ 1578 ret = rte_get_tx_ol_flag_list(0, buf, sizeof(buf)); 1579 if (ret != 0) 1580 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret); 1581 1582 buflen = strlen(buf); 1583 if (buflen == 0) 1584 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__, 1585 "non-zero, buffer should not be empty"); 1586 1587 /* Test case to check with valid mask value */ 1588 ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf)); 1589 if (ret != 0) 1590 GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret); 1591 1592 buflen = strlen(buf); 1593 if (buflen == 0) 1594 GOTO_FAIL("%s expected: %s, received length = 0\n", __func__, 1595 "non-zero, buffer should not be empty"); 1596 1597 return 0; 1598 fail: 1599 return -1; 1600 1601 } 1602 1603 struct flag_name { 1604 uint64_t flag; 1605 const char *name; 1606 }; 1607 1608 static int 1609 test_get_rx_ol_flag_name(void) 1610 { 1611 uint16_t i; 1612 const char *flag_str = NULL; 1613 const struct flag_name rx_flags[] = { 1614 VAL_NAME(PKT_RX_VLAN), 1615 VAL_NAME(PKT_RX_RSS_HASH), 1616 VAL_NAME(PKT_RX_FDIR), 1617 VAL_NAME(PKT_RX_L4_CKSUM_BAD), 1618 VAL_NAME(PKT_RX_L4_CKSUM_GOOD), 1619 VAL_NAME(PKT_RX_L4_CKSUM_NONE), 1620 VAL_NAME(PKT_RX_IP_CKSUM_BAD), 1621 VAL_NAME(PKT_RX_IP_CKSUM_GOOD), 1622 VAL_NAME(PKT_RX_IP_CKSUM_NONE), 1623 VAL_NAME(PKT_RX_OUTER_IP_CKSUM_BAD), 1624 VAL_NAME(PKT_RX_VLAN_STRIPPED), 1625 VAL_NAME(PKT_RX_IEEE1588_PTP), 1626 VAL_NAME(PKT_RX_IEEE1588_TMST), 1627 VAL_NAME(PKT_RX_FDIR_ID), 1628 VAL_NAME(PKT_RX_FDIR_FLX), 1629 VAL_NAME(PKT_RX_QINQ_STRIPPED), 1630 VAL_NAME(PKT_RX_LRO), 1631 VAL_NAME(PKT_RX_SEC_OFFLOAD), 1632 VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED), 1633 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD), 1634 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD), 1635 VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID), 1636 }; 1637 1638 /* Test case to check with valid flag */ 1639 for (i = 0; i < RTE_DIM(rx_flags); i++) { 1640 flag_str = rte_get_rx_ol_flag_name(rx_flags[i].flag); 1641 if (flag_str == NULL) 1642 GOTO_FAIL("%s: Expected flagname = %s; received null\n", 1643 __func__, rx_flags[i].name); 1644 if (strcmp(flag_str, rx_flags[i].name) != 0) 1645 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n", 1646 __func__, rx_flags[i].name, flag_str); 1647 } 1648 /* Test case to check with invalid flag */ 1649 flag_str = rte_get_rx_ol_flag_name(0); 1650 if (flag_str != NULL) { 1651 GOTO_FAIL("%s: Expected flag name = null; received = %s\n", 1652 __func__, flag_str); 1653 } 1654 1655 return 0; 1656 fail: 1657 return -1; 1658 } 1659 1660 static int 1661 test_get_tx_ol_flag_name(void) 1662 { 1663 uint16_t i; 1664 const char *flag_str = NULL; 1665 const struct flag_name tx_flags[] = { 1666 VAL_NAME(PKT_TX_VLAN), 1667 VAL_NAME(PKT_TX_IP_CKSUM), 1668 VAL_NAME(PKT_TX_TCP_CKSUM), 1669 VAL_NAME(PKT_TX_SCTP_CKSUM), 1670 VAL_NAME(PKT_TX_UDP_CKSUM), 1671 VAL_NAME(PKT_TX_IEEE1588_TMST), 1672 VAL_NAME(PKT_TX_TCP_SEG), 1673 VAL_NAME(PKT_TX_IPV4), 1674 VAL_NAME(PKT_TX_IPV6), 1675 VAL_NAME(PKT_TX_OUTER_IP_CKSUM), 1676 VAL_NAME(PKT_TX_OUTER_IPV4), 1677 VAL_NAME(PKT_TX_OUTER_IPV6), 1678 VAL_NAME(PKT_TX_TUNNEL_VXLAN), 1679 VAL_NAME(PKT_TX_TUNNEL_GRE), 1680 VAL_NAME(PKT_TX_TUNNEL_IPIP), 1681 VAL_NAME(PKT_TX_TUNNEL_GENEVE), 1682 VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP), 1683 VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE), 1684 VAL_NAME(PKT_TX_TUNNEL_IP), 1685 VAL_NAME(PKT_TX_TUNNEL_UDP), 1686 VAL_NAME(PKT_TX_QINQ), 1687 VAL_NAME(PKT_TX_MACSEC), 1688 VAL_NAME(PKT_TX_SEC_OFFLOAD), 1689 VAL_NAME(PKT_TX_UDP_SEG), 1690 
VAL_NAME(PKT_TX_OUTER_UDP_CKSUM), 1691 }; 1692 1693 /* Test case to check with valid flag */ 1694 for (i = 0; i < RTE_DIM(tx_flags); i++) { 1695 flag_str = rte_get_tx_ol_flag_name(tx_flags[i].flag); 1696 if (flag_str == NULL) 1697 GOTO_FAIL("%s: Expected flagname = %s; received null\n", 1698 __func__, tx_flags[i].name); 1699 if (strcmp(flag_str, tx_flags[i].name) != 0) 1700 GOTO_FAIL("%s: Expected flagname = %s; received = %s\n", 1701 __func__, tx_flags[i].name, flag_str); 1702 } 1703 /* Test case to check with invalid flag */ 1704 flag_str = rte_get_tx_ol_flag_name(0); 1705 if (flag_str != NULL) { 1706 GOTO_FAIL("%s: Expected flag name = null; received = %s\n", 1707 __func__, flag_str); 1708 } 1709 1710 return 0; 1711 fail: 1712 return -1; 1713 1714 } 1715 1716 static int 1717 test_mbuf_validate_tx_offload(const char *test_name, 1718 struct rte_mempool *pktmbuf_pool, 1719 uint64_t ol_flags, 1720 uint16_t segsize, 1721 int expected_retval) 1722 { 1723 struct rte_mbuf *m = NULL; 1724 int ret = 0; 1725 1726 /* alloc a mbuf and do sanity check */ 1727 m = rte_pktmbuf_alloc(pktmbuf_pool); 1728 if (m == NULL) 1729 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__); 1730 if (rte_pktmbuf_pkt_len(m) != 0) 1731 GOTO_FAIL("%s: Bad packet length\n", __func__); 1732 rte_mbuf_sanity_check(m, 0); 1733 m->ol_flags = ol_flags; 1734 m->tso_segsz = segsize; 1735 ret = rte_validate_tx_offload(m); 1736 if (ret != expected_retval) 1737 GOTO_FAIL("%s(%s): expected ret val: %d; received: %d\n", 1738 __func__, test_name, expected_retval, ret); 1739 rte_pktmbuf_free(m); 1740 m = NULL; 1741 return 0; 1742 fail: 1743 if (m) { 1744 rte_pktmbuf_free(m); 1745 m = NULL; 1746 } 1747 return -1; 1748 } 1749 1750 static int 1751 test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool) 1752 { 1753 /* test to validate tx offload flags */ 1754 uint64_t ol_flags = 0; 1755 1756 /* test to validate if IP checksum is counted only for IPV4 packet */ 1757 /* set both IP checksum and IPV6 flags */ 1758 ol_flags |= PKT_TX_IP_CKSUM; 1759 ol_flags |= PKT_TX_IPV6; 1760 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET", 1761 pktmbuf_pool, 1762 ol_flags, 0, -EINVAL) < 0) 1763 GOTO_FAIL("%s failed: IP cksum is set incorrect.\n", __func__); 1764 /* resetting ol_flags for next testcase */ 1765 ol_flags = 0; 1766 1767 /* test to validate if IP type is set when required */ 1768 ol_flags |= PKT_TX_L4_MASK; 1769 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET", 1770 pktmbuf_pool, 1771 ol_flags, 0, -EINVAL) < 0) 1772 GOTO_FAIL("%s failed: IP type is not set.\n", __func__); 1773 1774 /* test if IP type is set when TCP SEG is on */ 1775 ol_flags |= PKT_TX_TCP_SEG; 1776 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET", 1777 pktmbuf_pool, 1778 ol_flags, 0, -EINVAL) < 0) 1779 GOTO_FAIL("%s failed: IP type is not set.\n", __func__); 1780 1781 ol_flags = 0; 1782 /* test to confirm IP type (IPV4/IPV6) is set */ 1783 ol_flags = PKT_TX_L4_MASK; 1784 ol_flags |= PKT_TX_IPV6; 1785 if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET", 1786 pktmbuf_pool, 1787 ol_flags, 0, 0) < 0) 1788 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__); 1789 1790 ol_flags = 0; 1791 /* test to check TSO segment size is non-zero */ 1792 ol_flags |= PKT_TX_IPV4; 1793 ol_flags |= PKT_TX_TCP_SEG; 1794 /* set 0 tso segment size */ 1795 if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ", 1796 pktmbuf_pool, 1797 ol_flags, 0, -EINVAL) < 0) 1798 GOTO_FAIL("%s failed: tso segment size is null.\n", __func__); 1799 
1800 /* retain IPV4 and PKT_TX_TCP_SEG mask */ 1801 /* set valid tso segment size but IP CKSUM not set */ 1802 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET", 1803 pktmbuf_pool, 1804 ol_flags, 512, -EINVAL) < 0) 1805 GOTO_FAIL("%s failed: IP CKSUM is not set.\n", __func__); 1806 1807 /* test to validate if IP checksum is set for TSO capability */ 1808 /* retain IPV4, TCP_SEG, tso_seg size */ 1809 ol_flags |= PKT_TX_IP_CKSUM; 1810 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET", 1811 pktmbuf_pool, 1812 ol_flags, 512, 0) < 0) 1813 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__); 1814 1815 /* test to confirm TSO for IPV6 type */ 1816 ol_flags = 0; 1817 ol_flags |= PKT_TX_IPV6; 1818 ol_flags |= PKT_TX_TCP_SEG; 1819 if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET", 1820 pktmbuf_pool, 1821 ol_flags, 512, 0) < 0) 1822 GOTO_FAIL("%s failed: TSO req not met.\n", __func__); 1823 1824 ol_flags = 0; 1825 /* test if outer IP checksum set for non outer IPv4 packet */ 1826 ol_flags |= PKT_TX_IPV6; 1827 ol_flags |= PKT_TX_OUTER_IP_CKSUM; 1828 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET", 1829 pktmbuf_pool, 1830 ol_flags, 512, -EINVAL) < 0) 1831 GOTO_FAIL("%s failed: Outer IP cksum set.\n", __func__); 1832 1833 ol_flags = 0; 1834 /* test to confirm outer IP checksum is set for outer IPV4 packet */ 1835 ol_flags |= PKT_TX_OUTER_IP_CKSUM; 1836 ol_flags |= PKT_TX_OUTER_IPV4; 1837 if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET", 1838 pktmbuf_pool, 1839 ol_flags, 512, 0) < 0) 1840 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__); 1841 1842 ol_flags = 0; 1843 /* test to confirm if packets with no TX_OFFLOAD_MASK are skipped */ 1844 if (test_mbuf_validate_tx_offload("MBUF_TEST_OL_MASK_NOT_SET", 1845 pktmbuf_pool, 1846 ol_flags, 512, 0) < 0) 1847 GOTO_FAIL("%s failed: tx offload flag error.\n", __func__); 1848 return 0; 1849 fail: 1850 return -1; 1851 } 1852 1853 /* 1854 * Test for allocating a bulk of mbufs 1855 * define an array with positive sizes for mbufs allocations. 
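 * Each count is bulk-allocated from the given pool and every mbuf is
 * freed again before the next count is tried, so the pool is restored
 * between iterations.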
1856 */ 1857 static int 1858 test_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool) 1859 { 1860 int ret = 0; 1861 unsigned int idx, loop; 1862 unsigned int alloc_counts[] = { 1863 0, 1864 MEMPOOL_CACHE_SIZE - 1, 1865 MEMPOOL_CACHE_SIZE + 1, 1866 MEMPOOL_CACHE_SIZE * 1.5, 1867 MEMPOOL_CACHE_SIZE * 2, 1868 MEMPOOL_CACHE_SIZE * 2 - 1, 1869 MEMPOOL_CACHE_SIZE * 2 + 1, 1870 MEMPOOL_CACHE_SIZE, 1871 }; 1872 1873 /* allocate a large array of mbuf pointers */ 1874 struct rte_mbuf *mbufs[NB_MBUF] = { 0 }; 1875 for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) { 1876 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs, 1877 alloc_counts[idx]); 1878 if (ret == 0) { 1879 for (loop = 0; loop < alloc_counts[idx] && 1880 mbufs[loop] != NULL; loop++) 1881 rte_pktmbuf_free(mbufs[loop]); 1882 } else if (ret != 0) { 1883 printf("%s: Bulk alloc failed count(%u); ret val(%d)\n", 1884 __func__, alloc_counts[idx], ret); 1885 return -1; 1886 } 1887 } 1888 return 0; 1889 } 1890 1891 /* 1892 * Negative testing for allocating a bulk of mbufs 1893 */ 1894 static int 1895 test_neg_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool) 1896 { 1897 int ret = 0; 1898 unsigned int idx, loop; 1899 unsigned int neg_alloc_counts[] = { 1900 MEMPOOL_CACHE_SIZE - NB_MBUF, 1901 NB_MBUF + 1, 1902 NB_MBUF * 8, 1903 UINT_MAX 1904 }; 1905 struct rte_mbuf *mbufs[NB_MBUF * 8] = { 0 }; 1906 1907 for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) { 1908 ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs, 1909 neg_alloc_counts[idx]); 1910 if (ret == 0) { 1911 printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n", 1912 __func__, neg_alloc_counts[idx], ret); 1913 for (loop = 0; loop < neg_alloc_counts[idx] && 1914 mbufs[loop] != NULL; loop++) 1915 rte_pktmbuf_free(mbufs[loop]); 1916 return -1; 1917 } 1918 } 1919 return 0; 1920 } 1921 1922 /* 1923 * Test to read mbuf packet using rte_pktmbuf_read 1924 */ 1925 static int 1926 test_pktmbuf_read(struct rte_mempool *pktmbuf_pool) 1927 { 1928 struct rte_mbuf *m = NULL; 1929 char *data = NULL; 1930 const char *data_copy = NULL; 1931 int off; 1932 1933 /* alloc a mbuf */ 1934 m = rte_pktmbuf_alloc(pktmbuf_pool); 1935 if (m == NULL) 1936 GOTO_FAIL("%s: mbuf allocation failed!\n", __func__); 1937 if (rte_pktmbuf_pkt_len(m) != 0) 1938 GOTO_FAIL("%s: Bad packet length\n", __func__); 1939 rte_mbuf_sanity_check(m, 0); 1940 1941 data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2); 1942 if (data == NULL) 1943 GOTO_FAIL("%s: Cannot append data\n", __func__); 1944 if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN2) 1945 GOTO_FAIL("%s: Bad packet length\n", __func__); 1946 memset(data, 0xfe, MBUF_TEST_DATA_LEN2); 1947 1948 /* read the data from mbuf */ 1949 data_copy = rte_pktmbuf_read(m, 0, MBUF_TEST_DATA_LEN2, NULL); 1950 if (data_copy == NULL) 1951 GOTO_FAIL("%s: Error in reading data!\n", __func__); 1952 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) { 1953 if (data_copy[off] != (char)0xfe) 1954 GOTO_FAIL("Data corrupted at offset %u", off); 1955 } 1956 rte_pktmbuf_free(m); 1957 m = NULL; 1958 1959 return 0; 1960 fail: 1961 if (m) { 1962 rte_pktmbuf_free(m); 1963 m = NULL; 1964 } 1965 return -1; 1966 } 1967 1968 /* 1969 * Test to read mbuf packet data from offset 1970 */ 1971 static int 1972 test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool) 1973 { 1974 struct rte_mbuf *m = NULL; 1975 struct ether_hdr *hdr = NULL; 1976 char *data = NULL; 1977 const char *data_copy = NULL; 1978 unsigned int off; 1979 unsigned int hdr_len = sizeof(struct rte_ether_hdr); 1980 1981 /* alloc a mbuf */ 1982 m = 

/*
 * Test to read mbuf packet data from offset
 */
static int
test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_ether_hdr *hdr = NULL;
	char *data = NULL;
	const char *data_copy = NULL;
	unsigned int off;
	unsigned int hdr_len = sizeof(struct rte_ether_hdr);

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);

	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);

	/* prepend an ethernet header */
	hdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
	if (hdr == NULL)
		GOTO_FAIL("%s: Cannot prepend header\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != hdr_len)
		GOTO_FAIL("%s: Bad pkt length", __func__);
	if (rte_pktmbuf_data_len(m) != hdr_len)
		GOTO_FAIL("%s: Bad data length", __func__);
	memset(hdr, 0xde, hdr_len);

	/* read mbuf header info from 0 offset */
	data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading header!\n", __func__);
	for (off = 0; off < hdr_len; off++) {
		if (data_copy[off] != (char)0xde)
			GOTO_FAIL("Header info corrupted at offset %u", off);
	}

	/* append sample data after ethernet header */
	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data == NULL)
		GOTO_FAIL("%s: Cannot append data\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Bad data length\n", __func__);
	memset(data, 0xcc, MBUF_TEST_DATA_LEN2);

	/* read mbuf data after header info */
	data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading header data!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* partial reading of mbuf data */
	data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
			NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
		GOTO_FAIL("%s: Incorrect data length!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* read length greater than mbuf data_len */
	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
			NULL) != NULL)
		GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
				__func__);

	/* read length greater than mbuf pkt_len */
	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
			NULL) != NULL)
		GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
				__func__);

	/* read data of zero len from valid offset */
	data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("%s: Corrupted data content!\n", __func__);
	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
		if (data_copy[off] != (char)0xcc)
			GOTO_FAIL("Data corrupted at offset %u", off);
	}

	/* read data of zero length from zero offset */
	data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	/* check if the received address is the beginning of header info */
	if (hdr != (const struct rte_ether_hdr *)data_copy)
		GOTO_FAIL("%s: Corrupted data address!\n", __func__);

	/* read data of max length from valid offset */
	data_copy = rte_pktmbuf_read(m, hdr_len, UINT_MAX, NULL);
	if (data_copy == NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
	/* check if the received address is the beginning of data segment */
	if (data_copy != data)
		GOTO_FAIL("%s: Corrupted data address!\n", __func__);

	/* try to read from mbuf with max size offset */
	data_copy = rte_pktmbuf_read(m, UINT_MAX, 0, NULL);
	if (data_copy != NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);

	/* try to read from mbuf with max size offset and len */
	data_copy = rte_pktmbuf_read(m, UINT_MAX, UINT_MAX, NULL);
	if (data_copy != NULL)
		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);

	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));

	rte_pktmbuf_free(m);
	m = NULL;

	return 0;
fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}

struct test_case {
	unsigned int seg_count;
	unsigned int flags;
	uint32_t read_off;
	uint32_t read_len;
	unsigned int seg_lengths[MBUF_MAX_SEG];
};

/* create a mbuf with different sized segments
 * and fill with data [0x00 0x01 0x02 ...]
 */
static struct rte_mbuf *
create_packet(struct rte_mempool *pktmbuf_pool,
		struct test_case *test_data)
{
	uint16_t i, ret, seg, seg_len = 0;
	uint32_t last_index = 0;
	unsigned int seg_lengths[MBUF_MAX_SEG];
	unsigned int hdr_len;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *pkt_seg = NULL;
	char *hdr = NULL;
	char *data = NULL;

	memcpy(seg_lengths, test_data->seg_lengths,
			sizeof(unsigned int)*test_data->seg_count);
	for (seg = 0; seg < test_data->seg_count; seg++) {
		hdr_len = 0;
		seg_len = seg_lengths[seg];
		pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
		if (pkt_seg == NULL)
			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
		if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
			GOTO_FAIL("%s: Bad packet length\n", __func__);
		rte_mbuf_sanity_check(pkt_seg, 0);
		/* Add header only for the first segment */
		if (test_data->flags == MBUF_HEADER && seg == 0) {
			hdr_len = sizeof(struct rte_ether_hdr);
			/* prepend a header and fill with dummy data */
			hdr = (char *)rte_pktmbuf_prepend(pkt_seg, hdr_len);
			if (hdr == NULL)
				GOTO_FAIL("%s: Cannot prepend header\n",
						__func__);
			if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len)
				GOTO_FAIL("%s: Bad pkt length", __func__);
			if (rte_pktmbuf_data_len(pkt_seg) != hdr_len)
				GOTO_FAIL("%s: Bad data length", __func__);
			for (i = 0; i < hdr_len; i++)
				hdr[i] = (last_index + i) % 0xffff;
			last_index += hdr_len;
		}
		/* skip appending segment with 0 length */
		if (seg_len == 0)
			continue;
		data = rte_pktmbuf_append(pkt_seg, seg_len);
		if (data == NULL)
			GOTO_FAIL("%s: Cannot append data segment\n", __func__);
		if (rte_pktmbuf_pkt_len(pkt_seg) != hdr_len + seg_len)
			GOTO_FAIL("%s: Bad packet segment length: %d\n",
					__func__, rte_pktmbuf_pkt_len(pkt_seg));
		if (rte_pktmbuf_data_len(pkt_seg) != hdr_len + seg_len)
			GOTO_FAIL("%s: Bad data length\n", __func__);
		for (i = 0; i < seg_len; i++)
			data[i] = (last_index + i) % 0xffff;
		/* to fill continuous data from one seg to another */
		last_index += i;
		/* create chained mbufs */
		if (seg == 0)
			pkt = pkt_seg;
		else {
			ret = rte_pktmbuf_chain(pkt, pkt_seg);
			if (ret != 0)
				GOTO_FAIL("%s:FAIL: Chained mbuf creation %d\n",
						__func__, ret);
		}

		pkt_seg = pkt_seg->next;
	}
	return pkt;
fail:
	if (pkt != NULL) {
		rte_pktmbuf_free(pkt);
		pkt = NULL;
	}
	if (pkt_seg != NULL) {
		rte_pktmbuf_free(pkt_seg);
		pkt_seg = NULL;
	}
	return NULL;
}

static int
test_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m;
	struct test_case test_cases[] = {
		{
			.seg_lengths = { 100, 100, 100 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 300
		},
		{
			.seg_lengths = { 100, 125, 150 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 99,
			.read_len = 201
		},
		{
			.seg_lengths = { 100, 100 },
			.seg_count = 2,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 100
		},
		{
			.seg_lengths = { 100, 200 },
			.seg_count = 2,
			.flags = MBUF_HEADER,
			.read_off = sizeof(struct rte_ether_hdr),
			.read_len = 150
		},
		{
			.seg_lengths = { 1000, 100 },
			.seg_count = 2,
			.flags = MBUF_NO_HEADER,
			.read_off = 0,
			.read_len = 1000
		},
		{
			.seg_lengths = { 1024, 0, 100 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 100,
			.read_len = 1001
		},
		{
			.seg_lengths = { 1000, 1, 1000 },
			.seg_count = 3,
			.flags = MBUF_NO_HEADER,
			.read_off = 1000,
			.read_len = 2
		},
		{
			.seg_lengths = { MBUF_TEST_DATA_LEN,
					MBUF_TEST_DATA_LEN2,
					MBUF_TEST_DATA_LEN3, 800, 10 },
			.seg_count = 5,
			.flags = MBUF_NEG_TEST_READ,
			.read_off = 1000,
			.read_len = MBUF_DATA_SIZE
		},
	};

	uint32_t i, pos;
	const char *data_copy = NULL;
	char data_buf[MBUF_DATA_SIZE];

	memset(data_buf, 0, MBUF_DATA_SIZE);

	for (i = 0; i < RTE_DIM(test_cases); i++) {
		m = create_packet(pktmbuf_pool, &test_cases[i]);
		if (m == NULL)
			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);

		data_copy = rte_pktmbuf_read(m, test_cases[i].read_off,
				test_cases[i].read_len, data_buf);
		if (test_cases[i].flags == MBUF_NEG_TEST_READ) {
			if (data_copy != NULL)
				GOTO_FAIL("%s: mbuf data read should fail!\n",
						__func__);
			else {
				rte_pktmbuf_free(m);
				m = NULL;
				continue;
			}
		}
		if (data_copy == NULL)
			GOTO_FAIL("%s: Error in reading packet data!\n",
					__func__);
		for (pos = 0; pos < test_cases[i].read_len; pos++) {
			if (data_copy[pos] !=
					(char)((test_cases[i].read_off + pos)
						% 0xffff))
				GOTO_FAIL("Data corrupted at offset %u is %2X",
						pos, data_copy[pos]);
		}
		rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return 0;

fail:
	if (m != NULL) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	return -1;
}
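
/*
 * Minimal sketch (illustrative only, not called by the tests) of the
 * chaining pattern used by create_packet(): allocate two segments from the
 * same pool, give each some data and link the second one behind the first
 * with rte_pktmbuf_chain(). The function name is an assumption.
 */
static __rte_unused struct rte_mbuf *
sketch_two_seg_packet(struct rte_mempool *pool)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

	if (head == NULL || tail == NULL)
		goto err;
	if (rte_pktmbuf_append(head, MBUF_TEST_SEG_SIZE) == NULL ||
			rte_pktmbuf_append(tail, MBUF_TEST_SEG_SIZE) == NULL)
		goto err;
	/* after chaining, head->pkt_len and head->nb_segs cover both parts */
	if (rte_pktmbuf_chain(head, tail) != 0)
		goto err;
	return head;
err:
	rte_pktmbuf_free(head);
	rte_pktmbuf_free(tail);
	return NULL;
}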

/* Define a free call back function to be used for external buffer */
static void
ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
{
	void *ext_buf_addr = opaque;

	if (ext_buf_addr == NULL) {
		printf("External buffer address is invalid\n");
		return;
	}
	rte_free(ext_buf_addr);
	ext_buf_addr = NULL;
	printf("External buffer freed via callback\n");
}

/*
 * Test to initialize shared data in external buffer before attaching to mbuf
 * - Allocate mbuf with no data.
 * - Allocate an external buffer large enough to hold rte_mbuf_ext_shared_info.
 * - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
 * - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
 * - Clone another mbuf and attach the same external buffer to it.
 * - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
 */
static int
test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *clone = NULL;
	struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
	rte_iova_t buf_iova;
	void *ext_buf_addr = NULL;
	uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
			sizeof(struct rte_mbuf_ext_shared_info);

	/* alloc a mbuf */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);
	rte_mbuf_sanity_check(m, 0);

	ext_buf_addr = rte_malloc("External buffer", buf_len,
			RTE_CACHE_LINE_SIZE);
	if (ext_buf_addr == NULL)
		GOTO_FAIL("%s: External buffer allocation failed\n", __func__);

	ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
		ext_buf_free_callback_fn, ext_buf_addr);
	if (ret_shinfo == NULL)
		GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);

	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
		GOTO_FAIL("%s: External refcount is not 1\n", __func__);

	if (rte_mbuf_refcnt_read(m) != 1)
		GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);

	/* the buffer comes from rte_malloc(), not from a mempool element */
	buf_iova = rte_mem_virt2iova(ext_buf_addr);
	rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
		ret_shinfo);
	if (m->ol_flags != EXT_ATTACHED_MBUF)
		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
				__func__);

	/* allocate one more mbuf */
	clone = rte_pktmbuf_clone(m, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
	if (rte_pktmbuf_pkt_len(clone) != 0)
		GOTO_FAIL("%s: Bad packet length\n", __func__);

	/* attach the same external buffer to the cloned mbuf */
	rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
			ret_shinfo);
	if (clone->ol_flags != EXT_ATTACHED_MBUF)
		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
				__func__);

	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);

	/* test to manually update ext_buf_ref_cnt from 2 to 3 */
	rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
		GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);

	/* reset the ext_refcnt before freeing the external buffer */
	rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
		GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);

	/* detach the external buffer from mbufs */
	rte_pktmbuf_detach_extbuf(m);
	/* check if ref cnt is decremented */
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);

	rte_pktmbuf_detach_extbuf(clone);
	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);

	rte_pktmbuf_free(m);
	m = NULL;
	rte_pktmbuf_free(clone);
	clone = NULL;

	return 0;

fail:
	if (m) {
		rte_pktmbuf_free(m);
		m = NULL;
	}
	if (clone) {
		rte_pktmbuf_free(clone);
		clone = NULL;
	}
	if (ext_buf_addr != NULL) {
		rte_free(ext_buf_addr);
		ext_buf_addr = NULL;
	}
	return -1;
}
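
/*
 * Minimal lifecycle sketch (illustrative only, never called): reserve room
 * for the shared info at the tail of an rte_malloc()'ed buffer, attach it
 * to a fresh mbuf and detach it again; the detach drops the external
 * refcount from 1 to 0, so ext_buf_free_callback_fn() above releases the
 * buffer. The function name and the use of rte_mem_virt2iova() for the
 * IOVA are assumptions.
 */
static __rte_unused int
sketch_extbuf_lifecycle(struct rte_mempool *pool)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	struct rte_mbuf *m;
	uint16_t len = EXT_BUF_TEST_DATA_LEN +
			sizeof(struct rte_mbuf_ext_shared_info);
	void *buf = rte_malloc("sketch_ext_buf", len, RTE_CACHE_LINE_SIZE);

	if (buf == NULL)
		return -1;
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &len,
		ext_buf_free_callback_fn, buf);
	m = rte_pktmbuf_alloc(pool);
	if (shinfo == NULL || m == NULL) {
		rte_pktmbuf_free(m);
		rte_free(buf);
		return -1;
	}
	rte_pktmbuf_attach_extbuf(m, buf, rte_mem_virt2iova(buf), len, shinfo);
	rte_pktmbuf_detach_extbuf(m); /* refcount 1 -> 0: callback frees buf */
	rte_pktmbuf_free(m);
	return 0;
}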
GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n", 2514 __func__); 2515 2516 /* test for allocating a bulk of mbufs with various sizes */ 2517 if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0) 2518 GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)" 2519 " failed\n", __func__); 2520 2521 /* test to read mbuf packet */ 2522 if (test_pktmbuf_read(pinned_pool) < 0) 2523 GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n", 2524 __func__); 2525 2526 /* test to read mbuf packet from offset */ 2527 if (test_pktmbuf_read_from_offset(pinned_pool) < 0) 2528 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)" 2529 " failed\n", __func__); 2530 2531 /* test to read data from chain of mbufs with data segments */ 2532 if (test_pktmbuf_read_from_chain(pinned_pool) < 0) 2533 GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)" 2534 " failed\n", __func__); 2535 2536 RTE_SET_USED(std_pool); 2537 rte_mempool_free(pinned_pool); 2538 rte_memzone_free(mz); 2539 return 0; 2540 2541 fail: 2542 rte_mempool_free(pinned_pool); 2543 rte_memzone_free(mz); 2544 return -1; 2545 } 2546 2547 static int 2548 test_mbuf_dyn(struct rte_mempool *pktmbuf_pool) 2549 { 2550 const struct rte_mbuf_dynfield dynfield = { 2551 .name = "test-dynfield", 2552 .size = sizeof(uint8_t), 2553 .align = __alignof__(uint8_t), 2554 .flags = 0, 2555 }; 2556 const struct rte_mbuf_dynfield dynfield2 = { 2557 .name = "test-dynfield2", 2558 .size = sizeof(uint16_t), 2559 .align = __alignof__(uint16_t), 2560 .flags = 0, 2561 }; 2562 const struct rte_mbuf_dynfield dynfield3 = { 2563 .name = "test-dynfield3", 2564 .size = sizeof(uint8_t), 2565 .align = __alignof__(uint8_t), 2566 .flags = 0, 2567 }; 2568 const struct rte_mbuf_dynfield dynfield_fail_big = { 2569 .name = "test-dynfield-fail-big", 2570 .size = 256, 2571 .align = 1, 2572 .flags = 0, 2573 }; 2574 const struct rte_mbuf_dynfield dynfield_fail_align = { 2575 .name = "test-dynfield-fail-align", 2576 .size = 1, 2577 .align = 3, 2578 .flags = 0, 2579 }; 2580 const struct rte_mbuf_dynflag dynflag = { 2581 .name = "test-dynflag", 2582 .flags = 0, 2583 }; 2584 const struct rte_mbuf_dynflag dynflag2 = { 2585 .name = "test-dynflag2", 2586 .flags = 0, 2587 }; 2588 const struct rte_mbuf_dynflag dynflag3 = { 2589 .name = "test-dynflag3", 2590 .flags = 0, 2591 }; 2592 struct rte_mbuf *m = NULL; 2593 int offset, offset2, offset3; 2594 int flag, flag2, flag3; 2595 int ret; 2596 2597 printf("Test mbuf dynamic fields and flags\n"); 2598 rte_mbuf_dyn_dump(stdout); 2599 2600 offset = rte_mbuf_dynfield_register(&dynfield); 2601 if (offset == -1) 2602 GOTO_FAIL("failed to register dynamic field, offset=%d: %s", 2603 offset, strerror(errno)); 2604 2605 ret = rte_mbuf_dynfield_register(&dynfield); 2606 if (ret != offset) 2607 GOTO_FAIL("failed to lookup dynamic field, ret=%d: %s", 2608 ret, strerror(errno)); 2609 2610 offset2 = rte_mbuf_dynfield_register(&dynfield2); 2611 if (offset2 == -1 || offset2 == offset || (offset2 & 1)) 2612 GOTO_FAIL("failed to register dynamic field 2, offset2=%d: %s", 2613 offset2, strerror(errno)); 2614 2615 offset3 = rte_mbuf_dynfield_register_offset(&dynfield3, 2616 offsetof(struct rte_mbuf, dynfield1[1])); 2617 if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) { 2618 if (rte_errno == EBUSY) 2619 printf("mbuf test error skipped: dynfield is busy\n"); 2620 else 2621 GOTO_FAIL("failed to register dynamic field 3, offset=" 2622 "%d: %s", offset3, strerror(errno)); 2623 } 2624 2625 printf("dynfield: offset=%d, offset2=%d, offset3=%d\n", 2626 offset, offset2, 

	ret = rte_mbuf_dynfield_register(&dynfield_fail_big);
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (too big)");

	ret = rte_mbuf_dynfield_register(&dynfield_fail_align);
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (bad alignment)");

	ret = rte_mbuf_dynfield_register_offset(&dynfield_fail_align,
				offsetof(struct rte_mbuf, ol_flags));
	if (ret != -1)
		GOTO_FAIL("dynamic field creation should fail (not avail)");

	flag = rte_mbuf_dynflag_register(&dynflag);
	if (flag == -1)
		GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
				flag, strerror(errno));

	ret = rte_mbuf_dynflag_register(&dynflag);
	if (ret != flag)
		GOTO_FAIL("failed to lookup dynamic flag, ret=%d: %s",
				ret, strerror(errno));

	flag2 = rte_mbuf_dynflag_register(&dynflag2);
	if (flag2 == -1 || flag2 == flag)
		GOTO_FAIL("failed to register dynamic flag 2, flag2=%d: %s",
				flag2, strerror(errno));

	flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
				rte_bsf64(PKT_LAST_FREE));
	if (flag3 != rte_bsf64(PKT_LAST_FREE))
		GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
				flag3, strerror(errno));

	printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);

	/* set, get dynamic field */
	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");

	*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) = 1;
	if (*RTE_MBUF_DYNFIELD(m, offset, uint8_t *) != 1)
		GOTO_FAIL("failed to read dynamic field");
	*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) = 1000;
	if (*RTE_MBUF_DYNFIELD(m, offset2, uint16_t *) != 1000)
		GOTO_FAIL("failed to read dynamic field");

	/* set a dynamic flag */
	m->ol_flags |= (1ULL << flag);

	rte_mbuf_dyn_dump(stdout);
	rte_pktmbuf_free(m);
	return 0;
fail:
	rte_pktmbuf_free(m);
	return -1;
}
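
/*
 * Minimal sketch (illustrative only, never called) of the dynamic-field
 * pattern exercised by test_mbuf_dyn(): register a field once, then store
 * per-packet metadata through the returned offset. The field name and the
 * stored value are hypothetical.
 */
static __rte_unused int
sketch_dynfield_store(struct rte_mbuf *m)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "sketch-dynfield-u32",
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
		.flags = 0,
	};
	int off = rte_mbuf_dynfield_register(&desc);

	if (off < 0)
		return -1; /* no room left for dynamic fields in the mbuf */
	*RTE_MBUF_DYNFIELD(m, off, uint32_t *) = MAGIC_DATA;
	return 0;
}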
failed\n"); 2739 goto err; 2740 } 2741 2742 /* test that the pointer to the data on a packet mbuf is set properly */ 2743 if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) { 2744 printf("test_pktmbuf_pool_ptr() failed\n"); 2745 goto err; 2746 } 2747 2748 /* test data manipulation in mbuf */ 2749 if (test_one_pktmbuf(pktmbuf_pool) < 0) { 2750 printf("test_one_mbuf() failed\n"); 2751 goto err; 2752 } 2753 2754 2755 /* 2756 * do it another time, to check that allocation reinitialize 2757 * the mbuf correctly 2758 */ 2759 if (test_one_pktmbuf(pktmbuf_pool) < 0) { 2760 printf("test_one_mbuf() failed (2)\n"); 2761 goto err; 2762 } 2763 2764 if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) { 2765 printf("test_pktmbuf_with_non_ascii_data() failed\n"); 2766 goto err; 2767 } 2768 2769 /* test free pktmbuf segment one by one */ 2770 if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) { 2771 printf("test_pktmbuf_free_segment() failed.\n"); 2772 goto err; 2773 } 2774 2775 if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) { 2776 printf("testclone_and_testupdate() failed \n"); 2777 goto err; 2778 } 2779 2780 if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) { 2781 printf("test_pktmbuf_copy() failed\n"); 2782 goto err; 2783 } 2784 2785 if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) { 2786 printf("test_attach_from_different_pool() failed\n"); 2787 goto err; 2788 } 2789 2790 if (test_refcnt_mbuf() < 0) { 2791 printf("test_refcnt_mbuf() failed \n"); 2792 goto err; 2793 } 2794 2795 if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) { 2796 printf("test_failing_mbuf_sanity_check() failed\n"); 2797 goto err; 2798 } 2799 2800 if (test_mbuf_linearize_check(pktmbuf_pool) < 0) { 2801 printf("test_mbuf_linearize_check() failed\n"); 2802 goto err; 2803 } 2804 2805 if (test_tx_offload() < 0) { 2806 printf("test_tx_offload() failed\n"); 2807 goto err; 2808 } 2809 2810 if (test_get_rx_ol_flag_list() < 0) { 2811 printf("test_rte_get_rx_ol_flag_list() failed\n"); 2812 goto err; 2813 } 2814 2815 if (test_get_tx_ol_flag_list() < 0) { 2816 printf("test_rte_get_tx_ol_flag_list() failed\n"); 2817 goto err; 2818 } 2819 2820 if (test_get_rx_ol_flag_name() < 0) { 2821 printf("test_rte_get_rx_ol_flag_name() failed\n"); 2822 goto err; 2823 } 2824 2825 if (test_get_tx_ol_flag_name() < 0) { 2826 printf("test_rte_get_tx_ol_flag_name() failed\n"); 2827 goto err; 2828 } 2829 2830 if (test_mbuf_validate_tx_offload_one(pktmbuf_pool) < 0) { 2831 printf("test_mbuf_validate_tx_offload_one() failed\n"); 2832 goto err; 2833 } 2834 2835 /* test for allocating a bulk of mbufs with various sizes */ 2836 if (test_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) { 2837 printf("test_rte_pktmbuf_alloc_bulk() failed\n"); 2838 goto err; 2839 } 2840 2841 /* test for allocating a bulk of mbufs with various sizes */ 2842 if (test_neg_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) { 2843 printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n"); 2844 goto err; 2845 } 2846 2847 /* test to read mbuf packet */ 2848 if (test_pktmbuf_read(pktmbuf_pool) < 0) { 2849 printf("test_rte_pktmbuf_read() failed\n"); 2850 goto err; 2851 } 2852 2853 /* test to read mbuf packet from offset */ 2854 if (test_pktmbuf_read_from_offset(pktmbuf_pool) < 0) { 2855 printf("test_rte_pktmbuf_read_from_offset() failed\n"); 2856 goto err; 2857 } 2858 2859 /* test to read data from chain of mbufs with data segments */ 2860 if (test_pktmbuf_read_from_chain(pktmbuf_pool) < 0) { 2861 printf("test_rte_pktmbuf_read_from_chain() failed\n"); 2862 goto err; 2863 } 2864 

	/* test to initialize shared info at the end of external buffer */
	if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
		goto err;
	}

	/* test the mbuf pool with pinned external data buffers */
	if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
		printf("test_pktmbuf_ext_pinned_buffer() failed\n");
		goto err;
	}

	ret = 0;
err:
	rte_mempool_free(pktmbuf_pool);
	rte_mempool_free(pktmbuf_pool2);
	return ret;
}
#undef GOTO_FAIL

REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);