/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mbuf.h>

#include "test.h"

/*
 * Mempool
 * =======
 *
 * Basic tests: done on one core with and without cache:
 *
 * - Get one object, put one object
 * - Get two objects, put two objects
 * - Get all objects, test that their content is not modified and
 *   put them back in the pool.
 */

#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 16
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)

#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do {						\
		LOG_ERR();					\
		return -1;					\
	} while (0)
#define GOTO_ERR(var, label) do {				\
		LOG_ERR();					\
		var = -1;					\
		goto label;					\
	} while (0)

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

/* basic tests (done on one core) */
static int
test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
	uint32_t *objnum;
	void **objtable;
	void *obj, *obj2;
	char *obj_data;
	int ret = 0;
	unsigned i, j;
	int offset;
	struct rte_mempool_cache *cache;

	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
						 SOCKET_ID_ANY);
		if (cache == NULL)
			RET_ERR();
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	}

	/* dump the mempool status */
	rte_mempool_dump(stdout, mp);

	printf("get an object\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	rte_mempool_dump(stdout, mp);

	/* tests that improve coverage */
	printf("get object count\n");
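	/*
	 * Objects held in a user-owned cache created with
	 * rte_mempool_cache_create() are not registered with the mempool, so
	 * rte_mempool_avail_count() does not include them; the cache length
	 * is added back before comparing against the expected count below.
	 */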
	/* We have to count the extra caches, one in this case. */
	offset = use_external_cache ? 1 * cache->len : 0;
	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
		GOTO_ERR(ret, out);

	printf("get private data\n");
	if (rte_mempool_get_priv(mp) != (char *)mp +
			MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
		GOTO_ERR(ret, out);

#ifndef RTE_EXEC_ENV_FREEBSD /* rte_mem_virt2iova() not supported on bsd */
	printf("get physical address of an object\n");
	if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj))
		GOTO_ERR(ret, out);
#endif

	printf("put the object back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_dump(stdout, mp);

	printf("get 2 objects\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) {
		rte_mempool_generic_put(mp, &obj, 1, cache);
		GOTO_ERR(ret, out);
	}
	rte_mempool_dump(stdout, mp);

	printf("put the objects back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_generic_put(mp, &obj2, 1, cache);
	rte_mempool_dump(stdout, mp);

	/*
	 * get many objects: we cannot get them all because the cache
	 * on other cores may not be empty.
	 */
	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
	if (objtable == NULL)
		GOTO_ERR(ret, out);

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0)
			break;
	}

	/*
	 * for each object, check that its content was not modified,
	 * and put objects back in pool
	 */
	while (i--) {
		obj = objtable[i];
		obj_data = obj;
		objnum = obj;
		if (*objnum > MEMPOOL_SIZE) {
			printf("bad object number(%d)\n", *objnum);
			ret = -1;
			break;
		}
		for (j = sizeof(*objnum); j < mp->elt_size; j++) {
			if (obj_data[j] != 0)
				ret = -1;
		}

		rte_mempool_generic_put(mp, &objtable[i], 1, cache);
	}

	free(objtable);
	if (ret == -1)
		printf("objects were modified!\n");

out:
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}

	return ret;
}

static int test_mempool_creation_with_exceeded_cache_size(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);
		RET_ERR();
	}

	return 0;
}

static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];

/*
 * single producer function
 */
static int test_mempool_single_producer(void)
{
	unsigned int i;
	void *obj = NULL;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 4;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop producing */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL != scsp_obj_table[i]) {
				obj = scsp_obj_table[i];
				break;
			}
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_from_obj(obj) != mp_spsc) {
			printf("obj not owned by this mempool\n");
			RET_ERR();
		}
		rte_mempool_put(mp_spsc, obj);
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = NULL;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}
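/*
 * The producer and consumer below hand objects to each other through
 * scsp_obj_table: the consumer gets objects from mp_spsc and parks them in
 * free slots of the table, while the producer picks up occupied slots and
 * returns those objects to the pool. scsp_spinlock protects only the table;
 * the pool itself is exercised through its single-producer/single-consumer
 * paths.
 */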
/*
 * single consumer function
 */
static int test_mempool_single_consumer(void)
{
	unsigned int i;
	void *obj;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 8;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop consuming */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL == scsp_obj_table[i])
				break;
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_get(mp_spsc, &obj) < 0)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = obj;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}

/*
 * launch function for the mempool test based on a single consumer and a
 * single producer; it can run on one lcore only
 */
static int
test_mempool_launch_single_consumer(__rte_unused void *arg)
{
	return test_mempool_single_consumer();
}

static void
my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
{
	printf("mempool name is %s\n", mp->name);
	/* nothing else to do here */
}

/*
 * Test the mempool operations based on a single producer and a single
 * consumer.
 */
static int
test_mempool_sp_sc(void)
{
	int ret = 0;
	unsigned lcore_id = rte_lcore_id();
	unsigned lcore_next;

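	/*
	 * MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET restrict the pool to the
	 * single-producer enqueue and single-consumer dequeue paths of the
	 * underlying ring, matching the one-producer/one-consumer pattern of
	 * this test; MEMPOOL_F_NO_CACHE_ALIGN stores objects without
	 * cache-line alignment padding.
	 */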
	/* create a mempool with single producer/consumer ring */
	if (mp_spsc == NULL) {
		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
			MEMPOOL_ELT_SIZE, 0, 0,
			my_mp_init, NULL,
			my_obj_init, NULL,
			SOCKET_ID_ANY,
			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
			MEMPOOL_F_SC_GET);
		if (mp_spsc == NULL)
			RET_ERR();
	}
	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
		printf("Cannot lookup mempool from its name\n");
		ret = -1;
		goto err;
	}
	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
	if (lcore_next >= RTE_MAX_LCORE) {
		ret = -1;
		goto err;
	}
	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
		ret = -1;
		goto err;
	}
	rte_spinlock_init(&scsp_spinlock);
	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
		lcore_next);
	if (test_mempool_single_producer() < 0)
		ret = -1;

	if (rte_eal_wait_lcore(lcore_next) < 0)
		ret = -1;

err:
	rte_mempool_free(mp_spsc);
	mp_spsc = NULL;

	return ret;
}

/*
 * Some more basic tests of the mempool.
 */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
	unsigned i;
	void **obj;
	void *err_obj;
	int ret = -1;

	if (mp == NULL)
		return ret;

	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
		sizeof(void *), 0);
	if (obj == NULL) {
		printf("test_mempool_basic_ex: rte_calloc failed\n");
		return ret;
	}
	printf("test_mempool_basic_ex: mempool (%s) now has %u elements in use\n",
		mp->name, rte_mempool_in_use_count(mp));
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_get(mp, &obj[i]) < 0) {
			printf("test_mempool_basic_ex: failed to get object [%u]\n",
				i);
			goto fail_mp_basic_ex;
		}
	}
	if (rte_mempool_get(mp, &err_obj) == 0) {
		printf("test_mempool_basic_ex get an impossible obj\n");
		goto fail_mp_basic_ex;
	}
	printf("number: %u\n", i);
	if (rte_mempool_empty(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be empty\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++)
		rte_mempool_put(mp, obj[i]);

	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	ret = 0;

fail_mp_basic_ex:
	if (obj != NULL)
		rte_free((void *)obj);

	return ret;
}

static int
test_mempool_same_name_twice_creation(void)
{
	struct rte_mempool *mp_tc, *mp_tc2;

	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc == NULL)
		RET_ERR();

	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc2 != NULL) {
		rte_mempool_free(mp_tc);
		rte_mempool_free(mp_tc2);
		RET_ERR();
	}

	rte_mempool_free(mp_tc);
	return 0;
}

static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
	printf("\t%s\n", mp->name);
}

struct mp_data {
	int16_t ret;
};

static void
test_mp_mem_init(struct rte_mempool *mp,
		void *opaque,
		__rte_unused struct rte_mempool_memhdr *memhdr,
		__rte_unused unsigned int mem_idx)
{
	struct mp_data *data = opaque;

	if (mp == NULL) {
		data->ret = -1;
		return;
	}
	/* nothing else to do here */
	data->ret = 0;
}

static int
test_mempool(void)
{
	int ret = -1;
	uint32_t nb_objs = 0;
	uint32_t nb_mem_chunks = 0;
	struct rte_mempool *mp_cache = NULL;
	struct rte_mempool *mp_nocache = NULL;
	struct rte_mempool *mp_stack_anon = NULL;
	struct rte_mempool *mp_stack_mempool_iter = NULL;
	struct rte_mempool *mp_stack = NULL;
	struct rte_mempool *default_pool = NULL;
	struct mp_data cb_arg = {
		.ret = -1
	};
	const char *default_pool_ops = rte_mbuf_best_mempool_ops();

	/* create a mempool (without cache) */
	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_nocache == NULL) {
		printf("cannot allocate mp_nocache mempool\n");
		GOTO_ERR(ret, err);
	}

	/* create a mempool (with cache) */
	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cache == NULL) {
		printf("cannot allocate mp_cache mempool\n");
		GOTO_ERR(ret, err);
	}

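	/*
	 * The pools below exercise the two-step creation API. A rough sketch
	 * of the pattern (error handling omitted):
	 *
	 *	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
	 *		priv_size, socket_id, flags);
	 *	rte_mempool_set_ops_byname(mp, ops_name, NULL);
	 *	rte_mempool_populate_default(mp);
	 *
	 * For the anonymous-memory pool, rte_mempool_populate_anon() is
	 * called instead of selecting an ops layer and populating with the
	 * default memory allocator.
	 */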
	/* create an empty mempool */
	mp_stack_anon = rte_mempool_create_empty("test_stack_anon",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (mp_stack_anon == NULL)
		GOTO_ERR(ret, err);

	/* populate an empty mempool */
	ret = rte_mempool_populate_anon(mp_stack_anon);
	printf("%s ret = %d\n", __func__, ret);
	if (ret < 0)
		GOTO_ERR(ret, err);

	/* Try to populate when already populated */
	ret = rte_mempool_populate_anon(mp_stack_anon);
	if (ret != 0)
		GOTO_ERR(ret, err);

	/* create a mempool */
	mp_stack_mempool_iter = rte_mempool_create("test_iter_obj",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_stack_mempool_iter == NULL)
		GOTO_ERR(ret, err);

	/* test to initialize mempool objects and memory */
	nb_objs = rte_mempool_obj_iter(mp_stack_mempool_iter, my_obj_init,
		NULL);
	if (nb_objs == 0)
		GOTO_ERR(ret, err);

	nb_mem_chunks = rte_mempool_mem_iter(mp_stack_mempool_iter,
		test_mp_mem_init, &cb_arg);
	if (nb_mem_chunks == 0 || cb_arg.ret < 0)
		GOTO_ERR(ret, err);

	/* create a mempool with an external handler */
	mp_stack = rte_mempool_create_empty("test_stack",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (mp_stack == NULL) {
		printf("cannot allocate mp_stack mempool\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
		printf("cannot set stack handler\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_populate_default(mp_stack) < 0) {
		printf("cannot populate mp_stack mempool\n");
		GOTO_ERR(ret, err);
	}
	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);

	/* Create a mempool based on the default handler */
	printf("Testing %s mempool handler\n", default_pool_ops);
	default_pool = rte_mempool_create_empty("default_pool",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (default_pool == NULL) {
		printf("cannot allocate default mempool\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_set_ops_byname(default_pool,
				default_pool_ops, NULL) < 0) {
		printf("cannot set %s handler\n", default_pool_ops);
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_populate_default(default_pool) < 0) {
		printf("cannot populate %s mempool\n", default_pool_ops);
		GOTO_ERR(ret, err);
	}
	rte_mempool_obj_iter(default_pool, my_obj_init, NULL);

	/* retrieve the mempool from its name */
	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
		printf("Cannot lookup mempool from its name\n");
		GOTO_ERR(ret, err);
	}

	printf("Walk through the mempools:\n");
	rte_mempool_walk(walk_cb, NULL);

	rte_mempool_list_dump(stdout);

	/* basic tests without cache */
	if (test_mempool_basic(mp_nocache, 0) < 0)
		GOTO_ERR(ret, err);

	/* basic tests with cache */
	if (test_mempool_basic(mp_cache, 0) < 0)
		GOTO_ERR(ret, err);

	/* basic tests with user-owned cache */
	if (test_mempool_basic(mp_nocache, 1) < 0)
		GOTO_ERR(ret, err);

	/* more basic tests without cache */
	if (test_mempool_basic_ex(mp_nocache) < 0)
		GOTO_ERR(ret, err);

	/* mempool operation test based on single producer and single consumer */
	if (test_mempool_sp_sc() < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_creation_with_exceeded_cache_size() < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_same_name_twice_creation() < 0)
		GOTO_ERR(ret, err);

	/* test the stack handler */
	if (test_mempool_basic(mp_stack, 1) < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_basic(default_pool, 1) < 0)
		GOTO_ERR(ret, err);

	rte_mempool_list_dump(stdout);

	ret = 0;

err:
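	/*
	 * rte_mempool_free() is a no-op on NULL pointers, so every pool can
	 * be released unconditionally regardless of how far setup got.
	 */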
	rte_mempool_free(mp_nocache);
	rte_mempool_free(mp_cache);
	rte_mempool_free(mp_stack_anon);
	rte_mempool_free(mp_stack_mempool_iter);
	rte_mempool_free(mp_stack);
	rte_mempool_free(default_pool);

	return ret;
}

REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);