/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ring_elem.h>
#include <rte_random.h>
#include <rte_errno.h>
#include <rte_hexdump.h>

#include "test.h"
#include "test_ring.h"

/*
 * Ring
 * ====
 *
 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
 *    legacy/custom element size (4B, 8B, 16B, 20B) APIs.
 *    Some tests incorporate unaligned addresses for objects.
 *    The enqueued/dequeued data is validated for correctness.
 *
 * #. Performance tests are in test_ring_perf.c
 */

#define RING_SIZE 4096
#define MAX_BULK 32

/*
 * Validate the return value of test cases and print details of the
 * ring if validation fails
 *
 * @param exp
 *   Expression to validate return value.
 * @param r
 *   A pointer to the ring structure.
 * @param errst
 *   Statement executed on failure, e.g. 'goto fail'.
 */
#define TEST_RING_VERIFY(exp, r, errst) do { \
	if (!(exp)) { \
		printf("error at %s:%d\tcondition " #exp " failed\n", \
		    __func__, __LINE__); \
		rte_ring_dump(stdout, (r)); \
		errst; \
	} \
} while (0)

/* Number of random-shift iterations in the full/empty test */
#define TEST_RING_FULL_EMPTY_ITER	8

/* Element sizes exercised by each test; -1 selects the legacy (void *) API */
static const int esize[] = {-1, 4, 8, 16, 20};

/* Wrappers around the zero-copy APIs. The wrappers match
 * the normal enqueue/dequeue API declarations, so they can sit in the
 * same function-pointer table as the regular enqueue/dequeue functions.
 * Each wrapper reserves space (start), copies the data, then commits
 * (finish) only when the reservation succeeded.
 */
static unsigned int
test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
	unsigned int n, unsigned int *free_space)
{
	uint32_t ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
	if (ret != 0) {
		/* Copy the data to the ring */
		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
		rte_ring_enqueue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *free_space)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
				&zcd, free_space);
	if (ret != 0) {
		/* Copy the data to the ring */
		test_ring_copy_to(&zcd, obj_table, esize, ret);
		rte_ring_enqueue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
	unsigned int n, unsigned int *free_space)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
	if (ret != 0) {
		/* Copy the data to the ring */
		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
		rte_ring_enqueue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *free_space)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
				&zcd, free_space);
	if (ret != 0) {
		/* Copy the data to the ring */
		test_ring_copy_to(&zcd, obj_table, esize, ret);
		rte_ring_enqueue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
	unsigned int n, unsigned int *available)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
	if (ret != 0) {
		/* Copy the data from the ring */
		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
		rte_ring_dequeue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *available)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
				&zcd, available);
	if (ret != 0) {
		/* Copy the data from the ring */
		test_ring_copy_from(&zcd, obj_table, esize, ret);
		rte_ring_dequeue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
	unsigned int n, unsigned int *available)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
	if (ret != 0) {
		/* Copy the data from the ring */
		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
		rte_ring_dequeue_zc_finish(r, ret);
	}

	return ret;
}

static unsigned int
test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *available)
{
	unsigned int ret;
	struct rte_ring_zc_data zcd;

	ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
				&zcd, available);
	if (ret != 0) {
		/* Copy the data from the ring */
		test_ring_copy_from(&zcd, obj_table, esize, ret);
		rte_ring_dequeue_zc_finish(r, ret);
	}

	return ret;
}

/* Table of enqueue/dequeue implementations under test. Each entry pairs
 * a legacy (void *) function with its custom-element-size counterpart,
 * plus the ring-creation flags needed to select that sync mode.
 */
static const struct {
	const char *desc;
	uint32_t api_type;
	uint32_t create_flags;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void * const *obj_table, unsigned int n,
			unsigned int *free_space);
		unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *free_space);
	} enq;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void **obj_table, unsigned int n,
			unsigned int *available);
		unsigned int (*felem)(struct rte_ring *r, void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *available);
	} deq;
} test_enqdeq_impl[] = {
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_bulk,
			.felem = rte_ring_sp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_bulk,
			.felem = rte_ring_sc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_bulk,
			.felem = rte_ring_mp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_bulk,
			.felem = rte_ring_mc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_burst,
			.felem = rte_ring_sp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_burst,
			.felem = rte_ring_sc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_burst,
			.felem = rte_ring_mp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_burst,
			.felem = rte_ring_mc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	}
};

/* Dispatch an enqueue through the implementation selected by test_idx;
 * esize == -1 selects the legacy (void *) variant.
 */
static unsigned int
test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
			NULL);
}

/* Dispatch a dequeue through the implementation selected by test_idx;
 * esize == -1 selects the legacy (void *) variant.
 */
static unsigned int
test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
			NULL);
}

/* Fill 'obj' with a deterministic pattern so dequeued data can be
 * validated: object pointers 0..count-1 for the legacy API, or a
 * running uint32_t counter across the whole buffer for elem APIs.
 */
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int i;

	/* Legacy queue APIs? */
	if (esize == -1)
		for (i = 0; i < count; i++)
			((void **)obj)[i] = (void *)(uintptr_t)i;
	else
		for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
			((uint32_t *)obj)[i] = i;
}

/* memcmp wrapper that hexdumps both buffers on mismatch.
 * Returns 0 when the buffers are equal (same convention as memcmp).
 */
static int
test_ring_mem_cmp(void *src, void *dst, unsigned int size)
{
	int ret;

	ret = memcmp(src, dst, size);
	if (ret) {
		rte_hexdump(stdout, "src", src, size);
		rte_hexdump(stdout, "dst", dst, size);
		printf("data after dequeue is not the same\n");
	}

	return ret;
}

/* Print a one-line description of the test variant being run. */
static void
test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
{
	printf("\n%s: ", istr);

	if (esize == -1)
		printf("legacy APIs: ");
	else
		printf("elem APIs: element size %dB ", esize);

	if (api_type == TEST_RING_IGNORE_API_TYPE)
		return;

	if (api_type & TEST_RING_THREAD_DEF)
		printf(": default enqueue/dequeue: ");
	else if (api_type & TEST_RING_THREAD_SPSC)
		printf(": SP/SC: ");
	else if (api_type & TEST_RING_THREAD_MPMC)
		printf(": MP/MC: ");

	if (api_type & TEST_RING_ELEM_SINGLE)
		printf("single\n");
	else if (api_type & TEST_RING_ELEM_BULK)
		printf("bulk\n");
	else if (api_type & TEST_RING_ELEM_BURST)
		printf("burst\n");
}

/*
 * Various negative test cases.
501 */ 502 static int 503 test_ring_negative_tests(void) 504 { 505 struct rte_ring *rp = NULL; 506 struct rte_ring *rt = NULL; 507 unsigned int i; 508 509 /* Test with esize not a multiple of 4 */ 510 rp = test_ring_create("test_bad_element_size", 23, 511 RING_SIZE + 1, SOCKET_ID_ANY, 0); 512 if (rp != NULL) { 513 printf("Test failed to detect invalid element size\n"); 514 goto test_fail; 515 } 516 517 518 for (i = 0; i < RTE_DIM(esize); i++) { 519 /* Test if ring size is not power of 2 */ 520 rp = test_ring_create("test_bad_ring_size", esize[i], 521 RING_SIZE + 1, SOCKET_ID_ANY, 0); 522 if (rp != NULL) { 523 printf("Test failed to detect odd count\n"); 524 goto test_fail; 525 } 526 527 /* Test if ring size is exceeding the limit */ 528 rp = test_ring_create("test_bad_ring_size", esize[i], 529 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0); 530 if (rp != NULL) { 531 printf("Test failed to detect limits\n"); 532 goto test_fail; 533 } 534 535 /* Tests if lookup returns NULL on non-existing ring */ 536 rp = rte_ring_lookup("ring_not_found"); 537 if (rp != NULL && rte_errno != ENOENT) { 538 printf("Test failed to detect NULL ring lookup\n"); 539 goto test_fail; 540 } 541 542 /* Test to if a non-power of 2 count causes the create 543 * function to fail correctly 544 */ 545 rp = test_ring_create("test_ring_count", esize[i], 4097, 546 SOCKET_ID_ANY, 0); 547 if (rp != NULL) 548 goto test_fail; 549 550 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE, 551 SOCKET_ID_ANY, 552 RING_F_SP_ENQ | RING_F_SC_DEQ); 553 if (rp == NULL) { 554 printf("test_ring_negative fail to create ring\n"); 555 goto test_fail; 556 } 557 558 TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp, 559 rp, goto test_fail); 560 561 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail); 562 563 /* Tests if it would always fail to create ring with an used 564 * ring name. 
565 */ 566 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE, 567 SOCKET_ID_ANY, 0); 568 if (rt != NULL) 569 goto test_fail; 570 571 rte_ring_free(rp); 572 rp = NULL; 573 } 574 575 return 0; 576 577 test_fail: 578 579 rte_ring_free(rp); 580 return -1; 581 } 582 583 /* 584 * Burst and bulk operations with sp/sc, mp/mc and default (during creation) 585 * Random number of elements are enqueued and dequeued. 586 */ 587 static int 588 test_ring_burst_bulk_tests1(unsigned int test_idx) 589 { 590 struct rte_ring *r; 591 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; 592 int ret; 593 unsigned int i, j, temp_sz; 594 int rand; 595 const unsigned int rsz = RING_SIZE - 1; 596 597 for (i = 0; i < RTE_DIM(esize); i++) { 598 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc, 599 test_enqdeq_impl[test_idx].api_type, esize[i]); 600 601 /* Create the ring */ 602 r = test_ring_create("test_ring_burst_bulk_tests", esize[i], 603 RING_SIZE, SOCKET_ID_ANY, 604 test_enqdeq_impl[test_idx].create_flags); 605 606 /* alloc dummy object pointers */ 607 src = test_ring_calloc(RING_SIZE * 2, esize[i]); 608 if (src == NULL) 609 goto fail; 610 test_ring_mem_init(src, RING_SIZE * 2, esize[i]); 611 cur_src = src; 612 613 /* alloc some room for copied objects */ 614 dst = test_ring_calloc(RING_SIZE * 2, esize[i]); 615 if (dst == NULL) 616 goto fail; 617 cur_dst = dst; 618 619 printf("Random full/empty test\n"); 620 621 for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) { 622 /* random shift in the ring */ 623 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL); 624 printf("%s: iteration %u, random shift: %u;\n", 625 __func__, i, rand); 626 ret = test_ring_enq_impl(r, cur_src, esize[i], rand, 627 test_idx); 628 TEST_RING_VERIFY(ret != 0, r, goto fail); 629 630 ret = test_ring_deq_impl(r, cur_dst, esize[i], rand, 631 test_idx); 632 TEST_RING_VERIFY(ret == rand, r, goto fail); 633 634 /* fill the ring */ 635 ret = test_ring_enq_impl(r, cur_src, esize[i], rsz, 636 
test_idx); 637 TEST_RING_VERIFY(ret != 0, r, goto fail); 638 639 TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail); 640 TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail); 641 TEST_RING_VERIFY(rte_ring_full(r), r, goto fail); 642 TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail); 643 644 /* empty the ring */ 645 ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz, 646 test_idx); 647 TEST_RING_VERIFY(ret == (int)rsz, r, goto fail); 648 649 TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail); 650 TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail); 651 TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail); 652 TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail); 653 654 /* check data */ 655 temp_sz = rsz * sizeof(void *); 656 if (esize[i] != -1) 657 temp_sz = rsz * esize[i]; 658 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst, 659 temp_sz) == 0, r, goto fail); 660 } 661 662 /* Free memory before test completed */ 663 rte_ring_free(r); 664 rte_free(src); 665 rte_free(dst); 666 r = NULL; 667 src = NULL; 668 dst = NULL; 669 } 670 671 return 0; 672 fail: 673 rte_ring_free(r); 674 rte_free(src); 675 rte_free(dst); 676 return -1; 677 } 678 679 /* 680 * Burst and bulk operations with sp/sc, mp/mc and default (during creation) 681 * Sequence of simple enqueues/dequeues and validate the enqueued and 682 * dequeued data. 
683 */ 684 static int 685 test_ring_burst_bulk_tests2(unsigned int test_idx) 686 { 687 struct rte_ring *r; 688 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; 689 int ret; 690 unsigned int i; 691 692 for (i = 0; i < RTE_DIM(esize); i++) { 693 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc, 694 test_enqdeq_impl[test_idx].api_type, esize[i]); 695 696 /* Create the ring */ 697 r = test_ring_create("test_ring_burst_bulk_tests", esize[i], 698 RING_SIZE, SOCKET_ID_ANY, 699 test_enqdeq_impl[test_idx].create_flags); 700 701 /* alloc dummy object pointers */ 702 src = test_ring_calloc(RING_SIZE * 2, esize[i]); 703 if (src == NULL) 704 goto fail; 705 test_ring_mem_init(src, RING_SIZE * 2, esize[i]); 706 cur_src = src; 707 708 /* alloc some room for copied objects */ 709 dst = test_ring_calloc(RING_SIZE * 2, esize[i]); 710 if (dst == NULL) 711 goto fail; 712 cur_dst = dst; 713 714 printf("enqueue 1 obj\n"); 715 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx); 716 TEST_RING_VERIFY(ret == 1, r, goto fail); 717 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1); 718 719 printf("enqueue 2 objs\n"); 720 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx); 721 TEST_RING_VERIFY(ret == 2, r, goto fail); 722 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2); 723 724 printf("enqueue MAX_BULK objs\n"); 725 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK, 726 test_idx); 727 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail); 728 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK); 729 730 printf("dequeue 1 obj\n"); 731 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx); 732 TEST_RING_VERIFY(ret == 1, r, goto fail); 733 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1); 734 735 printf("dequeue 2 objs\n"); 736 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx); 737 TEST_RING_VERIFY(ret == 2, r, goto fail); 738 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2); 739 740 printf("dequeue MAX_BULK objs\n"); 741 ret 
= test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK, 742 test_idx); 743 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail); 744 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK); 745 746 /* check data */ 747 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst, 748 RTE_PTR_DIFF(cur_dst, dst)) == 0, 749 r, goto fail); 750 751 /* Free memory before test completed */ 752 rte_ring_free(r); 753 rte_free(src); 754 rte_free(dst); 755 r = NULL; 756 src = NULL; 757 dst = NULL; 758 } 759 760 return 0; 761 fail: 762 rte_ring_free(r); 763 rte_free(src); 764 rte_free(dst); 765 return -1; 766 } 767 768 /* 769 * Burst and bulk operations with sp/sc, mp/mc and default (during creation) 770 * Enqueue and dequeue to cover the entire ring length. 771 */ 772 static int 773 test_ring_burst_bulk_tests3(unsigned int test_idx) 774 { 775 struct rte_ring *r; 776 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; 777 int ret; 778 unsigned int i, j; 779 780 for (i = 0; i < RTE_DIM(esize); i++) { 781 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc, 782 test_enqdeq_impl[test_idx].api_type, esize[i]); 783 784 /* Create the ring */ 785 r = test_ring_create("test_ring_burst_bulk_tests", esize[i], 786 RING_SIZE, SOCKET_ID_ANY, 787 test_enqdeq_impl[test_idx].create_flags); 788 789 /* alloc dummy object pointers */ 790 src = test_ring_calloc(RING_SIZE * 2, esize[i]); 791 if (src == NULL) 792 goto fail; 793 test_ring_mem_init(src, RING_SIZE * 2, esize[i]); 794 cur_src = src; 795 796 /* alloc some room for copied objects */ 797 dst = test_ring_calloc(RING_SIZE * 2, esize[i]); 798 if (dst == NULL) 799 goto fail; 800 cur_dst = dst; 801 802 printf("fill and empty the ring\n"); 803 for (j = 0; j < RING_SIZE / MAX_BULK; j++) { 804 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK, 805 test_idx); 806 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail); 807 cur_src = test_ring_inc_ptr(cur_src, esize[i], 808 MAX_BULK); 809 810 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK, 
811 test_idx); 812 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail); 813 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 814 MAX_BULK); 815 } 816 817 /* check data */ 818 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst, 819 RTE_PTR_DIFF(cur_dst, dst)) == 0, 820 r, goto fail); 821 822 /* Free memory before test completed */ 823 rte_ring_free(r); 824 rte_free(src); 825 rte_free(dst); 826 r = NULL; 827 src = NULL; 828 dst = NULL; 829 } 830 831 return 0; 832 fail: 833 rte_ring_free(r); 834 rte_free(src); 835 rte_free(dst); 836 return -1; 837 } 838 839 /* 840 * Burst and bulk operations with sp/sc, mp/mc and default (during creation) 841 * Enqueue till the ring is full and dequeue till the ring becomes empty. 842 */ 843 static int 844 test_ring_burst_bulk_tests4(unsigned int test_idx) 845 { 846 struct rte_ring *r; 847 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; 848 int ret; 849 unsigned int i, j; 850 unsigned int api_type, num_elems; 851 852 api_type = test_enqdeq_impl[test_idx].api_type; 853 854 for (i = 0; i < RTE_DIM(esize); i++) { 855 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc, 856 test_enqdeq_impl[test_idx].api_type, esize[i]); 857 858 /* Create the ring */ 859 r = test_ring_create("test_ring_burst_bulk_tests", esize[i], 860 RING_SIZE, SOCKET_ID_ANY, 861 test_enqdeq_impl[test_idx].create_flags); 862 863 /* alloc dummy object pointers */ 864 src = test_ring_calloc(RING_SIZE * 2, esize[i]); 865 if (src == NULL) 866 goto fail; 867 test_ring_mem_init(src, RING_SIZE * 2, esize[i]); 868 cur_src = src; 869 870 /* alloc some room for copied objects */ 871 dst = test_ring_calloc(RING_SIZE * 2, esize[i]); 872 if (dst == NULL) 873 goto fail; 874 cur_dst = dst; 875 876 printf("Test enqueue without enough memory space\n"); 877 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) { 878 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK, 879 test_idx); 880 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail); 881 cur_src = test_ring_inc_ptr(cur_src, 
							esize[i],
							MAX_BULK);
		}

		printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("Enqueue the remaining entries = MAX_BULK - 3\n");
		/* Bulk APIs enqueue exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		/* Always one free entry left */
		ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);

		printf("Test if ring is full\n");
		TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);

		printf("Test enqueue for a full entry\n");
		/* Ring is full; any further enqueue must return 0 */
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == 0, r, goto fail);

		printf("Test dequeue without enough objects\n");
		for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
							MAX_BULK);
		}

		/* Available memory space for the exact MAX_BULK entries */
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Bulk APIs enqueue exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);

		printf("Test if ring is empty\n");
		/* Check if ring is empty */
		TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Test default, single element, bulk and burst APIs
 */
static int
test_ring_basic_ex(void)
{
	int ret = -1;
	unsigned int i, j;
	struct rte_ring *rp = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;

	for (i = 0; i < RTE_DIM(esize); i++) {
		rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("%s: failed to create ring\n", __func__);
			goto fail_test;
		}

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE, esize[i]);
		if (src == NULL) {
			printf("%s: failed to alloc src memory\n", __func__);
			goto fail_test;
		}
		test_ring_mem_init(src, RING_SIZE, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE, esize[i]);
		if (dst == NULL) {
			printf("%s: failed to alloc dst memory\n", __func__);
			goto fail_test;
		}
		cur_dst = dst;

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
					rp, goto fail_test);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		printf("%u ring entries are now free\n",
			rte_ring_free_count(rp));

		/* Fill the ring one element at a time (capacity is
		 * RING_SIZE - 1 for a non-exact-size ring).
		 */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);

		/* Drain the ring one element at a time */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		/* Following tests use the configured flags to decide
		 * SP/SC or MP/MC.
		 */
		/* reset memory of dst */
		memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));

		/* reset cur_src and cur_dst */
		cur_src = src;
		cur_dst = dst;

		/* Covering the ring burst operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Covering the ring bulk operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		/* Free memory before test completed */
		rte_ring_free(rp);
		rte_free(src);
		rte_free(dst);
		rp = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;

fail_test:
	rte_ring_free(rp);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Basic test cases with exact size ring.
 */
static int
test_ring_with_exact_size(void)
{
	struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
	void **src_orig = NULL, **dst_orig = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	const unsigned int ring_sz = 16;
	unsigned int i, j;
	int ret = -1;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string("Test exact size ring",
				TEST_RING_IGNORE_API_TYPE,
				esize[i]);

		std_r = test_ring_create("std", esize[i], ring_sz,
					rte_socket_id(),
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (std_r == NULL) {
			printf("%s: error, can't create std ring\n", __func__);
			goto test_fail;
		}
		exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
					rte_socket_id(),
					RING_F_SP_ENQ | RING_F_SC_DEQ |
					RING_F_EXACT_SZ);
		if (exact_sz_r == NULL) {
			printf("%s: error, can't create exact size ring\n",
					__func__);
			goto test_fail;
		}

		/* alloc object pointers. Allocate one extra object
		 * and create an unaligned address.
		 */
		src_orig = test_ring_calloc(17, esize[i]);
		if (src_orig == NULL)
			goto test_fail;
		test_ring_mem_init(src_orig, 17, esize[i]);
		src = (void **)((uintptr_t)src_orig + 1);
		cur_src = src;

		dst_orig = test_ring_calloc(17, esize[i]);
		if (dst_orig == NULL)
			goto test_fail;
		dst = (void **)((uintptr_t)dst_orig + 1);
		cur_dst = dst;

		/*
		 * Check that the exact size ring is bigger than the
		 * standard ring
		 */
		TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
				rte_ring_get_size(exact_sz_r),
				std_r, goto test_fail);

		/*
		 * check that the exact_sz_ring can hold one more element
		 * than the standard ring. (16 vs 15 elements)
		 */
		for (j = 0; j < ring_sz - 1; j++) {
			ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
			ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}
		/* The std ring (capacity 15) must now reject, while the
		 * exact-size ring (capacity 16) still accepts one more.
		 */
		ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
		ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);

		/* check that dequeue returns the expected number of elements */
		ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);

		/* check that the capacity function returns expected value */
TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz, 1170 exact_sz_r, goto test_fail); 1171 1172 /* check data */ 1173 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst, 1174 RTE_PTR_DIFF(cur_dst, dst)) == 0, 1175 exact_sz_r, goto test_fail); 1176 1177 rte_free(src_orig); 1178 rte_free(dst_orig); 1179 rte_ring_free(std_r); 1180 rte_ring_free(exact_sz_r); 1181 src_orig = NULL; 1182 dst_orig = NULL; 1183 std_r = NULL; 1184 exact_sz_r = NULL; 1185 } 1186 1187 return 0; 1188 1189 test_fail: 1190 rte_free(src_orig); 1191 rte_free(dst_orig); 1192 rte_ring_free(std_r); 1193 rte_ring_free(exact_sz_r); 1194 return -1; 1195 } 1196 1197 static int 1198 test_ring(void) 1199 { 1200 int32_t rc; 1201 unsigned int i; 1202 1203 /* Negative test cases */ 1204 if (test_ring_negative_tests() < 0) 1205 goto test_fail; 1206 1207 /* Some basic operations */ 1208 if (test_ring_basic_ex() < 0) 1209 goto test_fail; 1210 1211 if (test_ring_with_exact_size() < 0) 1212 goto test_fail; 1213 1214 /* Burst and bulk operations with sp/sc, mp/mc and default. 1215 * The test cases are split into smaller test cases to 1216 * help clang compile faster. 1217 */ 1218 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) { 1219 1220 1221 rc = test_ring_burst_bulk_tests1(i); 1222 if (rc < 0) 1223 goto test_fail; 1224 1225 rc = test_ring_burst_bulk_tests2(i); 1226 if (rc < 0) 1227 goto test_fail; 1228 1229 rc = test_ring_burst_bulk_tests3(i); 1230 if (rc < 0) 1231 goto test_fail; 1232 1233 rc = test_ring_burst_bulk_tests4(i); 1234 if (rc < 0) 1235 goto test_fail; 1236 } 1237 1238 /* dump the ring status */ 1239 rte_ring_list_dump(stdout); 1240 1241 return 0; 1242 1243 test_fail: 1244 1245 return -1; 1246 } 1247 1248 REGISTER_TEST_COMMAND(ring_autotest, test_ring); 1249