1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
4 */
5
6 #include <string.h>
7 #include <stdarg.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 #include <errno.h>
13 #include <sys/queue.h>
14
15 #include <rte_common.h>
16 #include <rte_log.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
20 #include <rte_eal.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_malloc.h>
25 #include <rte_ring.h>
26 #include <rte_ring_elem.h>
27 #include <rte_random.h>
28 #include <rte_errno.h>
29 #include <rte_hexdump.h>
30
31 #include "test.h"
32 #include "test_ring.h"
33
34 /*
35 * Ring
36 * ====
37 *
38 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
39 * legacy/custom element size (4B, 8B, 16B, 20B) APIs.
40 * Some tests incorporate unaligned addresses for objects.
41 * The enqueued/dequeued data is validated for correctness.
42 *
43 * #. Performance tests are in test_ring_perf.c
44 */
45
46 #define RING_SIZE 4096
47 #define MAX_BULK 32
48
/*
 * Validate a test condition and print details of the ring if the
 * validation fails.
 *
 * @param exp
 *   Expression expected to evaluate to a non-zero (true) value.
 * @param r
 *   A pointer to the ring structure, dumped on failure.
 * @param errst
 *   Statement executed on failure, typically 'goto <fail label>'.
 */
#define TEST_RING_VERIFY(exp, r, errst) do { \
	if (!(exp)) { \
		printf("error at %s:%d\tcondition " #exp " failed\n", \
			__func__, __LINE__); \
		rte_ring_dump(stdout, (r)); \
		errst; \
	} \
} while (0)
66
67 #define TEST_RING_FULL_EMPTY_ITER 8
68
69 static const int esize[] = {-1, 4, 8, 16, 20};
70
71 /* Wrappers around the zero-copy APIs. The wrappers match
72 * the normal enqueue/dequeue API declarations.
73 */
74 static unsigned int
test_ring_enqueue_zc_bulk(struct rte_ring * r,void * const * obj_table,unsigned int n,unsigned int * free_space)75 test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
76 unsigned int n, unsigned int *free_space)
77 {
78 uint32_t ret;
79 struct rte_ring_zc_data zcd;
80
81 ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
82 if (ret != 0) {
83 /* Copy the data to the ring */
84 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
85 rte_ring_enqueue_zc_finish(r, ret);
86 }
87
88 return ret;
89 }
90
91 static unsigned int
test_ring_enqueue_zc_bulk_elem(struct rte_ring * r,const void * obj_table,unsigned int esize,unsigned int n,unsigned int * free_space)92 test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
93 unsigned int esize, unsigned int n, unsigned int *free_space)
94 {
95 unsigned int ret;
96 struct rte_ring_zc_data zcd;
97
98 ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
99 &zcd, free_space);
100 if (ret != 0) {
101 /* Copy the data to the ring */
102 test_ring_copy_to(&zcd, obj_table, esize, ret);
103 rte_ring_enqueue_zc_finish(r, ret);
104 }
105
106 return ret;
107 }
108
109 static unsigned int
test_ring_enqueue_zc_burst(struct rte_ring * r,void * const * obj_table,unsigned int n,unsigned int * free_space)110 test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
111 unsigned int n, unsigned int *free_space)
112 {
113 unsigned int ret;
114 struct rte_ring_zc_data zcd;
115
116 ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
117 if (ret != 0) {
118 /* Copy the data to the ring */
119 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
120 rte_ring_enqueue_zc_finish(r, ret);
121 }
122
123 return ret;
124 }
125
126 static unsigned int
test_ring_enqueue_zc_burst_elem(struct rte_ring * r,const void * obj_table,unsigned int esize,unsigned int n,unsigned int * free_space)127 test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
128 unsigned int esize, unsigned int n, unsigned int *free_space)
129 {
130 unsigned int ret;
131 struct rte_ring_zc_data zcd;
132
133 ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
134 &zcd, free_space);
135 if (ret != 0) {
136 /* Copy the data to the ring */
137 test_ring_copy_to(&zcd, obj_table, esize, ret);
138 rte_ring_enqueue_zc_finish(r, ret);
139 }
140
141 return ret;
142 }
143
144 static unsigned int
test_ring_dequeue_zc_bulk(struct rte_ring * r,void ** obj_table,unsigned int n,unsigned int * available)145 test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
146 unsigned int n, unsigned int *available)
147 {
148 unsigned int ret;
149 struct rte_ring_zc_data zcd;
150
151 ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
152 if (ret != 0) {
153 /* Copy the data from the ring */
154 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
155 rte_ring_dequeue_zc_finish(r, ret);
156 }
157
158 return ret;
159 }
160
161 static unsigned int
test_ring_dequeue_zc_bulk_elem(struct rte_ring * r,void * obj_table,unsigned int esize,unsigned int n,unsigned int * available)162 test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
163 unsigned int esize, unsigned int n, unsigned int *available)
164 {
165 unsigned int ret;
166 struct rte_ring_zc_data zcd;
167
168 ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
169 &zcd, available);
170 if (ret != 0) {
171 /* Copy the data from the ring */
172 test_ring_copy_from(&zcd, obj_table, esize, ret);
173 rte_ring_dequeue_zc_finish(r, ret);
174 }
175
176 return ret;
177 }
178
179 static unsigned int
test_ring_dequeue_zc_burst(struct rte_ring * r,void ** obj_table,unsigned int n,unsigned int * available)180 test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
181 unsigned int n, unsigned int *available)
182 {
183 unsigned int ret;
184 struct rte_ring_zc_data zcd;
185
186 ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
187 if (ret != 0) {
188 /* Copy the data from the ring */
189 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
190 rte_ring_dequeue_zc_finish(r, ret);
191 }
192
193 return ret;
194 }
195
196 static unsigned int
test_ring_dequeue_zc_burst_elem(struct rte_ring * r,void * obj_table,unsigned int esize,unsigned int n,unsigned int * available)197 test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
198 unsigned int esize, unsigned int n, unsigned int *available)
199 {
200 unsigned int ret;
201 struct rte_ring_zc_data zcd;
202
203 ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
204 &zcd, available);
205 if (ret != 0) {
206 /* Copy the data from the ring */
207 test_ring_copy_from(&zcd, obj_table, esize, ret);
208 rte_ring_dequeue_zc_finish(r, ret);
209 }
210
211 return ret;
212 }
213
/*
 * Table of enqueue/dequeue implementations exercised by the
 * test_ring_burst_bulk_tests* cases. Each entry pairs a legacy
 * (pointer-sized object) API with its custom element size ('elem')
 * counterpart, together with the flags used to create the ring and
 * the api_type flags describing sync mode and bulk/burst semantics.
 * The last four entries use the zero-copy wrappers defined above.
 */
static const struct {
	const char *desc;	/* human-readable test description */
	uint32_t api_type;	/* TEST_RING_* flags for this entry */
	uint32_t create_flags;	/* RING_F_* flags passed at ring creation */
	/* enqueue function pair */
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void * const *obj_table, unsigned int n,
			unsigned int *free_space);
		unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *free_space);
	} enq;
	/* dequeue function pair */
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void **obj_table, unsigned int n,
			unsigned int *available);
		unsigned int (*felem)(struct rte_ring *r, void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *available);
	} deq;
} test_enqdeq_impl[] = {
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_bulk,
			.felem = rte_ring_sp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_bulk,
			.felem = rte_ring_sc_dequeue_bulk_elem,
		},
	},
	{
		/* same sync mode as entry 0, but via the explicit mp/mc calls */
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_bulk,
			.felem = rte_ring_mp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_bulk,
			.felem = rte_ring_mc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_burst,
			.felem = rte_ring_sp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_burst,
			.felem = rte_ring_sc_dequeue_burst_elem,
		},
	},
	{
		/* same sync mode as the burst default, via explicit mp/mc calls */
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_burst,
			.felem = rte_ring_mp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_burst,
			.felem = rte_ring_mc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	/* Zero-copy wrapper variants below */
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	}
};
418
419 static unsigned int
test_ring_enq_impl(struct rte_ring * r,void ** obj,int esize,unsigned int n,unsigned int test_idx)420 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
421 unsigned int test_idx)
422 {
423 if (esize == -1)
424 return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
425 else
426 return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
427 NULL);
428 }
429
430 static unsigned int
test_ring_deq_impl(struct rte_ring * r,void ** obj,int esize,unsigned int n,unsigned int test_idx)431 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
432 unsigned int test_idx)
433 {
434 if (esize == -1)
435 return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
436 else
437 return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
438 NULL);
439 }
440
/*
 * Fill 'obj' with a deterministic pattern so that data read back from
 * the ring can later be validated with memcmp.
 *
 * For the legacy APIs (esize == -1) the buffer holds 'count'
 * pointer-sized values; otherwise it holds 'count' elements of
 * 'esize' bytes each, initialized word by word.
 */
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int k;

	if (esize == -1) {
		/* Legacy queue APIs: array of fake object pointers */
		void **ptrs = obj;

		for (k = 0; k < count; k++)
			ptrs[k] = (void *)(uintptr_t)k;
	} else {
		/* Custom element size: fill as 32-bit words */
		uint32_t *words = obj;
		unsigned int nb_words = count * esize / sizeof(uint32_t);

		for (k = 0; k < nb_words; k++)
			words[k] = k;
	}
}
454
/*
 * Compare 'size' bytes of src and dst; on mismatch dump both buffers
 * to aid debugging. Returns memcmp()'s result (0 when identical).
 */
static int
test_ring_mem_cmp(void *src, void *dst, unsigned int size)
{
	const int diff = memcmp(src, dst, size);

	if (diff != 0) {
		rte_hexdump(stdout, "src", src, size);
		rte_hexdump(stdout, "dst", dst, size);
		printf("data after dequeue is not the same\n");
	}

	return diff;
}
469
470 static void
test_ring_print_test_string(const char * istr,unsigned int api_type,int esize)471 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
472 {
473 printf("\n%s: ", istr);
474
475 if (esize == -1)
476 printf("legacy APIs: ");
477 else
478 printf("elem APIs: element size %dB ", esize);
479
480 if (api_type == TEST_RING_IGNORE_API_TYPE)
481 return;
482
483 if (api_type & TEST_RING_THREAD_DEF)
484 printf(": default enqueue/dequeue: ");
485 else if (api_type & TEST_RING_THREAD_SPSC)
486 printf(": SP/SC: ");
487 else if (api_type & TEST_RING_THREAD_MPMC)
488 printf(": MP/MC: ");
489
490 if (api_type & TEST_RING_ELEM_SINGLE)
491 printf("single\n");
492 else if (api_type & TEST_RING_ELEM_BULK)
493 printf("bulk\n");
494 else if (api_type & TEST_RING_ELEM_BURST)
495 printf("burst\n");
496 }
497
498 /*
499 * Various negative test cases.
500 */
501 static int
test_ring_negative_tests(void)502 test_ring_negative_tests(void)
503 {
504 struct rte_ring *rp = NULL;
505 struct rte_ring *rt = NULL;
506 unsigned int i;
507
508 /* Test with esize not a multiple of 4 */
509 rp = test_ring_create("test_bad_element_size", 23,
510 RING_SIZE + 1, SOCKET_ID_ANY, 0);
511 if (rp != NULL) {
512 printf("Test failed to detect invalid element size\n");
513 goto test_fail;
514 }
515
516
517 for (i = 0; i < RTE_DIM(esize); i++) {
518 /* Test if ring size is not power of 2 */
519 rp = test_ring_create("test_bad_ring_size", esize[i],
520 RING_SIZE + 1, SOCKET_ID_ANY, 0);
521 if (rp != NULL) {
522 printf("Test failed to detect odd count\n");
523 goto test_fail;
524 }
525
526 /* Test if ring size is exceeding the limit */
527 rp = test_ring_create("test_bad_ring_size", esize[i],
528 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
529 if (rp != NULL) {
530 printf("Test failed to detect limits\n");
531 goto test_fail;
532 }
533
534 /* Tests if lookup returns NULL on non-existing ring */
535 rp = rte_ring_lookup("ring_not_found");
536 if (rp != NULL && rte_errno != ENOENT) {
537 printf("Test failed to detect NULL ring lookup\n");
538 goto test_fail;
539 }
540
541 /* Test to if a non-power of 2 count causes the create
542 * function to fail correctly
543 */
544 rp = test_ring_create("test_ring_count", esize[i], 4097,
545 SOCKET_ID_ANY, 0);
546 if (rp != NULL)
547 goto test_fail;
548
549 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
550 SOCKET_ID_ANY,
551 RING_F_SP_ENQ | RING_F_SC_DEQ);
552 if (rp == NULL) {
553 printf("test_ring_negative fail to create ring\n");
554 goto test_fail;
555 }
556
557 TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
558 rp, goto test_fail);
559
560 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);
561
562 /* Tests if it would always fail to create ring with an used
563 * ring name.
564 */
565 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
566 SOCKET_ID_ANY, 0);
567 if (rt != NULL)
568 goto test_fail;
569
570 rte_ring_free(rp);
571 rp = NULL;
572 }
573
574 return 0;
575
576 test_fail:
577
578 rte_ring_free(rp);
579 return -1;
580 }
581
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
 * Random number of elements are enqueued and dequeued.
 */
static int
test_ring_burst_bulk_tests1(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j, temp_sz;
	int rand;
	/* usable capacity of a default (non exact-size) ring */
	const unsigned int rsz = RING_SIZE - 1;

	/* Run once per element size; -1 selects the legacy APIs */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers, filled with a known pattern */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("Random full/empty test\n");

		for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
			/* random shift in the ring; at least 1 element */
			rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
			printf("%s: iteration %u, random shift: %u;\n",
				__func__, i, rand);
			/* enqueue/dequeue 'rand' elements to rotate the
			 * head/tail indices to a random position
			 */
			ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret == rand, r, goto fail);

			/* fill the ring */
			ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			/* ring must now report completely full */
			TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);

			/* empty the ring */
			ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);

			/* ring must now report completely empty */
			TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);

			/* check data: dequeued bytes must match the source */
			temp_sz = rsz * sizeof(void *);
			if (esize[i] != -1)
				temp_sz = rsz * esize[i];
			TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
						temp_sz) == 0, r, goto fail);
		}

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
677
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
 * Sequence of simple enqueues/dequeues and validate the enqueued and
 * dequeued data.
 */
static int
test_ring_burst_bulk_tests2(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i;

	/* Run once per element size; -1 selects the legacy APIs */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers, filled with a known pattern */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("enqueue 1 obj\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);

		printf("enqueue 2 objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("enqueue MAX_BULK objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		/* cur_src is intentionally not advanced here: it is not
		 * read again before being reset in the next iteration.
		 */

		printf("dequeue 1 obj\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);

		printf("dequeue 2 objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		printf("dequeue MAX_BULK objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);

		/* check data: everything dequeued must match the source */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
765
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
 * Enqueue and dequeue to cover the entire ring length.
 */
static int
test_ring_burst_bulk_tests3(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;

	/* Run once per element size; -1 selects the legacy APIs */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers, filled with a known pattern */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("fill and empty the ring\n");
		/* Alternate enqueue/dequeue of MAX_BULK elements so every
		 * slot of the ring is exercised across the iterations.
		 */
		for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
							MAX_BULK);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
							MAX_BULK);
		}

		/* check data: everything dequeued must match the source */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
836
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
 * Enqueue till the ring is full and dequeue till the ring becomes empty.
 */
static int
test_ring_burst_bulk_tests4(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;
	unsigned int api_type, num_elems;

	api_type = test_enqdeq_impl[test_idx].api_type;

	/* Run once per element size; -1 selects the legacy APIs */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers, filled with a known pattern */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("Test enqueue without enough memory space\n");
		/* Fill all but the last MAX_BULK slots of the ring */
		for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
							MAX_BULK);
		}

		printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("Enqueue the remaining entries = MAX_BULK - 3\n");
		/* Bulk APIs enqueue exact number of elements, so ask only
		 * for what fits; burst APIs may return a partial count.
		 */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		/* Always one free entry left */
		ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);

		printf("Test if ring is full\n");
		TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);

		printf("Test enqueue for a full entry\n");
		/* Enqueue into a full ring must not add anything */
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == 0, r, goto fail);

		printf("Test dequeue without enough objects\n");
		for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
							MAX_BULK);
		}

		/* Available memory space for the exact MAX_BULK entries */
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Bulk APIs enqueue exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);

		printf("Test if ring is empty\n");
		/* Check if ring is empty */
		TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);

		/* check data: everything dequeued must match the source */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
957
/*
 * Test default, single element, bulk and burst APIs
 */
static int
test_ring_basic_ex(void)
{
	int ret = -1;
	unsigned int i, j;
	struct rte_ring *rp = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;

	/* Run once per element size; -1 selects the legacy APIs */
	for (i = 0; i < RTE_DIM(esize); i++) {
		rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("%s: failed to create ring\n", __func__);
			goto fail_test;
		}

		/* alloc dummy object pointers, filled with a known pattern */
		src = test_ring_calloc(RING_SIZE, esize[i]);
		if (src == NULL) {
			printf("%s: failed to alloc src memory\n", __func__);
			goto fail_test;
		}
		test_ring_mem_init(src, RING_SIZE, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE, esize[i]);
		if (dst == NULL) {
			printf("%s: failed to alloc dst memory\n", __func__);
			goto fail_test;
		}
		cur_dst = dst;

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
					rp, goto fail_test);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		printf("%u ring entries are now free\n",
			rte_ring_free_count(rp));

		/* Fill the ring one element at a time; a default ring of
		 * size RING_SIZE holds RING_SIZE - 1 usable entries.
		 * test_ring_enqueue() returns 0 on success here.
		 */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);

		/* Drain the ring one element at a time */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		/* check data: everything dequeued must match the source */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		/* Following tests use the configured flags to decide
		 * SP/SC or MP/MC.
		 */
		/* reset memory of dst */
		memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));

		/* reset cur_src and cur_dst */
		cur_src = src;
		cur_dst = dst;

		/* Covering the ring burst operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Covering the ring bulk operation.
		 * cur_src is not advanced after this enqueue: it is not
		 * read again before being reset in the next iteration.
		 */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* check data: everything dequeued must match the source */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		rte_ring_free(rp);
		rte_free(src);
		rte_free(dst);
		rp = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;

fail_test:
	rte_ring_free(rp);
	rte_free(src);
	rte_free(dst);
	return -1;
}
1078
1079 /*
1080 * Basic test cases with exact size ring.
1081 */
1082 static int
test_ring_with_exact_size(void)1083 test_ring_with_exact_size(void)
1084 {
1085 struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
1086 void **src_orig = NULL, **dst_orig = NULL;
1087 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
1088 const unsigned int ring_sz = 16;
1089 unsigned int i, j;
1090 int ret = -1;
1091
1092 for (i = 0; i < RTE_DIM(esize); i++) {
1093 test_ring_print_test_string("Test exact size ring",
1094 TEST_RING_IGNORE_API_TYPE,
1095 esize[i]);
1096
1097 std_r = test_ring_create("std", esize[i], ring_sz,
1098 rte_socket_id(),
1099 RING_F_SP_ENQ | RING_F_SC_DEQ);
1100 if (std_r == NULL) {
1101 printf("%s: error, can't create std ring\n", __func__);
1102 goto test_fail;
1103 }
1104 exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
1105 rte_socket_id(),
1106 RING_F_SP_ENQ | RING_F_SC_DEQ |
1107 RING_F_EXACT_SZ);
1108 if (exact_sz_r == NULL) {
1109 printf("%s: error, can't create exact size ring\n",
1110 __func__);
1111 goto test_fail;
1112 }
1113
1114 /* alloc object pointers. Allocate one extra object
1115 * and create an unaligned address.
1116 */
1117 src_orig = test_ring_calloc(17, esize[i]);
1118 if (src_orig == NULL)
1119 goto test_fail;
1120 test_ring_mem_init(src_orig, 17, esize[i]);
1121 src = (void **)((uintptr_t)src_orig + 1);
1122 cur_src = src;
1123
1124 dst_orig = test_ring_calloc(17, esize[i]);
1125 if (dst_orig == NULL)
1126 goto test_fail;
1127 dst = (void **)((uintptr_t)dst_orig + 1);
1128 cur_dst = dst;
1129
1130 /*
1131 * Check that the exact size ring is bigger than the
1132 * standard ring
1133 */
1134 TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
1135 rte_ring_get_size(exact_sz_r),
1136 std_r, goto test_fail);
1137
1138 /*
1139 * check that the exact_sz_ring can hold one more element
1140 * than the standard ring. (16 vs 15 elements)
1141 */
1142 for (j = 0; j < ring_sz - 1; j++) {
1143 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1144 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1145 TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
1146 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1147 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1148 TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
1149 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1150 }
1151 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1152 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1153 TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
1154 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1155 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1156 TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
1157
1158 /* check that dequeue returns the expected number of elements */
1159 ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
1160 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1161 TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
1162 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
1163
1164 /* check that the capacity function returns expected value */
1165 TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
1166 exact_sz_r, goto test_fail);
1167
1168 /* check data */
1169 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1170 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1171 exact_sz_r, goto test_fail);
1172
1173 rte_free(src_orig);
1174 rte_free(dst_orig);
1175 rte_ring_free(std_r);
1176 rte_ring_free(exact_sz_r);
1177 src_orig = NULL;
1178 dst_orig = NULL;
1179 std_r = NULL;
1180 exact_sz_r = NULL;
1181 }
1182
1183 return 0;
1184
1185 test_fail:
1186 rte_free(src_orig);
1187 rte_free(dst_orig);
1188 rte_ring_free(std_r);
1189 rte_ring_free(exact_sz_r);
1190 return -1;
1191 }
1192
1193 static int
test_ring(void)1194 test_ring(void)
1195 {
1196 int32_t rc;
1197 unsigned int i;
1198
1199 /* Negative test cases */
1200 if (test_ring_negative_tests() < 0)
1201 goto test_fail;
1202
1203 /* Some basic operations */
1204 if (test_ring_basic_ex() < 0)
1205 goto test_fail;
1206
1207 if (test_ring_with_exact_size() < 0)
1208 goto test_fail;
1209
1210 /* Burst and bulk operations with sp/sc, mp/mc and default.
1211 * The test cases are split into smaller test cases to
1212 * help clang compile faster.
1213 */
1214 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1215
1216
1217 rc = test_ring_burst_bulk_tests1(i);
1218 if (rc < 0)
1219 goto test_fail;
1220
1221 rc = test_ring_burst_bulk_tests2(i);
1222 if (rc < 0)
1223 goto test_fail;
1224
1225 rc = test_ring_burst_bulk_tests3(i);
1226 if (rc < 0)
1227 goto test_fail;
1228
1229 rc = test_ring_burst_bulk_tests4(i);
1230 if (rc < 0)
1231 goto test_fail;
1232 }
1233
1234 /* dump the ring status */
1235 rte_ring_list_dump(stdout);
1236
1237 return 0;
1238
1239 test_fail:
1240
1241 return -1;
1242 }
1243
1244 REGISTER_FAST_TEST(ring_autotest, true, true, test_ring);
1245