xref: /dpdk/app/test/test_ring.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  * Copyright(c) 2020 Arm Limited
4  */
5 
6 #include <string.h>
7 #include <stdarg.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 #include <errno.h>
13 #include <sys/queue.h>
14 
15 #include <rte_common.h>
16 #include <rte_log.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
20 #include <rte_eal.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_malloc.h>
26 #include <rte_ring.h>
27 #include <rte_ring_elem.h>
28 #include <rte_random.h>
29 #include <rte_errno.h>
30 #include <rte_hexdump.h>
31 
32 #include "test.h"
33 #include "test_ring.h"
34 
35 /*
36  * Ring
37  * ====
38  *
39  * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
40  *    legacy/custom element size (4B, 8B, 16B, 20B) APIs.
41  *    Some tests incorporate unaligned addresses for objects.
42  *    The enqueued/dequeued data is validated for correctness.
43  *
44  * #. Performance tests are in test_ring_perf.c
45  */
46 
47 #define RING_SIZE 4096
48 #define MAX_BULK 32
49 
/*
 * Validate the return value of test cases and print details of the
 * ring if validation fails
 *
 * @param exp
 *   Expression to validate return value.
 * @param r
 *   A pointer to the ring structure.
 * @param errst
 *   Statement executed when the expression is false (typically
 *   "goto fail" to abort the current test case).
 */
59 #define TEST_RING_VERIFY(exp, r, errst) do {				\
60 	if (!(exp)) {							\
61 		printf("error at %s:%d\tcondition " #exp " failed\n",	\
62 		    __func__, __LINE__);				\
63 		rte_ring_dump(stdout, (r));				\
64 		errst;							\
65 	}								\
66 } while (0)
67 
68 #define TEST_RING_FULL_EMPTY_ITER	8
69 
70 static const int esize[] = {-1, 4, 8, 16, 20};
71 
72 /* Wrappers around the zero-copy APIs. The wrappers match
73  * the normal enqueue/dequeue API declarations.
74  */
75 static unsigned int
76 test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
77 	unsigned int n, unsigned int *free_space)
78 {
79 	uint32_t ret;
80 	struct rte_ring_zc_data zcd;
81 
82 	ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
83 	if (ret != 0) {
84 		/* Copy the data to the ring */
85 		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
86 		rte_ring_enqueue_zc_finish(r, ret);
87 	}
88 
89 	return ret;
90 }
91 
92 static unsigned int
93 test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
94 	unsigned int esize, unsigned int n, unsigned int *free_space)
95 {
96 	unsigned int ret;
97 	struct rte_ring_zc_data zcd;
98 
99 	ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
100 				&zcd, free_space);
101 	if (ret != 0) {
102 		/* Copy the data to the ring */
103 		test_ring_copy_to(&zcd, obj_table, esize, ret);
104 		rte_ring_enqueue_zc_finish(r, ret);
105 	}
106 
107 	return ret;
108 }
109 
110 static unsigned int
111 test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
112 	unsigned int n, unsigned int *free_space)
113 {
114 	unsigned int ret;
115 	struct rte_ring_zc_data zcd;
116 
117 	ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
118 	if (ret != 0) {
119 		/* Copy the data to the ring */
120 		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
121 		rte_ring_enqueue_zc_finish(r, ret);
122 	}
123 
124 	return ret;
125 }
126 
127 static unsigned int
128 test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
129 	unsigned int esize, unsigned int n, unsigned int *free_space)
130 {
131 	unsigned int ret;
132 	struct rte_ring_zc_data zcd;
133 
134 	ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
135 				&zcd, free_space);
136 	if (ret != 0) {
137 		/* Copy the data to the ring */
138 		test_ring_copy_to(&zcd, obj_table, esize, ret);
139 		rte_ring_enqueue_zc_finish(r, ret);
140 	}
141 
142 	return ret;
143 }
144 
145 static unsigned int
146 test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
147 	unsigned int n, unsigned int *available)
148 {
149 	unsigned int ret;
150 	struct rte_ring_zc_data zcd;
151 
152 	ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
153 	if (ret != 0) {
154 		/* Copy the data from the ring */
155 		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
156 		rte_ring_dequeue_zc_finish(r, ret);
157 	}
158 
159 	return ret;
160 }
161 
162 static unsigned int
163 test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
164 	unsigned int esize, unsigned int n, unsigned int *available)
165 {
166 	unsigned int ret;
167 	struct rte_ring_zc_data zcd;
168 
169 	ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
170 				&zcd, available);
171 	if (ret != 0) {
172 		/* Copy the data from the ring */
173 		test_ring_copy_from(&zcd, obj_table, esize, ret);
174 		rte_ring_dequeue_zc_finish(r, ret);
175 	}
176 
177 	return ret;
178 }
179 
180 static unsigned int
181 test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
182 	unsigned int n, unsigned int *available)
183 {
184 	unsigned int ret;
185 	struct rte_ring_zc_data zcd;
186 
187 	ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
188 	if (ret != 0) {
189 		/* Copy the data from the ring */
190 		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
191 		rte_ring_dequeue_zc_finish(r, ret);
192 	}
193 
194 	return ret;
195 }
196 
197 static unsigned int
198 test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
199 	unsigned int esize, unsigned int n, unsigned int *available)
200 {
201 	unsigned int ret;
202 	struct rte_ring_zc_data zcd;
203 
204 	ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
205 				&zcd, available);
206 	if (ret != 0) {
207 		/* Copy the data from the ring */
208 		test_ring_copy_from(&zcd, obj_table, esize, ret);
209 		rte_ring_dequeue_zc_finish(r, ret);
210 	}
211 
212 	return ret;
213 }
214 
/* Table of enqueue/dequeue implementations exercised by the
 * test_ring_burst_bulk_tests*() cases. Each entry pairs a legacy
 * (void * object) API with its element-size-aware counterpart for one
 * combination of sync mode (default/SP-SC/MP-MC/RTS/HTS, plus the
 * zero-copy wrappers above) and bulk/burst flavor.
 */
static const struct {
	const char *desc;	/* human-readable description for logs */
	uint32_t api_type;	/* TEST_RING_ELEM_* | TEST_RING_THREAD_* */
	uint32_t create_flags;	/* flags passed at ring creation */
	struct {
		/* legacy API variant: objects are void * */
		unsigned int (*flegacy)(struct rte_ring *r,
			void * const *obj_table, unsigned int n,
			unsigned int *free_space);
		/* elem API variant: objects are esize bytes each */
		unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *free_space);
	} enq;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void **obj_table, unsigned int n,
			unsigned int *available);
		unsigned int (*felem)(struct rte_ring *r, void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *available);
	} deq;
} test_enqdeq_impl[] = {
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_bulk,
			.felem = rte_ring_sp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_bulk,
			.felem = rte_ring_sc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_bulk,
			.felem = rte_ring_mp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_bulk,
			.felem = rte_ring_mc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_burst,
			.felem = rte_ring_sp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_burst,
			.felem = rte_ring_sc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_burst,
			.felem = rte_ring_mp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_burst,
			.felem = rte_ring_mc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	/* The remaining entries exercise the zero-copy wrappers defined
	 * above; ZC only supports SP/SC and HTS sync modes.
	 */
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_bulk,
			.felem = test_ring_enqueue_zc_bulk_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_bulk,
			.felem = test_ring_dequeue_zc_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = test_ring_enqueue_zc_burst,
			.felem = test_ring_enqueue_zc_burst_elem,
		},
		.deq = {
			.flegacy = test_ring_dequeue_zc_burst,
			.felem = test_ring_dequeue_zc_burst_elem,
		},
	}
};
419 
420 static unsigned int
421 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
422 	unsigned int test_idx)
423 {
424 	if (esize == -1)
425 		return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
426 	else
427 		return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
428 			NULL);
429 }
430 
431 static unsigned int
432 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
433 	unsigned int test_idx)
434 {
435 	if (esize == -1)
436 		return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
437 	else
438 		return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
439 			NULL);
440 }
441 
/*
 * Fill a test buffer with a known pattern.
 *
 * For the legacy API (esize == -1) the buffer holds "count" object
 * pointers, each set to its own index. Otherwise it holds "count"
 * elements of "esize" bytes each, filled with consecutive 32-bit
 * counter values.
 */
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int idx;

	if (esize == -1) {
		void **ptrs = obj;

		for (idx = 0; idx < count; idx++)
			ptrs[idx] = (void *)(uintptr_t)idx;
	} else {
		uint32_t *words = obj;
		const unsigned int nb_words =
			count * esize / sizeof(uint32_t);

		for (idx = 0; idx < nb_words; idx++)
			words[idx] = idx;
	}
}
455 
/*
 * Compare enqueued (src) and dequeued (dst) data over "size" bytes.
 * On mismatch, hexdump both buffers for debugging.
 * Returns 0 when equal, non-zero otherwise (memcmp() convention).
 */
static int
test_ring_mem_cmp(void *src, void *dst, unsigned int size)
{
	const int diff = memcmp(src, dst, size);

	if (diff != 0) {
		rte_hexdump(stdout, "src", src, size);
		rte_hexdump(stdout, "dst", dst, size);
		printf("data after dequeue is not the same\n");
	}

	return diff;
}
470 
471 static void
472 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
473 {
474 	printf("\n%s: ", istr);
475 
476 	if (esize == -1)
477 		printf("legacy APIs: ");
478 	else
479 		printf("elem APIs: element size %dB ", esize);
480 
481 	if (api_type == TEST_RING_IGNORE_API_TYPE)
482 		return;
483 
484 	if (api_type & TEST_RING_THREAD_DEF)
485 		printf(": default enqueue/dequeue: ");
486 	else if (api_type & TEST_RING_THREAD_SPSC)
487 		printf(": SP/SC: ");
488 	else if (api_type & TEST_RING_THREAD_MPMC)
489 		printf(": MP/MC: ");
490 
491 	if (api_type & TEST_RING_ELEM_SINGLE)
492 		printf("single\n");
493 	else if (api_type & TEST_RING_ELEM_BULK)
494 		printf("bulk\n");
495 	else if (api_type & TEST_RING_ELEM_BURST)
496 		printf("burst\n");
497 }
498 
499 /*
500  * Various negative test cases.
501  */
/* Returns 0 on success, -1 on any unexpected create/lookup result. */
static int
test_ring_negative_tests(void)
{
	struct rte_ring *rp = NULL;
	struct rte_ring *rt = NULL;
	unsigned int i;

	/* Test with esize not a multiple of 4 */
	rp = test_ring_create("test_bad_element_size", 23,
				RING_SIZE + 1, SOCKET_ID_ANY, 0);
	if (rp != NULL) {
		printf("Test failed to detect invalid element size\n");
		goto test_fail;
	}


	for (i = 0; i < RTE_DIM(esize); i++) {
		/* Test if ring size is not power of 2 */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RING_SIZE + 1, SOCKET_ID_ANY, 0);
		if (rp != NULL) {
			printf("Test failed to detect odd count\n");
			goto test_fail;
		}

		/* Test if ring size is exceeding the limit */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
		if (rp != NULL) {
			printf("Test failed to detect limits\n");
			goto test_fail;
		}

		/* Tests if lookup returns NULL on non-existing ring */
		rp = rte_ring_lookup("ring_not_found");
		if (rp != NULL && rte_errno != ENOENT) {
			printf("Test failed to detect NULL ring lookup\n");
			goto test_fail;
		}

		/* Test to if a non-power of 2 count causes the create
		 * function to fail correctly
		 */
		rp = test_ring_create("test_ring_count", esize[i], 4097,
					SOCKET_ID_ANY, 0);
		if (rp != NULL)
			goto test_fail;

		/* A valid SP/SC ring used below for lookup and duplicate
		 * name checks.
		 */
		rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("test_ring_negative fail to create ring\n");
			goto test_fail;
		}

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
					rp, goto test_fail);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);

		/* Tests if it would always fail to create ring with an used
		 * ring name.
		 */
		rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY, 0);
		if (rt != NULL)
			goto test_fail;

		/* Free the valid ring before the next element size */
		rte_ring_free(rp);
		rp = NULL;
	}

	return 0;

test_fail:

	rte_ring_free(rp);
	return -1;
}
582 
583 /*
584  * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
585  * Random number of elements are enqueued and dequeued.
586  */
/* Returns 0 on success, -1 on failure. */
static int
test_ring_burst_bulk_tests1(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j, temp_sz;
	int rand;
	const unsigned int rsz = RING_SIZE - 1;	/* usable ring capacity */

	/* Repeat the whole test for every element size in esize[] */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("Random full/empty test\n");

		/* cur_src/cur_dst are never advanced here: the ring is
		 * fully drained every iteration, so each pass enqueues
		 * from and dequeues into the start of the buffers.
		 */
		for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
			/* random shift in the ring; at least 1 element */
			rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
			printf("%s: iteration %u, random shift: %u;\n",
			    __func__, i, rand);
			ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret == rand, r, goto fail);

			/* fill the ring */
			ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			/* full ring: no free slots, all counters agree */
			TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);

			/* empty the ring */
			ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);

			/* empty ring: all slots free, counters agree */
			TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);

			/* check data: rsz objects round-tripped */
			temp_sz = rsz * sizeof(void *);
			if (esize[i] != -1)
				temp_sz = rsz * esize[i];
			TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
						temp_sz) == 0, r, goto fail);
		}

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
678 
679 /*
680  * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
681  * Sequence of simple enqueues/dequeues and validate the enqueued and
682  * dequeued data.
683  */
/* Returns 0 on success, -1 on failure. */
static int
test_ring_burst_bulk_tests2(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i;

	/* Repeat the whole test for every element size in esize[] */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("enqueue 1 obj\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);

		printf("enqueue 2 objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("enqueue MAX_BULK objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		/* cur_src is deliberately not advanced: this was the
		 * last enqueue, cur_src is not read again.
		 */

		printf("dequeue 1 obj\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);

		printf("dequeue 2 objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		printf("dequeue MAX_BULK objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);

		/* check data: everything dequeued matches what was enqueued */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
766 
767 /*
768  * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
769  * Enqueue and dequeue to cover the entire ring length.
770  */
/* Returns 0 on success, -1 on failure. */
static int
test_ring_burst_bulk_tests3(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;

	/* Repeat the whole test for every element size in esize[] */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		/* Enqueue/dequeue MAX_BULK at a time until the whole ring
		 * length has been covered.
		 */
		printf("fill and empty the ring\n");
		for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
								MAX_BULK);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
								MAX_BULK);
		}

		/* check data: everything dequeued matches what was enqueued */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
837 
838 /*
839  * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
840  * Enqueue till the ring is full and dequeue till the ring becomes empty.
841  */
/* Returns 0 on success, -1 on failure. */
static int
test_ring_burst_bulk_tests4(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;
	unsigned int api_type, num_elems;

	api_type = test_enqdeq_impl[test_idx].api_type;

	/* Repeat the whole test for every element size in esize[] */
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		/* Fill all but the last MAX_BULK slots */
		printf("Test enqueue without enough memory space\n");
		for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
								MAX_BULK);
		}

		printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("Enqueue the remaining entries = MAX_BULK - 3\n");
		/* Bulk APIs enqueue exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		/* Always one free entry left */
		ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);

		printf("Test if ring is full\n");
		TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);

		/* Any further enqueue must fail (0 objects enqueued) */
		printf("Test enqueue for a full entry\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == 0, r, goto fail);

		printf("Test dequeue without enough objects\n");
		for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
								MAX_BULK);
		}

		/* Available memory space for the exact MAX_BULK entries */
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Bulk APIs enqueue exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);

		printf("Test if ring is empty\n");
		/* Check if ring is empty */
		TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);

		/* check data: everything dequeued matches what was enqueued */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before test completed */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}
958 
959 /*
960  * Test default, single element, bulk and burst APIs
961  */
/* Returns 0 on success, -1 on failure. */
static int
test_ring_basic_ex(void)
{
	int ret = -1;
	unsigned int i, j;
	struct rte_ring *rp = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;

	/* Repeat the whole test for every element size in esize[] */
	for (i = 0; i < RTE_DIM(esize); i++) {
		rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("%s: failed to create ring\n", __func__);
			goto fail_test;
		}

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE, esize[i]);
		if (src == NULL) {
			printf("%s: failed to alloc src memory\n", __func__);
			goto fail_test;
		}
		test_ring_mem_init(src, RING_SIZE, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE, esize[i]);
		if (dst == NULL) {
			printf("%s: failed to alloc dst memory\n", __func__);
			goto fail_test;
		}
		cur_dst = dst;

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
					rp, goto fail_test);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		printf("%u ring entries are now free\n",
			rte_ring_free_count(rp));

		/* Single-element enqueues until the ring is full */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);

		/* Single-element dequeues until the ring is empty */
		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		/* Following tests use the configured flags to decide
		 * SP/SC or MP/MC.
		 */
		/* reset memory of dst */
		memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));

		/* reset cur_src and cur_dst */
		cur_src = src;
		cur_dst = dst;

		/* Covering the ring burst operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Covering the ring bulk operation; cur_src is not
		 * advanced afterwards since this is the last enqueue.
		 */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		rte_ring_free(rp);
		rte_free(src);
		rte_free(dst);
		rp = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;

fail_test:
	rte_ring_free(rp);
	rte_free(src);
	rte_free(dst);
	return -1;
}
1079 
1080 /*
1081  * Basic test cases with exact size ring.
1082  */
1083 static int
1084 test_ring_with_exact_size(void)
1085 {
1086 	struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
1087 	void **src_orig = NULL, **dst_orig = NULL;
1088 	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
1089 	const unsigned int ring_sz = 16;
1090 	unsigned int i, j;
1091 	int ret = -1;
1092 
1093 	for (i = 0; i < RTE_DIM(esize); i++) {
1094 		test_ring_print_test_string("Test exact size ring",
1095 				TEST_RING_IGNORE_API_TYPE,
1096 				esize[i]);
1097 
1098 		std_r = test_ring_create("std", esize[i], ring_sz,
1099 					rte_socket_id(),
1100 					RING_F_SP_ENQ | RING_F_SC_DEQ);
1101 		if (std_r == NULL) {
1102 			printf("%s: error, can't create std ring\n", __func__);
1103 			goto test_fail;
1104 		}
1105 		exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
1106 				rte_socket_id(),
1107 				RING_F_SP_ENQ | RING_F_SC_DEQ |
1108 				RING_F_EXACT_SZ);
1109 		if (exact_sz_r == NULL) {
1110 			printf("%s: error, can't create exact size ring\n",
1111 					__func__);
1112 			goto test_fail;
1113 		}
1114 
1115 		/* alloc object pointers. Allocate one extra object
1116 		 * and create an unaligned address.
1117 		 */
1118 		src_orig = test_ring_calloc(17, esize[i]);
1119 		if (src_orig == NULL)
1120 			goto test_fail;
1121 		test_ring_mem_init(src_orig, 17, esize[i]);
1122 		src = (void **)((uintptr_t)src_orig + 1);
1123 		cur_src = src;
1124 
1125 		dst_orig = test_ring_calloc(17, esize[i]);
1126 		if (dst_orig == NULL)
1127 			goto test_fail;
1128 		dst = (void **)((uintptr_t)dst_orig + 1);
1129 		cur_dst = dst;
1130 
1131 		/*
1132 		 * Check that the exact size ring is bigger than the
1133 		 * standard ring
1134 		 */
1135 		TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
1136 				rte_ring_get_size(exact_sz_r),
1137 				std_r, goto test_fail);
1138 
1139 		/*
1140 		 * check that the exact_sz_ring can hold one more element
1141 		 * than the standard ring. (16 vs 15 elements)
1142 		 */
1143 		for (j = 0; j < ring_sz - 1; j++) {
1144 			ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1145 				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1146 			TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
1147 			ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1148 				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1149 			TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
1150 			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1151 		}
1152 		ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1153 				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1154 		TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
1155 		ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1156 				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1157 		TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
1158 
1159 		/* check that dequeue returns the expected number of elements */
1160 		ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
1161 				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1162 		TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
1163 		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
1164 
1165 		/* check that the capacity function returns expected value */
1166 		TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
1167 					exact_sz_r, goto test_fail);
1168 
1169 		/* check data */
1170 		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1171 					RTE_PTR_DIFF(cur_dst, dst)) == 0,
1172 					exact_sz_r, goto test_fail);
1173 
1174 		rte_free(src_orig);
1175 		rte_free(dst_orig);
1176 		rte_ring_free(std_r);
1177 		rte_ring_free(exact_sz_r);
1178 		src_orig = NULL;
1179 		dst_orig = NULL;
1180 		std_r = NULL;
1181 		exact_sz_r = NULL;
1182 	}
1183 
1184 	return 0;
1185 
1186 test_fail:
1187 	rte_free(src_orig);
1188 	rte_free(dst_orig);
1189 	rte_ring_free(std_r);
1190 	rte_ring_free(exact_sz_r);
1191 	return -1;
1192 }
1193 
1194 static int
1195 test_ring(void)
1196 {
1197 	int32_t rc;
1198 	unsigned int i;
1199 
1200 	/* Negative test cases */
1201 	if (test_ring_negative_tests() < 0)
1202 		goto test_fail;
1203 
1204 	/* Some basic operations */
1205 	if (test_ring_basic_ex() < 0)
1206 		goto test_fail;
1207 
1208 	if (test_ring_with_exact_size() < 0)
1209 		goto test_fail;
1210 
1211 	/* Burst and bulk operations with sp/sc, mp/mc and default.
1212 	 * The test cases are split into smaller test cases to
1213 	 * help clang compile faster.
1214 	 */
1215 	for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1216 
1217 
1218 		rc = test_ring_burst_bulk_tests1(i);
1219 		if (rc < 0)
1220 			goto test_fail;
1221 
1222 		rc = test_ring_burst_bulk_tests2(i);
1223 		if (rc < 0)
1224 			goto test_fail;
1225 
1226 		rc = test_ring_burst_bulk_tests3(i);
1227 		if (rc < 0)
1228 			goto test_fail;
1229 
1230 		rc = test_ring_burst_bulk_tests4(i);
1231 		if (rc < 0)
1232 			goto test_fail;
1233 	}
1234 
1235 	/* dump the ring status */
1236 	rte_ring_list_dump(stdout);
1237 
1238 	return 0;
1239 
1240 test_fail:
1241 
1242 	return -1;
1243 }
1244 
/* Expose the suite as the "ring_autotest" command of the dpdk-test app. */
REGISTER_TEST_COMMAND(ring_autotest, test_ring);
1246