/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ring_elem.h>
#include <rte_random.h>
#include <rte_errno.h>
#include <rte_hexdump.h>

#include "test.h"
#include "test_ring.h"

/*
 * Ring
 * ====
 *
 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
 *    legacy/custom element size (4B, 8B, 16B, 20B) APIs.
 *    Some tests incorporate unaligned addresses for objects.
 *    The enqueued/dequeued data is validated for correctness.
 *
 * #. Performance tests are in test_ring_perf.c
 */

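/*
 * Illustrative sketch (not part of the test flow): the tests below exercise
 * both the legacy API, where the ring stores void * objects, and the _elem
 * API, where the ring stores fixed-size elements. A minimal usage comparison,
 * assuming "r" is a ring of pointers and "r_elem" was created with 4-byte
 * elements:
 */
static __rte_unused void
test_ring_usage_sketch(struct rte_ring *r, struct rte_ring *r_elem)
{
	void *objs[2] = {NULL, NULL};	/* legacy APIs: table of object pointers */
	uint32_t vals[2] = {1, 2};	/* elem APIs: raw 4B elements */

	/* legacy bulk enqueue/dequeue moves the pointers themselves */
	if (rte_ring_enqueue_bulk(r, objs, RTE_DIM(objs), NULL) == RTE_DIM(objs))
		rte_ring_dequeue_bulk(r, objs, RTE_DIM(objs), NULL);

	/* elem bulk enqueue/dequeue copies the elements into/out of the ring */
	if (rte_ring_enqueue_bulk_elem(r_elem, vals, sizeof(vals[0]),
			RTE_DIM(vals), NULL) == RTE_DIM(vals))
		rte_ring_dequeue_bulk_elem(r_elem, vals, sizeof(vals[0]),
			RTE_DIM(vals), NULL);
}
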
#define RING_SIZE 4096
#define MAX_BULK 32

/*
 * Validate the return value of test cases and print details of the
 * ring if validation fails
 *
 * @param exp
 *   Expression to validate return value.
 * @param r
 *   A pointer to the ring structure.
 * @param errst
 *   Statement to execute when validation fails (e.g. goto a failure label).
 */
#define TEST_RING_VERIFY(exp, r, errst) do {				\
	if (!(exp)) {							\
		printf("error at %s:%d\tcondition " #exp " failed\n",	\
		    __func__, __LINE__);				\
		rte_ring_dump(stdout, (r));				\
		errst;							\
	}								\
} while (0)
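/*
 * Typical usage in the tests below (the third argument is the statement run
 * on failure), e.g.:
 *	TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);
 */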

#define TEST_RING_FULL_EMPTY_ITER	8

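/*
 * Element sizes covered by the tests: -1 selects the legacy (void *) APIs,
 * the positive values are element sizes in bytes for the _elem APIs
 * (including 20B, a non-power-of-2 size).
 */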
static const int esize[] = {-1, 4, 8, 16, 20};

static const struct {
	const char *desc;
	uint32_t api_type;
	uint32_t create_flags;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void * const *obj_table, unsigned int n,
			unsigned int *free_space);
		unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *free_space);
	} enq;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void **obj_table, unsigned int n,
			unsigned int *available);
		unsigned int (*felem)(struct rte_ring *r, void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *available);
	} deq;
} test_enqdeq_impl[] = {
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_bulk,
			.felem = rte_ring_sp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_bulk,
			.felem = rte_ring_sc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_bulk,
			.felem = rte_ring_mp_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_bulk,
			.felem = rte_ring_mc_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_bulk,
			.felem = rte_ring_enqueue_bulk_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_bulk,
			.felem = rte_ring_dequeue_bulk_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "SP/SC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
		.enq = {
			.flegacy = rte_ring_sp_enqueue_burst,
			.felem = rte_ring_sp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_sc_dequeue_burst,
			.felem = rte_ring_sc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP/MC sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
		.create_flags = 0,
		.enq = {
			.flegacy = rte_ring_mp_enqueue_burst,
			.felem = rte_ring_mp_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_mc_dequeue_burst,
			.felem = rte_ring_mc_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_RTS/MC_RTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
	{
		.desc = "MP_HTS/MC_HTS sync mode",
		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
		.enq = {
			.flegacy = rte_ring_enqueue_burst,
			.felem = rte_ring_enqueue_burst_elem,
		},
		.deq = {
			.flegacy = rte_ring_dequeue_burst,
			.felem = rte_ring_dequeue_burst_elem,
		},
	},
};

static unsigned int
test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
			NULL);
}

static unsigned int
test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
			NULL);
}

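/*
 * Advance the object table pointer by n elements. For example, with
 * esize == 16 and n == 4 the pointer moves forward by 64 bytes; with
 * esize == -1 (legacy APIs) it moves by n * sizeof(void *) bytes.
 */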
static void**
test_ring_inc_ptr(void **obj, int esize, unsigned int n)
{
	/* Legacy queue APIs? */
	if ((esize) == -1)
		return ((void **)obj) + n;
	else
		return (void **)(((uint32_t *)obj) +
					(n * esize / sizeof(uint32_t)));
}

static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int i;

	/* Legacy queue APIs? */
	if (esize == -1)
		for (i = 0; i < count; i++)
			((void **)obj)[i] = (void *)(unsigned long)i;
	else
		for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
			((uint32_t *)obj)[i] = i;
}

static int
test_ring_mem_cmp(void *src, void *dst, unsigned int size)
{
	int ret;

	ret = memcmp(src, dst, size);
	if (ret) {
		rte_hexdump(stdout, "src", src, size);
		rte_hexdump(stdout, "dst", dst, size);
		printf("data after dequeue is not the same\n");
	}

	return ret;
}

static void
test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
{
	printf("\n%s: ", istr);

	if (esize == -1)
		printf("legacy APIs: ");
	else
		printf("elem APIs: element size %dB ", esize);

	if (api_type == TEST_RING_IGNORE_API_TYPE)
		return;

	if (api_type & TEST_RING_THREAD_DEF)
		printf(": default enqueue/dequeue: ");
	else if (api_type & TEST_RING_THREAD_SPSC)
		printf(": SP/SC: ");
	else if (api_type & TEST_RING_THREAD_MPMC)
		printf(": MP/MC: ");

	if (api_type & TEST_RING_ELEM_SINGLE)
		printf("single\n");
	else if (api_type & TEST_RING_ELEM_BULK)
		printf("bulk\n");
	else if (api_type & TEST_RING_ELEM_BURST)
		printf("burst\n");
}

/*
 * Various negative test cases.
 */
static int
test_ring_negative_tests(void)
{
	struct rte_ring *rp = NULL;
	struct rte_ring *rt = NULL;
	unsigned int i;

	/* Test with esize not a multiple of 4 */
	rp = test_ring_create("test_bad_element_size", 23,
				RING_SIZE + 1, SOCKET_ID_ANY, 0);
	if (rp != NULL) {
		printf("Test failed to detect invalid element size\n");
		goto test_fail;
	}


	for (i = 0; i < RTE_DIM(esize); i++) {
		/* Test if ring size is not a power of 2 */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RING_SIZE + 1, SOCKET_ID_ANY, 0);
		if (rp != NULL) {
			printf("Test failed to detect odd count\n");
			goto test_fail;
		}

		/* Test if ring size exceeds the limit */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
		if (rp != NULL) {
			printf("Test failed to detect limits\n");
			goto test_fail;
		}

		/* Test if lookup returns NULL for a non-existent ring */
		rp = rte_ring_lookup("ring_not_found");
		if (rp != NULL && rte_errno != ENOENT) {
			printf("Test failed to detect NULL ring lookup\n");
			goto test_fail;
		}

		/* Test if a non-power-of-2 count causes the create
		 * function to fail correctly
		 */
		rp = test_ring_create("test_ring_count", esize[i], 4097,
					SOCKET_ID_ANY, 0);
		if (rp != NULL)
			goto test_fail;

		rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("test_ring_negative failed to create ring\n");
			goto test_fail;
		}

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
					rp, goto test_fail);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);

		/* Test that creating a ring with an already-used ring name
		 * always fails.
		 */
		rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY, 0);
		if (rt != NULL)
			goto test_fail;

		rte_ring_free(rp);
		rp = NULL;
	}

	return 0;

test_fail:

	rte_ring_free(rp);
	return -1;
}

/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation).
 * A random number of elements is enqueued and dequeued.
 */
static int
test_ring_burst_bulk_tests1(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j, temp_sz;
	int rand;
	const unsigned int rsz = RING_SIZE - 1;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("Random full/empty test\n");

		for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
			/* random shift in the ring */
			rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
			printf("%s: iteration %u, random shift: %u;\n",
			    __func__, i, rand);
			ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
							test_idx);
			TEST_RING_VERIFY(ret == rand, r, goto fail);

			/* fill the ring */
			ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret != 0, r, goto fail);

			TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);

			/* empty the ring */
			ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
							test_idx);
			TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);

			TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
			TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
			TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);

			/* check data */
			temp_sz = rsz * sizeof(void *);
			if (esize[i] != -1)
				temp_sz = rsz * esize[i];
			TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
						temp_sz) == 0, r, goto fail);
		}

		/* Free memory before the test completes */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation).
 * A sequence of simple enqueues/dequeues; the enqueued and dequeued data is
 * validated.
 */
static int
test_ring_burst_bulk_tests2(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("enqueue 1 obj\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);

		printf("enqueue 2 objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("enqueue MAX_BULK objs\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);

		printf("dequeue 1 obj\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
		TEST_RING_VERIFY(ret == 1, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);

		printf("dequeue 2 objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		printf("dequeue MAX_BULK objs\n");
		ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before the test completes */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation).
 * Enqueue and dequeue to cover the entire ring length.
 */
static int
test_ring_burst_bulk_tests3(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("fill and empty the ring\n");
		for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
								MAX_BULK);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
								MAX_BULK);
		}

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before the test completes */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Burst and bulk operations with sp/sc, mp/mc and default (during creation).
 * Enqueue till the ring is full and dequeue till the ring becomes empty.
 */
static int
test_ring_burst_bulk_tests4(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;
	unsigned int api_type, num_elems;

	api_type = test_enqdeq_impl[test_idx].api_type;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
				RING_SIZE, SOCKET_ID_ANY,
				test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (src == NULL)
			goto fail;
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
		if (dst == NULL)
			goto fail;
		cur_dst = dst;

		printf("Test enqueue without enough memory space\n");
		for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
			ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i],
								MAX_BULK);
		}

		printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		printf("Enqueue the remaining entries = MAX_BULK - 3\n");
		/* Bulk APIs enqueue the exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		/* Always one free entry left */
		ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);

		printf("Test if ring is full\n");
		TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);

		printf("Test enqueue for a full entry\n");
		ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
						test_idx);
		TEST_RING_VERIFY(ret == 0, r, goto fail);

		printf("Test dequeue without enough objects\n");
		for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
			ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
							test_idx);
			TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
								MAX_BULK);
		}

		/* MAX_BULK - 1 objects remain in the ring */
		ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
		TEST_RING_VERIFY(ret == 2, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Bulk APIs dequeue the exact number of elements */
		if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
			num_elems = MAX_BULK - 3;
		else
			num_elems = MAX_BULK;
		ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
						test_idx);
		TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);

		printf("Test if ring is empty\n");
		/* Check if ring is empty */
		TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					r, goto fail);

		/* Free memory before the test completes */
		rte_ring_free(r);
		rte_free(src);
		rte_free(dst);
		r = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;
fail:
	rte_ring_free(r);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Test default, single element, bulk and burst APIs
 */
static int
test_ring_basic_ex(void)
{
	int ret = -1;
	unsigned int i, j;
	struct rte_ring *rp = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;

	for (i = 0; i < RTE_DIM(esize); i++) {
		rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (rp == NULL) {
			printf("%s: failed to create ring\n", __func__);
			goto fail_test;
		}

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE, esize[i]);
		if (src == NULL) {
			printf("%s: failed to alloc src memory\n", __func__);
			goto fail_test;
		}
		test_ring_mem_init(src, RING_SIZE, esize[i]);
		cur_src = src;

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE, esize[i]);
		if (dst == NULL) {
			printf("%s: failed to alloc dst memory\n", __func__);
			goto fail_test;
		}
		cur_dst = dst;

		TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
					rp, goto fail_test);

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		printf("%u ring entries are now free\n",
			rte_ring_free_count(rp));

		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);

		for (j = 0; j < RING_SIZE - 1; j++) {
			ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
			cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
		}

		TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		/* Following tests use the configured flags to decide
		 * SP/SC or MP/MC.
		 */
		/* reset memory of dst */
		memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));

		/* reset cur_src and cur_dst */
		cur_src = src;
		cur_dst = dst;

		/* Covering the ring burst operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* Covering the ring bulk operation */
		ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);

		ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
		TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					rp, goto fail_test);

		rte_ring_free(rp);
		rte_free(src);
		rte_free(dst);
		rp = NULL;
		src = NULL;
		dst = NULL;
	}

	return 0;

fail_test:
	rte_ring_free(rp);
	rte_free(src);
	rte_free(dst);
	return -1;
}

/*
 * Basic test cases with exact size ring.
 */
static int
test_ring_with_exact_size(void)
{
	struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
	void **src_orig = NULL, **dst_orig = NULL;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	const unsigned int ring_sz = 16;
	unsigned int i, j;
	int ret = -1;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string("Test exact size ring",
				TEST_RING_IGNORE_API_TYPE,
				esize[i]);

		std_r = test_ring_create("std", esize[i], ring_sz,
					rte_socket_id(),
					RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (std_r == NULL) {
			printf("%s: error, can't create std ring\n", __func__);
			goto test_fail;
		}
		exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
				rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ |
				RING_F_EXACT_SZ);
		if (exact_sz_r == NULL) {
			printf("%s: error, can't create exact size ring\n",
					__func__);
			goto test_fail;
		}

		/* alloc object pointers. Allocate one extra object
		 * and create an unaligned address.
		 */
		src_orig = test_ring_calloc(17, esize[i]);
		if (src_orig == NULL)
			goto test_fail;
		test_ring_mem_init(src_orig, 17, esize[i]);
		src = (void **)((uintptr_t)src_orig + 1);
		cur_src = src;

		dst_orig = test_ring_calloc(17, esize[i]);
		if (dst_orig == NULL)
			goto test_fail;
		dst = (void **)((uintptr_t)dst_orig + 1);
		cur_dst = dst;

		/*
		 * Check that the exact size ring is at least as big as the
		 * standard ring
		 */
		TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
				rte_ring_get_size(exact_sz_r),
				std_r, goto test_fail);

		/*
		 * check that the exact_sz_ring can hold one more element
		 * than the standard ring. (16 vs 15 elements)
		 */
		for (j = 0; j < ring_sz - 1; j++) {
			ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
			ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
			cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
		}
		ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
		ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
		cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);

		/* check that dequeue returns the expected number of elements */
		ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
		cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);

		/* check that the capacity function returns expected value */
		TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
					exact_sz_r, goto test_fail);

		/* check data */
		TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
					RTE_PTR_DIFF(cur_dst, dst)) == 0,
					exact_sz_r, goto test_fail);

		rte_free(src_orig);
		rte_free(dst_orig);
		rte_ring_free(std_r);
		rte_ring_free(exact_sz_r);
		src_orig = NULL;
		dst_orig = NULL;
		std_r = NULL;
		exact_sz_r = NULL;
	}

	return 0;

test_fail:
	rte_free(src_orig);
	rte_free(dst_orig);
	rte_ring_free(std_r);
	rte_ring_free(exact_sz_r);
	return -1;
}

static int
test_ring(void)
{
	int32_t rc;
	unsigned int i;

	/* Negative test cases */
	if (test_ring_negative_tests() < 0)
		goto test_fail;

	/* Some basic operations */
	if (test_ring_basic_ex() < 0)
		goto test_fail;

	if (test_ring_with_exact_size() < 0)
		goto test_fail;

	/* Burst and bulk operations with sp/sc, mp/mc and default.
	 * The test cases are split into smaller test cases to
	 * help clang compile faster.
	 */
	for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {

		rc = test_ring_burst_bulk_tests1(i);
		if (rc < 0)
			goto test_fail;

		rc = test_ring_burst_bulk_tests2(i);
		if (rc < 0)
			goto test_fail;

		rc = test_ring_burst_bulk_tests3(i);
		if (rc < 0)
			goto test_fail;

		rc = test_ring_burst_bulk_tests4(i);
		if (rc < 0)
			goto test_fail;
	}

	/* dump the ring status */
	rte_ring_list_dump(stdout);

	return 0;

test_fail:

	return -1;
}

REGISTER_TEST_COMMAND(ring_autotest, test_ring);