xref: /dpdk/lib/ring/rte_ring_elem_pvt.h (revision e4251abd4a3a1dbf7d613fa407e4d2843708a843)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  *
399a2dd95SBruce Richardson  * Copyright (c) 2017,2018 HXT-semitech Corporation.
499a2dd95SBruce Richardson  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
599a2dd95SBruce Richardson  * All rights reserved.
699a2dd95SBruce Richardson  * Derived from FreeBSD's bufring.h
799a2dd95SBruce Richardson  * Used as BSD-3 Licensed with permission from Kip Macy.
899a2dd95SBruce Richardson  */
999a2dd95SBruce Richardson 
1099a2dd95SBruce Richardson #ifndef _RTE_RING_ELEM_PVT_H_
1199a2dd95SBruce Richardson #define _RTE_RING_ELEM_PVT_H_
1299a2dd95SBruce Richardson 
13dea4c541SKevin Traynor #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
14dea4c541SKevin Traynor #pragma GCC diagnostic push
15dea4c541SKevin Traynor #pragma GCC diagnostic ignored "-Wstringop-overflow"
16dea4c541SKevin Traynor #pragma GCC diagnostic ignored "-Wstringop-overread"
17dea4c541SKevin Traynor #endif
18dea4c541SKevin Traynor 
1999a2dd95SBruce Richardson static __rte_always_inline void
20*e4251abdSKonstantin Ananyev __rte_ring_enqueue_elems_32(void *ring_table, const void *obj_table,
21*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
2299a2dd95SBruce Richardson {
2399a2dd95SBruce Richardson 	unsigned int i;
24*e4251abdSKonstantin Ananyev 
25*e4251abdSKonstantin Ananyev 	uint32_t *ring = (uint32_t *)ring_table;
2699a2dd95SBruce Richardson 	const uint32_t *obj = (const uint32_t *)obj_table;
27*e4251abdSKonstantin Ananyev 
2897ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
2999a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
3099a2dd95SBruce Richardson 			ring[idx] = obj[i];
3199a2dd95SBruce Richardson 			ring[idx + 1] = obj[i + 1];
3299a2dd95SBruce Richardson 			ring[idx + 2] = obj[i + 2];
3399a2dd95SBruce Richardson 			ring[idx + 3] = obj[i + 3];
3499a2dd95SBruce Richardson 			ring[idx + 4] = obj[i + 4];
3599a2dd95SBruce Richardson 			ring[idx + 5] = obj[i + 5];
3699a2dd95SBruce Richardson 			ring[idx + 6] = obj[i + 6];
3799a2dd95SBruce Richardson 			ring[idx + 7] = obj[i + 7];
3899a2dd95SBruce Richardson 		}
3999a2dd95SBruce Richardson 		switch (n & 0x7) {
4099a2dd95SBruce Richardson 		case 7:
4199a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
4299a2dd95SBruce Richardson 		case 6:
4399a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
4499a2dd95SBruce Richardson 		case 5:
4599a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
4699a2dd95SBruce Richardson 		case 4:
4799a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
4899a2dd95SBruce Richardson 		case 3:
4999a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
5099a2dd95SBruce Richardson 		case 2:
5199a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
5299a2dd95SBruce Richardson 		case 1:
5399a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
5499a2dd95SBruce Richardson 		}
5599a2dd95SBruce Richardson 	} else {
5699a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
5799a2dd95SBruce Richardson 			ring[idx] = obj[i];
5899a2dd95SBruce Richardson 		/* Start at the beginning */
5999a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
6099a2dd95SBruce Richardson 			ring[idx] = obj[i];
6199a2dd95SBruce Richardson 	}
6299a2dd95SBruce Richardson }
6399a2dd95SBruce Richardson 
6499a2dd95SBruce Richardson static __rte_always_inline void
65*e4251abdSKonstantin Ananyev __rte_ring_enqueue_elems_64(void *ring_table, const void *obj_table,
66*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
6799a2dd95SBruce Richardson {
6899a2dd95SBruce Richardson 	unsigned int i;
69*e4251abdSKonstantin Ananyev 
70*e4251abdSKonstantin Ananyev 	uint64_t *ring = (uint64_t *)ring_table;
7199a2dd95SBruce Richardson 	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
72*e4251abdSKonstantin Ananyev 
7397ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
7499a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
7599a2dd95SBruce Richardson 			ring[idx] = obj[i];
7699a2dd95SBruce Richardson 			ring[idx + 1] = obj[i + 1];
7799a2dd95SBruce Richardson 			ring[idx + 2] = obj[i + 2];
7899a2dd95SBruce Richardson 			ring[idx + 3] = obj[i + 3];
7999a2dd95SBruce Richardson 		}
8099a2dd95SBruce Richardson 		switch (n & 0x3) {
8199a2dd95SBruce Richardson 		case 3:
8299a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
8399a2dd95SBruce Richardson 		case 2:
8499a2dd95SBruce Richardson 			ring[idx++] = obj[i++]; /* fallthrough */
8599a2dd95SBruce Richardson 		case 1:
8699a2dd95SBruce Richardson 			ring[idx++] = obj[i++];
8799a2dd95SBruce Richardson 		}
8899a2dd95SBruce Richardson 	} else {
8999a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
9099a2dd95SBruce Richardson 			ring[idx] = obj[i];
9199a2dd95SBruce Richardson 		/* Start at the beginning */
9299a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
9399a2dd95SBruce Richardson 			ring[idx] = obj[i];
9499a2dd95SBruce Richardson 	}
9599a2dd95SBruce Richardson }
9699a2dd95SBruce Richardson 
9799a2dd95SBruce Richardson static __rte_always_inline void
98*e4251abdSKonstantin Ananyev __rte_ring_enqueue_elems_128(void *ring_table, const void *obj_table,
99*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
10099a2dd95SBruce Richardson {
10199a2dd95SBruce Richardson 	unsigned int i;
102*e4251abdSKonstantin Ananyev 
103*e4251abdSKonstantin Ananyev 	rte_int128_t *ring = (rte_int128_t *)ring_table;
10499a2dd95SBruce Richardson 	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
105*e4251abdSKonstantin Ananyev 
10697ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
10799a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
10899a2dd95SBruce Richardson 			memcpy((void *)(ring + idx),
10999a2dd95SBruce Richardson 				(const void *)(obj + i), 32);
11099a2dd95SBruce Richardson 		switch (n & 0x1) {
11199a2dd95SBruce Richardson 		case 1:
11299a2dd95SBruce Richardson 			memcpy((void *)(ring + idx),
11399a2dd95SBruce Richardson 				(const void *)(obj + i), 16);
11499a2dd95SBruce Richardson 		}
11599a2dd95SBruce Richardson 	} else {
11699a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
11799a2dd95SBruce Richardson 			memcpy((void *)(ring + idx),
11899a2dd95SBruce Richardson 				(const void *)(obj + i), 16);
11999a2dd95SBruce Richardson 		/* Start at the beginning */
12099a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
12199a2dd95SBruce Richardson 			memcpy((void *)(ring + idx),
12299a2dd95SBruce Richardson 				(const void *)(obj + i), 16);
12399a2dd95SBruce Richardson 	}
12499a2dd95SBruce Richardson }
12599a2dd95SBruce Richardson 
12699a2dd95SBruce Richardson /* the actual enqueue of elements on the ring.
12799a2dd95SBruce Richardson  * Placed here since identical code needed in both
12899a2dd95SBruce Richardson  * single and multi producer enqueue functions.
12999a2dd95SBruce Richardson  */
13099a2dd95SBruce Richardson static __rte_always_inline void
131*e4251abdSKonstantin Ananyev __rte_ring_do_enqueue_elems(void *ring_table, const void *obj_table,
132*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t esize, uint32_t num)
13399a2dd95SBruce Richardson {
13499a2dd95SBruce Richardson 	/* 8B and 16B copies implemented individually to retain
13599a2dd95SBruce Richardson 	 * the current performance.
13699a2dd95SBruce Richardson 	 */
13799a2dd95SBruce Richardson 	if (esize == 8)
138*e4251abdSKonstantin Ananyev 		__rte_ring_enqueue_elems_64(ring_table, obj_table, size,
139*e4251abdSKonstantin Ananyev 				idx, num);
14099a2dd95SBruce Richardson 	else if (esize == 16)
141*e4251abdSKonstantin Ananyev 		__rte_ring_enqueue_elems_128(ring_table, obj_table, size,
142*e4251abdSKonstantin Ananyev 				idx, num);
14399a2dd95SBruce Richardson 	else {
144*e4251abdSKonstantin Ananyev 		uint32_t scale, nr_idx, nr_num, nr_size;
14599a2dd95SBruce Richardson 
14699a2dd95SBruce Richardson 		/* Normalize to uint32_t */
14799a2dd95SBruce Richardson 		scale = esize / sizeof(uint32_t);
14899a2dd95SBruce Richardson 		nr_num = num * scale;
14999a2dd95SBruce Richardson 		nr_idx = idx * scale;
150*e4251abdSKonstantin Ananyev 		nr_size = size * scale;
151*e4251abdSKonstantin Ananyev 		__rte_ring_enqueue_elems_32(ring_table, obj_table, nr_size,
152*e4251abdSKonstantin Ananyev 				nr_idx, nr_num);
15399a2dd95SBruce Richardson 	}
15499a2dd95SBruce Richardson }
15599a2dd95SBruce Richardson 
15699a2dd95SBruce Richardson static __rte_always_inline void
157*e4251abdSKonstantin Ananyev __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
158*e4251abdSKonstantin Ananyev 		const void *obj_table, uint32_t esize, uint32_t num)
159*e4251abdSKonstantin Ananyev {
160*e4251abdSKonstantin Ananyev 	__rte_ring_do_enqueue_elems(&r[1], obj_table, r->size,
161*e4251abdSKonstantin Ananyev 			prod_head & r->mask, esize, num);
162*e4251abdSKonstantin Ananyev }
163*e4251abdSKonstantin Ananyev 
164*e4251abdSKonstantin Ananyev static __rte_always_inline void
165*e4251abdSKonstantin Ananyev __rte_ring_dequeue_elems_32(void *obj_table, const void *ring_table,
166*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
16799a2dd95SBruce Richardson {
16899a2dd95SBruce Richardson 	unsigned int i;
16999a2dd95SBruce Richardson 	uint32_t *obj = (uint32_t *)obj_table;
170*e4251abdSKonstantin Ananyev 	const uint32_t *ring = (const uint32_t *)ring_table;
171*e4251abdSKonstantin Ananyev 
17297ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
17399a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
17499a2dd95SBruce Richardson 			obj[i] = ring[idx];
17599a2dd95SBruce Richardson 			obj[i + 1] = ring[idx + 1];
17699a2dd95SBruce Richardson 			obj[i + 2] = ring[idx + 2];
17799a2dd95SBruce Richardson 			obj[i + 3] = ring[idx + 3];
17899a2dd95SBruce Richardson 			obj[i + 4] = ring[idx + 4];
17999a2dd95SBruce Richardson 			obj[i + 5] = ring[idx + 5];
18099a2dd95SBruce Richardson 			obj[i + 6] = ring[idx + 6];
18199a2dd95SBruce Richardson 			obj[i + 7] = ring[idx + 7];
18299a2dd95SBruce Richardson 		}
18399a2dd95SBruce Richardson 		switch (n & 0x7) {
18499a2dd95SBruce Richardson 		case 7:
18599a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
18699a2dd95SBruce Richardson 		case 6:
18799a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
18899a2dd95SBruce Richardson 		case 5:
18999a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
19099a2dd95SBruce Richardson 		case 4:
19199a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
19299a2dd95SBruce Richardson 		case 3:
19399a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
19499a2dd95SBruce Richardson 		case 2:
19599a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
19699a2dd95SBruce Richardson 		case 1:
19799a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
19899a2dd95SBruce Richardson 		}
19999a2dd95SBruce Richardson 	} else {
20099a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
20199a2dd95SBruce Richardson 			obj[i] = ring[idx];
20299a2dd95SBruce Richardson 		/* Start at the beginning */
20399a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
20499a2dd95SBruce Richardson 			obj[i] = ring[idx];
20599a2dd95SBruce Richardson 	}
20699a2dd95SBruce Richardson }
20799a2dd95SBruce Richardson 
20899a2dd95SBruce Richardson static __rte_always_inline void
209*e4251abdSKonstantin Ananyev __rte_ring_dequeue_elems_64(void *obj_table, const void *ring_table,
210*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
21199a2dd95SBruce Richardson {
21299a2dd95SBruce Richardson 	unsigned int i;
21399a2dd95SBruce Richardson 	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
214*e4251abdSKonstantin Ananyev 	const uint64_t *ring = (const uint64_t *)ring_table;
215*e4251abdSKonstantin Ananyev 
21697ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
21799a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
21899a2dd95SBruce Richardson 			obj[i] = ring[idx];
21999a2dd95SBruce Richardson 			obj[i + 1] = ring[idx + 1];
22099a2dd95SBruce Richardson 			obj[i + 2] = ring[idx + 2];
22199a2dd95SBruce Richardson 			obj[i + 3] = ring[idx + 3];
22299a2dd95SBruce Richardson 		}
22399a2dd95SBruce Richardson 		switch (n & 0x3) {
22499a2dd95SBruce Richardson 		case 3:
22599a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
22699a2dd95SBruce Richardson 		case 2:
22799a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
22899a2dd95SBruce Richardson 		case 1:
22999a2dd95SBruce Richardson 			obj[i++] = ring[idx++]; /* fallthrough */
23099a2dd95SBruce Richardson 		}
23199a2dd95SBruce Richardson 	} else {
23299a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
23399a2dd95SBruce Richardson 			obj[i] = ring[idx];
23499a2dd95SBruce Richardson 		/* Start at the beginning */
23599a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
23699a2dd95SBruce Richardson 			obj[i] = ring[idx];
23799a2dd95SBruce Richardson 	}
23899a2dd95SBruce Richardson }
23999a2dd95SBruce Richardson 
24099a2dd95SBruce Richardson static __rte_always_inline void
241*e4251abdSKonstantin Ananyev __rte_ring_dequeue_elems_128(void *obj_table, const void *ring_table,
242*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t n)
24399a2dd95SBruce Richardson {
24499a2dd95SBruce Richardson 	unsigned int i;
24599a2dd95SBruce Richardson 	rte_int128_t *obj = (rte_int128_t *)obj_table;
246*e4251abdSKonstantin Ananyev 	const rte_int128_t *ring = (const rte_int128_t *)ring_table;
247*e4251abdSKonstantin Ananyev 
24897ed4cb6SAndrzej Ostruszka 	if (likely(idx + n <= size)) {
24999a2dd95SBruce Richardson 		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
250*e4251abdSKonstantin Ananyev 			memcpy((obj + i), (const void *)(ring + idx), 32);
25199a2dd95SBruce Richardson 		switch (n & 0x1) {
25299a2dd95SBruce Richardson 		case 1:
253*e4251abdSKonstantin Ananyev 			memcpy((obj + i), (const void *)(ring + idx), 16);
25499a2dd95SBruce Richardson 		}
25599a2dd95SBruce Richardson 	} else {
25699a2dd95SBruce Richardson 		for (i = 0; idx < size; i++, idx++)
257*e4251abdSKonstantin Ananyev 			memcpy((obj + i), (const void *)(ring + idx), 16);
25899a2dd95SBruce Richardson 		/* Start at the beginning */
25999a2dd95SBruce Richardson 		for (idx = 0; i < n; i++, idx++)
260*e4251abdSKonstantin Ananyev 			memcpy((obj + i), (const void *)(ring + idx), 16);
26199a2dd95SBruce Richardson 	}
26299a2dd95SBruce Richardson }
26399a2dd95SBruce Richardson 
26499a2dd95SBruce Richardson /* the actual dequeue of elements from the ring.
26599a2dd95SBruce Richardson  * Placed here since identical code needed in both
26699a2dd95SBruce Richardson  * single and multi producer enqueue functions.
26799a2dd95SBruce Richardson  */
26899a2dd95SBruce Richardson static __rte_always_inline void
269*e4251abdSKonstantin Ananyev __rte_ring_do_dequeue_elems(void *obj_table, const void *ring_table,
270*e4251abdSKonstantin Ananyev 	uint32_t size, uint32_t idx, uint32_t esize, uint32_t num)
27199a2dd95SBruce Richardson {
27299a2dd95SBruce Richardson 	/* 8B and 16B copies implemented individually to retain
27399a2dd95SBruce Richardson 	 * the current performance.
27499a2dd95SBruce Richardson 	 */
27599a2dd95SBruce Richardson 	if (esize == 8)
276*e4251abdSKonstantin Ananyev 		__rte_ring_dequeue_elems_64(obj_table, ring_table, size,
277*e4251abdSKonstantin Ananyev 				idx, num);
27899a2dd95SBruce Richardson 	else if (esize == 16)
279*e4251abdSKonstantin Ananyev 		__rte_ring_dequeue_elems_128(obj_table, ring_table, size,
280*e4251abdSKonstantin Ananyev 				idx, num);
28199a2dd95SBruce Richardson 	else {
282*e4251abdSKonstantin Ananyev 		uint32_t scale, nr_idx, nr_num, nr_size;
28399a2dd95SBruce Richardson 
28499a2dd95SBruce Richardson 		/* Normalize to uint32_t */
28599a2dd95SBruce Richardson 		scale = esize / sizeof(uint32_t);
28699a2dd95SBruce Richardson 		nr_num = num * scale;
28799a2dd95SBruce Richardson 		nr_idx = idx * scale;
288*e4251abdSKonstantin Ananyev 		nr_size = size * scale;
289*e4251abdSKonstantin Ananyev 		__rte_ring_dequeue_elems_32(obj_table, ring_table, nr_size,
290*e4251abdSKonstantin Ananyev 				nr_idx, nr_num);
29199a2dd95SBruce Richardson 	}
29299a2dd95SBruce Richardson }
29399a2dd95SBruce Richardson 
294*e4251abdSKonstantin Ananyev static __rte_always_inline void
295*e4251abdSKonstantin Ananyev __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
296*e4251abdSKonstantin Ananyev 		void *obj_table, uint32_t esize, uint32_t num)
297*e4251abdSKonstantin Ananyev {
298*e4251abdSKonstantin Ananyev 	__rte_ring_do_dequeue_elems(obj_table, &r[1], r->size,
299*e4251abdSKonstantin Ananyev 			cons_head & r->mask, esize, num);
300*e4251abdSKonstantin Ananyev }
301*e4251abdSKonstantin Ananyev 
/* Between two loads there may be CPU reordering on weakly ordered
 * memory models (PowerPC/Arm).
 * There are two choices for users:
 * 1. use rmb() memory barrier
 * 2. use one-direction load_acquire/store_release barriers
 * It depends on performance test results.
 */
30999a2dd95SBruce Richardson #ifdef RTE_USE_C11_MEM_MODEL
31099a2dd95SBruce Richardson #include "rte_ring_c11_pvt.h"
31199a2dd95SBruce Richardson #else
31299a2dd95SBruce Richardson #include "rte_ring_generic_pvt.h"
31399a2dd95SBruce Richardson #endif
31499a2dd95SBruce Richardson 
/**
 * @internal This function updates the producer head for enqueue
 *
 * @param r
 *   A pointer to the ring structure
 * @param is_sp
 *   Indicates whether multi-producer path is needed or not
 * @param n
 *   The number of elements we will want to enqueue, i.e. how far should the
 *   head be moved
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
 * @param old_head
 *   Returns head value as it was before the move, i.e. where enqueue starts
 * @param new_head
 *   Returns the current/new head value i.e. where enqueue finishes
 * @param free_entries
 *   Returns the amount of free space in the ring BEFORE head was moved
 * @return
 *   Actual number of objects enqueued.
 *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	/* Delegate to the shared head-move helper (provided by the
	 * rte_ring_c11_pvt.h / rte_ring_generic_pvt.h include above).
	 * r->capacity is passed so the producer head cannot advance more
	 * than 'capacity' entries past the opposing (consumer) side.
	 */
	return __rte_ring_headtail_move_head(&r->prod, &r->cons, r->capacity,
			is_sp, n, behavior, old_head, new_head, free_entries);
}
3473197a1ffSKonstantin Ananyev 
/**
 * @internal This function updates the consumer head for dequeue
 *
 * @param r
 *   A pointer to the ring structure
 * @param is_sc
 *   Indicates whether multi-consumer path is needed or not
 * @param n
 *   The number of elements we will want to dequeue, i.e. how far should the
 *   head be moved
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
 * @param old_head
 *   Returns head value as it was before the move, i.e. where dequeue starts
 * @param new_head
 *   Returns the current/new head value i.e. where dequeue finishes
 * @param entries
 *   Returns the number of entries in the ring BEFORE head was moved
 * @return
 *   - Actual number of objects dequeued.
 *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	/* Delegate to the shared head-move helper (provided by the
	 * rte_ring_c11_pvt.h / rte_ring_generic_pvt.h include above).
	 * Capacity is 0 on the consumer side: unlike the producer, the
	 * consumer is limited only by the entries currently available.
	 */
	return __rte_ring_headtail_move_head(&r->cons, &r->prod, 0,
			is_sc, n, behavior, old_head, new_head, entries);
}
3803197a1ffSKonstantin Ananyev 
3813197a1ffSKonstantin Ananyev /**
38299a2dd95SBruce Richardson  * @internal Enqueue several objects on the ring
38399a2dd95SBruce Richardson  *
38499a2dd95SBruce Richardson  * @param r
38599a2dd95SBruce Richardson  *   A pointer to the ring structure.
38699a2dd95SBruce Richardson  * @param obj_table
38799a2dd95SBruce Richardson  *   A pointer to a table of objects.
38899a2dd95SBruce Richardson  * @param esize
38999a2dd95SBruce Richardson  *   The size of ring element, in bytes. It must be a multiple of 4.
39099a2dd95SBruce Richardson  *   This must be the same value used while creating the ring. Otherwise
39199a2dd95SBruce Richardson  *   the results are undefined.
39299a2dd95SBruce Richardson  * @param n
39399a2dd95SBruce Richardson  *   The number of objects to add in the ring from the obj_table.
39499a2dd95SBruce Richardson  * @param behavior
39599a2dd95SBruce Richardson  *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
39699a2dd95SBruce Richardson  *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
39799a2dd95SBruce Richardson  * @param is_sp
39899a2dd95SBruce Richardson  *   Indicates whether to use single producer or multi-producer head update
39999a2dd95SBruce Richardson  * @param free_space
40099a2dd95SBruce Richardson  *   returns the amount of space after the enqueue operation has finished
40199a2dd95SBruce Richardson  * @return
40299a2dd95SBruce Richardson  *   Actual number of objects enqueued.
40399a2dd95SBruce Richardson  *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
40499a2dd95SBruce Richardson  */
40599a2dd95SBruce Richardson static __rte_always_inline unsigned int
40699a2dd95SBruce Richardson __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
40799a2dd95SBruce Richardson 		unsigned int esize, unsigned int n,
40899a2dd95SBruce Richardson 		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
40999a2dd95SBruce Richardson 		unsigned int *free_space)
41099a2dd95SBruce Richardson {
41199a2dd95SBruce Richardson 	uint32_t prod_head, prod_next;
41299a2dd95SBruce Richardson 	uint32_t free_entries;
41399a2dd95SBruce Richardson 
41499a2dd95SBruce Richardson 	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
41599a2dd95SBruce Richardson 			&prod_head, &prod_next, &free_entries);
41699a2dd95SBruce Richardson 	if (n == 0)
41799a2dd95SBruce Richardson 		goto end;
41899a2dd95SBruce Richardson 
41999a2dd95SBruce Richardson 	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
42099a2dd95SBruce Richardson 
42199a2dd95SBruce Richardson 	__rte_ring_update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
42299a2dd95SBruce Richardson end:
42399a2dd95SBruce Richardson 	if (free_space != NULL)
42499a2dd95SBruce Richardson 		*free_space = free_entries - n;
42599a2dd95SBruce Richardson 	return n;
42699a2dd95SBruce Richardson }
42799a2dd95SBruce Richardson 
42899a2dd95SBruce Richardson /**
42999a2dd95SBruce Richardson  * @internal Dequeue several objects from the ring
43099a2dd95SBruce Richardson  *
43199a2dd95SBruce Richardson  * @param r
43299a2dd95SBruce Richardson  *   A pointer to the ring structure.
43399a2dd95SBruce Richardson  * @param obj_table
43499a2dd95SBruce Richardson  *   A pointer to a table of objects.
43599a2dd95SBruce Richardson  * @param esize
43699a2dd95SBruce Richardson  *   The size of ring element, in bytes. It must be a multiple of 4.
43799a2dd95SBruce Richardson  *   This must be the same value used while creating the ring. Otherwise
43899a2dd95SBruce Richardson  *   the results are undefined.
43999a2dd95SBruce Richardson  * @param n
44099a2dd95SBruce Richardson  *   The number of objects to pull from the ring.
44199a2dd95SBruce Richardson  * @param behavior
44299a2dd95SBruce Richardson  *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
44399a2dd95SBruce Richardson  *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
44499a2dd95SBruce Richardson  * @param is_sc
44599a2dd95SBruce Richardson  *   Indicates whether to use single consumer or multi-consumer head update
44699a2dd95SBruce Richardson  * @param available
44799a2dd95SBruce Richardson  *   returns the number of remaining ring entries after the dequeue has finished
44899a2dd95SBruce Richardson  * @return
44999a2dd95SBruce Richardson  *   - Actual number of objects dequeued.
45099a2dd95SBruce Richardson  *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
45199a2dd95SBruce Richardson  */
45299a2dd95SBruce Richardson static __rte_always_inline unsigned int
45399a2dd95SBruce Richardson __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
45499a2dd95SBruce Richardson 		unsigned int esize, unsigned int n,
45599a2dd95SBruce Richardson 		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
45699a2dd95SBruce Richardson 		unsigned int *available)
45799a2dd95SBruce Richardson {
45899a2dd95SBruce Richardson 	uint32_t cons_head, cons_next;
45999a2dd95SBruce Richardson 	uint32_t entries;
46099a2dd95SBruce Richardson 
46199a2dd95SBruce Richardson 	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
46299a2dd95SBruce Richardson 			&cons_head, &cons_next, &entries);
46399a2dd95SBruce Richardson 	if (n == 0)
46499a2dd95SBruce Richardson 		goto end;
46599a2dd95SBruce Richardson 
46699a2dd95SBruce Richardson 	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
46799a2dd95SBruce Richardson 
46899a2dd95SBruce Richardson 	__rte_ring_update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
46999a2dd95SBruce Richardson 
47099a2dd95SBruce Richardson end:
47199a2dd95SBruce Richardson 	if (available != NULL)
47299a2dd95SBruce Richardson 		*available = entries - n;
47399a2dd95SBruce Richardson 	return n;
47499a2dd95SBruce Richardson }
47599a2dd95SBruce Richardson 
476dea4c541SKevin Traynor #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
477dea4c541SKevin Traynor #pragma GCC diagnostic pop
478dea4c541SKevin Traynor #endif
479dea4c541SKevin Traynor 
48099a2dd95SBruce Richardson #endif /* _RTE_RING_ELEM_PVT_H_ */
481