/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2020 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_PEEK_ELEM_PVT_H_
#define _RTE_RING_PEEK_ELEM_PVT_H_

/**
 * @file rte_ring_peek_elem_pvt.h
 * It is not recommended to include this file directly,
 * include <rte_ring.h> instead.
 * Contains internal helper functions for the rte_ring peek API.
 * For more information please refer to <rte_ring_peek.h>.
 */

/**
 * @internal get current tail value.
 * This function should be used only for a single-thread producer/consumer.
 * Check that the user didn't request to move the tail above the head.
 * In that situation:
 * - return zero, which aborts any pending changes and
 *   returns the head to its previous position.
 * - trigger an assert in debug mode.
 */
static __rte_always_inline uint32_t
__rte_ring_st_get_tail(struct rte_ring_headtail *ht, uint32_t *tail,
	uint32_t num)
{
	uint32_t h, n, t;

	h = ht->head;
	t = ht->tail;
	n = h - t;

	RTE_ASSERT(n >= num);
	num = (n >= num) ? num : 0;

	*tail = t;
	return num;
}

/**
 * @internal set new values for head and tail.
 * This function should be used only for a single-thread producer/consumer.
 * Should be used only in conjunction with __rte_ring_st_get_tail.
 */
static __rte_always_inline void
__rte_ring_st_set_head_tail(struct rte_ring_headtail *ht, uint32_t tail,
	uint32_t num, uint32_t enqueue)
{
	uint32_t pos;

	RTE_SET_USED(enqueue);

	pos = tail + num;
	ht->head = pos;
	rte_atomic_store_explicit(&ht->tail, pos, rte_memory_order_release);
}

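/*
 * Illustrative only, not part of the ring API: a minimal sketch of how the
 * two single-thread helpers above are meant to be paired by an "enqueue
 * finish" step of the peek API (see rte_ring_enqueue_elem_finish() in
 * <rte_ring_peek.h> for the real caller). The function name is hypothetical.
 */
static __rte_always_inline void
__rte_ring_example_st_enqueue_finish(struct rte_ring *r,
	const void *obj_table, uint32_t esize, uint32_t n)
{
	uint32_t tail;

	/* clamp n to the number of slots reserved by the start step */
	n = __rte_ring_st_get_tail(&r->prod, &tail, n);
	if (n != 0)
		/* copy the objects into the reserved slots */
		__rte_ring_enqueue_elems(r, tail, obj_table, esize, n);
	/* move prod head/tail past the new objects to publish them */
	__rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
}
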
/**
 * @internal get current tail value.
 * This function should be used only for a producer/consumer in MT_HTS mode.
 * Check that the user didn't request to move the tail above the head.
 * In that situation:
 * - return zero, which aborts any pending changes and
 *   returns the head to its previous position.
 * - trigger an assert in debug mode.
 */
static __rte_always_inline uint32_t
__rte_ring_hts_get_tail(struct rte_ring_hts_headtail *ht, uint32_t *tail,
	uint32_t num)
{
	uint32_t n;
	union __rte_ring_hts_pos p;

	p.raw = rte_atomic_load_explicit(&ht->ht.raw, rte_memory_order_relaxed);
	n = p.pos.head - p.pos.tail;

	RTE_ASSERT(n >= num);
	num = (n >= num) ? num : 0;

	*tail = p.pos.tail;
	return num;
}

/**
 * @internal set new values for head and tail as one atomic 64-bit operation.
 * This function should be used only for a producer/consumer in MT_HTS mode.
 * Should be used only in conjunction with __rte_ring_hts_get_tail.
 */
static __rte_always_inline void
__rte_ring_hts_set_head_tail(struct rte_ring_hts_headtail *ht, uint32_t tail,
	uint32_t num, uint32_t enqueue)
{
	union __rte_ring_hts_pos p;

	RTE_SET_USED(enqueue);

	p.pos.head = tail + num;
	p.pos.tail = p.pos.head;

	rte_atomic_store_explicit(&ht->ht.raw, p.raw, rte_memory_order_release);
}

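/*
 * Illustrative only, not part of the ring API: a hypothetical sketch of an
 * MT_HTS "dequeue finish" step built from the two helpers above. Packing the
 * new head and tail into one union __rte_ring_hts_pos lets both values be
 * published with a single 64-bit atomic store.
 */
static __rte_always_inline void
__rte_ring_example_hts_dequeue_finish(struct rte_ring *r, uint32_t n)
{
	uint32_t tail;

	/* clamp n to the number of objects reserved by the start step */
	n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
	/* release the consumed slots: head and tail move in one atomic store */
	__rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
}
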
/**
 * @internal This function moves the prod head value.
 */
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_start(struct rte_ring *r, uint32_t n,
		enum rte_ring_queue_behavior behavior, uint32_t *free_space)
{
	uint32_t free, head, next;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &free);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_prod_head(r, n, behavior,
			&head, &free);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		free = 0;
	}

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}

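/*
 * Illustrative only, not part of the ring API: a hypothetical wrapper showing
 * how a fixed-size "enqueue start" call is expected to map onto the helper
 * above; the real public entry point is rte_ring_enqueue_bulk_elem_start()
 * in <rte_ring_peek.h>.
 */
static __rte_always_inline unsigned int
__rte_ring_example_enqueue_start(struct rte_ring *r, uint32_t n,
	uint32_t *free_space)
{
	/* reserve exactly n slots (or none), moving only the prod head */
	return __rte_ring_do_enqueue_start(r, n, RTE_RING_QUEUE_FIXED,
		free_space);
}
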
/**
 * @internal This function moves the cons head value and copies up to *n*
 * objects from the ring to the user-provided obj_table.
 */
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_start(struct rte_ring *r, void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *available)
{
	uint32_t avail, head, next;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &avail);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_cons_head(r, n, behavior,
			&head, &avail);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		avail = 0;
	}

	if (n != 0)
		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);

	if (available != NULL)
		*available = avail - n;
	return n;
}

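/*
 * Illustrative only, not part of the ring API: a hypothetical sketch tying
 * the pieces together for a ring created with the single-thread sync type
 * (RTE_RING_SYNC_ST). The start step above reserves and copies the objects;
 * the single-thread finish helpers at the top of this file then release the
 * consumed slots back to the producer.
 */
static __rte_always_inline unsigned int
__rte_ring_example_st_dequeue_cycle(struct rte_ring *r, void *obj_table,
	uint32_t esize, uint32_t n, uint32_t *available)
{
	uint32_t tail;

	/* phase 1: move cons head and copy up to n objects out of the ring */
	n = __rte_ring_do_dequeue_start(r, obj_table, esize, n,
		RTE_RING_QUEUE_VARIABLE, available);
	if (n != 0) {
		/* phase 2: move cons tail to free the slots for the producer */
		n = __rte_ring_st_get_tail(&r->cons, &tail, n);
		__rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
	}
	return n;
}
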
#endif /* _RTE_RING_PEEK_ELEM_PVT_H_ */