xref: /dpdk/lib/eal/include/rte_seqcount.h (revision 719834a6849e1daf4a70ff7742bbcc3ae7e25607)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Ericsson AB
 */

#ifndef _RTE_SEQCOUNT_H_
#define _RTE_SEQCOUNT_H_

/**
 * @file
 * RTE Seqcount
 *
 * The sequence counter synchronizes a single writer with multiple,
 * parallel readers. It is used as the basis for the RTE sequence
 * lock.
 *
 * @see rte_seqlock.h
 */

#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * The RTE seqcount type.
 */
typedef struct {
	RTE_ATOMIC(uint32_t) sn; /**< A sequence number for the protected data. */
} rte_seqcount_t;

/**
 * A static seqcount initializer.
 */
#define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }

/**
 * Initialize the sequence counter.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 */
static inline void
rte_seqcount_init(rte_seqcount_t *seqcount)
{
	seqcount->sn = 0;
}
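
/*
 * Illustrative sketch: a hypothetical seqcount-protected struct, shown
 * with both static and run-time initialization. The "struct
 * config_example" type and its fields are invented for illustration
 * only and are not part of the DPDK API.
 */
struct config_example {
	rte_seqcount_t sc;
	uint64_t a;
	uint64_t b;
};

/* Static initialization of the embedded seqcount. */
static struct config_example global_config_example = {
	.sc = RTE_SEQCOUNT_INITIALIZER,
};

/* Run-time initialization of the embedded seqcount. */
static inline void
config_example_init(struct config_example *config)
{
	rte_seqcount_init(&config->sc);
	config->a = 0;
	config->b = 0;
}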

/**
 * Begin a read-side critical section.
 *
 * A call to this function marks the beginning of a read-side critical
 * section, for @p seqcount.
 *
 * rte_seqcount_read_begin() returns a sequence number, which is later
 * used in rte_seqcount_read_retry() to check if the protected data
 * underwent any modifications during the read transaction.
 *
 * After (in program order) rte_seqcount_read_begin() has been called,
 * the calling thread reads the protected data, for later use. The
 * protected data read *must* be copied (either in pristine form, or
 * in the form of some derivative), since the caller may only read the
 * data from within the read-side critical section (i.e., after
 * rte_seqcount_read_begin() and before rte_seqcount_read_retry()),
 * but must not act upon the retrieved data while in the critical
 * section, since it does not yet know if it is consistent.
 *
 * The protected data may be read using atomic and/or non-atomic
 * operations.
 *
 * After (in program order) all required data loads have been
 * performed, rte_seqcount_read_retry() should be called, marking
 * the end of the read-side critical section.
 *
 * If rte_seqcount_read_retry() returns true, the just-read data is
 * inconsistent and should be discarded. The caller has the option to
 * either restart the whole procedure right away (i.e., calling
 * rte_seqcount_read_begin() again), or do the same at some later time.
 *
 * If rte_seqcount_read_retry() returns false, the data was read
 * atomically and the copied data is consistent.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 * @return
 *   The seqcount sequence number for this critical section, to
 *   later be passed to rte_seqcount_read_retry().
 *
 * @see rte_seqcount_read_retry()
 */
static inline uint32_t
rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
{
	/* The load-acquire prevents loads after it (in program order)
	 * from being reordered before the sn load. Synchronizes-with
	 * the store-release in rte_seqcount_write_end().
	 */
	return rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_acquire);
}

/**
 * End a read-side critical section.
 *
 * A call to this function marks the end of a read-side critical
 * section, for @p seqcount. The application must supply the sequence
 * number produced by the corresponding rte_seqcount_read_begin() call.
 *
 * After this function has been called, the caller should not access
 * the protected data.
 *
 * In case rte_seqcount_read_retry() returns true, the just-read data
 * was modified as it was being read and may be inconsistent, and thus
 * should be discarded.
 *
 * In case this function returns false, the data is consistent and the
 * set of atomic and non-atomic load operations performed between
 * rte_seqcount_read_begin() and rte_seqcount_read_retry() was atomic,
 * as a whole.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 * @param begin_sn
 *   The sequence number returned by rte_seqcount_read_begin().
 * @return
 *   true if the just-read seqcount-protected data was inconsistent
 *   at the time it was read, otherwise false.
 *
 * @see rte_seqcount_read_begin()
 */
static inline bool
rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
{
	uint32_t end_sn;

	/* An odd sequence number means the protected data was being
	 * modified already at the point of the rte_seqcount_read_begin()
	 * call.
	 */
	if (unlikely(begin_sn & 1))
		return true;

	/* Make sure the data loads happen before the sn load. */
	rte_atomic_thread_fence(rte_memory_order_acquire);

	end_sn = rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_relaxed);

	/* A writer incremented the sequence number during this read
	 * critical section.
	 */
	return begin_sn != end_sn;
}
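
/*
 * Illustrative sketch: a reader copying seqcount-protected data with the
 * retry protocol described above. It uses the hypothetical
 * "struct config_example" from the initialization sketch earlier in this
 * file; neither is part of the DPDK API.
 */
static inline void
config_example_read(const struct config_example *config,
		uint64_t *a, uint64_t *b)
{
	uint32_t sn;

	do {
		sn = rte_seqcount_read_begin(&config->sc);

		/* Copy the protected data inside the read-side critical
		 * section; it must not be acted upon until
		 * rte_seqcount_read_retry() has confirmed it is consistent.
		 */
		*a = config->a;
		*b = config->b;

		/* Retry immediately if a writer modified the data while
		 * it was being read.
		 */
	} while (rte_seqcount_read_retry(&config->sc, sn));
}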

/**
 * Begin a write-side critical section.
 *
 * A call to this function marks the beginning of a write-side
 * critical section, after which the caller may go on to modify (both
 * read and write) the protected data, in an atomic or non-atomic
 * manner.
 *
 * After the necessary updates have been performed, the application
 * calls rte_seqcount_write_end().
 *
 * Multiple, parallel writers must use some external serialization.
 *
 * This function is not preemption-safe in the sense that preemption
 * of the calling thread may block reader progress until the writer
 * thread is rescheduled.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 *
 * @see rte_seqcount_write_end()
 */
static inline void
rte_seqcount_write_begin(rte_seqcount_t *seqcount)
{
	uint32_t sn;

	sn = seqcount->sn + 1;

	rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_relaxed);

	/* The release fence prevents stores after it (in program order)
	 * from being reordered before the sn store.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
}

/**
 * End a write-side critical section.
 *
 * A call to this function marks the end of the write-side critical
 * section, for @p seqcount. After this call has been made, the
 * protected data may no longer be modified.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 *
 * @see rte_seqcount_write_begin()
 */
static inline void
rte_seqcount_write_end(rte_seqcount_t *seqcount)
{
	uint32_t sn;

	sn = seqcount->sn + 1;

	/* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */
	rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_release);
}
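
/*
 * Illustrative sketch: a writer updating the hypothetical
 * "struct config_example" from the sketches above; not part of the DPDK
 * API. Serialization against other writers (e.g., a spinlock) is assumed
 * to be provided by the caller.
 */
static inline void
config_example_update(struct config_example *config, uint64_t a, uint64_t b)
{
	rte_seqcount_write_begin(&config->sc);

	/* Modify the protected data inside the write-side critical
	 * section. Non-atomic stores are sufficient here, since readers
	 * detect and retry torn reads via the sequence number.
	 */
	config->a = a;
	config->b = b;

	rte_seqcount_write_end(&config->sc);
}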

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SEQCOUNT_H_ */