xref: /dpdk/lib/eal/include/generic/rte_rwlock.h (revision 719834a6849e1daf4a70ff7742bbcc3ae7e25607)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #ifndef _RTE_RWLOCK_H_
6 #define _RTE_RWLOCK_H_
7 
8 /**
9  * @file
10  *
11  * RTE Read-Write Locks
12  *
13  * This file defines an API for read-write locks. The lock is used to
14  * protect data that allows multiple readers in parallel, but only
15  * one writer. All readers are blocked until the writer is finished
16  * writing.
17  *
18  * This version does not give preference to readers or writers
19  * and does not starve either readers or writers.
20  *
21  * See also:
22  *  https://locklessinc.com/articles/locks/
23  */
24 
25 #include <errno.h>
26 
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_lock_annotations.h>
30 #include <rte_pause.h>
31 #include <rte_stdatomic.h>
32 
33 #ifdef __cplusplus
34 extern "C" {
35 #endif
36 
37 /**
38  * The rte_rwlock_t type.
39  *
40  * Readers increment the counter by RTE_RWLOCK_READ (4)
41  * Writers set the RTE_RWLOCK_WRITE bit when lock is held
42  *     and set the RTE_RWLOCK_WAIT bit while waiting.
43  *
44  * 31                 2 1 0
45  * +-------------------+-+-+
46  * |  readers          | | |
47  * +-------------------+-+-+
48  *                      ^ ^
49  *                      | |
50  * WRITE: lock held ----/ |
51  * WAIT: writer pending --/
52  */
53 
54 #define RTE_RWLOCK_WAIT	 0x1	/* Writer is waiting */
55 #define RTE_RWLOCK_WRITE 0x2	/* Writer has the lock */
56 #define RTE_RWLOCK_MASK  (RTE_RWLOCK_WAIT | RTE_RWLOCK_WRITE)
57 				/* Writer is waiting or has lock */
58 #define RTE_RWLOCK_READ	 0x4	/* Reader increment */
59 
typedef struct __rte_lockable {
	RTE_ATOMIC(int32_t) cnt; /* Lock word: bit 0 WAIT, bit 1 WRITE, bits 2..31 reader count */
} rte_rwlock_t;
63 
64 /**
65  * A static rwlock initializer.
66  */
67 #define RTE_RWLOCK_INITIALIZER { 0 }
68 
/**
 * Initialize the rwlock to an unlocked state (lock word zeroed:
 * no readers, no writer held, no writer waiting).
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */
static inline void
rte_rwlock_init(rte_rwlock_t *rwl)
{
	rwl->cnt = 0;
}
80 
/**
 * Take a read lock. Loop until the lock is held.
 *
 * @note The RW lock isn't recursive, so calling this function on the same
 * lock twice without releasing it could potentially result in a deadlock
 * scenario when a write lock is involved.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_read_lock(rte_rwlock_t *rwl)
	__rte_shared_lock_function(rwl)
	__rte_no_thread_safety_analysis
{
	int32_t x;

	while (1) {
		/* Wait while writer is present or pending: checking
		 * RTE_RWLOCK_MASK (WAIT | WRITE) lets a pending writer get
		 * ahead of newly arriving readers. A relaxed load suffices
		 * here; the fetch_add below provides the acquire.
		 */
		while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed)
		       & RTE_RWLOCK_MASK)
			rte_pause();

		/* Optimistically register as a reader; acquire ordering
		 * pairs with the writer's release on unlock.
		 */
		x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
				       rte_memory_order_acquire) + RTE_RWLOCK_READ;

		/* If no writer bit is set, the read lock is now held */
		if (likely(!(x & RTE_RWLOCK_MASK)))
			return;

		/* A writer raced in between the load and the add: back out
		 * the reader increment and retry from the top.
		 */
		rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
				   rte_memory_order_relaxed);
	}
}
117 
/**
 * Try to take a read lock.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 * @return
 *   - zero if the lock is successfully taken
 *   - -EBUSY if lock could not be acquired for reading because a
 *     writer holds the lock or is waiting for it
 */
static inline int
rte_rwlock_read_trylock(rte_rwlock_t *rwl)
	__rte_shared_trylock_function(0, rwl)
	__rte_no_thread_safety_analysis
{
	int32_t x;

	x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);

	/* fail if write lock is held or writer is pending */
	if (x & RTE_RWLOCK_MASK)
		return -EBUSY;

	/* Optimistically add a reader increment; acquire ordering pairs
	 * with the writer's release on unlock.
	 */
	x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
			       rte_memory_order_acquire) + RTE_RWLOCK_READ;

	/* Back out the reader increment if a writer raced in between the
	 * load above and the add.
	 */
	if (unlikely(x & RTE_RWLOCK_MASK)) {
		rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
				   rte_memory_order_release);

		return -EBUSY;
	}
	return 0;
}
154 
/**
 * Release a read lock.
 *
 * Release ordering makes this reader's critical-section accesses
 * visible before the reader count is decremented.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */
static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
	__rte_unlock_function(rwl)
	__rte_no_thread_safety_analysis
{
	/* Drop this reader's RTE_RWLOCK_READ increment from the lock word */
	rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, rte_memory_order_release);
}
168 
/**
 * Try to take a write lock.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 * @return
 *   - zero if the lock is successfully taken
 *   - -EBUSY if lock could not be acquired for writing because
 *     it was already locked for reading or writing (a spurious
 *     weak-CAS failure is also reported as -EBUSY)
 */
static inline int
rte_rwlock_write_trylock(rte_rwlock_t *rwl)
	__rte_exclusive_trylock_function(0, rwl)
	__rte_no_thread_safety_analysis
{
	int32_t x;

	x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);
	/* x < RTE_RWLOCK_WRITE (2) means no reader is present (each reader
	 * adds RTE_RWLOCK_READ = 4) and no writer holds the lock; at most
	 * the WAIT bit is set, which the addition below preserves.
	 */
	if (x < RTE_RWLOCK_WRITE &&
	    rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
					rte_memory_order_acquire, rte_memory_order_relaxed))
		return 0;
	else
		return -EBUSY;
}
194 
/**
 * Take a write lock. Loop until the lock is held.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_lock(rte_rwlock_t *rwl)
	__rte_exclusive_lock_function(rwl)
	__rte_no_thread_safety_analysis
{
	int32_t x;

	while (1) {
		x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);

		/* No readers or writers? (only the WAIT bit may be set) */
		if (likely(x < RTE_RWLOCK_WRITE)) {
			/* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE:
			 * the CAS installs exactly RTE_RWLOCK_WRITE, clearing
			 * any pending-writer bit.
			 */
			if (rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x,
					RTE_RWLOCK_WRITE, rte_memory_order_acquire,
					rte_memory_order_relaxed))
				return;
		}

		/* Turn on writer wait bit so newly arriving readers back
		 * off (they test RTE_RWLOCK_MASK before entering).
		 */
		if (!(x & RTE_RWLOCK_WAIT))
			rte_atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT,
				rte_memory_order_relaxed);

		/* Wait until at most the WAIT bit remains (no readers, no
		 * owning writer) before trying the CAS again.
		 */
		while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed)
				> RTE_RWLOCK_WAIT)
			rte_pause();

	}
}
232 
/**
 * Release a write lock.
 *
 * Release ordering makes the writer's critical-section accesses
 * visible before the lock is observed as free.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
	__rte_unlock_function(rwl)
	__rte_no_thread_safety_analysis
{
	/* Subtracting RTE_RWLOCK_WRITE clears only the WRITE bit; a WAIT
	 * bit set by another queued writer is preserved.
	 */
	rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, rte_memory_order_release);
}
246 
247 /**
248  * Test if the write lock is taken.
249  *
250  * @param rwl
251  *   A pointer to a rwlock structure.
252  * @return
253  *   1 if the write lock is currently taken; 0 otherwise.
254  */
255 static inline int
256 rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
257 {
258 	if (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) & RTE_RWLOCK_WRITE)
259 		return 1;
260 
261 	return 0;
262 }
263 
264 /**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take a read lock instead
267  *
268  * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
269  * transaction always aborts the transaction since the CPU is not able to
270  * roll-back should the transaction fail. Therefore, hardware transactional
271  * locks are not advised to be used around rte_eth_rx_burst() and
272  * rte_eth_tx_burst() calls.
273  *
274  * @param rwl
275  *   A pointer to a rwlock structure.
276  */
277 static inline void
278 rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
279 	__rte_shared_lock_function(rwl);
280 
281 /**
282  * Commit hardware memory transaction or release the read lock if the lock is used as a fall-back
283  *
284  * @param rwl
285  *   A pointer to the rwlock structure.
286  */
287 static inline void
288 rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
289 	__rte_unlock_function(rwl);
290 
291 /**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take a write lock instead
294  *
295  * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
296  * transaction always aborts the transaction since the CPU is not able to
297  * roll-back should the transaction fail. Therefore, hardware transactional
298  * locks are not advised to be used around rte_eth_rx_burst() and
299  * rte_eth_tx_burst() calls.
300  *
301  * @param rwl
302  *   A pointer to a rwlock structure.
303  */
304 static inline void
305 rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
306 	__rte_exclusive_lock_function(rwl);
307 
308 /**
309  * Commit hardware memory transaction or release the write lock if the lock is used as a fall-back
310  *
311  * @param rwl
312  *   A pointer to a rwlock structure.
313  */
314 static inline void
315 rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
316 	__rte_unlock_function(rwl);
317 
318 #ifdef __cplusplus
319 }
320 #endif
321 
322 #endif /* _RTE_RWLOCK_H_ */
323