xref: /dpdk/lib/eal/include/rte_ticketlock.h (revision 719834a6849e1daf4a70ff7742bbcc3ae7e25607)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Arm Limited
3  */
4 
5 #ifndef _RTE_TICKETLOCK_H_
6 #define _RTE_TICKETLOCK_H_
7 
8 /**
9  * @file
10  *
11  * RTE ticket locks
12  *
13  * This file defines an API for ticket locks, which give each waiting
14  * thread a ticket and take the lock one by one, first come, first
15  * serviced.
16  *
17  * All locks must be initialised before use, and only initialised once.
18  */
19 
20 #include <rte_common.h>
21 #include <rte_lcore.h>
22 #include <rte_pause.h>
23 #include <rte_stdatomic.h>
24 
25 #ifdef __cplusplus
26 extern "C" {
27 #endif
28 
/**
 * The rte_ticketlock_t type.
 *
 * The 32-bit 'tickets' word overlays the two 16-bit counters so that
 * both can be loaded or compare-and-swapped as one atomic unit (see
 * rte_ticketlock_trylock()).
 */
typedef union {
	RTE_ATOMIC(uint32_t) tickets;
	struct {
		RTE_ATOMIC(uint16_t) current; /**< Ticket currently being served. */
		RTE_ATOMIC(uint16_t) next; /**< Next ticket to hand out. */
	} s;
} rte_ticketlock_t;
39 
40 /**
41  * A static ticketlock initializer.
42  */
43 #define RTE_TICKETLOCK_INITIALIZER { 0 }
44 
/**
 * Initialize the ticketlock to an unlocked state.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_init(rte_ticketlock_t *tl)
{
	/* Zeroing both counters at once (current == next) means unlocked;
	 * relaxed is fine since initialization must not race with use.
	 */
	rte_atomic_store_explicit(&tl->tickets, 0, rte_memory_order_relaxed);
}
56 
/**
 * Take the ticketlock.
 *
 * Blocks (spins) until the caller's ticket is served.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_lock(rte_ticketlock_t *tl)
{
	/* Grab the next free ticket. Relaxed is sufficient here: the acquire
	 * ordering on the wait below is what synchronizes with the release
	 * store in rte_ticketlock_unlock().
	 */
	uint16_t me = rte_atomic_fetch_add_explicit(&tl->s.next, 1, rte_memory_order_relaxed);
	/* Spin until 'current' reaches our ticket; acquire ordering keeps the
	 * critical section from being hoisted above the lock acquisition.
	 */
	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tl->s.current, me,
		rte_memory_order_acquire);
}
70 
/**
 * Release the ticketlock.
 *
 * Must only be called by the current lock holder.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_unlock(rte_ticketlock_t *tl)
{
	/* Only the lock holder writes 'current', so a relaxed load cannot
	 * observe a concurrent modification. The release store hands the
	 * lock to the next ticket holder and publishes the critical
	 * section's writes to it.
	 */
	uint16_t i = rte_atomic_load_explicit(&tl->s.current, rte_memory_order_relaxed);
	rte_atomic_store_explicit(&tl->s.current, i + 1, rte_memory_order_release);
}
83 
/**
 * Try to take the lock.
 *
 * Non-blocking: takes a ticket only if the lock is free right now.
 *
 * @param tl
 *   A pointer to the ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_ticketlock_trylock(rte_ticketlock_t *tl)
{
	rte_ticketlock_t oldl, newl;
	/* Snapshot both 16-bit halves in one 32-bit load so the free-lock
	 * test and the CAS below operate on a consistent view.
	 */
	oldl.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_relaxed);
	newl.tickets = oldl.tickets;
	newl.s.next++;
	/* The lock is free only when no ticket is outstanding. */
	if (oldl.s.next == oldl.s.current) {
		/* Acquire on success orders the critical section after the
		 * acquisition; on failure nothing was taken, so relaxed
		 * suffices.
		 */
		if (rte_atomic_compare_exchange_strong_explicit(&tl->tickets,
				(uint32_t *)(uintptr_t)&oldl.tickets, newl.tickets,
				rte_memory_order_acquire, rte_memory_order_relaxed))
			return 1;
	}

	return 0;
}
108 
109 /**
110  * Test if the lock is taken.
111  *
112  * @param tl
113  *   A pointer to the ticketlock.
114  * @return
115  *   1 if the lock is currently taken; 0 otherwise.
116  */
117 static inline int
118 rte_ticketlock_is_locked(rte_ticketlock_t *tl)
119 {
120 	rte_ticketlock_t tic;
121 	tic.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_acquire);
122 	return (tic.s.current != tic.s.next);
123 }
124 
/**
 * The rte_ticketlock_recursive_t type.
 */
/** Owner id meaning "no thread currently holds the lock". */
#define TICKET_LOCK_INVALID_ID -1

typedef struct {
	rte_ticketlock_t tl; /**< the actual ticketlock */
	RTE_ATOMIC(int) user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */
	unsigned int count; /**< current nesting depth: number of times the owner has taken the lock */
} rte_ticketlock_recursive_t;
135 
136 /**
137  * A static recursive ticketlock initializer.
138  */
139 #define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
140 					      TICKET_LOCK_INVALID_ID, 0}
141 
142 /**
143  * Initialize the recursive ticketlock to an unlocked state.
144  *
145  * @param tlr
146  *   A pointer to the recursive ticketlock.
147  */
148 static inline void
149 rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
150 {
151 	rte_ticketlock_init(&tlr->tl);
152 	rte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID, rte_memory_order_relaxed);
153 	tlr->count = 0;
154 }
155 
156 /**
157  * Take the recursive ticketlock.
158  *
159  * @param tlr
160  *   A pointer to the recursive ticketlock.
161  */
162 static inline void
163 rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
164 {
165 	int id = rte_gettid();
166 
167 	if (rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) {
168 		rte_ticketlock_lock(&tlr->tl);
169 		rte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed);
170 	}
171 	tlr->count++;
172 }
173 
174 /**
175  * Release the recursive ticketlock.
176  *
177  * @param tlr
178  *   A pointer to the recursive ticketlock.
179  */
180 static inline void
181 rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
182 {
183 	if (--(tlr->count) == 0) {
184 		rte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID,
185 				 rte_memory_order_relaxed);
186 		rte_ticketlock_unlock(&tlr->tl);
187 	}
188 }
189 
190 /**
191  * Try to take the recursive lock.
192  *
193  * @param tlr
194  *   A pointer to the recursive ticketlock.
195  * @return
196  *   1 if the lock is successfully taken; 0 otherwise.
197  */
198 static inline int
199 rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
200 {
201 	int id = rte_gettid();
202 
203 	if (rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) {
204 		if (rte_ticketlock_trylock(&tlr->tl) == 0)
205 			return 0;
206 		rte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed);
207 	}
208 	tlr->count++;
209 	return 1;
210 }
211 
212 #ifdef __cplusplus
213 }
214 #endif
215 
216 #endif /* _RTE_TICKETLOCK_H_ */
217