/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Ericsson AB
 */

#ifndef _RTE_SEQLOCK_H_
#define _RTE_SEQLOCK_H_

/**
 * @file
 * RTE Seqlock
 *
 * A sequence lock (seqlock) is a synchronization primitive allowing
 * multiple, parallel, readers to efficiently and safely (i.e., in a
 * data-race free manner) access lock-protected data. The RTE seqlock
 * permits multiple writers as well. A spinlock is used for
 * writer-writer synchronization.
 *
 * A reader never blocks a writer. Very high frequency writes may
 * prevent readers from making progress.
 *
 * A seqlock is not preemption-safe on the writer side. If a writer is
 * preempted, it may block readers until the writer thread is allowed
 * to continue. Heavy computations should be kept out of the
 * writer-side critical section, to avoid delaying readers.
 *
 * Seqlocks are useful for data which is read by many cores, at a
 * high frequency, and relatively infrequently written to.
 *
 * One way to think about seqlocks is that they provide means to
 * perform atomic operations on objects larger than what the native
 * machine instructions allow for.
 *
 * To avoid resource reclamation issues, the data protected by a
 * seqlock should typically be kept self-contained (e.g., no pointers
 * to mutable, dynamically allocated data).
 *
 * Example usage:
 * @code{.c}
 * #define MAX_Y_LEN 16
 * // Application-defined example data structure, protected by a seqlock.
 * struct config {
 *         rte_seqlock_t lock;
 *         int param_x;
 *         char param_y[MAX_Y_LEN];
 * };
 *
 * // Accessor function for reading config fields.
 * void
 * config_read(const struct config *config, int *param_x, char *param_y)
 * {
 *         uint32_t sn;
 *
 *         do {
 *                 sn = rte_seqlock_read_begin(&config->lock);
 *
 *                 // Loads may be atomic or non-atomic, as in this example.
 *                 *param_x = config->param_x;
 *                 strcpy(param_y, config->param_y);
 *                 // An alternative to an immediate retry is to abort and
 *                 // try again at some later time, assuming progress is
 *                 // possible without the data.
 *         } while (rte_seqlock_read_retry(&config->lock, sn));
 * }
 *
 * // Accessor function for writing config fields.
 * void
 * config_update(struct config *config, int param_x, const char *param_y)
 * {
 *         rte_seqlock_write_lock(&config->lock);
 *         // Stores may be atomic or non-atomic, as in this example.
 *         config->param_x = param_x;
 *         strcpy(config->param_y, param_y);
 *         rte_seqlock_write_unlock(&config->lock);
 * }
 * @endcode
 *
 * In case there is only a single writer, or writer-writer
 * serialization is provided by other means, the use of a sequence lock
 * (i.e., rte_seqlock_t) can be replaced with the use of the "raw"
 * rte_seqcount_t type instead.
 *
 * @see
 * https://en.wikipedia.org/wiki/Seqlock.
 */

#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_seqcount.h>
#include <rte_spinlock.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * The RTE seqlock type.
 */
typedef struct {
	rte_seqcount_t count; /**< Sequence count for the protected data. */
	rte_spinlock_t lock; /**< Spinlock used to serialize writers. */
} rte_seqlock_t;

/**
 * A static seqlock initializer.
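 *
 * A minimal usage sketch (the struct and variable names below are
 * hypothetical, not part of the API), statically initializing the
 * embedded seqlock instead of calling rte_seqlock_init() at run time:
 * @code{.c}
 * static struct {
 *         rte_seqlock_t lock;
 *         uint64_t value;
 * } shared_state = {
 *         .lock = RTE_SEQLOCK_INITIALIZER,
 * };
 * @endcode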
 */
#define RTE_SEQLOCK_INITIALIZER \
	{ \
		.count = RTE_SEQCOUNT_INITIALIZER, \
		.lock = RTE_SPINLOCK_INITIALIZER \
	}

/**
 * Initialize the seqlock.
 *
 * This function initializes the seqlock, and leaves the writer-side
 * spinlock unlocked.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 */
static inline void
rte_seqlock_init(rte_seqlock_t *seqlock)
{
	rte_seqcount_init(&seqlock->count);
	rte_spinlock_init(&seqlock->lock);
}

/**
 * Begin a read-side critical section.
 *
 * See rte_seqcount_read_retry() for details.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 * @return
 *   The seqlock sequence number for this critical section, to
 *   later be passed to rte_seqlock_read_retry().
 *
 * @see rte_seqlock_read_retry()
 * @see rte_seqcount_read_retry()
 */
static inline uint32_t
rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
{
	return rte_seqcount_read_begin(&seqlock->count);
}

/**
 * End a read-side critical section.
 *
 * See rte_seqcount_read_retry() for details.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 * @param begin_sn
 *   The seqlock sequence number returned by rte_seqlock_read_begin().
 * @return
 *   true if the just-read seqlock-protected data was inconsistent at the
 *   time it was read (and the read should be retried), otherwise false.
 *
 * @see rte_seqlock_read_begin()
 */
static inline bool
rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
{
	return rte_seqcount_read_retry(&seqlock->count, begin_sn);
}

/**
 * Begin a write-side critical section.
 *
 * A call to this function acquires the write lock associated with
 * @p seqlock, and marks the beginning of a write-side critical section.
 *
 * After having called this function, the caller may go on to modify
 * (both read and write) the protected data, in an atomic or
 * non-atomic manner.
 *
 * After the necessary updates have been performed, the application
 * calls rte_seqlock_write_unlock().
 *
 * This function is not preemption-safe in the sense that preemption
 * of the calling thread may block reader progress until the writer
 * thread is rescheduled.
 *
 * Unlike rte_seqlock_read_begin(), each call made to
 * rte_seqlock_write_lock() must be matched with an unlock call.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 *
 * @see rte_seqlock_write_unlock()
 */
static inline void
rte_seqlock_write_lock(rte_seqlock_t *seqlock)
	__rte_exclusive_lock_function(&seqlock->lock)
{
	/* To synchronize with other writers. */
	rte_spinlock_lock(&seqlock->lock);

	rte_seqcount_write_begin(&seqlock->count);
}

/**
 * End a write-side critical section.
 *
 * A call to this function marks the end of the write-side critical
 * section, for @p seqlock. After this call has been made, the protected
 * data may no longer be modified.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 *
 * @see rte_seqlock_write_lock()
 */
static inline void
rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
	__rte_unlock_function(&seqlock->lock)
{
	rte_seqcount_write_end(&seqlock->count);

	rte_spinlock_unlock(&seqlock->lock);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SEQLOCK_H_ */