/*	$NetBSD: seqlock.h,v 1.5 2021/12/19 01:50:10 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SEQLOCK_H_
#define _LINUX_SEQLOCK_H_

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/libkern.h>

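/*
 * Generation counter in the style of Linux's seqcount_t: sqc_gen is
 * even when no write is in progress and odd while a writer is updating
 * the protected data.  Readers snapshot the generation, read the data,
 * and retry if the generation changed in the meantime.  Writers are not
 * serialized here; the caller must serialize them (struct seqlock below
 * pairs the counter with a mutex for that purpose).
 */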
struct seqcount {
	unsigned	sqc_gen;
};

typedef struct seqcount seqcount_t;

static inline void
seqcount_init(struct seqcount *seqcount)
{

	seqcount->sqc_gen = 0;
}

static inline void
seqcount_destroy(struct seqcount *seqcount)
{

	KASSERT((seqcount->sqc_gen & 1) == 0);
	seqcount->sqc_gen = -1;	/* poison: odd value catches use after destroy */
}

static inline void
write_seqcount_begin(struct seqcount *seqcount)
{

	KASSERT((seqcount->sqc_gen & 1) == 0);
	seqcount->sqc_gen |= 1;	/* odd: write in progress */
	membar_producer();	/* gen update visible before the data writes */
}

static inline void
write_seqcount_end(struct seqcount *seqcount)
{

	KASSERT((seqcount->sqc_gen & 1) == 1);
	membar_producer();	/* data writes visible before the gen update */
	seqcount->sqc_gen |= 1;	/* paranoia */
	seqcount->sqc_gen++;	/* back to even: write complete */
}

static inline unsigned
__read_seqcount_begin(const struct seqcount *seqcount)
{
	unsigned gen;

	/* Spin until no write is in progress. */
	while (__predict_false((gen = seqcount->sqc_gen) & 1))
		SPINLOCK_BACKOFF_HOOK;
	__insn_barrier();

	return gen;
}

static inline bool
__read_seqcount_retry(const struct seqcount *seqcount, unsigned gen)
{

	__insn_barrier();
	/* True if a writer has run since __read_seqcount_begin. */
	return __predict_false(seqcount->sqc_gen != gen);
}

static inline unsigned
read_seqcount_begin(const struct seqcount *seqcount)
{
	unsigned gen;

	gen = __read_seqcount_begin(seqcount);
	membar_consumer();	/* gen load before the subsequent data loads */

	return gen;
}

static inline bool
read_seqcount_retry(const struct seqcount *seqcount, unsigned gen)
{

	membar_consumer();	/* data loads before rechecking gen */
	return __read_seqcount_retry(seqcount, gen);
}

static inline unsigned
raw_read_seqcount(const struct seqcount *seqcount)
{
	unsigned gen;

	/* Return the current generation, possibly odd, without waiting. */
	gen = seqcount->sqc_gen;
	membar_consumer();

	return gen;
}

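/*
 * Illustrative sketch of the seqcount read/write protocol; the
 * frobstate structure and its lock are hypothetical, not part of this
 * API.  The writer, serialized by its own lock, brackets the update
 * with write_seqcount_begin/end; the reader loops until it observes a
 * generation that did not change across its reads.
 *
 *	struct frobstate {
 *		struct seqcount	fs_seq;
 *		kmutex_t	fs_lock;	// serializes writers
 *		uint64_t	fs_x, fs_y;
 *	};
 *
 *	// writer
 *	mutex_enter(&fs->fs_lock);
 *	write_seqcount_begin(&fs->fs_seq);
 *	fs->fs_x = x;
 *	fs->fs_y = y;
 *	write_seqcount_end(&fs->fs_seq);
 *	mutex_exit(&fs->fs_lock);
 *
 *	// reader
 *	do {
 *		gen = read_seqcount_begin(&fs->fs_seq);
 *		x = fs->fs_x;
 *		y = fs->fs_y;
 *	} while (read_seqcount_retry(&fs->fs_seq, gen));
 */
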
struct seqlock {
	kmutex_t	sql_lock;
	struct seqcount	sql_count;
};

typedef struct seqlock seqlock_t;

static inline void
seqlock_init(struct seqlock *seqlock)
{

	mutex_init(&seqlock->sql_lock, MUTEX_DEFAULT, IPL_VM);
	seqcount_init(&seqlock->sql_count);
}

static inline void
seqlock_destroy(struct seqlock *seqlock)
{

	seqcount_destroy(&seqlock->sql_count);
	mutex_destroy(&seqlock->sql_lock);
}

static inline void
write_seqlock(struct seqlock *seqlock)
{

	mutex_spin_enter(&seqlock->sql_lock);
	write_seqcount_begin(&seqlock->sql_count);
}

static inline void
write_sequnlock(struct seqlock *seqlock)
{

	write_seqcount_end(&seqlock->sql_count);
	mutex_spin_exit(&seqlock->sql_lock);
}

/* FLAGS receives the SPL in force before splvm() raised it; splx restores it. */
#define	write_seqlock_irqsave(SEQLOCK, FLAGS)	do {			      \
	(FLAGS) = (unsigned long)splvm();				      \
	write_seqlock(SEQLOCK);						      \
} while (0)

#define	write_sequnlock_irqrestore(SEQLOCK, FLAGS)	do {		      \
	write_sequnlock(SEQLOCK);					      \
	splx((int)(FLAGS));						      \
} while (0)

static inline unsigned
read_seqbegin(const struct seqlock *seqlock)
{

	return read_seqcount_begin(&seqlock->sql_count);
}

static inline bool
read_seqretry(const struct seqlock *seqlock, unsigned gen)
{

	return read_seqcount_retry(&seqlock->sql_count, gen);
}

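/*
 * Illustrative sketch of seqlock usage (frobclock and frob_x are
 * hypothetical names): the writer takes the embedded spin mutex and
 * bumps the generation via write_seqlock/write_sequnlock, while
 * readers use the same retry loop as with a bare seqcount.
 *
 *	// writer
 *	write_seqlock(&frobclock);
 *	frob_x = x;			// update the protected data
 *	write_sequnlock(&frobclock);
 *
 *	// reader
 *	unsigned gen;
 *	do {
 *		gen = read_seqbegin(&frobclock);
 *		x = frob_x;		// read the protected data
 *	} while (read_seqretry(&frobclock, gen));
 */
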
#endif	/* _LINUX_SEQLOCK_H_ */