xref: /netbsd-src/sys/external/bsd/drm2/include/linux/spinlock.h (revision aef5eb5f59cdfe8314f1b5f78ac04eb144e44010)
1 /*	$NetBSD: spinlock.h,v 1.14 2021/12/19 11:52:08 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef _LINUX_SPINLOCK_H_
33 #define _LINUX_SPINLOCK_H_
34 
35 #include <sys/cdefs.h>
36 #include <sys/mutex.h>
37 
38 #include <machine/limits.h>
39 
40 #include <linux/atomic.h>
41 #include <linux/irqflags.h>
42 #include <linux/lockdep.h>
43 #include <linux/preempt.h>
44 
/*
 * Linux spinlock_t, backed by a NetBSD kmutex_t initialized at
 * IPL_VM (see spin_lock_init below).  The mutex's IPL does the
 * interrupt blocking, so the Linux *_irq/_irqsave variants need no
 * extra work in this compatibility layer.
 */
typedef struct spinlock {
	kmutex_t sl_lock;
} spinlock_t;
48 
49 static inline int
50 spin_is_locked(spinlock_t *spinlock)
51 {
52 	return mutex_owned(&spinlock->sl_lock);
53 }
54 
55 static inline void
56 spin_lock(spinlock_t *spinlock)
57 {
58 	mutex_enter(&spinlock->sl_lock);
59 }
60 
61 static inline void
62 spin_unlock(spinlock_t *spinlock)
63 {
64 	mutex_exit(&spinlock->sl_lock);
65 }
66 
67 static inline void
68 spin_lock_irq(spinlock_t *spinlock)
69 {
70 	spin_lock(spinlock);
71 }
72 
73 static inline void
74 spin_unlock_irq(spinlock_t *spinlock)
75 {
76 	spin_unlock(spinlock);
77 }
78 
/*
 * Must be a macro because the second argument is to be assigned.
 * FLAGS is not actually needed -- the mutex's IPL blocks interrupts
 * -- but it is zeroed so spin_unlock_irqrestore can accept it back
 * uninspected.
 */
#define	spin_lock_irqsave(SPINLOCK, FLAGS)				\
	do {								\
		(FLAGS) = 0;						\
		mutex_enter(&((spinlock_t *)(SPINLOCK))->sl_lock);	\
	} while (0)
85 
/*
 * Try to acquire the lock without waiting; evaluates to nonzero on
 * success.  As with spin_lock_irqsave, FLAGS is zeroed and otherwise
 * unused.
 */
#define	spin_trylock_irqsave(SPINLOCK, FLAGS)				\
		( (FLAGS) = 0,						\
		mutex_tryenter(&((spinlock_t *)(SPINLOCK))->sl_lock) )
89 
90 static inline void
91 spin_unlock_irqrestore(spinlock_t *spinlock, unsigned long __unused flags)
92 {
93 	mutex_exit(&spinlock->sl_lock);
94 }
95 
96 static inline void
97 spin_lock_nested(spinlock_t *spinlock, int subclass)
98 {
99 	spin_lock(spinlock);
100 }
101 
/* The lockdep subclass is irrelevant here; defer to the plain variant. */
#define	spin_lock_irqsave_nested(SPINLOCK, FLAGS, SUBCLASS)		      \
	spin_lock_irqsave(SPINLOCK, FLAGS)
104 
105 static inline void
106 spin_lock_init(spinlock_t *spinlock)
107 {
108 	/* XXX What's the right IPL?  IPL_DRM...?  */
109 	mutex_init(&spinlock->sl_lock, MUTEX_DEFAULT, IPL_VM);
110 }
111 
112 /*
113  * XXX Linux doesn't ever destroy spin locks, it seems.  We'll have to
114  * kludge it up.
115  */
116 
117 static inline void
118 spin_lock_destroy(spinlock_t *spinlock)
119 {
120 	mutex_destroy(&spinlock->sl_lock);
121 }
122 
/*
 * Assert that the caller holds the lock.  This is a macro rather
 * than an inline function to make the panic message clearer: the
 * KASSERT stringifies the caller's expression.
 */
#define	assert_spin_locked(spinlock)	\
	KASSERT(mutex_owned(&(spinlock)->sl_lock))
126 
127 /*
128  * Stupid reader/writer spin locks.  No attempt to avoid writer
129  * starvation.  Must allow recursive readers.  We use mutex and state
130  * instead of compare-and-swap for expedience and LOCKDEBUG support.
131  */
132 
typedef struct linux_rwlock {
	kmutex_t	rw_lock;	/* held by writers; taken briefly
					 * by readers to adjust the count */
	unsigned	rw_nreaders;	/* number of active readers */
} rwlock_t;
137 
138 static inline void
139 rwlock_init(rwlock_t *rw)
140 {
141 
142 	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
143 	rw->rw_nreaders = 0;
144 }
145 
146 static inline void
147 rwlock_destroy(rwlock_t *rw)
148 {
149 
150 	KASSERTMSG(rw->rw_nreaders == 0,
151 	    "rwlock still held by %u readers", rw->rw_nreaders);
152 	mutex_destroy(&rw->rw_lock);
153 }
154 
155 static inline void
156 write_lock_irq(rwlock_t *rw)
157 {
158 
159 	for (;;) {
160 		mutex_spin_enter(&rw->rw_lock);
161 		if (rw->rw_nreaders == 0)
162 			break;
163 		mutex_spin_exit(&rw->rw_lock);
164 	}
165 }
166 
167 static inline void
168 write_unlock_irq(rwlock_t *rw)
169 {
170 
171 	KASSERT(rw->rw_nreaders == 0);
172 	mutex_spin_exit(&rw->rw_lock);
173 }
174 
175 static inline void
176 read_lock(rwlock_t *rw)
177 {
178 
179 	mutex_spin_enter(&rw->rw_lock);
180 	KASSERT(rw->rw_nreaders < UINT_MAX);
181 	rw->rw_nreaders++;
182 	mutex_spin_exit(&rw->rw_lock);
183 }
184 
185 static inline void
186 read_unlock(rwlock_t *rw)
187 {
188 
189 	mutex_spin_enter(&rw->rw_lock);
190 	KASSERT(0 < rw->rw_nreaders);
191 	rw->rw_nreaders--;
192 	mutex_spin_exit(&rw->rw_lock);
193 }
194 
/*
 * No-op stub: this compatibility layer provides no separate
 * bottom-half (softirq) context to disable.
 */
static inline void
local_bh_disable(void)
{
}
199 
/* No-op stub, pairing with local_bh_disable above. */
static inline void
local_bh_enable(void)
{
}
204 
/*
 * Must be a macro so the FLAGS lvalue can be passed by address to
 * the helper below, which assigns it via spin_lock_irqsave.
 */
#define	atomic_dec_and_lock_irqsave(A, L, F)				      \
	_atomic_dec_and_lock_irqsave(A, L, &(F))
207 
/*
 * Decrement *atomic.  If the count drops to zero, return true with
 * the lock held and *flagsp set; otherwise return false with the
 * lock unheld.  The decrement happens in either case.
 *
 * Fast path: while the count is above 1, decrement lock-free with
 * compare-and-swap.  Only a potential 1 -> 0 transition takes the
 * lock, so a caller that sees true can tear down state knowing any
 * concurrent reference acquisition is serialized behind the lock.
 */
static inline bool __must_check
_atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
    unsigned long *flagsp)
{
	unsigned old, new;

	do {
		old = atomic_read(atomic);
		/* Caller must itself hold a reference. */
		KASSERT(old);
		if (old == 1) {
			/*
			 * Possible final release: take the lock
			 * first, then decrement under it, re-checking
			 * in case another thread gained a reference
			 * in between.
			 */
			spin_lock_irqsave(lock, *flagsp);
			if (atomic_dec_return(atomic) == 0)
				return true;	/* zero: lock stays held */
			spin_unlock_irqrestore(lock, *flagsp);
			return false;
		}
		new = old - 1;
	} while (atomic_cmpxchg(atomic, old, new) != old);

	/* Lock-free path never performs the 1 -> 0 transition. */
	KASSERT(old != 1);
	KASSERT(new != 0);
	return false;
}
231 
232 #endif  /* _LINUX_SPINLOCK_H_ */
233