xref: /openbsd-src/sys/kern/kern_lock.c (revision 6c6408334dbede3a2c0dcd9ff9c489157df0c856)
/*	$OpenBSD: kern_lock.c,v 1.59 2018/02/19 09:18:00 mpi Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/mutex.h>

#include <ddb/db_output.h>

#if defined(MULTIPROCESSOR) || defined(WITNESS)
#include <sys/mplock.h>
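/*
 * The kernel lock (the "big lock"): a CPU-recursive lock that
 * serializes the parts of the kernel that are not yet MP-safe.
 */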
struct __mp_lock kernel_lock;
#endif

#ifdef MP_LOCKDEBUG
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing; this needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
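/*
 * Sketch of how the limit could be patched at run time (assumption:
 * the exact command syntax varies, see ddb(4)): at the ddb prompt,
 * overwrite the variable with the write command, e.g. something like
 * "write __mp_lock_spinout 0x...".
 */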
#endif /* MP_LOCKDEBUG */

#ifdef MULTIPROCESSOR

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
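
/*
 * A minimal usage sketch (illustrative only); most code goes through
 * the KERNEL_LOCK()/KERNEL_UNLOCK() macros rather than calling these
 * functions directly:
 *
 *	KERNEL_LOCK();
 *	... touch data protected by the kernel lock ...
 *	KERNEL_UNLOCK();
 */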

void
_kernel_lock(const char *file, int line)
{
	SCHED_ASSERT_UNLOCKED();
#ifdef WITNESS
	___mp_lock(&kernel_lock, file, line);
#else
	__mp_lock(&kernel_lock);
#endif
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

int
_kernel_lock_held(void)
{
	if (panicstr)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}

#ifdef __USE_MI_MPLOCK

/* Ticket lock implementation */
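/*
 * A ticket lock grants the lock to CPUs in FIFO order.  An acquirer
 * atomically increments mpl_users to draw the next ticket, then spins
 * until the "now serving" counter, mpl_ticket, reaches that value;
 * e.g. with mpl_ticket == 3, the CPU holding ticket 3 owns the lock
 * while the CPU holding ticket 4 spins.  Unlocking advances
 * mpl_ticket, admitting the next waiter in line.  The per-CPU
 * mpl_cpus[] slots record each CPU's ticket and recursion depth,
 * which is what makes the lock recursive on a single CPU.
 */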

#include <machine/cpu.h>

void
___mp_lock_init(struct __mp_lock *mpl, struct lock_type *type)
{
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	else if (mpl == &sched_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}

static __inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
#ifndef MP_LOCKDEBUG
	while (mpl->mpl_ticket != me)
		CPU_BUSY_CYCLE();
#else
	int nticks = __mp_lock_spinout;

	while (mpl->mpl_ticket != me) {
		CPU_BUSY_CYCLE();

		if (--nticks <= 0) {
			db_printf("__mp_lock(%p): lock spun out\n", mpl);
			db_enter();
			nticks = __mp_lock_spinout;
		}
	}
#endif
}

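/*
 * Drawing a ticket and adjusting the recursion depth must be atomic
 * with respect to interrupts on this CPU: an interrupt handler may
 * itself take the lock and has to observe a consistent depth/ticket
 * pair.  Hence the intr_disable()/intr_restore() pairs below.
 */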
void
___mp_lock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
#endif

	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
}

void
___mp_unlock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}

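/*
 * ___mp_release_all() and ___mp_acquire_count() form a pair: code
 * that must give up the lock entirely (e.g. before sleeping) drops
 * every recursive reference at once, saves the returned count and
 * reacquires that many references afterwards.
 */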
int
___mp_release_all(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif
	cpu->mplc_depth = 0;
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}

int
___mp_release_all_but_one(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	int rv = cpu->mplc_depth - 1;
#ifdef WITNESS
	int i;

	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_release_all_but_one(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	cpu->mplc_depth = 1;

	return (rv);
}

void
___mp_acquire_count(struct __mp_lock *mpl, int count LOCK_FL_VARS)
{
	while (count--)
		___mp_lock(mpl LOCK_FL_ARGS);
}

int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}

#endif /* __USE_MI_MPLOCK */

#endif /* MULTIPROCESSOR */


#ifdef __USE_MI_MUTEX
void
__mtx_init(struct mutex *mtx, int wantipl)
{
	mtx->mtx_owner = NULL;
	mtx->mtx_wantipl = wantipl;
	mtx->mtx_oldipl = IPL_NONE;
}
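
/*
 * A typical consumer (sketch only); callers normally use the mtx_*
 * macros from <sys/mutex.h> rather than the __mtx_* functions, and
 * "foo_mtx" is just an illustrative name:
 *
 *	struct mutex foo_mtx;
 *
 *	mtx_init(&foo_mtx, IPL_VFS);
 *	mtx_enter(&foo_mtx);
 *	... modify the state that foo_mtx protects ...
 *	mtx_leave(&foo_mtx);
 */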

#ifdef MULTIPROCESSOR
void
__mtx_enter(struct mutex *mtx)
{
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	while (__mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
}

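/*
 * Note the ordering in __mtx_enter_try() below: the IPL is raised
 * before the owner CAS.  If the mutex were taken first, an interrupt
 * arriving in between could try to grab the same mutex on this CPU
 * and spin against it forever.
 */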
int
__mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		return (1);
	}

	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
#else
void
__mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}

int
__mtx_enter_try(struct mutex *mtx)
{
	__mtx_enter(mtx);
	return (1);
}
#endif

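/*
 * On release, membar_exit_before_atomic() orders the critical
 * section's stores before the store that clears mtx_owner, and the
 * owner is cleared before the IPL is dropped back down.
 */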
void
__mtx_leave(struct mutex *mtx)
{
	int s;

	MUTEX_ASSERT_LOCKED(mtx);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit_before_atomic();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
#endif /* __USE_MI_MUTEX */

#ifdef WITNESS
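/*
 * WITNESS wrappers: check the lock order graph and record the
 * acquiring file and line before deferring to the plain
 * implementations above.
 */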
void
_mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
    struct lock_type *type)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	lo->lo_flags = MTX_LO_FLAGS(flags);
	if (name != NULL)
		lo->lo_name = name;
	else
		lo->lo_name = type->lt_name;
	WITNESS_INIT(lo, type);

	_mtx_init(m, ipl);
}

void
_mtx_enter(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_CHECKORDER(lo, LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
	__mtx_enter(m);
	WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
}

int
_mtx_enter_try(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	if (__mtx_enter_try(m)) {
		WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
		return 1;
	}
	return 0;
}

void
_mtx_leave(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_UNLOCK(lo, LOP_EXCLUSIVE, file, line);
	__mtx_leave(m);
}
#endif /* WITNESS */
405