/*	$OpenBSD: kern_lock.c,v 1.66 2018/06/15 13:59:53 visa Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/mutex.h>

#include <ddb/db_output.h>

#ifdef MP_LOCKDEBUG
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing; this needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif /* MP_LOCKDEBUG */

#ifdef MULTIPROCESSOR

#include <sys/mplock.h>
struct __mp_lock kernel_lock;

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(const char *file, int line)
{
	SCHED_ASSERT_UNLOCKED();
#ifdef WITNESS
	___mp_lock(&kernel_lock, file, line);
#else
	__mp_lock(&kernel_lock);
#endif
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

int
_kernel_lock_held(void)
{
	if (panicstr || db_active)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}
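
/*
 * Usage sketch: the functions above are normally reached through the
 * KERNEL_LOCK(), KERNEL_UNLOCK() and KERNEL_ASSERT_LOCKED() macros
 * (assumed here to come from <sys/systm.h>) rather than called directly,
 * roughly as follows:
 *
 *	KERNEL_LOCK();
 *	... touch data structures that are not yet MP-safe ...
 *	KERNEL_UNLOCK();
 *
 * The lock is recursive per CPU (see mplc_depth below), so a code path
 * that already holds it may take it again without deadlocking.
 */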

#ifdef __USE_MI_MPLOCK

/* Ticket lock implementation */

#include <machine/cpu.h>

void
___mp_lock_init(struct __mp_lock *mpl, const struct lock_type *type)
{
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	else if (mpl == &sched_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}

static __inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	spc->spc_spinning++;
	while (mpl->mpl_ticket != me) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mpl);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}

void
___mp_lock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
#endif

	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
}
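
/*
 * Minimal sketch of the ticket lock idea used here, with the per-CPU
 * recursion counting, interrupt masking and WITNESS hooks stripped away.
 * "users" is the next ticket to hand out and "ticket" is the one being
 * served; each acquirer atomically takes a ticket and spins until it
 * comes up, which hands the lock out in FIFO order.  The names are
 * illustrative only:
 *
 *	unsigned int users;
 *	unsigned int ticket = 1;
 *
 *	void
 *	lock(void)
 *	{
 *		unsigned int me = atomic_inc_int_nv(&users);
 *
 *		while (ticket != me)
 *			CPU_BUSY_CYCLE();
 *		membar_enter_after_atomic();
 *	}
 *
 *	void
 *	unlock(void)
 *	{
 *		membar_exit();
 *		ticket++;
 *	}
 */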

void
___mp_unlock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}

int
___mp_release_all(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif
	cpu->mplc_depth = 0;
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}

int
___mp_release_all_but_one(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	int rv = cpu->mplc_depth - 1;
#ifdef WITNESS
	int i;

	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_release_all_but_one(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	cpu->mplc_depth = 1;

	return (rv);
}

void
___mp_acquire_count(struct __mp_lock *mpl, int count LOCK_FL_VARS)
{
	while (count--)
		___mp_lock(mpl LOCK_FL_ARGS);
}
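
/*
 * Sketch of how the two helpers above pair up (hedged; the exact call
 * sites live in the sleep/scheduler code): a CPU that holds the kernel
 * lock recursively drops it completely before blocking and restores the
 * same recursion depth afterwards, roughly:
 *
 *	int hold_count;
 *
 *	hold_count = __mp_release_all(&kernel_lock);
 *	... sleep, let other CPUs take the kernel lock ...
 *	__mp_acquire_count(&kernel_lock, hold_count);
 */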

int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}

#endif /* __USE_MI_MPLOCK */

#endif /* MULTIPROCESSOR */


#ifdef __USE_MI_MUTEX
void
__mtx_init(struct mutex *mtx, int wantipl)
{
	mtx->mtx_owner = NULL;
	mtx->mtx_wantipl = wantipl;
	mtx->mtx_oldipl = IPL_NONE;
}

#ifdef MULTIPROCESSOR
void
__mtx_enter(struct mutex *mtx)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	spc->spc_spinning++;
	while (__mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks == 0) {
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}

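/*
 * __mtx_enter_try() below does the real work: it first raises the IPL to
 * mtx_wantipl so that interrupt handlers which might take the same mutex
 * are blocked on this CPU, then tries to install curcpu() as mtx_owner
 * with a single compare-and-swap.  On success the previous IPL is saved
 * in mtx_oldipl for __mtx_leave() and membar_enter_after_atomic() orders
 * the acquisition before the critical section; on failure the IPL is
 * restored and __mtx_enter() above keeps spinning.
 */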
int
__mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		return (1);
	}

	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
#else
void
__mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}

int
__mtx_enter_try(struct mutex *mtx)
{
	__mtx_enter(mtx);
	return (1);
}
#endif

void
__mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit_before_atomic();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
#endif /* __USE_MI_MUTEX */
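
/*
 * Usage sketch (hedged; the public mtx_* interface and MUTEX_INITIALIZER()
 * are assumed to come from <sys/mutex.h>): a mutex that protects data
 * shared with an interrupt handler is initialized with that handler's IPL
 * and bracketed around the critical section, roughly like the illustrative
 * foo_mtx below.  Spinning mutexes must not be held across a sleep.
 *
 *	struct mutex foo_mtx = MUTEX_INITIALIZER(IPL_NET);
 *
 *	mtx_enter(&foo_mtx);
 *	... modify the state that foo_mtx protects ...
 *	mtx_leave(&foo_mtx);
 */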

#ifdef WITNESS
void
_mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
    const struct lock_type *type)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	lo->lo_flags = MTX_LO_FLAGS(flags);
	if (name != NULL)
		lo->lo_name = name;
	else
		lo->lo_name = type->lt_name;
	WITNESS_INIT(lo, type);

	_mtx_init(m, ipl);
}

void
_mtx_enter(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_CHECKORDER(lo, LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
	__mtx_enter(m);
	WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
}

int
_mtx_enter_try(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	if (__mtx_enter_try(m)) {
		WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
		return 1;
	}
	return 0;
}

void
_mtx_leave(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_UNLOCK(lo, LOP_EXCLUSIVE, file, line);
	__mtx_leave(m);
}
#endif /* WITNESS */