/*	$OpenBSD: kern_lock.c,v 1.63 2018/04/26 06:51:48 mpi Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/mutex.h>

#include <ddb/db_output.h>

#if defined(MULTIPROCESSOR) || defined(WITNESS)
#include <sys/mplock.h>
struct __mp_lock kernel_lock;
#endif

#ifdef MP_LOCKDEBUG
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing; this value needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
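
/*
 * For example (a sketch; see ddb(4) for the exact write syntax), the
 * spinout can be raised from the debugger on hardware where the
 * default spin count proves too short:
 *
 *	ddb> w/l __mp_lock_spinout 0x20000000
 */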
#endif /* MP_LOCKDEBUG */

#ifdef MULTIPROCESSOR

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(const char *file, int line)
{
	SCHED_ASSERT_UNLOCKED();
#ifdef WITNESS
	___mp_lock(&kernel_lock, file, line);
#else
	__mp_lock(&kernel_lock);
#endif
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}
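
/*
 * Callers normally go through the KERNEL_LOCK() and KERNEL_UNLOCK()
 * macros from <sys/systm.h> rather than calling these functions
 * directly.  A minimal usage sketch:
 *
 *	KERNEL_LOCK();
 *	... code that relies on the big lock ...
 *	KERNEL_UNLOCK();
 *
 * The lock is recursive per CPU: a CPU that already holds it may take
 * it again; the MI implementation below tracks the nesting in
 * mplc_depth.
 */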

int
_kernel_lock_held(void)
{
	if (panicstr || db_active)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}

#ifdef __USE_MI_MPLOCK

/* Ticket lock implementation */
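
/*
 * In brief: a CPU takes a ticket by atomically incrementing mpl_users,
 * then spins until mpl_ticket (the "now serving" counter) reaches its
 * ticket.  Unlocking advances mpl_ticket, handing the lock to the next
 * waiter, so acquisition is FIFO-fair across CPUs.
 */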

#include <machine/cpu.h>

void
___mp_lock_init(struct __mp_lock *mpl, struct lock_type *type)
{
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	else if (mpl == &sched_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}

static __inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	while (mpl->mpl_ticket != me) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mpl);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
}

void
___mp_lock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
#endif

	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
}
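
/*
 * Note on ordering: membar_enter_after_atomic() above provides the
 * acquire barrier and pairs with the membar_exit() issued before
 * mpl_ticket is advanced on release, so the new owner sees all stores
 * made under the lock.  The ticket is taken with interrupts disabled
 * so that an interrupt handler on the same CPU cannot race the
 * mplc_depth update.
 */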

void
___mp_unlock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}

int
___mp_release_all(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif
	cpu->mplc_depth = 0;
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}

int
___mp_release_all_but_one(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	int rv = cpu->mplc_depth - 1;
#ifdef WITNESS
	int i;

	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_release_all_but_one(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	cpu->mplc_depth = 1;

	return (rv);
}

void
___mp_acquire_count(struct __mp_lock *mpl, int count LOCK_FL_VARS)
{
	while (count--)
		___mp_lock(mpl LOCK_FL_ARGS);
}
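
/*
 * ___mp_release_all() and ___mp_acquire_count() are intended to be
 * used as a pair: save the returned depth, run without the lock (for
 * instance across a sleep), then restore the recursion level.  A
 * sketch of the pattern, using the wrapper macros from <sys/mplock.h>:
 *
 *	int hold_count;
 *
 *	hold_count = __mp_release_all(&kernel_lock);
 *	... run without the kernel lock ...
 *	__mp_acquire_count(&kernel_lock, hold_count);
 */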

int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}

#endif /* __USE_MI_MPLOCK */

#endif /* MULTIPROCESSOR */


#ifdef __USE_MI_MUTEX
void
__mtx_init(struct mutex *mtx, int wantipl)
{
	mtx->mtx_owner = NULL;
	mtx->mtx_wantipl = wantipl;
	mtx->mtx_oldipl = IPL_NONE;
}
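
/*
 * Typical usage goes through the mtx_init()/mtx_enter()/mtx_leave()
 * interface declared in <sys/mutex.h>; a minimal sketch:
 *
 *	struct mutex m;
 *
 *	mtx_init(&m, IPL_VM);
 *	mtx_enter(&m);
 *	... short critical section ...
 *	mtx_leave(&m);
 *
 * wantipl names the highest interrupt level from which the mutex may
 * be taken; __mtx_enter() raises the system priority level to it so
 * the holder cannot be preempted by an interrupt handler that takes
 * the same mutex.
 */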

#ifdef MULTIPROCESSOR
void
__mtx_enter(struct mutex *mtx)
{
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	while (__mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
}

int
__mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		return (1);
	}

	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
#else
void
__mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}

int
__mtx_enter_try(struct mutex *mtx)
{
	__mtx_enter(mtx);
	return (1);
}
#endif

void
__mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit_before_atomic();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
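
/*
 * In __mtx_leave() above, membar_exit_before_atomic() acts as the
 * release barrier on MULTIPROCESSOR kernels: stores performed inside
 * the critical section become visible before mtx_owner is cleared,
 * pairing with the membar_enter_after_atomic() that follows the
 * acquiring compare-and-swap in __mtx_enter_try().
 */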
#endif /* __USE_MI_MUTEX */

#ifdef WITNESS
void
_mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
    struct lock_type *type)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	lo->lo_flags = MTX_LO_FLAGS(flags);
	if (name != NULL)
		lo->lo_name = name;
	else
		lo->lo_name = type->lt_name;
	WITNESS_INIT(lo, type);

	_mtx_init(m, ipl);
}

void
_mtx_enter(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_CHECKORDER(lo, LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
	__mtx_enter(m);
	WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
}

int
_mtx_enter_try(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	if (__mtx_enter_try(m)) {
		WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
		return 1;
	}
	return 0;
}

void
_mtx_leave(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_UNLOCK(lo, LOP_EXCLUSIVE, file, line);
	__mtx_leave(m);
}
#endif /* WITNESS */