xref: /openbsd-src/sys/kern/kern_lock.c (revision fdf140bed7e7d0aa2f7f51b01d9a894d4c9bcc46)
1 /*	$OpenBSD: kern_lock.c,v 1.62 2018/04/25 10:30:41 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 2017 Visa Hankala
5  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/sched.h>
24 #include <sys/atomic.h>
25 #include <sys/witness.h>
26 #include <sys/mutex.h>
27 
28 #include <ddb/db_output.h>
29 
30 #if defined(MULTIPROCESSOR) || defined(WITNESS)
31 #include <sys/mplock.h>
32 struct __mp_lock kernel_lock;
33 #endif
34 
35 #ifdef MP_LOCKDEBUG
36 #ifndef DDB
37 #error "MP_LOCKDEBUG requires DDB"
38 #endif
39 
40 /* CPU-dependent timing, this needs to be settable from ddb. */
41 int __mp_lock_spinout = 200000000;
42 #endif /* MP_LOCKDEBUG */
43 
44 #ifdef MULTIPROCESSOR
45 
46 /*
47  * Functions for manipulating the kernel_lock.  We put them here
48  * so that they show up in profiles.
49  */
50 
/*
 * Initialize the giant kernel lock.  Called once during early boot,
 * before any CPU can contend for kernel_lock.
 */
void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}
56 
57 /*
58  * Acquire/release the kernel lock.  Intended for use in the scheduler
59  * and the lower half of the kernel.
60  */
61 
/*
 * Acquire the kernel lock.  Must not be called with the scheduler
 * lock held.  Under WITNESS the file/line of the caller is forwarded
 * to the order-checking variant; otherwise the plain lock routine is
 * used and file/line are unused.
 */
void
_kernel_lock(const char *file, int line)
{
	SCHED_ASSERT_UNLOCKED();
#ifdef WITNESS
	___mp_lock(&kernel_lock, file, line);
#else
	__mp_lock(&kernel_lock);
#endif
}
72 
/*
 * Release one level of the (possibly recursive) kernel lock.
 */
void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}
78 
/*
 * Return non-zero if the current CPU holds the kernel lock.  After a
 * panic or while in ddb the answer is forced to "held" so assertions
 * built on this check do not trigger recursively during debugging.
 */
int
_kernel_lock_held(void)
{
	if (panicstr || db_active)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}
86 
87 #ifdef __USE_MI_MPLOCK
88 
89 /* Ticket lock implementation */
90 
91 #include <machine/cpu.h>
92 
/*
 * Initialize a ticket lock.  mpl_ticket is the ticket currently being
 * served and mpl_users the last ticket handed out; starting with
 * ticket == 1 and users == 0 means the first atomic_inc_int_nv() in
 * ___mp_lock() draws ticket 1, i.e. the lock starts out free.
 */
void
___mp_lock_init(struct __mp_lock *mpl, struct lock_type *type)
{
	/* Clear all per-CPU ticket/depth state. */
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	/*
	 * Register the lock object with witness.  The kernel lock and
	 * the scheduler lock get dedicated lock classes and flags
	 * (sleepable vs. recursable, respectively).
	 */
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	else if (mpl == &sched_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}
112 
113 static __inline void
114 __mp_lock_spin(struct __mp_lock *mpl, u_int me)
115 {
116 #ifndef MP_LOCKDEBUG
117 	while (mpl->mpl_ticket != me)
118 		CPU_BUSY_CYCLE();
119 #else
120 	int nticks = __mp_lock_spinout;
121 
122 	while (mpl->mpl_ticket != me) {
123 		CPU_BUSY_CYCLE();
124 
125 		if (--nticks <= 0) {
126 			db_printf("__mp_lock(%p): lock spun out", mpl);
127 			db_enter();
128 			nticks = __mp_lock_spinout;
129 		}
130 	}
131 #endif
132 }
133 
/*
 * Acquire the ticket lock, recursively if this CPU already holds it.
 * A new ticket is drawn only on the outermost acquisition; nested
 * calls merely bump the per-CPU recursion depth.
 */
void
___mp_lock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	/* Only check lock ordering on the first, non-recursive entry. */
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
#endif

	/*
	 * Update depth and draw the ticket with interrupts disabled so
	 * an interrupt handler taking the same lock on this CPU cannot
	 * race the depth/ticket update.
	 */
	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	/* Wait for our turn, then fence so the critical section's
	 * memory accesses cannot be reordered before the acquire. */
	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
}
156 
/*
 * Release one level of the ticket lock.  Only when the outermost
 * level is dropped is the next ticket served; membar_exit() makes the
 * critical section's stores visible before the ticket advances.
 */
void
___mp_unlock(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): not held lock\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);

	/* Interrupts off: see the matching window in ___mp_lock(). */
	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}
179 
/*
 * Drop the lock completely, regardless of recursion depth, and return
 * the depth that was held so the caller can later restore it with
 * ___mp_acquire_count().  Presumably used when this CPU must give up
 * the lock entirely (e.g. before sleeping) — confirm against callers.
 */
int
___mp_release_all(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	/* Tell witness about every recursion level being released. */
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif
	cpu->mplc_depth = 0;
	/* Publish the critical section's stores, then serve the next ticket. */
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}
203 
/*
 * Reduce the recursion depth to exactly one and return the number of
 * levels shed.  The lock itself stays held by this CPU, so the ticket
 * is NOT advanced and no memory barrier is needed.
 */
int
___mp_release_all_but_one(struct __mp_lock *mpl LOCK_FL_VARS)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	int rv = cpu->mplc_depth - 1;
#ifdef WITNESS
	int i;

	/* Tell witness about each shed recursion level. */
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE, file, line);
#endif

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl);
		db_enter();
	}
#endif

	cpu->mplc_depth = 1;

	return (rv);
}
227 
228 void
229 ___mp_acquire_count(struct __mp_lock *mpl, int count LOCK_FL_VARS)
230 {
231 	while (count--)
232 		___mp_lock(mpl LOCK_FL_ARGS);
233 }
234 
235 int
236 __mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
237 {
238 	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];
239 
240 	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
241 }
242 
243 #endif /* __USE_MI_MPLOCK */
244 
245 #endif /* MULTIPROCESSOR */
246 
247 
248 #ifdef __USE_MI_MUTEX
249 void
250 __mtx_init(struct mutex *mtx, int wantipl)
251 {
252 	mtx->mtx_owner = NULL;
253 	mtx->mtx_wantipl = wantipl;
254 	mtx->mtx_oldipl = IPL_NONE;
255 }
256 
257 #ifdef MULTIPROCESSOR
/*
 * Spin until the mutex is acquired.  With MP_LOCKDEBUG, drop into ddb
 * after __mp_lock_spinout failed attempts so a deadlocked mutex can
 * be inspected instead of silently hanging the CPU.
 */
void
__mtx_enter(struct mutex *mtx)
{
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	while (__mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks == 0) {
			/*
			 * Fix: terminate the message with a newline, as
			 * every other db_printf() in this file does.
			 */
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
}
277 
/*
 * Try to acquire the mutex once.  Returns 1 on success (with the IPL
 * raised to mtx_wantipl), 0 if another CPU owns it (IPL restored).
 */
int
__mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	/* Raise the IPL before attempting the acquisition so an
	 * interrupt on this CPU cannot deadlock against us. */
	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	/* Atomically claim ownership if the mutex is free. */
	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		/* Fence: keep the critical section after the acquire. */
		membar_enter_after_atomic();
		/* Remember the IPL to restore in __mtx_leave(). */
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		return (1);
	}

	/* Lost the race: undo the IPL raise before reporting failure. */
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
311 #else
/*
 * Uniprocessor mutex acquisition: no other CPU can contend, so
 * raising the IPL and recording ownership suffices — no atomics.
 */
void
__mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	/* Block interrupts that might take this mutex before we own it. */
	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}
335 
/*
 * Uniprocessor mtx_enter_try(): acquisition can never fail with only
 * one CPU, so just enter and report success.
 */
int
__mtx_enter_try(struct mutex *mtx)
{
	__mtx_enter(mtx);
	return (1);
}
342 #endif
343 
/*
 * Release the mutex and restore the IPL that was in effect before
 * __mtx_enter().  mtx_oldipl must be read before ownership is
 * dropped, since another CPU may reuse the mutex immediately after.
 */
void
__mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	/* Snapshot the saved IPL while we still own the mutex. */
	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	/* Make the critical section's stores visible before release. */
	membar_exit_before_atomic();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
367 #endif /* __USE_MI_MUTEX */
368 
369 #ifdef WITNESS
370 void
371 _mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
372     struct lock_type *type)
373 {
374 	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);
375 
376 	lo->lo_flags = MTX_LO_FLAGS(flags);
377 	if (name != NULL)
378 		lo->lo_name = name;
379 	else
380 		lo->lo_name = type->lt_name;
381 	WITNESS_INIT(lo, type);
382 
383 	_mtx_init(m, ipl);
384 }
385 
/*
 * WITNESS-aware mtx_enter(): check lock ordering before blocking on
 * the mutex, record the acquisition afterwards.
 */
void
_mtx_enter(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_CHECKORDER(lo, LOP_EXCLUSIVE | LOP_NEWORDER, file, line, NULL);
	__mtx_enter(m);
	WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
}
395 
396 int
397 _mtx_enter_try(struct mutex *m, const char *file, int line)
398 {
399 	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);
400 
401 	if (__mtx_enter_try(m)) {
402 		WITNESS_LOCK(lo, LOP_EXCLUSIVE, file, line);
403 		return 1;
404 	}
405 	return 0;
406 }
407 
/*
 * WITNESS-aware mtx_leave(): tell witness before actually releasing,
 * mirroring the order used in _mtx_enter().
 */
void
_mtx_leave(struct mutex *m, const char *file, int line)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	WITNESS_UNLOCK(lo, LOP_EXCLUSIVE, file, line);
	__mtx_leave(m);
}
416 #endif /* WITNESS */
417