/*	$NetBSD: kern_lock.c,v 1.183 2023/02/23 14:57:29 riastradh Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.183 2023/02/23 14:57:29 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
#include <sys/ksyms.h>
#endif

#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;

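/*
 * The kernel lock itself: a single simple lock, padded out to occupy a
 * full cache line so that it does not share one with unrelated data
 * (the CTASSERT after kernel_lock_init() checks the arithmetic).
 */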
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (__predict_false(panicstr != NULL)) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.  Instead,
	 * sample lwp_pctr() (this LWP's context switch counter) before
	 * and after reading CURCPU_IDLE_P(), and retry if the LWP was
	 * preempted or migrated in between.
	 */
	do {
		pctr = lwp_pctr();
		__insn_barrier();
		idle = CURCPU_IDLE_P();
		__insn_barrier();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}
	if (!pserialize_not_in_read_section()) {
		reason = "pserialize";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}
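
/*
 * Illustrative usage sketch: functions that may sleep typically call
 * the above via the ASSERT_SLEEPABLE() macro from <sys/systm.h> on
 * entry, before taking any locks.  foo_allocate() is a hypothetical
 * name used only for illustration:
 *
 *	void *
 *	foo_allocate(size_t len)
 *	{
 *
 *		ASSERT_SLEEPABLE();
 *		return kmem_alloc(len, KM_SLEEP);
 *	}
 */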

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

static void	_kernel_lock_dump(const volatile void *, lockop_printer_t);

lockops_t _kernel_lock_ops = {
	.lo_name = "Kernel lock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = _kernel_lock_dump,
};

#ifdef LOCKDEBUG

#include <ddb/ddb.h>

static void
kernel_lock_trace_ipi(void *cookie)
{

	printf("%s[%d %s]: hogging kernel lock\n", cpu_name(curcpu()),
	    curlwp->l_lid,
	    curlwp->l_name ? curlwp->l_name : curproc->p_comm);
	db_stacktrace();
}

#endif

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
static void
_kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	pr("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 *
 * Although it may not look it, this is one of the most central, intricate
 * routines in the kernel, and tons of code elsewhere depends on its exact
 * behaviour.  If you change something in here, expect it to bite you in the
 * rear.
 */
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
#ifdef LOCKDEBUG
	static struct cpu_info *kernel_lock_holder;
	u_int spins = 0;
	u_int starttime = getticks();
#endif
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
#ifdef LOCKDEBUG
		kernel_lock_holder = curcpu();
#endif
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 *
	 * This membar_producer matches the membar_consumer in
	 * mutex_vector_enter.
	 *
	 * That way, if l has just released a mutex, mutex_vector_enter
	 * can't see this store ci->ci_biglock_wanted := l without also
	 * seeing the mutex_exit store mtx->mtx_owner := 0, which
	 * clears the has-waiters bit.
	 */
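	/*
	 * An illustrative sketch of the pairing described above,
	 * assuming l runs here on cpu0 and another LWP is inside
	 * mutex_vector_enter() on cpu1:
	 *
	 *	cpu0 (this code)		cpu1 (mutex_vector_enter)
	 *	mtx->mtx_owner := 0		load ci->ci_biglock_wanted
	 *	membar_producer()		membar_consumer()
	 *	ci->ci_biglock_wanted := l	load mtx->mtx_owner
	 *
	 * If cpu1's first load observes l, the pairing guarantees
	 * that its second load observes the mutex_exit() store of 0.
	 */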
	membar_producer();
	owant = ci->ci_biglock_wanted;
	atomic_store_relaxed(&ci->ci_biglock_wanted, l);
#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
	l->l_ld_wanted = __builtin_return_address(0);
#endif

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins) && start_init_exec &&
			    (getticks() - starttime) > 10*hz) {
				ipi_msg_t msg = {
					.func = kernel_lock_trace_ipi,
				};
				kpreempt_disable();
				ipi_unicast(&msg, kernel_lock_holder);
				ipi_wait(&msg);
				kpreempt_enable();
				_KERNEL_LOCK_ABORT("spinout");
			}
#endif
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be visible on other CPUs before a mutex_exit() on
	 * this CPU can test the has-waiters bit.
	 *
	 * This membar_enter matches the membar_enter in
	 * mutex_vector_enter.  (Yes, not membar_exit -- the legacy
	 * naming is confusing, but store-before-load usually pairs
	 * with store-before-load, in the extremely rare cases where it
	 * is used at all.)
	 *
	 * That way, mutex_vector_enter can't see this store
	 * ci->ci_biglock_wanted := owant until it has set the
	 * has-waiters bit.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

#ifdef LOCKDEBUG
	kernel_lock_holder = curcpu();
#endif
}
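
/*
 * Illustrative usage sketch: callers normally go through the
 * KERNEL_LOCK()/KERNEL_UNLOCK_ONE() macros from <sys/systm.h> rather
 * than calling _kernel_lock() directly, e.g. to bracket a non-MP-safe
 * driver entry point.  foo_intr() and foo_intr_locked() are
 * hypothetical names:
 *
 *	static int
 *	foo_intr(void *arg)
 *	{
 *		int rv;
 *
 *		KERNEL_LOCK(1, NULL);
 *		rv = foo_intr_locked(arg);
 *		KERNEL_UNLOCK_ONE(NULL);
 *		return rv;
 *	}
 */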

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero,
 * release all holds.  If 'nlocks' is -1, release a single hold,
 * asserting that the caller holds at most one.  If 'countp' is not
 * NULL, it is filled in with the number of holds held on entry.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}
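
/*
 * Illustrative usage sketch: code that must sleep can drop every hold
 * with KERNEL_UNLOCK_ALL() and restore the same count afterwards, the
 * pattern the nlocks == 0 / countp interface above exists to support.
 * This assumes at least one hold is held on entry; the surrounding
 * context is hypothetical:
 *
 *	int nlocks;
 *
 *	KERNEL_UNLOCK_ALL(NULL, &nlocks);
 *	(void)kpause("example", false, hz, NULL);
 *	KERNEL_LOCK(nlocks, NULL);
 */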

bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}
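
/*
 * Illustrative usage sketch: subsystems that rely on the big lock can
 * assert that it is held via the KERNEL_LOCKED_P() macro from
 * <sys/systm.h>, which wraps _kernel_locked_p():
 *
 *	KASSERT(KERNEL_LOCKED_P());
 */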