/*	$NetBSD: kern_lock.c,v 1.161 2017/12/25 09:13:40 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.161 2017/12/25 09:13:40 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;
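
/*
 * The kernel lock itself: a single __cpu_simple_lock_t, padded out to
 * occupy a full cache line so that it does not share a line (and thus
 * false-share) with other hot data.  The CTASSERT following
 * kernel_lock_init() checks the sizing assumption.
 */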
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;
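
/*
 * Panic if it is not safe for the calling LWP to sleep: that is, if
 * the caller is running on the idle LWP, in interrupt or soft
 * interrupt context, or inside a pserialize read section.
 */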
void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.
	 */
	do {
		pctr = lwp_pctr();
		idle = CURCPU_IDLE_P();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold &&
	    kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}
	if (!pserialize_not_in_read_section()) {
		reason = "pserialize";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}
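
/*
 * Example (illustrative sketch only; example_alloc() is hypothetical
 * and not part of this file): a routine that can sleep, e.g. via a
 * KM_SLEEP allocation from <sys/kmem.h>, may call assert_sleepable()
 * on entry to catch callers in forbidden context early.
 */
#if 0
static void *
example_alloc(size_t len)
{

	/* Panics if called from interrupt, softint, idle, etc. */
	assert_sleepable();

	/* May sleep waiting for memory. */
	return kmem_alloc(len, KM_SLEEP);
}
#endif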

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void	_kernel_lock_dump(const volatile void *);

lockops_t _kernel_lock_ops = {
	.lo_name = "Kernel lock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = _kernel_lock_dump,
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(const volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 */
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
	u_int spins;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	if (__cpu_simple_lock_try(kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	spins = 0;
	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
			if (SPINLOCK_SPINOUT(spins)) {
				extern int start_init_exec;
				if (!start_init_exec)
					_KERNEL_LOCK_ABORT("spinout");
			}
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
	membar_enter();
}
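
/*
 * Example (illustrative sketch only; example_biglock_entry() is
 * hypothetical): callers do not use _kernel_lock() directly, but go
 * through the KERNEL_LOCK()/KERNEL_UNLOCK_ONE() macros from
 * <sys/systm.h>.  Because the lock recurses, a subsystem can take a
 * hold without knowing whether its caller already owns one.
 */
#if 0
static void
example_biglock_entry(void)
{

	KERNEL_LOCK(1, NULL);		/* acquire, or recurse, one hold */
	/* ... code that still requires the big lock ... */
	KERNEL_UNLOCK_ONE(NULL);	/* drop exactly one hold */
}
#endif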

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero,
 * release all holds; if it is -1, release a single hold, asserting
 * that the caller holds exactly one.  If 'countp' is not NULL, store
 * the number of holds held on entry through it.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}
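
/*
 * Example (illustrative sketch only; example_block() is hypothetical):
 * the 'countp' out-parameter supports the "drop everything, block,
 * retake" pattern, where every hold is released across a blocking
 * operation and the same number is reacquired afterwards.
 */
#if 0
static void
example_block(void)
{
	int nholds;

	KERNEL_UNLOCK_ALL(NULL, &nholds);   /* _kernel_unlock(0, &nholds) */
	/* ... block; the big lock is free while we wait ... */
	KERNEL_LOCK(nholds, NULL);	     /* restore the old hold count */
}
#endif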

bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}
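
/*
 * Example (illustrative sketch only; example_biglock_work() is
 * hypothetical): KERNEL_LOCKED_P() wraps _kernel_locked_p() and is
 * typically used in assertions.  Note that it tests the lock word
 * itself, so it reports the lock as held even when the holder is
 * another CPU.
 */
#if 0
static void
example_biglock_work(void)
{

	KASSERT(KERNEL_LOCKED_P());
	/* ... work that assumes the big lock is held ... */
}
#endif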