xref: /dflybsd-src/sys/kern/kern_spinlock.c (revision 4badba3841ae9f4d60211d1c5ed006e17b38c299)
1 /*
2  * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu. and Matthew Dillon
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * The implementation is designed to avoid looping when compatible operations
35  * are executed.
36  *
37  * To acquire a spinlock we first increment counta.  Then we check if counta
38  * meets our requirements.  For an exclusive spinlock it must be 1; for a
39  * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
40  *
41  * Shared spinlock failure case: Decrement the count, then loop until we
42  * can either transition counta from 0 to SPINLOCK_SHARED|1, or find
43  * SPINLOCK_SHARED already set and increment the count.
44  *
45  * Exclusive spinlock failure case: While maintaining the count, use an
46  * atomic add to transfer it from the low bits to the high (EXCLWAIT) bits
47  * of counta and clear the SPINLOCK_SHARED flag unconditionally.  Then loop
48  * until all low bits are 0.  Once the low bits drop to 0 we can atomically
49  * transfer the count back with atomic_cmpset_int() and return.
50  */
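/*
 * Illustrative sketch only (never compiled): roughly what the exclusive
 * fast path described above might look like.  The real inlines live in
 * sys/spinlock2.h and also maintain td_critcount and gd_spinlocks, which
 * is omitted here; "example_spin_lock" is a hypothetical name.
 */
#if 0
static __inline void
example_spin_lock(struct spinlock *spin)
{
	/*
	 * Optimistically add 1 to counta.  If the previous value was 0 we
	 * now own the lock (counta == 1).  Otherwise fall into the
	 * contested path, which moves our count into the high EXCLWAIT
	 * bits and spins until it can take the low bits.
	 */
	if (atomic_fetchadd_int(&spin->counta, 1) != 0)
		spin_lock_contested(spin);
}
#endif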
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/types.h>
54 #include <sys/kernel.h>
55 #include <sys/sysctl.h>
56 #ifdef INVARIANTS
57 #include <sys/proc.h>
58 #endif
59 #include <sys/priv.h>
60 #include <machine/atomic.h>
61 #include <machine/cpu.h>
62 #include <machine/cpufunc.h>
63 #include <machine/specialreg.h>
64 #include <machine/clock.h>
65 #include <sys/spinlock.h>
66 #include <sys/spinlock2.h>
67 #include <sys/ktr.h>
68 
69 #ifdef _KERNEL_VIRTUAL
70 #include <pthread.h>
71 #endif
72 
73 struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);
74 
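/*
 * Bookkeeping for the indefinite-wait check below: "base" is the
 * sys_cputimer count at the last report and "secs" is the number of
 * seconds we have been spinning on the lock.
 */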
75 struct indefinite_info {
76 	sysclock_t	base;
77 	int		secs;
78 };
79 
80 /*
81  * Kernel Trace
82  */
83 #if !defined(KTR_SPIN_CONTENTION)
84 #define KTR_SPIN_CONTENTION	KTR_ALL
85 #endif
86 #define SPIN_STRING	"spin=%p type=%c"
87 #define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))
88 
89 KTR_INFO_MASTER(spin);
90 #if 0
91 KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
92 KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
93 #endif
94 
95 #define logspin(name, spin, type)			\
96 	KTR_LOG(spin_ ## name, spin, type)
97 
98 #ifdef INVARIANTS
99 static int spin_lock_test_mode;
100 #endif
101 
102 static int64_t spinlocks_contested1;
103 SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
104     &spinlocks_contested1, 0,
105     "Spinlock contention count due to collisions with exclusive lock holders");
106 
107 static int64_t spinlocks_contested2;
108 SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
109     &spinlocks_contested2, 0,
110     "Serious spinlock contention count");
111 
112 #ifdef DEBUG_LOCKS_LATENCY
113 
114 static long spinlocks_add_latency;
115 SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
116     &spinlocks_add_latency, 0,
117     "Add spinlock latency");
118 
119 #endif
120 
121 
122 /*
123  * We need a fairly large pool to avoid contention on large SMP systems,
124  * particularly multi-chip systems.
125  */
126 /*#define SPINLOCK_NUM_POOL	8101*/
127 #define SPINLOCK_NUM_POOL	8192
128 #define SPINLOCK_NUM_POOL_MASK	(SPINLOCK_NUM_POOL - 1)
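/* SPINLOCK_NUM_POOL must stay a power of 2 for the mask above to work. */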
129 
130 static __cachealign struct {
131 	struct spinlock	spin;
132 	char filler[32 - sizeof(struct spinlock)];
133 } pool_spinlocks[SPINLOCK_NUM_POOL];
134 
135 static int spin_indefinite_check(struct spinlock *spin,
136 				  struct indefinite_info *info);
137 
138 /*
139  * We contested due to another exclusive lock holder.  We lose.
140  *
141  * We have to unwind the critical section and spinlock accounting that
142  * the trylock inline set up on our behalf before returning FALSE.
143  */
144 int
145 spin_trylock_contested(struct spinlock *spin)
146 {
147 	globaldata_t gd = mycpu;
148 
149 	/*++spinlocks_contested1;*/
150 	/*atomic_add_int(&spin->counta, -1);*/
151 	--gd->gd_spinlocks;
152 	--gd->gd_curthread->td_critcount;
153 	return (FALSE);
154 }
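/*
 * Usage sketch (illustrative only, never compiled): the spin_trylock()
 * inline returns TRUE on success and calls spin_trylock_contested() to
 * unwind its accounting and return FALSE when the lock is busy.
 * "some_spin" is a hypothetical spinlock.
 */
#if 0
	if (spin_trylock(&some_spin)) {
		/* ... short critical section ... */
		spin_unlock(&some_spin);
	} else {
		/* lock was busy; do not touch the protected data */
	}
#endif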
155 
156 /*
157  * The spin_lock() inline was unable to acquire the lock.
158  *
159  * atomic_swap_int() is the absolute fastest spinlock instruction, at
160  * least on multi-socket systems.  All instructions seem to be about
161  * the same on single-socket multi-core systems.  However, atomic_swap_int()
162  * does not result in an even distribution of successful acquisitions.
163  *
164  * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
165  * shared spin locks, so as we do a better job removing contention we've
166  * moved to atomic_cmpset_int() to be able handle multiple states.
167  * moved to atomic_cmpset_int() to be able to handle multiple states.
168  * Another problem we have is that (at least on the 48-core opteron we test
169  * with) having all 48 cores contesting the same spin lock reduces
170  * performance to around 600,000 ops/sec, versus millions when fewer cores
171  * are going after the same lock.
172  *
173  * Backoff algorithms can create even worse starvation problems, and don't
174  * really improve performance when a lot of cores are contending.
175  *
176  * Our solution is to allow the data cache to lazy-update by reading it
177  * non-atomically and only attempting to acquire the lock if the lazy read
178  * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
179  * (for intel/amd anyhow) is not strictly needed as cache bus resource use
180  * is governed by the lazy update.
181  *
182  * WARNING!!!!  Performance matters here, by a huge margin.
183  *
184  *	48-core test with pre-read / -j 48 no-modules kernel compile
185  *	with fanned-out inactive and active queues came in at 55 seconds.
186  *
187  *	48-core test with pre-read / -j 48 no-modules kernel compile
188  *	came in at 75 seconds.  Without pre-read it came in at 170 seconds.
189  *
190  *	4-core test with pre-read / -j 48 no-modules kernel compile
191  *	came in at 83 seconds.  Without pre-read it came in at 83 seconds
192  *	as well (no difference).
193  */
194 void
195 spin_lock_contested(struct spinlock *spin)
196 {
197 	struct indefinite_info info = { 0, 0 };
198 	int i;
199 
200 	/*
201 	 * Transfer our count to the high bits, then loop until we can
202 	 * acquire the low counter (== 1).  No new shared lock can be
203 	 * acquired while we hold the EXCLWAIT bits.
204 	 *
205 	 * Force any existing shared locks to exclusive.  The shared unlock
206 	 * understands that this may occur.
207 	 */
208 	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
209 	atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
210 
211 #ifdef DEBUG_LOCKS_LATENCY
212 	long j;
213 	for (j = spinlocks_add_latency; j > 0; --j)
214 		cpu_ccfence();
215 #endif
216 #if defined(INVARIANTS)
217 	if (spin_lock_test_mode > 10 &&
218 	    spin->countb > spin_lock_test_mode &&
219 	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
220 		spin->countb = 0;
221 		print_backtrace(-1);
222 	}
223 	++spin->countb;
224 #endif
225 	i = 0;
226 
227 	/*logspin(beg, spin, 'w');*/
228 	for (;;) {
229 		/*
230 		 * If the low bits are zero, try to acquire the exclusive lock
231 		 * by transferring our high-bit counter to the low bits.
232 		 *
233 		 * NOTE: Reading spin->counta prior to the swap is extremely
234 		 *	 important on multi-chip/many-core boxes.  On 48-core
235 		 *	 this one change improves fully concurrent all-cores
236 		 *	 compiles by 100% or better.
237 		 *
238 		 *	 I can't emphasize enough how important the pre-read
239 		 *	 is in preventing hw cache bus armageddon on
240 		 *	 multi-chip systems.  And on single-chip/multi-core
241 		 *	 systems it just doesn't hurt.
242 		 */
243 		uint32_t ovalue = spin->counta;
244 		cpu_ccfence();
245 		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
246 		    atomic_cmpset_int(&spin->counta, ovalue,
247 				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
248 			break;
249 		}
250 		if ((++i & 0x7F) == 0x7F) {
251 #if defined(INVARIANTS)
252 			++spin->countb;
253 #endif
254 			if (spin_indefinite_check(spin, &info))
255 				break;
256 		}
257 #ifdef _KERNEL_VIRTUAL
258 		pthread_yield();
259 #endif
260 	}
261 	/*logspin(end, spin, 'w');*/
262 }
263 
264 /*
265  * Shared spinlock attempt was contested.
266  *
267  * The caller has not modified counta.
268  */
269 void
270 spin_lock_shared_contested2(struct spinlock *spin)
271 {
272 	struct indefinite_info info = { 0, 0 };
273 	int i;
274 
275 #ifdef DEBUG_LOCKS_LATENCY
276 	long j;
277 	for (j = spinlocks_add_latency; j > 0; --j)
278 		cpu_ccfence();
279 #endif
280 #if defined(INVARIANTS)
281 	if (spin_lock_test_mode > 10 &&
282 	    spin->countb > spin_lock_test_mode &&
283 	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
284 		spin->countb = 0;
285 		print_backtrace(-1);
286 	}
287 	++spin->countb;
288 #endif
289 	i = 0;
290 
291 	/*logspin(beg, spin, 'w');*/
292 	for (;;) {
293 		/*
294 		 * Loop until we can acquire the shared spinlock.  Note that
295 		 * the low bits can be zero while the high EXCLWAIT bits are
296 		 * non-zero.  In this situation exclusive requesters have
297 		 * priority (otherwise shared users on multiple cpus can hog
298 		 * the spinlock).
299 		 *
300 		 * NOTE: Reading spin->counta prior to the swap is extremely
301 		 *	 important on multi-chip/many-core boxes.  On 48-core
302 		 *	 this one change improves fully concurrent all-cores
303 		 *	 compiles by 100% or better.
304 		 *
305 		 *	 I can't emphasize enough how important the pre-read
306 		 *	 is in preventing hw cache bus armageddon on
307 		 *	 multi-chip systems.  And on single-chip/multi-core
308 		 *	 systems it just doesn't hurt.
309 		 */
310 		uint32_t ovalue = spin->counta;
311 
312 		cpu_ccfence();
313 		if (ovalue == 0) {
314 			if (atomic_cmpset_int(&spin->counta, 0,
315 					      SPINLOCK_SHARED | 1))
316 				break;
317 		} else if (ovalue & SPINLOCK_SHARED) {
318 			if (atomic_cmpset_int(&spin->counta, ovalue,
319 					      ovalue + 1))
320 				break;
321 		}
322 		if ((++i & 0x7F) == 0x7F) {
323 #if defined(INVARIANTS)
324 			++spin->countb;
325 #endif
326 			if (spin_indefinite_check(spin, &info))
327 				break;
328 		}
329 #ifdef _KERNEL_VIRTUAL
330 		pthread_yield();
331 #endif
332 	}
333 	/*logspin(end, spin, 'w');*/
334 }
335 
336 /*
337  * Pool functions (SHARED SPINLOCKS NOT SUPPORTED)
338  */
339 static __inline int
340 _spin_pool_hash(void *ptr)
341 {
342 	int i;
343 
344 	i = ((int)(uintptr_t) ptr >> 5) ^ ((int)(uintptr_t)ptr >> 12);
345 	i &= SPINLOCK_NUM_POOL_MASK;
346 	return (i);
347 }
348 
349 void
350 _spin_pool_lock(void *chan)
351 {
352 	struct spinlock *sp;
353 
354 	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
355 	spin_lock(sp);
356 }
357 
358 void
359 _spin_pool_unlock(void *chan)
360 {
361 	struct spinlock *sp;
362 
363 	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
364 	spin_unlock(sp);
365 }
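/*
 * Usage sketch (illustrative only, never compiled): pool spinlocks allow
 * serializing on an arbitrary address without embedding a struct spinlock
 * in the object.  The same channel pointer must be used for both lock and
 * unlock so both hash to the same pooled lock; "obj" is hypothetical.
 */
#if 0
	_spin_pool_lock(obj);		/* hash obj to a pooled spinlock */
	/* ... briefly manipulate *obj ... */
	_spin_pool_unlock(obj);		/* must pass the same pointer */
#endif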
366 
367 
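/*
 * Called from the contested loops every 128 or so spins.  Complains once
 * a second when a spinlock appears to be held indefinitely, gives up
 * (returns TRUE) if the system is panicking or, with INVARIANTS, when
 * spin_lock_test_mode is set, and finally panics after 60 seconds.
 */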
368 static
369 int
370 spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
371 {
372 	sysclock_t count;
373 
374 	cpu_spinlock_contested();
375 
376 	count = sys_cputimer->count();
377 	if (info->secs == 0) {
378 		info->base = count;
379 		++info->secs;
380 	} else if (count - info->base > sys_cputimer->freq) {
381 		kprintf("spin_lock: %p, indefinite wait (%d secs)!\n",
382 			spin, info->secs);
383 		info->base = count;
384 		++info->secs;
385 		if (panicstr)
386 			return (TRUE);
387 #if defined(INVARIANTS)
388 		if (spin_lock_test_mode) {
389 			print_backtrace(-1);
390 			return (TRUE);
391 		}
392 #endif
393 #if defined(INVARIANTS)
394 		if (info->secs == 11)
395 			print_backtrace(-1);
396 #endif
397 		if (info->secs == 60)
398 			panic("spin_lock: %p, indefinite wait!", spin);
399 	}
400 	return (FALSE);
401 }
402 
403 /*
404  * If INVARIANTS is enabled various spinlock timing tests can be run
405  * by setting debug.spin_lock_test:
406  *
407  *	1	Test the indefinite wait code
408  *	2	Time the best-case exclusive lock overhead (spin_test_count)
409  *	3	Time the best-case shared lock overhead (spin_test_count)
410  */
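/*
 * For example (illustrative usage, assuming a root shell on an INVARIANTS
 * kernel):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */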
411 
412 #ifdef INVARIANTS
413 
414 static int spin_test_count = 10000000;
415 SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
416     "Number of iterations to use for spinlock wait code test");
417 
418 static int
419 sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
420 {
421         struct spinlock spin;
422 	int error;
423 	int value = 0;
424 	int i;
425 
426 	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
427 		return (error);
428 	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
429 		return (error);
430 
431 	/*
432 	 * Indefinite wait test
433 	 */
434 	if (value == 1) {
435 		spin_init(&spin);
436 		spin_lock(&spin);	/* force an indefinite wait */
437 		spin_lock_test_mode = 1;
438 		spin_lock(&spin);
439 		spin_unlock(&spin);	/* Clean up the spinlock count */
440 		spin_unlock(&spin);
441 		spin_lock_test_mode = 0;
442 	}
443 
444 	/*
445 	 * Time best-case exclusive spinlocks
446 	 */
447 	if (value == 2) {
448 		globaldata_t gd = mycpu;
449 
450 		spin_init(&spin);
451 		for (i = spin_test_count; i > 0; --i) {
452 		    spin_lock_quick(gd, &spin);
453 		    spin_unlock_quick(gd, &spin);
454 		}
455 	}
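	/*
	 * Time best-case shared spinlocks (mode 3 in the comment above).
	 *
	 * Illustrative sketch only (never compiled): this handler does not
	 * currently implement mode 3.  The code below assumes the
	 * spin_lock_shared()/spin_unlock_shared() API and mirrors the
	 * exclusive timing test above.
	 */
#if 0
	if (value == 3) {
		spin_init(&spin);
		for (i = spin_test_count; i > 0; --i) {
		    spin_lock_shared(&spin);
		    spin_unlock_shared(&spin);
		}
	}
#endif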
456 
457         return (0);
458 }
459 
460 SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
461         0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");
462 
463 #endif	/* INVARIANTS */
464