/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, then loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * already set and can increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back atomically with atomic_cmpset_int() and return.
 */
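/*
 * Simplified sketch of the shared failure case described above.  This is
 * illustrative only and is kept under #if 0 (never compiled); the real
 * constants and the inline fast path live in <sys/spinlock.h> and
 * <sys/spinlock2.h>, and the production loop is
 * _spin_lock_shared_contested() below.
 */
#if 0
static void
shared_acquire_sketch(struct spinlock *spin)
{
        for (;;) {
                uint32_t v = spin->counta;      /* lazy, non-atomic pre-read */

                cpu_ccfence();
                if (v == 0) {
                        /* nobody holds it: transition 0 -> SPINLOCK_SHARED|1 */
                        if (atomic_cmpset_int(&spin->counta, 0,
                                              SPINLOCK_SHARED | 1))
                                return;
                } else if (v & SPINLOCK_SHARED) {
                        /* already shared: just add our count */
                        if (atomic_cmpset_int(&spin->counta, v, v + 1))
                                return;
                }
                cpu_pause();
        }
}
#endif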
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

struct indefinite_info {
        sysclock_t base;
        int secs;
        const char *ident;
};

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION     KTR_ALL
#endif
#define SPIN_STRING     "spin=%p type=%c"
#define SPIN_ARG_SIZE   (sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)                       \
        KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
        &spinlocks_add_latency, 0,
        "Add spinlock latency");

#endif

static int spin_indefinite_check(struct spinlock *spin,
                struct indefinite_info *info);
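/*
 * Typical usage sketch (illustrative only, kept under #if 0 and not
 * compiled).  spin_init(), spin_lock() and spin_unlock() are the inlines
 * exercised by the sysctl test code at the bottom of this file; the
 * spin_lock_shared()/spin_unlock_shared() names are assumed here from the
 * naming of _spin_lock_shared_contested() below.
 */
#if 0
static void
spinlock_usage_sketch(void)
{
        struct spinlock lk;

        spin_init(&lk, "examplelk");

        spin_lock(&lk);                 /* exclusive hold */
        /* ... modify the protected structure ... */
        spin_unlock(&lk);

        spin_lock_shared(&lk);          /* shared hold (readers) */
        /* ... read the protected structure ... */
        spin_unlock_shared(&lk);
}
#endif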
/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        /*
         * Handle degenerate case, else fail.
         */
        if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
                return TRUE;
        /*atomic_add_int(&spin->counta, -1);*/
        --gd->gd_spinlocks;
        --gd->gd_curthread->td_critcount;
        return (FALSE);
}

/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented.
 *
 * atomic_swap_int() is the absolute fastest spinlock instruction, at
 * least on multi-socket systems.  All instructions seem to be about
 * the same on single-socket multi-core systems.  However, atomic_swap_int()
 * does not result in an even distribution of successful acquisitions.
 *
 * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
 * shared spin locks, so as we do a better job removing contention we've
 * moved to atomic_cmpset_int() to be able to handle multiple states.
 *
 * Another problem we have is that (at least on the 48-core opteron we test
 * with) having all 48 cores contesting the same spin lock reduces
 * performance to around 600,000 ops/sec, versus millions when fewer cores
 * are going after the same lock.
 *
 * Backoff algorithms can create even worse starvation problems, and don't
 * really improve performance when a lot of cores are contending.
 *
 * Our solution is to allow the data cache to lazy-update by reading it
 * non-atomically and only attempting to acquire the lock if the lazy read
 * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
 * (for intel/amd anyhow) is not strictly needed as cache bus resource use
 * is governed by the lazy update.
 *
 * WARNING!!!!  Performance matters here, by a huge margin.
 *
 *      48-core test with pre-read / -j 48 no-modules kernel compile
 *      with fanned-out inactive and active queues came in at 55 seconds.
 *
 *      48-core test with pre-read / -j 48 no-modules kernel compile
 *      came in at 75 seconds.  Without pre-read it came in at 170 seconds.
 *
 *      4-core test with pre-read / -j 48 no-modules kernel compile
 *      came in at 83 seconds.  Without pre-read it came in at 83 seconds
 *      as well (no difference).
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
        struct indefinite_info info = { 0, 0, ident };
        int i;

        /*
         * Handle degenerate case.
         */
        if (value == SPINLOCK_SHARED) {
                if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
                        return;
        }

        /*
         * Transfer our count to the high bits, then loop until we can
         * acquire the low counter (== 1).  No new shared lock can be
         * acquired while we hold the EXCLWAIT bits.
         *
         * Force any existing shared locks to exclusive.  The shared unlock
         * understands that this may occur.
         */
        atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
        if (value & SPINLOCK_SHARED)
                atomic_clear_int(&spin->counta, SPINLOCK_SHARED);

#ifdef DEBUG_LOCKS_LATENCY
        long j;
        for (j = spinlocks_add_latency; j > 0; --j)
                cpu_ccfence();
#endif
        i = 0;

        /*logspin(beg, spin, 'w');*/
        for (;;) {
                /*
                 * If the low bits are zero, try to acquire the exclusive lock
                 * by transferring our high bit counter to the low bits.
                 *
                 * NOTE: Reading spin->counta prior to the swap is extremely
                 *       important on multi-chip/many-core boxes.  On 48-core
                 *       this one change improves fully concurrent all-cores
                 *       compiles by 100% or better.
                 *
                 *       I can't emphasize enough how important the pre-read
                 *       is in preventing hw cache bus armageddon on
                 *       multi-chip systems.  And on single-chip/multi-core
                 *       systems it just doesn't hurt.
                 */
                uint32_t ovalue = spin->counta;
                cpu_ccfence();
                if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
                    atomic_cmpset_int(&spin->counta, ovalue,
                                      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
                        break;
                }
                if ((++i & 0x7F) == 0x7F) {
                        mycpu->gd_cnt.v_lock_name[0] = 'X';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                ident,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;
                        if (spin_indefinite_check(spin, &info))
                                break;
                }
#ifdef _KERNEL_VIRTUAL
                pthread_yield();
#endif
        }
        /*logspin(end, spin, 'w');*/
}
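/*
 * A short worked example of the count transfer performed above (the numeric
 * value of SPINLOCK_EXCLWAIT is defined in <sys/spinlock.h>; only its role
 * as a high-bit counter unit matters here):
 *
 *      - The spin_lock() inline already added 1 to the low bits of counta
 *        on our behalf.
 *      - atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1) removes that
 *        1 from the low bits and adds one unit to the EXCLWAIT field,
 *        marking us as a pending exclusive waiter.
 *      - Once the low bits drain to 0, the atomic_cmpset_int() to
 *        (ovalue - SPINLOCK_EXCLWAIT) | 1 gives back our EXCLWAIT unit and
 *        sets the low count to 1, which is the exclusive-held state.
 */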
/*
 * Shared spinlock attempt was contested.
 *
 * The caller has not modified counta.
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident, int value)
{
        struct indefinite_info info = { 0, 0, ident };
        int i;

#ifdef DEBUG_LOCKS_LATENCY
        long j;
        for (j = spinlocks_add_latency; j > 0; --j)
                cpu_ccfence();
#endif
        i = 0;

        /*logspin(beg, spin, 'w');*/
        for (;;) {
                /*
                 * Loop until we can acquire the shared spinlock.  Note that
                 * the low bits can be zero while the high EXCLWAIT bits are
                 * non-zero.  In this situation exclusive requesters have
                 * priority (otherwise shared users on multiple cpus can hog
                 * the spinlock).
                 *
                 * NOTE: Reading spin->counta prior to the swap is extremely
                 *       important on multi-chip/many-core boxes.  On 48-core
                 *       this one change improves fully concurrent all-cores
                 *       compiles by 100% or better.
                 *
                 *       I can't emphasize enough how important the pre-read
                 *       is in preventing hw cache bus armageddon on
                 *       multi-chip systems.  And on single-chip/multi-core
                 *       systems it just doesn't hurt.
                 */
                uint32_t ovalue = spin->counta;

                cpu_ccfence();
                if (ovalue == 0) {
                        if (atomic_cmpset_int(&spin->counta, 0,
                                              SPINLOCK_SHARED | 1))
                                break;
                } else if (ovalue & SPINLOCK_SHARED) {
                        if (atomic_cmpset_int(&spin->counta, ovalue,
                                              ovalue + 1))
                                break;
                }
                if ((++i & 0x7F) == 0x7F) {
                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                ident,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;
                        if (spin_indefinite_check(spin, &info))
                                break;
                }
#ifdef _KERNEL_VIRTUAL
                pthread_yield();
#endif
        }
        /*logspin(end, spin, 'w');*/
}

static
int
spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
{
        sysclock_t count;

        cpu_spinlock_contested();

        count = sys_cputimer->count();
        if (info->secs == 0) {
                info->base = count;
                ++info->secs;
        } else if (count - info->base > sys_cputimer->freq) {
                kprintf("spin_lock: %s(%p), indefinite wait (%d secs)!\n",
                        info->ident, spin, info->secs);
                info->base = count;
                ++info->secs;
                if (panicstr)
                        return (TRUE);
#if defined(INVARIANTS)
                if (spin_lock_test_mode) {
                        print_backtrace(-1);
                        return (TRUE);
                }
#endif
#if defined(INVARIANTS)
                if (info->secs == 11)
                        print_backtrace(-1);
#endif
                if (info->secs == 60)
                        panic("spin_lock: %s(%p), indefinite wait!",
                              info->ident, spin);
        }
        return (FALSE);
}

/*
 * If INVARIANTS is enabled, various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *      1       Test the indefinite wait code
 *      2       Time the best-case exclusive lock overhead (spin_test_count)
 *      3       Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
        "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
        struct spinlock spin;
        int error;
        int value = 0;
        int i;

        if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
                return (error);
        if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
                return (error);

        /*
         * Indefinite wait test
         */
        if (value == 1) {
                spin_init(&spin, "sysctllock");
                spin_lock(&spin);       /* force an indefinite wait */
                spin_lock_test_mode = 1;
                spin_lock(&spin);
                spin_unlock(&spin);     /* Clean up the spinlock count */
                spin_unlock(&spin);
                spin_lock_test_mode = 0;
        }

        /*
         * Time best-case exclusive spinlocks
         */
        if (value == 2) {
                globaldata_t gd = mycpu;

                spin_init(&spin, "sysctllocktest");
                for (i = spin_test_count; i > 0; --i) {
                        _spin_lock_quick(gd, &spin, "test");
                        spin_unlock_quick(gd, &spin);
                }
        }

        return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
        0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");
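/*
 * Example invocation from userland once the sysctls above are registered
 * (values illustrative):
 *
 *      sysctl debug.spin_test_count=1000000
 *      sysctl debug.spin_lock_test=2
 *
 * Mode 1 forces an indefinite wait on an already-held spinlock to exercise
 * the reporting path in spin_indefinite_check() (spin_lock_test_mode lets
 * it recover); mode 2 times spin_test_count uncontested exclusive
 * lock/unlock pairs on the current cpu.
 */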
code"); 408 409 #endif /* INVARIANTS */ 410