/*
 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_spinlock.c,v 1.16 2008/09/11 01:11:42 y0netan1 Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#define	BACKOFF_INITIAL	1
#define	BACKOFF_LIMIT	256

#ifdef SMP

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define	KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, backoff, 2,
	 "spin=%p bo1=%d thr=%p bo=%d",
	 ((2 * sizeof(void *)) + (2 * sizeof(int))));
KTR_INFO(KTR_SPIN_CONTENTION, spin, bofail, 3, SPIN_STRING, SPIN_ARG_SIZE);

#define logspin(name, mtx, type)			\
	KTR_LOG(spin_ ## name, mtx, type)

#define logspin_backoff(mtx, bo1, thr, bo)		\
	KTR_LOG(spin_backoff, mtx, bo1, thr, bo)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
	    &spinlocks_contested1, 0, "");

static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
	    &spinlocks_contested2, 0, "");

static int spinlocks_backoff_limit = BACKOFF_LIMIT;
SYSCTL_INT(_debug, OID_AUTO, spinlocks_bolim, CTLFLAG_RW,
	   &spinlocks_backoff_limit, 0, "");
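
/*
 * The contested paths below retry their atomic operations under the
 * exponential backoff helper defined next.  A minimal sketch of the
 * calling pattern (illustrative only; try_atomic_op() is a hypothetical
 * stand-in for the atomic_swap_int()/atomic_cmpset_int() attempts the
 * real code makes):
 *
 *	struct exponential_backoff bo;
 *
 *	exponential_init(&bo, mtx);
 *	for (;;) {
 *		if (try_atomic_op(mtx))
 *			break;
 *		if (exponential_backoff(&bo))
 *			break;
 *	}
 *
 * exponential_backoff() returns FALSE while the caller should keep
 * spinning and TRUE once the indefinite-wait handling decides the caller
 * should stop waiting (for example, when the system is panicking).
 */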

struct exponential_backoff {
	int backoff;
	int nsec;
	struct spinlock *mtx;
	sysclock_t base;
};
static int exponential_backoff(struct exponential_backoff *bo);

static __inline
void
exponential_init(struct exponential_backoff *bo, struct spinlock *mtx)
{
	bo->backoff = BACKOFF_INITIAL;
	bo->nsec = 0;
	bo->mtx = mtx;
}

/*
 * We were either contested due to another exclusive lock holder,
 * or due to the presence of shared locks.  We have to undo the mess
 * we created by returning the shared locks.
 *
 * If there was another exclusive lock holder, the exclusive bit in value
 * will be the only bit set.  We don't have to do anything since
 * restoration does not involve any work.
 *
 * Otherwise we successfully obtained the exclusive bit.  Attempt to
 * clear the shared bits.  If we are able to clear the shared bits
 * we win.  Otherwise we lose and we have to restore the shared bits
 * we couldn't clear (and also clear our exclusive bit).
 */
int
spin_trylock_wr_contested(globaldata_t gd, struct spinlock *mtx, int value)
{
	int bit;

	++spinlocks_contested1;
	if ((value & SPINLOCK_EXCLUSIVE) == 0) {
		while (value) {
			bit = bsfl(value);
			if (globaldata_find(bit)->gd_spinlock_rd == mtx) {
				atomic_swap_int(&mtx->lock, value);
				--gd->gd_spinlocks_wr;
				--gd->gd_curthread->td_critcount;
				return (FALSE);
			}
			value &= ~(1 << bit);
		}
		return (TRUE);
	}
	--gd->gd_spinlocks_wr;
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}

/*
 * We were either contested due to another exclusive lock holder,
 * or due to the presence of shared locks.
 *
 * NOTE: If value indicates an exclusively held mutex, no shared bits
 * would have been set and we can throw away value.
 */
void
spin_lock_wr_contested(struct spinlock *mtx, int value)
{
	struct exponential_backoff backoff;
	globaldata_t gd = mycpu;
	int bit;
	int mask;

	/*
	 * Wait until we can gain exclusive access vs another exclusive
	 * holder.
	 */
	exponential_init(&backoff, mtx);
	++spinlocks_contested1;
	logspin(beg, mtx, 'w');

	while (value & SPINLOCK_EXCLUSIVE) {
		value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE);
		if (exponential_backoff(&backoff)) {
			value &= ~SPINLOCK_EXCLUSIVE;
			break;
		}
	}

	/*
	 * Kill the cached shared bit for our own cpu.  This is the most
	 * common case and there's no sense wasting cpu on it.  Since
	 * spinlocks aren't recursive, we can't own a shared ref on the
	 * spinlock while trying to get an exclusive one.
	 *
	 * If multiple bits are set do not stall on any single cpu.  Check
	 * all cpus that have the cache bit set, then loop and check again,
	 * until we've cleaned all the bits.
	 */
	value &= ~gd->gd_cpumask;

	while ((mask = value) != 0) {
		while (mask) {
			bit = bsfl(value);
			if (globaldata_find(bit)->gd_spinlock_rd != mtx) {
				value &= ~(1 << bit);
			} else if (exponential_backoff(&backoff)) {
				value = 0;
				break;
			}
			mask &= ~(1 << bit);
		}
	}
	logspin(end, mtx, 'w');
}
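
/*
 * For reference, the lock word manipulated above packs the
 * SPINLOCK_EXCLUSIVE bit together with one cached-shared bit per cpu
 * (each cpu's gd_cpumask).  A purely illustrative value, assuming the
 * per-cpu bits occupy the low bits of the word:
 *
 *	value = SPINLOCK_EXCLUSIVE | 0x05
 *
 * would mean the exclusive bit is held while cpus 0 and 2 still have
 * cached shared bits set.  The loops above use bsfl() to visit each such
 * cpu and either clear its bit, if that cpu no longer has a read lock on
 * this spinlock, or back off and recheck.
 */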

/*
 * The cache bit wasn't set for our cpu.  Loop until we can set the bit.
 * As with the spin_lock_rd() inline we need a memory fence after setting
 * gd_spinlock_rd to interlock against exclusive spinlocks waiting for
 * that field to clear.
 */
void
spin_lock_rd_contested(struct spinlock *mtx)
{
	struct exponential_backoff backoff;
	globaldata_t gd = mycpu;
	int value = mtx->lock;

	/*
	 * Shortcut the op if we can just set the cache bit.  This case
	 * occurs when the last lock was an exclusive lock.
	 */
	while ((value & SPINLOCK_EXCLUSIVE) == 0) {
		if (atomic_cmpset_int(&mtx->lock, value, value|gd->gd_cpumask))
			return;
		value = mtx->lock;
	}

	exponential_init(&backoff, mtx);
	++spinlocks_contested1;

	logspin(beg, mtx, 'r');

	while ((value & gd->gd_cpumask) == 0) {
		if (value & SPINLOCK_EXCLUSIVE) {
			gd->gd_spinlock_rd = NULL;
			if (exponential_backoff(&backoff)) {
				gd->gd_spinlock_rd = mtx;
				break;
			}
			gd->gd_spinlock_rd = mtx;
			cpu_mfence();
		} else {
			if (atomic_cmpset_int(&mtx->lock, value, value|gd->gd_cpumask))
				break;
		}
		value = mtx->lock;
	}
	logspin(end, mtx, 'r');
}

/*
 * Handle exponential backoff and indefinite waits.
 *
 * If the system is handling a panic we hand the spinlock over to the caller
 * after 1 second.  After 10 seconds we attempt to print a debugger
 * backtrace.  We also run pending interrupts in order to allow a console
 * break into DDB.
 */
static
int
exponential_backoff(struct exponential_backoff *bo)
{
	sysclock_t count;
	int backoff;

#ifdef _RDTSC_SUPPORTED_
	if (cpu_feature & CPUID_TSC) {
		backoff =
			(((u_long)rdtsc() ^ (((u_long)curthread) >> 5)) &
			 (bo->backoff - 1)) + BACKOFF_INITIAL;
	} else
#endif
		backoff = bo->backoff;
	logspin_backoff(bo->mtx, bo->backoff, curthread, backoff);

	/*
	 * Quick backoff
	 */
	for (; backoff; --backoff)
		cpu_pause();
	if (bo->backoff < spinlocks_backoff_limit) {
		bo->backoff <<= 1;
		return (FALSE);
	} else {
		bo->backoff = BACKOFF_INITIAL;
	}

	logspin(bofail, bo->mtx, 'u');

	/*
	 * Indefinite
	 */
	++spinlocks_contested2;
	cpu_spinlock_contested();
	if (bo->nsec == 0) {
		bo->base = sys_cputimer->count();
		bo->nsec = 1;
	}

	count = sys_cputimer->count();
	if (count - bo->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait!\n", bo->mtx);
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
		++bo->nsec;
#if defined(INVARIANTS)
		if (bo->nsec == 11)
			print_backtrace(-1);
#endif
		if (bo->nsec == 60)
			panic("spin_lock: %p, indefinite wait!\n", bo->mtx);
		bo->base = count;
	}
	return (FALSE);
}

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */
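
/*
 * For example, the best-case exclusive timing test (mode 2) might be
 * driven from userland roughly as follows (illustrative values only;
 * spin_test_count is the iteration count used by modes 2 and 3, and
 * the sysctl requires PRIV_ROOT):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */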

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0, "");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock mtx;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&mtx);
		spin_lock_wr(&mtx);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock_wr(&mtx);
		spin_unlock_wr(&mtx);	/* Clean up the spinlock count */
		spin_unlock_wr(&mtx);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&mtx);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_wr_quick(gd, &mtx);
			spin_unlock_wr_quick(gd, &mtx);
		}
	}

	/*
	 * Time best-case shared spinlocks
	 */
	if (value == 3) {
		globaldata_t gd = mycpu;

		spin_init(&mtx);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_rd_quick(gd, &mtx);
			spin_unlock_rd_quick(gd, &mtx);
		}
	}
	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
	    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
#endif	/* SMP */