/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_spinlock.c,v 1.16 2008/09/11 01:11:42 y0netan1 Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#define	BACKOFF_INITIAL	1
#define	BACKOFF_LIMIT	256

#ifdef SMP

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define	KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define	SPIN_STRING	"spin=%p type=%c"
#define	SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, backoff, 2,
	 "spin=%p bo1=%d thr=%p bo=%d",
	 ((2 * sizeof(void *)) + (2 * sizeof(int))));
KTR_INFO(KTR_SPIN_CONTENTION, spin, bofail, 3, SPIN_STRING, SPIN_ARG_SIZE);

#define	logspin(name, mtx, type)			\
	KTR_LOG(spin_ ## name, mtx, type)

#define	logspin_backoff(mtx, bo1, thr, bo)		\
	KTR_LOG(spin_backoff, mtx, bo1, thr, bo)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

/* Number of contested spinlock acquisition attempts */
static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
	    &spinlocks_contested1, 0, "");

/* Number of passes through the indefinite-wait path (backoff limit hit) */
static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
	    &spinlocks_contested2, 0, "");

/* Ceiling on the quick-backoff spin count before going indefinite */
static int spinlocks_backoff_limit = BACKOFF_LIMIT;
SYSCTL_INT(_debug, OID_AUTO, spinlocks_bolim, CTLFLAG_RW,
	   &spinlocks_backoff_limit, 0, "");

struct exponential_backoff {
	int backoff;		/* current spin count, doubled each pass */
	int nsec;		/* approx. seconds spent in the indefinite path */
	struct spinlock *mtx;	/* the spinlock being waited on */
	sysclock_t base;	/* sys_cputimer base for the one-second check */
};
static int exponential_backoff(struct exponential_backoff *bo);

static __inline
void
exponential_init(struct exponential_backoff *bo, struct spinlock *mtx)
{
	bo->backoff = BACKOFF_INITIAL;
	bo->nsec = 0;
	bo->mtx = mtx;
	bo->base = 0;	/* silence gcc */
}

/*
 * We contested due to another exclusive lock holder.  We lose.
 */
int
spin_trylock_wr_contested2(globaldata_t gd)
{
	++spinlocks_contested1;
	--gd->gd_spinlocks_wr;
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}

/*
 * We were either contested due to another exclusive lock holder,
 * or due to the presence of shared locks.
 *
 * NOTE: If value indicates an exclusively held mutex, no shared bits
 *	 would have been set and we can throw away value.
 */
void
spin_lock_wr_contested2(struct spinlock *mtx)
{
	struct exponential_backoff backoff;
	int value;

	/*
	 * Wait until we can gain exclusive access vs another exclusive
	 * holder.
	 */
	++spinlocks_contested1;
	exponential_init(&backoff, mtx);

	logspin(beg, mtx, 'w');
	do {
		if (exponential_backoff(&backoff))
			break;
		value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE);
	} while (value & SPINLOCK_EXCLUSIVE);
	logspin(end, mtx, 'w');
}

/*
 * Handle exponential backoff and indefinite waits.
 *
 * If the system is handling a panic we hand the spinlock over to the caller
 * after 1 second.  After 10 seconds we attempt to print a debugger
 * backtrace.  We also run pending interrupts in order to allow a console
 * break into DDB.
 */
static
int
exponential_backoff(struct exponential_backoff *bo)
{
	sysclock_t count;
	int backoff;

	/*
	 * Use the TSC (when available) to derive a pseudo-random spin
	 * count, so contending cpus are less likely to back off in
	 * lock-step.
	 */
#ifdef _RDTSC_SUPPORTED_
	if (cpu_feature & CPUID_TSC) {
		backoff =
			(((u_long)rdtsc() ^ (((u_long)curthread) >> 5)) &
			 (bo->backoff - 1)) + BACKOFF_INITIAL;
	} else
#endif
		backoff = bo->backoff;
	logspin_backoff(bo->mtx, bo->backoff, curthread, backoff);

	/*
	 * Quick backoff
	 */
	for (; backoff; --backoff)
		cpu_pause();
	if (bo->backoff < spinlocks_backoff_limit) {
		bo->backoff <<= 1;
		return (FALSE);
	} else {
		bo->backoff = BACKOFF_INITIAL;
	}

	logspin(bofail, bo->mtx, 'u');

	/*
	 * Indefinite
	 */
	++spinlocks_contested2;
	cpu_spinlock_contested();
	if (bo->nsec == 0) {
		bo->base = sys_cputimer->count();
		bo->nsec = 1;
	}

	count = sys_cputimer->count();
	if (count - bo->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait!\n", bo->mtx);
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
		++bo->nsec;
#if defined(INVARIANTS)
		if (bo->nsec == 11)
			print_backtrace(-1);
#endif
		if (bo->nsec == 60)
			panic("spin_lock: %p, indefinite wait!\n", bo->mtx);
		bo->base = count;
	}
	return (FALSE);
}

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 *
 * (An illustrative usage example appears at the end of this file.)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0, "");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock mtx;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&mtx);
		spin_lock(&mtx);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&mtx);
		spin_unlock(&mtx);	/* Clean up the spinlock count */
		spin_unlock(&mtx);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&mtx);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_quick(gd, &mtx);
			spin_unlock_quick(gd, &mtx);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
	    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
#endif	/* SMP */
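
/*
 * Example usage (illustrative only, not part of the build).  Assuming an
 * SMP kernel built with INVARIANTS and the debug.* sysctl OIDs declared
 * above, the tests and counters can be exercised from a root shell
 * roughly as follows; the values shown are arbitrary examples:
 *
 *	sysctl debug.spinlocks_bolim=128	# tune the backoff limit
 *	sysctl debug.spin_test_count=1000000	# iterations for test 2
 *	sysctl debug.spin_lock_test=2		# time best-case exclusive locks
 *	sysctl debug.spinlocks_contested1	# read the contention counters
 *	sysctl debug.spinlocks_contested2
 */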