/* $NetBSD: pthread_lock.c,v 1.9 2004/02/13 11:36:08 wiz Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.9 2004/02/13 11:36:08 wiz Exp $");

#include <sys/param.h>
#include <sys/ras.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1        /* no point in actually spinning until MP works */

static int nspins = NSPINS;

extern void pthread__lock_ras_start(void), pthread__lock_ras_end(void);

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

        *alp = __SIMPLELOCK_UNLOCKED;
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
        __cpu_simple_lock_t old;

        /* This is the atomic sequence. */
        __asm __volatile(".globl pthread__lock_ras_start \n"
            "pthread__lock_ras_start:");
        old = *alp;
        *alp = __SIMPLELOCK_LOCKED;
        __asm __volatile(".globl pthread__lock_ras_end \n"
            "pthread__lock_ras_end:");

        return (old == __SIMPLELOCK_UNLOCKED);
}
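
/*
 * A note on the sequence above: a restartable atomic sequence (RAS)
 * registered with rasctl(2) is made atomic by the kernel rather than by
 * the hardware.  If the thread is preempted while its program counter
 * lies between pthread__lock_ras_start and pthread__lock_ras_end, the
 * kernel rolls the program counter back to pthread__lock_ras_start
 * before resuming it, so the load/store pair above effectively runs
 * without interruption.  This only yields atomicity against preemption
 * on a single processor, which is why pthread__lockprim_init() below
 * installs these ops on uniprocessors only.
 */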

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

        *alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
        pthread__ras_simple_lock_init,
        pthread__ras_simple_lock_try,
        pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

        return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

        __cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
        pthread__atomic_simple_lock_init,
        pthread__atomic_simple_lock_try,
        pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we may need to use
 * locks early, but only before main() starts.  This is safe, since no
 * other threads will be active for the process, so atomicity will not
 * be required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
        int mib[2];
        size_t len;
        int ncpu;

        mib[0] = CTL_HW;
        mib[1] = HW_NCPU;

        len = sizeof(ncpu);
        sysctl(mib, 2, &ncpu, &len, NULL, 0);

        if (ncpu == 1 && rasctl((void *)pthread__lock_ras_start,
            (size_t)((uintptr_t)pthread__lock_ras_end -
                (uintptr_t)pthread__lock_ras_start),
            RAS_INSTALL) == 0) {
                pthread__lock_ops = &pthread__lock_ops_ras;
                return;
        }

        pthread__lock_ops = &pthread__lock_ops_atomic;
}

void
pthread_lockinit(pthread_spin_t *lock)
{

        pthread__simple_lock_init(lock);
}

void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
        int count, ret;

        count = nspins;
        SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
            thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
        pthread__assert(thread->pt_spinlocks >= 0);
#endif
        ++thread->pt_spinlocks;

        do {
                while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
                        ;

                if (ret == 1)
                        break;

                SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
                    thread, lock, thread->pt_spinlocks));
                --thread->pt_spinlocks;

                /*
                 * We may be preempted while spinning.  If so, we will
                 * be restarted here if thread->pt_spinlocks is
                 * nonzero, which can happen if:
                 * a) we just got the lock, or
                 * b) we haven't yet decremented the lock count.
                 * If we're at this point, (b) applies.  Therefore,
                 * check if we're being continued, and if so, bail.
                 * (In case (a), we should let the code finish; we
                 * will bail out in pthread_spinunlock().)
                 */
                if (thread->pt_next != NULL) {
                        PTHREADD_ADD(PTHREADD_SPINPREEMPT);
                        pthread__switch(thread, thread->pt_next);
                }
                /* try again */
                count = nspins;
                SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
                    thread, thread->pt_spinlocks));
                ++thread->pt_spinlocks;
        } while (/*CONSTCOND*/1);

        PTHREADD_ADD(PTHREADD_SPINLOCKS);
        /* Got it!  We're out of here. */
}
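
/*
 * The spin-preemption protocol used above is shared with
 * pthread_spintrylock() and pthread_spinunlock() below: pt_spinlocks is
 * raised before a lock is manipulated and lowered once the lock has been
 * released or the attempt abandoned, so the scheduler can tell when a
 * thread was interrupted while holding (or taking) a spinlock.  A
 * non-NULL pt_next means we have been continued after such a preemption,
 * in which case we hand control back with pthread__switch() as soon as
 * it is safe to do so.
 */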

int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
        int ret;

        SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
            thread, thread->pt_spinlocks));
        ++thread->pt_spinlocks;

        ret = pthread__simple_lock_try(lock);

        if (ret == 0) {
                SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
                    thread, thread->pt_spinlocks));
                --thread->pt_spinlocks;
                /* See above. */
                if (thread->pt_next != NULL) {
                        PTHREADD_ADD(PTHREADD_SPINPREEMPT);
                        pthread__switch(thread, thread->pt_next);
                }
        }

        return ret;
}


void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

        pthread__simple_unlock(lock);
        SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
            thread, lock, thread->pt_spinlocks));
        --thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
        pthread__assert(thread->pt_spinlocks >= 0);
#endif
        PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

        /*
         * If we were preempted while holding a spinlock, the
         * scheduler will notice this and continue us.  To be good
         * citizens, we must now get out of here if that was our
         * last spinlock.
         * XXX when will we ever have more than one?
         */

        if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
                PTHREADD_ADD(PTHREADD_SPINPREEMPT);
                pthread__switch(thread, thread->pt_next);
        }
}


/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
        if ((lock == NULL) ||
            ((pshared != PTHREAD_PROCESS_PRIVATE) &&
             (pshared != PTHREAD_PROCESS_SHARED)))
                return EINVAL;
#endif
        lock->pts_magic = _PT_SPINLOCK_MAGIC;
        /*
         * We don't actually use the pshared flag for anything;
         * CPU simple locks have all the process-shared properties
         * that we want anyway.
         */
        lock->pts_flags = pshared;
        pthread_lockinit(&lock->pts_spin);

        return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
        if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
                return EINVAL;

        if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
                return EBUSY;
#endif

        lock->pts_magic = _PT_SPINLOCK_DEAD;

        return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
        if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
                return EINVAL;
#endif

        while (pthread__simple_lock_try(&lock->pts_spin) == 0)
                /* spin */ ;

        return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
        if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
                return EINVAL;
#endif

        if (pthread__simple_lock_try(&lock->pts_spin) == 0)
                return EBUSY;

        return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
        if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
                return EINVAL;
#endif

        pthread__simple_unlock(&lock->pts_spin);

        return 0;
}
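
/*
 * Illustrative sketch (not compiled into the library): typical use of
 * the public POSIX spinlock interface defined above, protecting a
 * shared counter between threads of one process.  The counter_lock,
 * counter, worker() and run_example() names below are hypothetical;
 * only the pthread_spin_* and pthread_create()/pthread_join() calls
 * are standard interfaces.
 */
#if 0
#include <pthread.h>

static pthread_spinlock_t counter_lock;
static int counter;

static void *
worker(void *arg)
{

        pthread_spin_lock(&counter_lock);       /* busy-waits until acquired */
        counter++;                              /* keep the critical section short */
        pthread_spin_unlock(&counter_lock);
        return NULL;
}

static void
run_example(void)
{
        pthread_t t1, t2;

        /* Not shared with other processes, so PTHREAD_PROCESS_PRIVATE. */
        pthread_spin_init(&counter_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        pthread_spin_destroy(&counter_lock);
}
#endif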