/*	$NetBSD: pthread_lock.c,v 1.14 2005/03/17 17:23:21 jwise Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.14 2005/03/17 17:23:21 jwise Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1000	/* no point in actually spinning until MP works */

static int nspins = NSPINS;

RAS_DECL(pthread__lock);

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	RAS_START(pthread__lock);
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	RAS_END(pthread__lock);

	return (old == __SIMPLELOCK_UNLOCKED);
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we might need to use
 * locks early, before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(int ncpu)
{

	if (ncpu == 1 && rasctl(RAS_ADDR(pthread__lock),
	    RAS_SIZE(pthread__lock), RAS_INSTALL) == 0) {
		pthread__lock_ops = &pthread__lock_ops_ras;
		return;
	}

	pthread__lock_ops = &pthread__lock_ops_atomic;
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
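/*
 * Illustrative sketch only (not part of the original file): one way a
 * caller could choose the ncpu argument for pthread__lockprim_init(),
 * by querying hw.ncpu with sysctl(3).  The PTHREAD__LOCK_EXAMPLES guard
 * and the example_choose_lockprim() helper are hypothetical names
 * introduced for illustration; the library's real caller may obtain the
 * CPU count differently.
 */
#ifdef PTHREAD__LOCK_EXAMPLES
#include <sys/sysctl.h>

static void
example_choose_lockprim(void)
{
	int ncpu, mib[2] = { CTL_HW, HW_NCPU };
	size_t len = sizeof(ncpu);

	/* Fall back to assuming a uniprocessor if the query fails. */
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		ncpu = 1;

	/*
	 * Selects the RAS ops on a uniprocessor (if rasctl succeeds),
	 * the machine-dependent atomic ops otherwise.
	 */
	pthread__lockprim_init(ncpu);
}
#endif /* PTHREAD__LOCK_EXAMPLES */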
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	++thread->pt_spinlocks;

	do {
		while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
		    thread, lock, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning.  If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies.  Therefore,
		 * check if we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish and
		 * we will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__assert(thread->pt_blockgen == thread->pt_unblockgen);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
		SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it!  We're out of here. */
}


int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = pthread__simple_lock_try(lock);

	if (ret == 0) {
		SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__assert(thread->pt_blockgen == thread->pt_unblockgen);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	pthread__simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
	--thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us.  To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		/* pthread__assert(thread->pt_blockgen == thread->pt_unblockgen); */
		pthread__switch(thread, thread->pt_next);
	}
}
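/*
 * Illustrative sketch only (not part of the original file): how the
 * library-internal spinlock interface above is typically used to guard
 * a short critical section.  The PTHREAD__LOCK_EXAMPLES guard and the
 * example_internal_spinlock() helper are hypothetical names introduced
 * for illustration.
 */
#ifdef PTHREAD__LOCK_EXAMPLES
static void
example_internal_spinlock(pthread_t self)
{
	pthread_spin_t lock;

	pthread_lockinit(&lock);

	/* Spins until acquired; cooperates with the spin-preemption code. */
	pthread_spinlock(self, &lock);
	/* ... short critical section work goes here ... */
	pthread_spinunlock(self, &lock);

	/* The trylock variant returns nonzero if the lock was acquired. */
	if (pthread_spintrylock(self, &lock) != 0)
		pthread_spinunlock(self, &lock);
}
#endif /* PTHREAD__LOCK_EXAMPLES */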
/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
	     (pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * CPU simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	while (pthread__simple_lock_try(&lock->pts_spin) == 0)
		/* spin */ ;

	return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}
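/*
 * Illustrative sketch only (not part of the original file): minimal
 * application-level use of the POSIX spinlock interface implemented
 * above.  The PTHREAD__LOCK_EXAMPLES guard and example_posix_spin()
 * are hypothetical names introduced for illustration.
 */
#ifdef PTHREAD__LOCK_EXAMPLES
static void
example_posix_spin(void)
{
	pthread_spinlock_t lock;

	if (pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE) != 0)
		return;

	pthread_spin_lock(&lock);	/* busy-waits until the lock is free */
	/* ... critical section ... */
	pthread_spin_unlock(&lock);

	/* Returns 0 if acquired, EBUSY if the lock was already held. */
	if (pthread_spin_trylock(&lock) == 0)
		pthread_spin_unlock(&lock);

	pthread_spin_destroy(&lock);
}
#endif /* PTHREAD__LOCK_EXAMPLES */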