/*	$NetBSD: pthread_lock.c,v 1.12 2004/03/14 12:49:31 he Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.12 2004/03/14 12:49:31 he Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1000	/* no point in actually spinning until MP works */

static int nspins = NSPINS;

RAS_DECL(pthread__lock);

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	RAS_START(pthread__lock);
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	RAS_END(pthread__lock);

	return (old == __SIMPLELOCK_UNLOCKED);
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we might need to use
 * locks early, but before main() starts. This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives. On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available. Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(int ncpu)
{

	if (ncpu == 1 && rasctl(RAS_ADDR(pthread__lock),
	    RAS_SIZE(pthread__lock), RAS_INSTALL) == 0) {
		pthread__lock_ops = &pthread__lock_ops_ras;
		return;
	}

	pthread__lock_ops = &pthread__lock_ops_atomic;
}
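/*
 * Illustrative sketch only, not compiled into the library: a standalone
 * uniprocessor program could use the same rasctl(2) registration that
 * pthread__lockprim_init() performs above.  Once the sequence is
 * installed, the plain load/store pair between RAS_START and RAS_END
 * behaves atomically with respect to preemption, because a thread
 * preempted inside the sequence is resumed at RAS_START rather than
 * mid-sequence.  The "example" RAS name and example_lock variable are
 * hypothetical.
 *
 *	RAS_DECL(example);
 *	static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
 *	__cpu_simple_lock_t old;
 *
 *	if (rasctl(RAS_ADDR(example), RAS_SIZE(example), RAS_INSTALL) != 0)
 *		abort();	-- no RAS support; fall back to atomic ops
 *
 *	RAS_START(example);
 *	old = example_lock;
 *	example_lock = __SIMPLELOCK_LOCKED;
 *	RAS_END(example);
 *	-- old == __SIMPLELOCK_UNLOCKED means the lock was acquired
 */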
void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}

void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	++thread->pt_spinlocks;

	do {
		while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
		    thread, lock, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning. If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies. Therefore,
		 * check if we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish and
		 * we will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
		SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}


int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = pthread__simple_lock_try(lock);

	if (ret == 0) {
		SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	pthread__simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
	--thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us. To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next);
	}
}
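/*
 * Illustrative sketch only, not part of the library proper: the internal
 * interface above is used in matched pairs around short critical sections,
 * with the running thread passed explicitly so that pt_spinlocks stays
 * accurate across preemption.  The lock name below is hypothetical, and
 * the sketch assumes the pthread__self() accessor from pthread_int.h.
 *
 *	static pthread_spin_t example_interlock;  -- set up via pthread_lockinit()
 *
 *	pthread_t self = pthread__self();
 *
 *	pthread_spinlock(self, &example_interlock);
 *	-- ... touch shared library state ...
 *	pthread_spinunlock(self, &example_interlock);
 */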
/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
	     (pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * CPU simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	while (pthread__simple_lock_try(&lock->pts_spin) == 0)
		/* spin */ ;

	return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}
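/*
 * Example use of the POSIX interface above (a sketch; error handling
 * omitted for brevity):
 *
 *	pthread_spinlock_t lk;
 *
 *	pthread_spin_init(&lk, PTHREAD_PROCESS_PRIVATE);
 *	pthread_spin_lock(&lk);
 *	-- ... short critical section ...
 *	pthread_spin_unlock(&lk);
 *	pthread_spin_destroy(&lk);
 */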