/*	$NetBSD: pthread_lock.c,v 1.7 2003/05/16 23:37:47 scw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.7 2003/05/16 23:37:47 scw Exp $");

#include <sys/param.h>
#include <sys/ras.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1	/* no point in actually spinning until MP works */

static int nspins = NSPINS;

extern void pthread__lock_ras_start(void), pthread__lock_ras_end(void);

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	/* This is the atomic sequence. */
	__asm __volatile(".globl pthread__lock_ras_start; pthread__lock_ras_start:");
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	__asm __volatile(".globl pthread__lock_ras_end; pthread__lock_ras_end:");

	return (old == __SIMPLELOCK_UNLOCKED);
}
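
/*
 * How the sequence above becomes atomic: pthread__lockprim_init() below
 * registers the region between pthread__lock_ras_start and
 * pthread__lock_ras_end with the kernel via rasctl(2).  If a thread is
 * preempted while its program counter lies inside a registered
 * restartable atomic sequence, the kernel resets the PC to the start of
 * the sequence before the thread next runs, so the load-and-store pair
 * is re-executed from the top.  On a uniprocessor this makes the
 * test-and-set effectively atomic without any interlocked instruction.
 */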

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we may need to take
 * locks before pthread__lockprim_init() runs, but only before main()
 * starts, when no other threads can be active for the process, so
 * atomicity is not actually required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	int mib[2];
	size_t len;
	int ncpu;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		ncpu = 0;	/* unknown; play it safe and use atomic ops */

	if (ncpu == 1 && rasctl((void *)pthread__lock_ras_start,
	    (size_t)((uintptr_t)pthread__lock_ras_end -
	    (uintptr_t)pthread__lock_ras_start),
	    RAS_INSTALL) == 0) {
		pthread__lock_ops = &pthread__lock_ops_ras;
		return;
	}

	pthread__lock_ops = &pthread__lock_ops_atomic;
}
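
/*
 * The pthread__simple_lock_init/_try/_unlock calls used in the rest of
 * this file are wrappers from pthread_int.h that indirect through the
 * pthread__lock_ops vector selected above.  A sketch of the idea (the
 * member name below is illustrative, not the actual field name in the
 * header):
 *
 *	#define pthread__simple_lock_try(alp)	\
 *		(*pthread__lock_ops->plo_try)(alp)
 */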

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}

void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	++thread->pt_spinlocks;

	do {
		while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
		    thread, lock, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning.  If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies.  Therefore,
		 * check if we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish and
		 * we will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
		SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it!  We're out of here. */
}


int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = pthread__simple_lock_try(lock);

	if (ret == 0) {
		SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	pthread__simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
	--thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us.  To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next);
	}
}
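
/*
 * The three functions above are the library-internal spinlock interface.
 * A minimal usage sketch (the lock name is hypothetical; callers
 * elsewhere in libpthread follow this pattern):
 *
 *	pthread_t self = pthread__self();
 *
 *	pthread_spinlock(self, &some_internal_lock);
 *	... touch shared library state; no blocking calls here ...
 *	pthread_spinunlock(self, &some_internal_lock);
 *
 * The pt_spinlocks counter is incremented before the lock is actually
 * taken so that the window where the lock has just been acquired is
 * covered; the scheduler upcall code uses the counter to detect that a
 * preempted thread holds a spinlock and should be continued promptly.
 */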

/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
	    (pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * cpu simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	while (pthread__simple_lock_try(&lock->pts_spin) == 0)
		/* spin */ ;

	return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}
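
/*
 * Usage of the POSIX interface above, as a minimal sketch (error
 * handling omitted; the lock is assumed private to one process):
 *
 *	pthread_spinlock_t spin;
 *
 *	pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE);
 *
 *	pthread_spin_lock(&spin);
 *	... short critical section, no blocking calls ...
 *	pthread_spin_unlock(&spin);
 *
 *	pthread_spin_destroy(&spin);
 */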