/*	$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define PTHREAD__NSPINS         64

static void     pthread_spinlock_slow(pthread_spin_t *);
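/*
 * Background, for readers new to the technique: a Restartable Atomic
 * Sequence (RAS) is a short code range registered with the kernel via
 * rasctl(2).  If a thread is preempted while executing inside the range,
 * the kernel rewinds it to the start of the range, so a load-test-store
 * sequence behaves atomically on a uniprocessor without hardware atomic
 * instructions.  A minimal standalone sketch of the idiom, using
 * hypothetical names that are not part of this file, might look like:
 *
 *      #include <sys/types.h>
 *      #include <sys/ras.h>
 *
 *      RAS_DECL(example);
 *      static volatile int taken;
 *
 *      int
 *      example_try(void)
 *      {
 *              int old;
 *
 *              RAS_START(example);
 *              old = taken;
 *              taken = 1;
 *              RAS_END(example);
 *              return !old;
 *      }
 *
 *      // Once, at startup:
 *      // rasctl(RAS_ADDR(example), RAS_SIZE(example), RAS_INSTALL);
 */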
RAS_DECL(pthread__lock);

int
pthread__simple_locked_p(__cpu_simple_lock_t *alp)
{
        return __SIMPLELOCK_LOCKED_P(alp);
}

#ifdef PTHREAD__ASM_RASOPS

void    pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int     pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void    pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
        int locked;

        /*
         * The load of the lock word and the store marking it held must
         * both sit inside the restartable sequence: if we are preempted
         * between them, the kernel restarts us at RAS_START.
         */
        RAS_START(pthread__lock);
        locked = __SIMPLELOCK_LOCKED_P(alp);
        __cpu_simple_lock_set(alp);
        RAS_END(pthread__lock);

        return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
        pthread__ras_simple_lock_init,
        pthread__ras_simple_lock_try,
        pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

        return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

        __cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
        pthread__atomic_simple_lock_init,
        pthread__atomic_simple_lock_try,
        pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we may need to take
 * locks early, before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

void
pthread_spinlock(pthread_spin_t *lock)
{
        if (__predict_true(pthread__simple_lock_try(lock)))
                return;

        pthread_spinlock_slow(lock);
}

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better not to burden the instruction decoder.
 */
#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
static void
pthread_spinlock_slow(pthread_spin_t *lock)
{
        int count;

        /*
         * Spin for up to pthread__nspins iterations waiting for the
         * holder to release the lock, then yield the CPU and retry.
         */
        do {
                count = pthread__nspins;
                while (pthread__simple_locked_p(lock) && --count > 0)
                        pthread__smt_pause();
                if (count > 0) {
                        if (pthread__simple_lock_try(lock))
                                break;
                        continue;
                }
                sched_yield();
        } while (/*CONSTCOND*/ 1);
}

int
pthread_spintrylock(pthread_spin_t *lock)
{
        return pthread__simple_lock_try(lock);
}

void
pthread_spinunlock(pthread_spin_t *lock)
{
        pthread__simple_unlock(lock);
}
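/*
 * Usage sketch (illustrative only; these are library-internal primitives,
 * distinct from the POSIX pthread_spin_lock(3) interface).  A pthread_spin_t
 * guarding a shared counter would be driven roughly as follows, with the
 * names counter_lock and counter being hypothetical:
 *
 *      static pthread_spin_t counter_lock;
 *      static int counter;
 *
 *      // pthread_lockinit(&counter_lock);     once, before first use
 *
 *      static void
 *      counter_bump(void)
 *      {
 *              pthread_spinlock(&counter_lock);
 *              counter++;
 *              pthread_spinunlock(&counter_lock);
 *      }
 */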
/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
        char *p;

        if ((p = getenv("PTHREAD_NSPINS")) != NULL)
                pthread__nspins = atoi(p);
        else if (pthread__concurrency != 1)
                pthread__nspins = PTHREAD__NSPINS;
        else
                pthread__nspins = 1;

        if (pthread__concurrency != 1) {
                /* Multiprocessor: RAS provides no MP atomicity. */
                pthread__lock_ops = &pthread__lock_ops_atomic;
                return;
        }

        /* Uniprocessor: register the RAS, falling back to atomic ops. */
        if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
            RAS_INSTALL) != 0) {
                pthread__lock_ops = &pthread__lock_ops_atomic;
                return;
        }
}

void
pthread_lockinit(pthread_spin_t *lock)
{

        pthread__simple_lock_init(lock);
}
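/*
 * Tuning sketch (shell usage is an assumption; ./myprog is a hypothetical
 * program name).  The spin count consulted above can be overridden
 * per-process through the environment before any locks are contended:
 *
 *      $ PTHREAD_NSPINS=1000 ./myprog
 *
 * Larger values favour busy-waiting over sched_yield() on MP systems;
 * a value of 1 effectively yields immediately on contention.
 */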