/*	$NetBSD: pthread_lock.c,v 1.32 2007/11/13 15:57:11 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.32 2007/11/13 15:57:11 ad Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64
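
/*
 * Background note on the RAS primitives below: a restartable atomic
 * sequence (RAS) is a small code range registered with the kernel via
 * rasctl(2).  If a thread is preempted while executing inside the
 * range, the kernel restarts it at RAS_START rather than resuming in
 * the middle.  On a uniprocessor this makes the plain load/test/store
 * in pthread__ras_simple_lock_try() behave atomically, without any
 * hardware atomic instructions.
 */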
RAS_DECL(pthread__lock);

static void pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; we may need to use
 * locks early, before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		/* Spin while the lock is held, up to pthread__nspins times. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			/* The lock appears free: try to take it. */
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		/* Still held after spinning: yield the CPU and retry. */
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back on machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
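
/*
 * Illustrative sketch, kept out of the build with #if 0: how internal
 * code might acquire and release one of these spin locks through the
 * ops vector selected above.  Real callers go through wrapper macros
 * in pthread_int.h.  The pthread__example*() names are hypothetical,
 * and the plo_init/plo_lock/plo_unlock member names are assumed from
 * the initializer order of struct pthread_lock_ops; only plo_try is
 * confirmed by this file.
 */
#if 0
static pthread_spin_t pthread__example_lock;	/* hypothetical */

static void
pthread__example(void)
{

	/* Set up the lock via whichever primitives were selected. */
	pthread_lockinit(&pthread__example_lock);

	/* Fast path: one try with the cheap primitive. */
	if (!(*pthread__lock_ops->plo_try)(&pthread__example_lock)) {
		/* Contended: spin, then yield, until acquired. */
		(*pthread__lock_ops->plo_lock)(&pthread__example_lock);
	}

	/* ... critical section ... */

	(*pthread__lock_ops->plo_unlock)(&pthread__example_lock);
}
#endif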