/*	$NetBSD: pthread_lock.c,v 1.33 2008/01/05 01:37:35 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.33 2008/01/05 01:37:35 ad Exp $");

#include <sys/types.h>
#include <sys/ras.h>

#include <machine/lock.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64

RAS_DECL(pthread__lock);

static void	pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */
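
/*
 * A note on the RAS variant above: the load/test/store sequence
 * between RAS_START and RAS_END is not atomic in the hardware sense.
 * It is safe on a uniprocessor because, once the region has been
 * registered with rasctl(2), the kernel restarts the sequence from
 * RAS_START whenever the thread is preempted inside it, so a partially
 * completed test-and-set can never be observed.  This avoids atomic
 * read-modify-write instructions, which can be expensive on some CPUs.
 */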

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; locks may be needed
 * early, before main() starts.  That is safe, since no other threads
 * will yet be active in the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better not to burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		/* Spin until the lock looks free or the budget runs out. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			/* Lock looked free: try to take it. */
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		/* Spun out: yield the CPU and start over. */
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		/* Multiprocessor: real atomic operations are required. */
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		/* RAS registration failed: fall back to atomic ops. */
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
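
/*
 * Illustrative usage (not compiled): callers normally reach these
 * primitives through wrapper macros in pthread_int.h that dispatch
 * via the ops table.  The acquire path tries the inline fast path
 * first and enters the spin/yield slow path only on contention.
 * pthread__simple_unlock is assumed here to be the unlock wrapper;
 * only plo_try and pthread__simple_lock_init appear in this file.
 *
 *	pthread_spin_t lock;
 *	pthread_t self = pthread__self();
 *
 *	pthread__simple_lock_init(&lock);
 *	if (!(*self->pt_lockops.plo_try)(&lock))
 *		pthread__spinlock_slow(&lock);	(loops until acquired)
 *	... critical section ...
 *	pthread__simple_unlock(&lock);
 */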