/* $NetBSD: pthread_lock.c,v 1.34 2008/04/28 20:23:01 martin Exp $ */

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.34 2008/04/28 20:23:01 martin Exp $");

#include <sys/types.h>
#include <sys/ras.h>

#include <machine/lock.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64

RAS_DECL(pthread__lock);

static void	pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; we might need to use
 * locks early, but before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
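
/*
 * Illustrative sketch only, not part of the original file: the same
 * spin-then-yield acquire pattern used by pthread__spinlock_slow()
 * above, rewritten against C11 atomics so it can be read in isolation.
 * The names demo_spin_t, demo_spin_lock and DEMO_NSPINS are
 * hypothetical and do not exist in libpthread.  The block is guarded
 * out with #if 0 so it does not affect the build of this file.
 */
#if 0
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef atomic_bool demo_spin_t;

#define	DEMO_NSPINS	64

static void
demo_spin_lock(demo_spin_t *lock)
{
	int count;

	for (;;) {
		/*
		 * Test-and-test-and-set: spin on plain reads while the
		 * lock looks held, so the interconnect is not hammered
		 * with atomic read-modify-write operations.
		 */
		count = DEMO_NSPINS;
		while (atomic_load_explicit(lock, memory_order_relaxed) &&
		    --count > 0)
			continue;	/* a CPU "pause" hint would go here */

		if (count > 0) {
			/* Looked free: try to take it with one atomic swap. */
			if (!atomic_exchange_explicit(lock, true,
			    memory_order_acquire))
				return;		/* acquired */
			continue;		/* lost the race, spin again */
		}

		/* Spun for too long: yield the CPU and retry later. */
		sched_yield();
	}
}
#endif	/* illustrative sketch */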