/*	$NetBSD: pthread_lock.c,v 1.36 2022/04/10 10:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.36 2022/04/10 10:38:33 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/types.h>
#include <sys/ras.h>

#include <machine/lock.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64

RAS_DECL(pthread__lock);

static void	pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; we might need to use
 * locks early, but before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
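
/*
 * Illustrative sketch, not part of pthread_lock.c: a minimal standalone
 * program showing the restartable-atomic-sequence trick that
 * pthread__ras_simple_lock_try() above relies on.  The names example_tas,
 * example_lock and example_trylock are hypothetical; only the RAS_*
 * macros and the rasctl(2) call mirror the real code.  On a uniprocessor
 * kernel built with RAS support, a thread preempted inside the registered
 * region is restarted at RAS_START, so the load/store pair behaves
 * atomically without hardware atomic instructions.  Guarded out so it is
 * never compiled into the library.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/ras.h>

#include <stdio.h>

RAS_DECL(example_tas);

static volatile int example_lock;	/* 0 = unlocked, 1 = locked */

static int
example_trylock(void)
{
	int was_locked;

	/*
	 * If the thread is preempted between the load and the store,
	 * the kernel rewinds the PC to RAS_START, so the test-and-set
	 * is effectively atomic on a single CPU.
	 */
	RAS_START(example_tas);
	was_locked = example_lock;
	example_lock = 1;
	RAS_END(example_tas);

	return !was_locked;
}

int
main(void)
{

	/* The sequence must be registered with the kernel before use. */
	if (rasctl(RAS_ADDR(example_tas), RAS_SIZE(example_tas),
	    RAS_INSTALL) != 0)
		return 1;

	printf("trylock: %s\n", example_trylock() ? "acquired" : "busy");
	example_lock = 0;		/* unlock is a plain store */
	return 0;
}
#endif	/* example only */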