/* $NetBSD: lock.h,v 1.33 2022/02/13 13:42:30 riastradh Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-locked, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %2, %0	\n"
		"	stl_c	%0, %1		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		\n"
		"	beq	%0, 1b		\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "cc", "memory");
}
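
/*
 * Illustrative sketch only, not part of this header's interface: the
 * LDL_L/STL_C sequence above implements a test-and-test-and-set loop.
 * Roughly equivalent C, using the GCC __atomic builtins (the trailing
 * "mb" supplies the acquire ordering), would look like the following;
 * the name example_simple_lock is made up for this sketch.
 *
 *	static __inline void
 *	example_simple_lock(__cpu_simple_lock_t *alp)
 *	{
 *		__cpu_simple_lock_t old;
 *
 *		for (;;) {
 *			old = __SIMPLELOCK_UNLOCKED;
 *			if (__atomic_compare_exchange_n(alp, &old,
 *			    __SIMPLELOCK_LOCKED, 0,
 *			    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *				break;
 *			while (__atomic_load_n(alp, __ATOMIC_RELAXED) !=
 *			    __SIMPLELOCK_UNLOCKED)
 *				continue;
 *		}
 *	}
 *
 * The inner while loop is the optimization noted above: while the lock
 * is held, spin with ordinary loads so the coherency logic is not kept
 * busy with locked (exclusive) accesses.
 */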

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %3, %0	\n"
		"	stl_c	%0, %2		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	bis	$31, 1, %1	\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "cc", "memory");

	return (v0 != 0);
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			\n"
		"	stl	$31, %0	\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp)
		: /* no inputs */
		: "memory");
}

#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level (ALPHA_PSL_IPL_CLOCK).  This can cause some problems while
 * waiting for spin locks from a high'ish priority level (like spin
 * mutexes used by the scheduler): IPIs that come in will not be
 * processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while spinning.  Note we only
 * do the special thing if IPIs are blocked (current IPL >= IPL_CLOCK).
 * IPIs will be processed in the normal fashion otherwise, and checking
 * this way ensures that preemption is disabled (i.e. curcpu() is stable).
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	unsigned long _ipl_ = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;	\
									\
	if (_ipl_ >= ALPHA_PSL_IPL_CLOCK) {				\
		struct cpu_info *__ci = curcpu();			\
		if (atomic_load_relaxed(&__ci->ci_ipis) != 0) {		\
			alpha_ipi_process(__ci, NULL);			\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
#endif /* MULTIPROCESSOR */

#endif /* _ALPHA_LOCK_H_ */
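
/*
 * Illustrative usage sketch (assumption: machine-independent code such
 * as mutex(9) normally reaches these primitives through its own
 * wrappers rather than calling them directly; example_lock is a
 * made-up name for this sketch):
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *
 *	__cpu_simple_lock(&example_lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 *
 * __cpu_simple_lock_try() acquires the lock and returns non-zero on
 * success, or returns zero immediately if the lock is already held.
 */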