/*	$NetBSD: lock.h,v 1.25 2013/08/18 04:31:08 matt Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  On CPUs that lack it, we expect
 * the kernel to trap and emulate the insn.  That will be slow, but it
 * gives us the atomicity that we need.
 */
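/*
 * Illustration (not part of the original header): the semantics __swp
 * below must provide are those of an atomic exchange.  A minimal sketch
 * of a spin-acquire built on that primitive, assuming GCC-style __atomic
 * builtins as a stand-in for the SWP insn; the function name is
 * hypothetical and the block is compiled out.
 */
#if 0
static __inline void
__example_spin_acquire(volatile __cpu_simple_lock_t *__lock)
{
	/* Swap in LOCKED until the previous value reads back UNLOCKED. */
	while (__atomic_exchange_n(__lock, __SIMPLELOCK_LOCKED,
	    __ATOMIC_ACQUIRE) != __SIMPLELOCK_UNLOCKED)
		continue;
}
#endif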
#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifdef _KERNEL
#include <arm/cpufunc.h>

#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif

#if defined(_KERNEL)
/*
 * Atomically swap __val into *__ptr and return the previous value.
 * On ARMv6+ this is built from LDREX/STREX (the store is skipped when
 * the lock word already holds __val, which is equivalent for a lock);
 * earlier CPUs use the SWPB insn directly.
 */
static __inline unsigned char
__swp(__cpu_simple_lock_t __val, volatile __cpu_simple_lock_t *__ptr)
{
#ifdef _ARM_ARCH_6
	uint32_t __rv, __tmp;
	if (sizeof(*__ptr) == 1) {
		__asm volatile(
			"1:\t"
			"ldrexb\t%[__rv], [%[__ptr]]"		"\n\t"
			"cmp\t%[__rv],%[__val]"			"\n\t"
#ifdef __thumb__
			"itt\tne"				"\n\t"
#endif
			"strexbne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
			"cmpne\t%[__tmp], #0"			"\n\t"
			"bne\t1b"				"\n\t"
#ifdef _ARM_ARCH_7
			"dmb"
#else
			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	} else {
		__asm volatile(
			"1:\t"
			"ldrex\t%[__rv], [%[__ptr]]"		"\n\t"
			"cmp\t%[__rv],%[__val]"			"\n\t"
#ifdef __thumb__
			"itt\tne"				"\n\t"
#endif
			"strexne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
			"cmpne\t%[__tmp], #0"			"\n\t"
			"bne\t1b"				"\n\t"
#ifdef _ARM_ARCH_7
			"dmb"
#else
			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	}
	return __rv;
#else
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
#endif
}
#else
/*
 * On Cortex-A9 (SMP), SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the A9 generates an undefined exception we can replace
 * the SWP/MOV instructions with the right LDREX/STREX instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX,
 * including the extra instructions and the extra register for testing
 * the result.
 */
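/*
 * Illustration (an assumption about the fixup, not taken from the
 * kernel's emulation code): the SWP and the padding MOV occupy two ARM
 * words, which is exactly the room an in-place rewrite to the
 * exclusive-access pair needs:
 *
 *	swp	rv, val, [ptr]	->	ldrex	rv, [ptr]
 *	mov	tmp, #0		->	strex	tmp, val, [ptr]
 *	cmp	rv, val			cmp	rv, val		(unchanged)
 *
 * Either way, the shared "cmpne tmp, #0; bne 1b" tail below retries only
 * when the STREX result register reports a failed store; in the SWP
 * form, tmp is always zero, so the loop never repeats.
 */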
static __inline int
__swp(int __val, volatile int *__ptr)
{
	int __rv, __tmp;
	__asm volatile(
		"1:\t"
#ifdef _ARM_ARCH_6
		"ldrex\t%[__rv], [%[__ptr]]"		"\n\t"
		"cmp\t%[__rv],%[__val]"			"\n\t"
#ifdef __thumb__
		"itt\tne"				"\n\t"
#endif
		"strexne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
#else
		"swp\t%[__rv], %[__val], [%[__ptr]]"	"\n\t"
		"mov\t%[__tmp], #0"			"\n\t"
		"cmp\t%[__rv],%[__val]"			"\n\t"
#endif
		"cmpne\t%[__tmp], #0"			"\n\t"
		"bne\t1b"				"\n\t"
#ifdef _ARM_ARCH_7
		"dmb"
#elif defined(_ARM_ARCH_6)
		"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#else
		"nop"
#endif
	    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* _KERNEL */

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#endif
}

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
		continue;
}
#else
void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
}
#else
int __cpu_simple_lock_try(__cpu_simple_lock_t *);
#endif

static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

#ifdef _ARM_ARCH_7
	__asm __volatile("dmb");	/* order the critical section first */
#endif
	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");	/* make the release visible */
#endif
}

#endif /* _ARM_LOCK_H_ */
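/*
 * Usage sketch (not part of the original header): how a caller drives
 * the primitives above.  The lock variable and the function are
 * hypothetical, and the block is compiled out.
 */
#if 0
static __cpu_simple_lock_t __example_lock;

static void
__example(void)
{
	__cpu_simple_lock_init(&__example_lock);	/* before first use */

	__cpu_simple_lock(&__example_lock);		/* spins until held */
	/* ... access data shared with other CPUs ... */
	__cpu_simple_unlock(&__example_lock);

	if (__cpu_simple_lock_try(&__example_lock)) {	/* non-blocking */
		__cpu_simple_unlock(&__example_lock);
	}
}
#endif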