/*	$NetBSD: lock.h,v 1.31 2008/04/28 20:23:36 martin Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_LOCK_H
#define _MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif
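/*
 * __ldstub() below wraps the SPARC "ldstub" (load-store unsigned byte)
 * instruction: it atomically reads the byte at the given address and
 * stores all-ones (0xff) into it, returning the value that was there
 * before.  A return value of __SIMPLELOCK_UNLOCKED therefore means the
 * caller observed the lock free and has just taken it.
 */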
/* XXX So we can expose this to userland. */
#ifdef __lint__
#define	__ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock use simple reads until it
	 * is released, then retry the atomic operation. This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}

#if defined(__sparc_v9__)
static __inline void
mb_read(void)
{
	__asm __volatile("membar #LoadLoad" : : : "memory");
}

static __inline void
mb_write(void)
{
	__asm __volatile("" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("membar #MemIssue" : : : "memory");
}
#else	/* __sparc_v9__ */
static __inline void
mb_read(void)
{
	static volatile int junk;
	__asm volatile("st %%g0,[%0]"
	    :
	    : "r" (&junk)
	    : "memory");
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* __sparc_v9__ */

#endif /* _MACHINE_LOCK_H */
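/*
 * Usage sketch (illustrative only; "example_lock" and "example_count"
 * are hypothetical names, not part of this header): a caller guards a
 * small critical section by taking the spin lock around it.
 *
 *	static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
 *	static int example_count;
 *
 *	static void
 *	example_increment(void)
 *	{
 *
 *		__cpu_simple_lock(&example_lock);
 *		example_count++;
 *		__cpu_simple_unlock(&example_lock);
 *	}
 *
 * __cpu_simple_lock_try() can be used instead when the caller must not
 * spin; it returns non-zero only if the lock was acquired.
 */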