/*	$NetBSD: lock.h,v 1.11 2003/02/25 23:29:53 matt Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#include <machine/cpu.h>
#endif

typedef __volatile int __cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED	1
#define __SIMPLELOCK_UNLOCKED	0

/*
 * Initialize the lock to its unlocked state.  The kernel version goes
 * through the Sunlock assembler stub; the userland version clears bit 0
 * with the interlocked BBCCI instruction.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
	__asm__ __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#else
	__asm__ __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*alp)
		: "cc");
#endif
}

/*
 * Try to take the lock once, without spinning; returns non-zero on
 * success.  The kernel version calls the Slocktry assembler stub, the
 * userland version sets bit 0 with the interlocked BBSSI instruction.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

#ifdef _KERNEL
	__asm__ __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#else
	__asm__ __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp)
		: "cc");
#endif

	return ret;
}

#ifdef _KERNEL
/*
 * In the kernel, spin for the lock, but service pending console and DDB
 * IPIs while waiting so that those requests are not stalled by a
 * spinning CPU.
 */
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define __cpu_simple_lock(alp)						\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (0)
#else
/* In userland, simply spin on the interlocked bit. */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*alp)
		: "cc");
}
#endif /* _KERNEL */

/* Disabled alternative implementations of __cpu_simple_lock. */
#if 0
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

/*
 * Release the lock: the Sunlock assembler stub in the kernel, an
 * interlocked BBCCI of bit 0 in userland.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
	__asm__ __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#else
	__asm__ __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*alp)
		: "cc");
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause some problems while waiting for r/w
 * spinlocks from a high-ish priority level: IPIs that come in will not
 * be processed, which can lead to deadlock.
 *
 * This hook allows pending IPIs to be processed while a spinlock's
 * interlock is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
			__ci->ci_cpuid); */				\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (0)
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */
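
/*
 * Illustrative sketch, not part of the original header: how machine-
 * independent spin-wait code is expected to combine __cpu_simple_lock_try()
 * with SPINLOCK_SPIN_HOOK so that pending IPIs are serviced while the lock
 * is contended.  The function name example_spin_acquire is hypothetical,
 * and the block is kept under #if 0 so it is never compiled.
 */
#if 0
static __inline void
example_spin_acquire(__cpu_simple_lock_t *alp)
{

	while (__cpu_simple_lock_try(alp) == 0) {
#if defined(MULTIPROCESSOR)
		SPINLOCK_SPIN_HOOK;	/* drain pending IPIs while spinning */
#endif
	}
}
#endif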