xref: /netbsd-src/sys/arch/vax/include/lock.h (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: lock.h,v 1.32 2019/11/29 20:06:44 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifndef _VAX_LOCK_H_
29 #define _VAX_LOCK_H_
30 
31 #include <sys/param.h>
32 
33 #ifdef _KERNEL
34 #ifdef _KERNEL_OPT
35 #include "opt_multiprocessor.h"
36 #include <machine/intr.h>
37 #endif
38 #include <machine/cpu.h>
39 #endif
40 
41 static __inline int
42 __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
43 {
44 	return *__ptr == __SIMPLELOCK_LOCKED;
45 }
46 
47 static __inline int
48 __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
49 {
50 	return *__ptr == __SIMPLELOCK_UNLOCKED;
51 }
52 
/*
 * Force the lock word to the unlocked value with a plain
 * (non-interlocked) store.  Callers use this to reset a lock without
 * the atomic release path.
 */
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}
58 
/*
 * Force the lock word to the locked value with a plain
 * (non-interlocked) store.  This marks the lock held without going
 * through the atomic acquire path.
 */
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}
64 
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);

/*
 * Initialize a simple lock to the unlocked state.
 *
 * In the kernel proper (_HARDKERNEL) this calls the assembler stub
 * Sunlock with the lock address in %r1 -- i.e. initialization is the
 * same operation as releasing the lock (compare __cpu_simple_unlock
 * below, which emits identical code).
 *
 * Outside the kernel, bit 0 of the lock word is cleared directly with
 * BBCCI (branch on bit clear and clear, interlocked); the branch
 * target "1f" is the very next instruction, so only the interlocked
 * clear has any effect.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _HARDKERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
81 
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);

/*
 * Try once to acquire the lock, without spinning.
 *
 * Returns nonzero if the lock was free and is now held by the caller,
 * zero if it was already held.
 *
 * _HARDKERNEL: call the assembler stub Slocktry with the lock address
 * in %r1; the stub leaves its success/failure result in %r0, which is
 * copied into ret.
 *
 * Otherwise: BBSSI (branch on bit set and set, interlocked) atomically
 * tests and sets bit 0.  ret is cleared first; if the bit was already
 * set, BBSSI branches over the increment and ret stays 0, otherwise
 * ret is incremented to 1.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _HARDKERNEL
	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	/*
	 * NOTE(review): unlike the _HARDKERNEL variant there is no
	 * "memory" clobber here, so the compiler is free to move other
	 * memory accesses across a successful acquire -- confirm this
	 * is intentional for the non-kernel build.
	 */
	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
102 
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);

/*
 * Acquire the lock, spinning until it becomes available.
 *
 * On MULTIPROCESSOR kernels the spin loop polls with
 * __cpu_simple_lock_try() and services console-character and DDB IPIs
 * while waiting, so a CPU spinning here cannot deadlock against a
 * lock holder that needs to deliver one of those IPIs to it (see the
 * SPINLOCK_SPIN_HOOK comment below for the same rationale).
 *
 * Otherwise a single BBSSI instruction branches back to itself until
 * it observes bit 0 clear and sets it interlocked.
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_HARDKERNEL) && defined(MULTIPROCESSOR)
	struct cpu_info * const __ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		/* IPIs that must be serviced while spinning. */
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
			cpu_handle_ipi();
		}
	}
#else /* _HARDKERNEL && MULTIPROCESSOR */
	__asm __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
#endif /* _HARDKERNEL && MULTIPROCESSOR */
}
123 
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);

/*
 * Release the lock.
 *
 * _HARDKERNEL: call the assembler stub Sunlock with the lock address
 * in %r1 (the same code __cpu_simple_lock_init emits).
 *
 * Otherwise: clear bit 0 with BBCCI (branch on bit clear and clear,
 * interlocked); the branch target is the next instruction, so only
 * the interlocked clear matters.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _HARDKERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
140 
#if defined(MULTIPROCESSOR)
/*
 * On the Vax, interprocessor interrupts can come in at device priority
 * level or lower. This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed. This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
/* Invoked by MI spin loops: drain any pending IPIs for this CPU. */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
162 
163 #endif /* _VAX_LOCK_H_ */
164