/*	$NetBSD: lock.h,v 1.21 2020/08/05 05:24:44 simonb Exp $	*/

/*-
 * Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wayne Knowles and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations for MIPS processors.
 *
 * Note: R2000/R3000 doesn't have any atomic update instructions; this
 * will cause problems for user applications using this header.
 */
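
/*
 * A minimal usage sketch of the simple-lock interface defined below.
 * The structure and function names other than the __cpu_simple_lock
 * calls themselves are purely illustrative:
 *
 *	struct frob {
 *		__cpu_simple_lock_t f_lock;
 *		int f_count;
 *	};
 *
 *	void
 *	frob_bump(struct frob *f)
 *	{
 *		__cpu_simple_lock(&f->f_lock);
 *		f->f_count++;
 *		__cpu_simple_unlock(&f->f_lock);
 *	}
 *
 * The lock must be set up with __cpu_simple_lock_init(&f->f_lock)
 * before its first use.
 */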

#ifndef _MIPS_LOCK_H_
#define	_MIPS_LOCK_H_

#include <sys/param.h>

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr != __SIMPLELOCK_UNLOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifndef _HARDKERNEL

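/*
 * Userland try-lock: attempt to take the lock once with an LL/SC
 * sequence.  Load-linked reads the lock word; if the lock is already
 * held we fail immediately, otherwise store-conditional attempts to
 * install __SIMPLELOCK_LOCKED.  A failed SC is also reported as
 * failure rather than retried, and a SYNC orders the acquired lock
 * against later accesses.
 */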
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# -- BEGIN __cpu_simple_lock_try\n"
		"	.set push		\n"
		"	.set mips2		\n"
		"1:	ll	%0, %4		\n"
		"	bnez	%0, 2f		\n"
		"	 nop			\n"
		"	li	%0, %3		\n"
		"	sc	%0, %2		\n"
		"	beqz	%0, 2f		\n"
		"	 nop			\n"
		"	li	%1, 1		\n"
		"	sync			\n"
		"	j	3f		\n"
		"	 nop			\n"
		"	nop			\n"
		"2:	li	%1, 0		\n"
		"3:				\n"
		"	.set pop		\n"
		"# -- END __cpu_simple_lock_try	\n"
		: "=r" (t0), "=r" (v0), "+m" (*lp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*lp));

	return (v0 != 0);
}

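/*
 * Memory barriers for userland.  MIPS1 (R2000/R3000) has no SYNC
 * instruction, so only a compiler barrier can be provided there; on
 * later ISAs a full SYNC is issued for all three flavours.
 */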
#ifdef MIPS1
static __inline void
mb_read(void)
{
	__insn_barrier();
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	__insn_barrier();
}
#else	/* MIPS1 */
static __inline void
mb_read(void)
{
	__asm volatile(
		"	.set push		\n"
		"	.set mips2		\n"
		"	sync			\n"
		"	.set pop"
		::: "memory"
	);
}

static __inline void
mb_write(void)
{
	mb_read();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* MIPS1 */

#else	/* !_HARDKERNEL */

u_int	_atomic_cas_uint(volatile u_int *, u_int, u_int);
u_long	_atomic_cas_ulong(volatile u_long *, u_long, u_long);
void *	_atomic_cas_ptr(volatile void *, void *, void *);
void	mb_read(void);
void	mb_write(void);
void	mb_memory(void);

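/*
 * In the kernel proper the try-lock is built on the machine-dependent
 * compare-and-swap primitive: the lock is taken if and only if the CAS
 * observes __SIMPLELOCK_UNLOCKED and replaces it with
 * __SIMPLELOCK_LOCKED.
 */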
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lp)
{

	return _atomic_cas_uint(lp,
	    __SIMPLELOCK_UNLOCKED, __SIMPLELOCK_LOCKED) ==
	    __SIMPLELOCK_UNLOCKED;
}

#endif	/* _HARDKERNEL */

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lp)
{

	*lp = __SIMPLELOCK_UNLOCKED;
	mb_memory();
}

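/*
 * Acquire the lock, spinning until it is available.  After a failed
 * try the inner loop spins on plain loads (test-and-test-and-set) so
 * the lock word is only hit with atomic operations once it appears to
 * be free again.
 */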
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lp)
{

	while (!__cpu_simple_lock_try(lp)) {
		while (*lp == __SIMPLELOCK_LOCKED)
			/* spin */;
	}
}

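/*
 * Release the lock.  A full barrier before the store keeps earlier
 * accesses from drifting past the release; on Octeon Plus the store is
 * instead followed by a write barrier.
 */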
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lp)
{

#ifndef _MIPS_ARCH_OCTEONP
	mb_memory();
#endif
	*lp = __SIMPLELOCK_UNLOCKED;
#ifdef _MIPS_ARCH_OCTEONP
	mb_write();
#endif
}

#endif /* _MIPS_LOCK_H_ */