/*	$NetBSD: lock.h,v 1.17 2009/01/12 03:05:10 pooka Exp $	*/

/*-
 * Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wayne Knowles and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations for MIPS processors.
 *
 * Note: R2000/R3000 doesn't have any atomic update instructions; this
 * will cause problems for user applications using this header.
 */
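
/*
 * Illustrative usage sketch (added comment, not part of the original
 * interface description): a caller initializes a __cpu_simple_lock_t once
 * and then brackets each critical section with __cpu_simple_lock() and
 * __cpu_simple_unlock().  The names "example_lock" and "example_update"
 * below are hypothetical.
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	(at setup time)
 *		__cpu_simple_lock_init(&example_lock);
 *
 *	void
 *	example_update(void)
 *	{
 *		__cpu_simple_lock(&example_lock);
 *		... update the shared data ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */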

#ifndef _MIPS_LOCK_H_
#define	_MIPS_LOCK_H_

#include <sys/param.h>

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifndef _HARDKERNEL

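/*
 * Try to take the lock once using a MIPS II load-linked/store-conditional
 * sequence: ll reads the lock word, a non-zero value means the lock is
 * already held, otherwise sc attempts to store __SIMPLELOCK_LOCKED.  If
 * the lock was held or the sc failed, 0 is returned and the caller must
 * retry; on success the sync orders the acquisition before the critical
 * section and 1 is returned.
 */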
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# -- BEGIN __cpu_simple_lock_try\n"
		"	.set push		\n"
		"	.set mips2		\n"
		"1:	ll	%0, %4		\n"
		"	bnez	%0, 2f		\n"
		"	nop	       # BDslot	\n"
		"	li	%0, %3		\n"
		"	sc	%0, %2		\n"
		"	beqz	%0, 2f		\n"
		"	nop	       # BDslot	\n"
		"	li	%1, 1		\n"
		"	sync			\n"
		"	j	3f		\n"
		"	nop			\n"
		"	nop			\n"
		"2:	li	%1, 0		\n"
		"3:				\n"
		"	.set pop		\n"
		"# -- END __cpu_simple_lock_try	\n"
		: "=r" (t0), "=r" (v0), "+m" (*lp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*lp));

	return (v0 != 0);
}

#ifdef MIPS1
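/*
 * MIPS1 (R2000/R3000) has no sync instruction and no ll/sc, and such
 * systems run uniprocessor, so a compiler-level barrier is presumed to
 * be sufficient for these memory-barrier stubs.
 */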
static __inline void
mb_read(void)
{
	__insn_barrier();
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	__insn_barrier();
}
#else	/* MIPS1 */
static __inline void
mb_read(void)
{
	__asm volatile(
	    "	.set push\n"
	    "	.set mips2\n"
	    "	sync\n"
	    "	.set pop"
	    ::: "memory"
	);
}

static __inline void
mb_write(void)
{
	mb_read();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* MIPS1 */

#else	/* !_HARDKERNEL */

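/*
 * In the kernel proper these operations are provided as real functions
 * rather than inlines, presumably so that an implementation matching the
 * CPU actually in use (which may lack ll/sc or sync) can be selected at
 * run time instead of being fixed at compile time.
 */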
unsigned _atomic_cas_uint(volatile unsigned *, unsigned, unsigned);
void	mb_read(void);
void	mb_write(void);
void	mb_memory(void);

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lp)
{

	return _atomic_cas_uint((volatile unsigned *)lp,
	    __SIMPLELOCK_UNLOCKED, __SIMPLELOCK_LOCKED) ==
	    __SIMPLELOCK_UNLOCKED;
}

#endif	/* _HARDKERNEL */

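/*
 * Initialize the lock to the unlocked state.  The barrier makes the
 * initializing store visible before any subsequent lock operation.
 */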
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lp)
{

	*lp = __SIMPLELOCK_UNLOCKED;
	mb_memory();
}

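/*
 * Acquire the lock, spinning until it is free.  The inner loop only reads
 * the lock word (test-and-test-and-set), which avoids generating repeated
 * atomic or ll/sc traffic while the lock is held by another CPU.
 */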
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lp)
{

	while (!__cpu_simple_lock_try(lp))
		while (*lp == __SIMPLELOCK_LOCKED)
			/* spin */;
}

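/*
 * Release the lock.  The barrier orders the critical section's memory
 * accesses before the store that marks the lock free.
 */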
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lp)
{

	mb_memory();
	*lp = __SIMPLELOCK_UNLOCKED;
}

#endif /* _MIPS_LOCK_H_ */