xref: /netbsd-src/sys/arch/mips/include/lock.h (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: lock.h,v 1.16 2008/04/28 20:23:28 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Wayne Knowles and Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Machine-dependent spin lock operations for MIPS processors.
34  *
 * Note: the R2000/R3000 don't have any atomic update instructions, so
 * this header will cause problems for user applications on those CPUs.
37  */
38 
39 #ifndef _MIPS_LOCK_H_
40 #define	_MIPS_LOCK_H_
41 
42 static __inline int
43 __SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
44 {
45 	return *__ptr == __SIMPLELOCK_LOCKED;
46 }
47 
48 static __inline int
49 __SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
50 {
51 	return *__ptr == __SIMPLELOCK_UNLOCKED;
52 }
53 
54 static __inline void
55 __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
56 {
57 	*__ptr = __SIMPLELOCK_UNLOCKED;
58 }
59 
60 static __inline void
61 __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
62 {
63 	*__ptr = __SIMPLELOCK_LOCKED;
64 }
65 
66 #ifndef _KERNEL
67 
/*
 * __cpu_simple_lock_try: make a single attempt to acquire the lock
 * with a MIPS II LL/SC sequence.  Returns non-zero on success, zero if
 * the lock was already held or the store-conditional failed (no retry
 * is made here; retry policy belongs to the caller).
 *
 * On success a "sync" is executed so the acquire is visible before the
 * caller proceeds.
 *
 * NOTE(review): the asm constrains "+m" (*lp) but lists no "memory"
 * clobber, so the compiler may in principle move accesses to other
 * objects across the acquiring "sync" — confirm this is acceptable for
 * the userland callers of this header.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lp)
{
	unsigned long t0, v0;	/* t0: LL/SC scratch; v0: result flag */

	__asm volatile(
		"# -- BEGIN __cpu_simple_lock_try\n"
		"	.set push		\n"
		"	.set mips2		\n"
		"1:	ll	%0, %4		\n"
		"	bnez	%0, 2f		\n"
		"	nop	       # BDslot	\n"
		"	li	%0, %3		\n"
		"	sc	%0, %2		\n"
		"	beqz	%0, 2f		\n"
		"	nop	       # BDslot	\n"
		"	li	%1, 1		\n"
		"	sync			\n"
		"	j	3f		\n"
		"	nop			\n"
		"	nop			\n"
		"2:	li	%1, 0		\n"
		"3:				\n"
		"	.set pop		\n"
		"# -- END __cpu_simple_lock_try	\n"
		: "=r" (t0), "=r" (v0), "+m" (*lp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*lp));

	return (v0 != 0);
}
98 
#ifdef MIPS1
/*
 * MIPS1 variants: only a compiler-level barrier is issued here; no
 * hardware barrier instruction is emitted on this branch.
 */

/* mb_read: read memory barrier (compiler barrier only on MIPS1). */
static __inline void
mb_read(void)
{
	__insn_barrier();
}

/* mb_write: write memory barrier (compiler barrier only on MIPS1). */
static __inline void
mb_write(void)
{
	__insn_barrier();
}

/* mb_memory: full memory barrier (compiler barrier only on MIPS1). */
static __inline void
mb_memory(void)
{
	__insn_barrier();
}
#else	/* MIPS1 */
/*
 * mb_read: read memory barrier.  Emits a MIPS II "sync" with a
 * "memory" clobber so the compiler cannot reorder accesses across it.
 */
static __inline void
mb_read(void)
{
	__asm volatile(
	    "	.set push\n"
	    "	.set mips2\n"
	    "	sync\n"
	    "	.set pop"
	    ::: "memory"
	);
}

/* mb_write: write memory barrier; same "sync" sequence as mb_read. */
static __inline void
mb_write(void)
{
	mb_read();
}

/* mb_memory: full memory barrier; same "sync" sequence as mb_read. */
static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* MIPS1 */
142 
143 #else	/* !_KERNEL */
144 
145 unsigned _atomic_cas_uint(volatile unsigned *, unsigned, unsigned);
146 void	mb_read(void);
147 void	mb_write(void);
148 void	mb_memory(void);
149 
150 static __inline int
151 __cpu_simple_lock_try(__cpu_simple_lock_t *lp)
152 {
153 
154 	return _atomic_cas_uint((volatile unsigned *)lp,
155 	    __SIMPLELOCK_UNLOCKED, __SIMPLELOCK_LOCKED) ==
156 	    __SIMPLELOCK_UNLOCKED;
157 }
158 
159 #endif	/* _KERNEL */
160 
/*
 * __cpu_simple_lock_init: put the lock into its unlocked state.
 *
 * The barrier follows the store so the initialized lock word is
 * visible before any subsequent lock operation on it.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lp)
{

	*lp = __SIMPLELOCK_UNLOCKED;
	mb_memory();
}
168 
169 static __inline void
170 __cpu_simple_lock(__cpu_simple_lock_t *lp)
171 {
172 
173 	while (!__cpu_simple_lock_try(lp))
174 		while (*lp == __SIMPLELOCK_LOCKED)
175 			/* spin */;
176 }
177 
/*
 * __cpu_simple_unlock: release the lock.
 *
 * The barrier precedes the store so that all memory accesses performed
 * while holding the lock are visible before the lock word is seen as
 * free (release ordering).
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lp)
{

	mb_memory();
	*lp = __SIMPLELOCK_UNLOCKED;
}
185 
186 #endif /* _MIPS_LOCK_H_ */
187