/*	$NetBSD: lock.h,v 1.15 2007/10/17 19:53:41 garbled Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  On CPUs that lack it, we
 * expect the kernel to trap and emulate the insn; that is slow, but
 * gives us the atomicity that we need.
 */

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

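/*
 * Non-atomic examination and update of the lock word; these use
 * ordinary loads and stores and provide no synchronization of
 * their own.
 */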
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}


#ifdef _KERNEL
#include <arm/cpufunc.h>

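/*
 * Memory barriers.  All three flavours are implemented by draining
 * the CPU write buffer; see cpufunc.h for drain_writebuf().
 */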
#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif

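/*
 * __swp(__val, __ptr): atomically exchange *__ptr with __val and
 * return the previous contents.  The kernel operates on the lock
 * byte with SWPB; userland uses a word-sized SWP.
 */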
#if defined(_KERNEL)
static __inline int
__swp(int __val, volatile unsigned char *__ptr)
{

	__asm volatile("swpb %0, %1, [%2]"
	    : "=r" (__val) : "r" (__val), "r" (__ptr) : "memory");
	return __val;
}
#else
static __inline int
__swp(int __val, volatile int *__ptr)
{

	__asm volatile("swp %0, %1, [%2]"
	    : "=r" (__val) : "r" (__val), "r" (__ptr) : "memory");
	return __val;
}
#endif /* _KERNEL */

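/*
 * The simple-lock interface, built on __swp: init and unlock are
 * plain stores, while lock and try-lock use the atomic swap to
 * observe the lock's previous state.
 */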
static __inline void __attribute__((__unused__))
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static __inline void __attribute__((__unused__))
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
		continue;
}

static __inline int __attribute__((__unused__))
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
}

static __inline void __attribute__((__unused__))
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

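/*
 * Illustrative use of the interface above (a sketch only, not part
 * of this header's API; the lock variable name is arbitrary):
 *
 *	static __cpu_simple_lock_t __lock;
 *
 *	__cpu_simple_lock_init(&__lock);
 *	...
 *	__cpu_simple_lock(&__lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&__lock);
 */
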
#endif /* _ARM_LOCK_H_ */