/* 	$NetBSD: lock.h,v 1.14 2007/10/17 19:54:40 garbled Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */
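/*
 * Background (informal): PA-RISC's only atomic read-modify-write
 * instruction is ldcw, "load and clear word", which atomically loads
 * a word and writes zero back to it.  Everything below is built on
 * that one primitive.
 */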

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

#include <sys/stdint.h>

#define HPPA_LDCW_ALIGN	16

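/*
 * ldcw requires its operand to be 16-byte aligned, but a
 * __cpu_simple_lock_t object is not guaranteed to be.  The macro
 * below rounds a pointer into the object up to the next 16-byte
 * boundary by adding HPPA_LDCW_ALIGN - 1 and masking off the low
 * bits; for example, 0x1004 rounds up to 0x1010.  The object must
 * therefore be large enough to contain an aligned word.
 */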
#define __SIMPLELOCK_ALIGN(p) \
    (volatile unsigned long *)(((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1) & \
    ~(HPPA_LDCW_ALIGN - 1))

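/*
 * Because ldcw acquires by storing zero, the value of a held lock
 * is 0 and the value of a free lock is 1 -- the reverse of the
 * usual test-and-set convention.
 */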
#define __SIMPLELOCK_RAW_LOCKED		0
#define __SIMPLELOCK_RAW_UNLOCKED	1

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}

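/*
 * Inline-assembly wrapper for ldcw.  It returns the word's previous
 * contents (1 if the lock was free, 0 if it was held) and leaves
 * zero behind; the "memory" clobber keeps the compiler from caching
 * or reordering memory accesses across the acquire.
 */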
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
	int __val;

	__asm volatile("ldcw 0(%1), %0"
	    : "=r" (__val) : "r" (__ptr)
	    : "memory");

	return __val;
}

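/*
 * Full memory barrier: the PA-RISC "sync" instruction enforces
 * completion of all prior memory accesses before any later ones
 * are performed.
 */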
static __inline void
__sync(void)
{

	__asm volatile("sync\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

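/*
 * Initialization stores the unlocked pattern through the whole lock
 * object, then issues a barrier so the lock is visible to other
 * processors before it is first used.
 */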
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t ul = __SIMPLELOCK_UNLOCKED;

	*alp = ul;
	__sync();
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * Note that if the lock is held when we do the initial
	 * load-and-clear-word, we spin using ordinary (non-locking)
	 * loads, to save the coherency logic some work.
	 */

	while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
			;
}

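/*
 * For illustration only: the loop above is the classic
 * "test-and-test-and-set" spin, except that ldcw acquires by
 * clearing to zero rather than by setting.  A rough portable sketch
 * of the same shape in C11 atomics (not used by this header; the
 * names are made up for the example) might look like:
 */
#if 0
#include <stdatomic.h>

static void
example_spin_lock(atomic_uint *lock)
{
	/* Exchange returns the previous value; 1 means "held". */
	while (atomic_exchange_explicit(lock, 1,
	    memory_order_acquire) == 1)
		/* Held: spin on plain loads until it looks free. */
		while (atomic_load_explicit(lock,
		    memory_order_relaxed) == 1)
			continue;
}
#endif

/*
 * One ldcw attempt, no spinning.  Since ldcw returns the previous
 * value, a nonzero result means the lock was free and we now hold
 * it, so the function returns nonzero on success.
 */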
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
}

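/*
 * Release: the barrier ahead of the store ensures that all stores
 * made inside the critical section are visible before the lock word
 * is seen as free again.
 */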
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	__sync();
	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}

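/*
 * Set/clear the lock word with plain stores: no ldcw and no
 * barrier.  Presumably callers use these only where they provide
 * their own ordering guarantees.
 */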
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}

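/*
 * hppa offers no finer-grained barrier than "sync", so the read,
 * write, and full memory barrier hooks below are all the same full
 * barrier.
 */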
static __inline void
mb_read(void)
{
	__sync();
}

static __inline void
mb_write(void)
{
	__sync();
}

static __inline void
mb_memory(void)
{
	__sync();
}

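/*
 * Usage sketch (illustrative only; the storage and function are
 * hypothetical, and only primitives defined above are used):
 */
#if 0
static __cpu_simple_lock_t example_lock;

static void
example(void)
{

	__cpu_simple_lock_init(&example_lock);
	__cpu_simple_lock(&example_lock);
	/* ... critical section ... */
	__cpu_simple_unlock(&example_lock);
}
#endif
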
#endif /* _HPPA_LOCK_H_ */