/*	$NetBSD: lock.h,v 1.20 2017/09/17 00:01:07 christos Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

#include <sys/stdint.h>

/*
 * The PA-RISC LDCW (load and clear word) instruction requires its
 * operand to be aligned on a 16-byte boundary, so the usable lock word
 * must live at a 16-byte-aligned address.
 */
#define HPPA_LDCW_ALIGN	16UL

#define __SIMPLELOCK_ALIGN(p) \
    (volatile unsigned long *)(((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1) & \
    ~(HPPA_LDCW_ALIGN - 1))
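
/*
 * Worked example (illustrative only, not used by the code below):
 * __SIMPLELOCK_ALIGN rounds a pointer up to the next 16-byte boundary,
 * e.g.
 *
 *	__SIMPLELOCK_ALIGN((void *)0x1008) == (volatile unsigned long *)0x1010
 *	__SIMPLELOCK_ALIGN((void *)0x1010) == (volatile unsigned long *)0x1010
 *
 * Assuming csl_lock is the 16-byte, word-aligned array initialized by
 * __cpu_simple_lock_init() below, the rounded-up address always falls
 * somewhere inside that array.
 */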

/*
 * LDCW stores zero as it loads, so a zero word means "locked" and the
 * unlocked value is nonzero (1).
 */
#define __SIMPLELOCK_RAW_LOCKED		0UL
#define __SIMPLELOCK_RAW_UNLOCKED	1UL

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}

/*
 * Issue an LDCW: atomically load the word at __ptr and replace it with
 * zero.  A nonzero return value means the caller has taken the lock.
 */
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
	int __val;

	__asm volatile("ldcw 0(%1), %0"
	    : "=r" (__val) : "r" (__ptr)
	    : "memory");

	return __val;
}

/*
 * Memory barrier: the SYNC instruction orders all prior memory
 * references before any subsequent ones.
 */
static __inline void
__sync(void)
{

	__asm volatile("sync\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	/*
	 * Which of the four words is the 16-byte-aligned one depends on
	 * where the structure landed in memory, so initialize them all.
	 */
	alp->csl_lock[0] = alp->csl_lock[1] =
	alp->csl_lock[2] = alp->csl_lock[3] =
	    __SIMPLELOCK_RAW_UNLOCKED;
	__sync();
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * If we detect that the lock is held when we do the initial
	 * load-clear-word, spin using ordinary (non-locked) loads until
	 * it appears free, which saves the coherency logic some work.
	 */

	while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
			;
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
}
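
/*
 * Usage sketch (illustrative only; "l" is a hypothetical lock, not part
 * of this header): the try variant makes a single LDCW attempt and
 * never spins, so a caller can back off or do other work on failure:
 *
 *	if (__cpu_simple_lock_try(&l)) {
 *		... critical section ...
 *		__cpu_simple_unlock(&l);
 *	}
 */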

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * The barrier makes the critical section's stores visible
	 * before the word is marked unlocked.
	 */
	__sync();
	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
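
/*
 * Putting it together, a sketch of the blocking interface (again with a
 * hypothetical lock "l"; none of this appears elsewhere in the header):
 *
 *	static __cpu_simple_lock_t l;
 *
 *	__cpu_simple_lock_init(&l);
 *	...
 *	__cpu_simple_lock(&l);		spins until acquired
 *	... critical section ...
 *	__cpu_simple_unlock(&l);
 */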

/*
 * Plain-store variants: mark the lock word locked or unlocked without
 * an atomic operation or a barrier.
 */
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}

/*
 * SYNC is a full barrier, so the read, write, and full-memory variants
 * all map to it.
 */
static __inline void
mb_read(void)
{
	__sync();
}

static __inline void
mb_write(void)
{
	__sync();
}

static __inline void
mb_memory(void)
{
	__sync();
}

#endif /* _HPPA_LOCK_H_ */