/*	$NetBSD: lock.h,v 1.24 2022/02/13 14:06:51 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define _HPPA_LOCK_H_

#include <sys/stdint.h>

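/*
 * The PA-RISC LDCW (load and clear word) instruction requires a
 * 16-byte aligned operand.  A __cpu_simple_lock_t therefore reserves
 * more space than one word, and __SIMPLELOCK_ALIGN() rounds a pointer
 * into that space up to the first suitably aligned word, which is the
 * word actually used as the lock.
 */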
#define HPPA_LDCW_ALIGN		16UL

#define __SIMPLELOCK_ALIGN(p) \
        (volatile unsigned long *)((((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1)) & \
            ~(HPPA_LDCW_ALIGN - 1))

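/*
 * LDCW writes zero into the word it loads, so the locked state must be
 * zero and the unlocked state non-zero; note that this is the reverse
 * of the usual convention.
 */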
#define __SIMPLELOCK_RAW_LOCKED		0UL
#define __SIMPLELOCK_RAW_UNLOCKED	1UL

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
        return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
        return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}

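/*
 * Atomically load the lock word and clear it to zero (the locked
 * state), returning the value it held beforehand.
 */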
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
        int __val;

        __asm volatile("ldcw 0(%1), %0"
            : "=r" (__val) : "r" (__ptr)
            : "memory");

        return __val;
}

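/*
 * Issue a SYNC instruction, which acts as a full memory barrier.
 */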
static __inline void
__sync(void)
{

        __asm volatile("sync\n"
            : /* no outputs */
            : /* no inputs */
            : "memory");
}

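/*
 * Initialize every word of the lock structure, so that whichever word
 * __SIMPLELOCK_ALIGN() selects as the real lock word starts out
 * unlocked.
 */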
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

        alp->csl_lock[0] = alp->csl_lock[1] =
            alp->csl_lock[2] = alp->csl_lock[3] =
            __SIMPLELOCK_RAW_UNLOCKED;
}

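/*
 * Try once to acquire the lock.  LDCW returns the previous value of
 * the lock word: if it was already __SIMPLELOCK_RAW_LOCKED the lock is
 * held by someone else and the attempt fails; otherwise the clear done
 * by LDCW has just taken the lock for us.
 */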
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
        volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

        if (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
                return 0;

        /*
         * __cpu_simple_lock_try must be a load-acquire operation, but
         * HPPA's LDCW does not appear to guarantee load-acquire
         * semantics, so we have to do LDCW and then an explicit SYNC
         * to make a load-acquire operation that pairs with a preceding
         * store-release in __cpu_simple_unlock.
         */
        __sync();
        return 1;
}

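/*
 * Acquire the lock, spinning until it becomes available.
 */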
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
        volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

        /*
         * Note that if we detect that the lock is held when
         * we do the initial load-clear-word, we spin using
         * a non-locked load to save the coherency logic
         * some work.
         */

        while (!__cpu_simple_lock_try(alp))
                while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
                        ;
}

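/*
 * Release the lock.
 */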
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
        volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

        /*
         * SYNC and then store makes a store-release that pairs with
         * the load-acquire in a subsequent __cpu_simple_lock_try.
         */
        __sync();
        *__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}

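/*
 * Force the lock word to the locked state with a plain store; unlike
 * __cpu_simple_lock(), this is neither atomic nor a memory barrier,
 * so it is only suitable for callers that do not need those
 * guarantees.
 */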
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
        volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

        *__aptr = __SIMPLELOCK_RAW_LOCKED;
}

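/*
 * Force the lock word to the unlocked state with a plain store; as
 * with __cpu_simple_lock_set(), no atomicity or memory barrier is
 * provided.
 */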
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
        volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

        *__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
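
/*
 * Illustrative usage sketch (not part of the interface; the lock name
 * below is hypothetical): initialize the lock once, then bracket each
 * critical section with lock/unlock, e.g.
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	...
 *	__cpu_simple_lock(&example_lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 */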

#endif /* _HPPA_LOCK_H_ */