/*	$NetBSD: lock.h,v 1.17 2022/02/12 17:17:53 riastradh Exp $	*/

/*-
 * Copyright (c) 2000, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _POWERPC_LOCK_H_
#define _POWERPC_LOCK_H_

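/*
 * Lock-word predicates and plain (non-atomic) mutators.  These
 * compile to ordinary loads and stores with no barriers or
 * reservations; the atomic acquire/release work is done by
 * __cpu_simple_lock(), __cpu_simple_lock_try() and
 * __cpu_simple_unlock() below.
 */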
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	*alp = __SIMPLELOCK_UNLOCKED;
}

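/*
 * Acquire the lock, spinning until it is free.
 *
 * The loop uses the PowerPC load-and-reserve/store-conditional pair:
 * lwarx loads the lock word and establishes a reservation, and the
 * stwcx. at 3: succeeds only if the reservation is still intact, so
 * a successful store means no other CPU modified the word in
 * between.  While the lock is observed held, the code spins at 2: on
 * a plain lwzx rather than repeating lwarx, keeping reservation
 * traffic off the bus.  The trailing isync is the acquire barrier:
 * it prevents instructions in the critical section from executing
 * before the lock is taken.
 *
 * On IBM 405 cores the dcbt immediately before stwcx. is the
 * workaround for CPU erratum #77, hence the IBM405_ERRATA77
 * conditionals here and below.
 */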
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	int old;

	__asm volatile ("	\
				\n\
1:	lwarx	%0,0,%1		\n\
	cmpwi	%0,%2		\n\
	beq+	3f		\n\
2:	lwzx	%0,0,%1		\n\
	cmpwi	%0,%2		\n\
	beq+	1b		\n\
	b	2b		\n\
3:				\n"
#ifdef IBM405_ERRATA77
	"dcbt	0,%1		\n"
#endif
	"stwcx.	%3,0,%1		\n\
	bne-	1b		\n\
	isync			\n\
				\n"
	: "=&r"(old)
	: "r"(alp), "I"(__SIMPLELOCK_UNLOCKED), "r"(__SIMPLELOCK_LOCKED)
	: "memory");
}

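/*
 * Try once to acquire the lock; returns nonzero if it was taken,
 * zero if someone else holds it.  Unlike __cpu_simple_lock() this
 * retries only when the reservation is lost, never when the lock is
 * held.  The second, unconditional stwcx. into the local `dummy'
 * word is reached on both paths; its job is to cancel any
 * reservation still outstanding when the lock was found held, so the
 * routine never returns with a live reservation.
 */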
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int old, dummy;

	__asm volatile ("	\
				\n\
1:	lwarx	%0,0,%1		\n\
	cmpwi	%0,%2		\n\
	bne	2f		\n"
#ifdef IBM405_ERRATA77
	"dcbt	0,%1		\n"
#endif
	"stwcx.	%3,0,%1		\n\
	bne-	1b		\n\
2:				\n"
#ifdef IBM405_ERRATA77
	"dcbt	0,%4		\n"
#endif
	"stwcx.	%3,0,%4		\n\
	isync			\n\
				\n"
	: "=&r"(old)
	: "r"(alp), "I"(__SIMPLELOCK_UNLOCKED), "r"(__SIMPLELOCK_LOCKED),
	  "r"(&dummy)
	: "memory");

	return (old == __SIMPLELOCK_UNLOCKED);
}

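/*
 * Release the lock.  The sync is a full barrier, so every store made
 * inside the critical section is visible to other CPUs before the
 * lock word is cleared; after that, an ordinary store suffices to
 * release the lock.
 */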
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm volatile ("sync");
	*alp = __SIMPLELOCK_UNLOCKED;
}

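/*
 * Example usage, a minimal sketch for illustration only: the names
 * `example_lock', `example_count' and `example_increment' are
 * hypothetical and not part of this header.  example_count is only
 * ever modified while the lock is held.
 *
 *	static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
 *	static int example_count;
 *
 *	static void
 *	example_increment(void)
 *	{
 *		__cpu_simple_lock(&example_lock);
 *		example_count++;
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */
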
#endif /* _POWERPC_LOCK_H_ */