xref: /netbsd-src/sys/arch/riscv/include/mutex.h (revision b426528770b1ff548bd444360bac04d668221a78)
1 /*	$NetBSD: mutex.h,v 1.7 2024/11/25 22:04:14 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe and Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef _RISCV_MUTEX_H_
33 #define	_RISCV_MUTEX_H_
34 
35 #include <sys/types.h>
36 
37 #ifndef __MUTEX_PRIVATE
38 
/*
 * Public (opaque) view of a kernel mutex: a single pointer-sized pad
 * so that sizeof(struct kmutex) matches the private definition below
 * without exposing the owner/lock-bit layout to non-mutex code.
 */
struct kmutex {
	uintptr_t	mtx_pad1;	/* overlays mtx_owner in the private view */
};
42 
43 #else	/* __MUTEX_PRIVATE */
44 
45 #include <sys/cdefs.h>
46 
47 #include <sys/param.h>
48 
49 #include <machine/intr.h>
50 
/*
 * Private view of a kernel mutex.  For adaptive mutexes mtx_owner holds
 * the owning LWP pointer; for spin mutexes the low bits carry the lock
 * bit (MTX_LOCK) and saved IPL (MTX_IPL) — see the field masks below.
 * volatile: read/written concurrently by other CPUs and by inline asm.
 */
struct kmutex {
	volatile uintptr_t	mtx_owner;	/* owner LWP or spin-lock state */
};
54 
55 #ifdef _KERNEL
56 
/*
 * AMO instruction width suffix: mtx_owner is uintptr_t, so the atomic
 * ops must match the pointer width of the ABI.
 */
#ifdef _LP64
#define MTX_ASMOP_SFX ".d"		// doubleword atomic op
#else
#define MTX_ASMOP_SFX ".w"		// word atomic op
#endif

/*
 * Spin-mutex state packed into mtx_owner.  These bits must not overlap
 * and must stay clear of any bits an LWP pointer could occupy in the
 * adaptive case.
 */
#define	MTX_LOCK			__BIT(8)	// just one bit
#define	MTX_IPL				__BITS(7,4)	// only need 4 bits

/*
 * Hook the MD spin-mutex operations into the MI mutex implementation
 * in kern_mutex.c by overriding the <sys/mutex.h> defaults with the
 * inline functions below.
 */
#undef MUTEX_SPIN_IPL			// override <sys/mutex.h>
#define	MUTEX_SPIN_IPL(a)		riscv_mutex_spin_ipl(a)
#define	MUTEX_INITIALIZE_SPIN_IPL(a,b)	riscv_mutex_initialize_spin_ipl(a,b)
#define MUTEX_SPINBIT_LOCK_INIT(a)	riscv_mutex_spinbit_lock_init(a)
#define MUTEX_SPINBIT_LOCK_TRY(a)	riscv_mutex_spinbit_lock_try(a)
#define MUTEX_SPINBIT_LOCKED_P(a)	riscv_mutex_spinbit_locked_p(a)
#define MUTEX_SPINBIT_LOCK_UNLOCK(a)	riscv_mutex_spinbit_lock_unlock(a)
74 static inline ipl_cookie_t
75 riscv_mutex_spin_ipl(kmutex_t *__mtx)
76 {
77 	return (ipl_cookie_t){._spl = __SHIFTOUT(__mtx->mtx_owner, MTX_IPL)};
78 }
79 
80 static inline void
81 riscv_mutex_initialize_spin_ipl(kmutex_t *__mtx, int ipl)
82 {
83 	__mtx->mtx_owner = (__mtx->mtx_owner & ~MTX_IPL)
84 	    | __SHIFTIN(ipl, MTX_IPL);
85 }
86 
87 static inline void
88 riscv_mutex_spinbit_lock_init(kmutex_t *__mtx)
89 {
90 	__mtx->mtx_owner &= ~MTX_LOCK;
91 }
92 
93 static inline bool
94 riscv_mutex_spinbit_locked_p(const kmutex_t *__mtx)
95 {
96 	return (__mtx->mtx_owner & MTX_LOCK) != 0;
97 }
98 
99 static inline bool
100 riscv_mutex_spinbit_lock_try(kmutex_t *__mtx)
101 {
102 	uintptr_t __old;
103 	__asm __volatile(
104 		"amoor" MTX_ASMOP_SFX ".aq\t%0, %1, (%2)"
105 	   :	"=r"(__old)
106 	   :	"r"(MTX_LOCK), "r"(__mtx));
107 	return (__old & MTX_LOCK) == 0;
108 }
109 
110 static inline void
111 riscv_mutex_spinbit_lock_unlock(kmutex_t *__mtx)
112 {
113 	__asm __volatile(
114 		"amoand" MTX_ASMOP_SFX ".rl\tx0, %0, (%1)"
115 	   ::	"r"(~MTX_LOCK), "r"(__mtx));
116 }
117 
118 #endif /* _KERNEL */
119 
/*
 * Optimized assembly mutex stubs are not (yet) provided for riscv;
 * keep the feature macros visible but disabled.
 */
#if 0
#define	__HAVE_MUTEX_STUBS		1
#define	__HAVE_SPIN_MUTEX_STUBS		1
#endif
/* Use the MI "simple" compare-and-swap mutex implementation. */
#define	__HAVE_SIMPLE_MUTEXES		1
125 
126 #endif	/* __MUTEX_PRIVATE */
127 
128 #endif /* _RISCV_MUTEX_H_ */
129