xref: /openbsd-src/sys/arch/m88k/m88k/mutex.c (revision 406e3054fc812f8b8a6e22f1c3dac8f6eb192cb0)
1 /*	$OpenBSD: mutex.c,v 1.1 2020/05/26 11:55:10 aoyama Exp $	*/
2 
3 /*
4  * Copyright (c) 2020 Miodrag Vallat
5  * Copyright (c) 2017 Visa Hankala
6  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
7  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/sched.h>
25 #include <sys/atomic.h>
26 #include <sys/witness.h>
27 #include <sys/mutex.h>
28 
29 #include <ddb/db_output.h>
30 
31 #ifdef MP_LOCKDEBUG
32 extern int __mp_lock_spinout;	/* kern_lock.c */
33 #endif /* MP_LOCKDEBUG */
34 
/*
 * Atomically exchange `new' with the word at `lockptr' and return the
 * previous value.
 *
 * Uses the m88k `xmem' instruction, which swaps a register with a memory
 * word in one atomic operation.  The effective address is formed from the
 * two source registers; %r0 is the hardwired zero register, so the address
 * is simply lockptr + 0.  The "+m"(*lockptr) operand tells the compiler
 * the pointed-to word is both read and written, preventing it from caching
 * the value across the exchange.
 */
static inline int
atomic_swap(volatile int *lockptr, int new)
{
        int old = new;
        asm volatile
            ("xmem %0, %2, %%r0" : "+r"(old), "+m"(*lockptr) : "r"(lockptr));
	return old;
}
43 
44 void
__mtx_init(struct mutex * mtx,int wantipl)45 __mtx_init(struct mutex *mtx, int wantipl)
46 {
47 	mtx->mtx_lock = 0;
48 	mtx->mtx_owner = NULL;
49 	mtx->mtx_wantipl = wantipl;
50 	mtx->mtx_oldipl = IPL_NONE;
51 }
52 
53 #ifdef MULTIPROCESSOR
54 void
/*
 * Acquire a mutex, spinning until it becomes available.  The IPL raise
 * and ownership bookkeeping are done inside mtx_enter_try().
 */
void
mtx_enter(struct mutex *mtx)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

	/* Account the busy-wait so the scheduler can see this CPU spinning. */
	spc->spc_spinning++;
	while (mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		/* Spun too long without progress: drop into the debugger. */
		if (--nticks == 0) {
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}
79 
/*
 * Try to acquire a mutex without spinning.  Returns nonzero on success.
 * On success the IPL has been raised to mtx_wantipl and the previous
 * level saved in mtx_oldipl for mtx_leave() to restore.
 */
int
mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	/* Raise the IPL before attempting to take the lock word. */
	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	/* atomic_swap() returns 0 iff the lock was free and is now ours. */
	if (atomic_swap(&mtx->mtx_lock, 1) == 0) {
		mtx->mtx_owner = ci;
		/* Keep the critical section from starting before we own the lock. */
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
		return (1);
	}

#ifdef DIAGNOSTIC
	/* Recursive entry would spin forever; catch it loudly. */
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	/* Lock was held by someone else: undo the IPL raise. */
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
114 #else
/*
 * Uniprocessor mtx_enter(): no other CPU can hold the lock, so there is
 * no lock word to contend on — just raise the IPL and record ownership.
 */
void
mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

#ifdef DIAGNOSTIC
	/* On UP, re-entry can only be a self-deadlock; catch it loudly. */
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	/* Raise the IPL before marking the mutex owned. */
	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
	WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
}
142 
/*
 * Uniprocessor mtx_enter_try(): with a single CPU the mutex can never be
 * contended, so acquisition cannot fail — take it and report success.
 */
int
mtx_enter_try(struct mutex *mtx)
{
	mtx_enter(mtx);

	return (1);
}
149 #endif
150 
/*
 * Release a mutex and restore the IPL in effect when it was taken.
 * The statement order is deliberate: the saved IPL is read while the
 * mutex still protects it, the memory barrier is issued before the
 * lock word is cleared so the critical section's stores are visible
 * first, and splx() runs only after the lock has been released.
 */
void
mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);
	WITNESS_UNLOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	/* Read the saved IPL before giving up the lock that guards it. */
	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit();
#endif
	mtx->mtx_owner = NULL;
	(void)atomic_swap(&mtx->mtx_lock, 0);
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
176