/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */
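
/*
 * Usage overview (a minimal sketch of the intended protocol, not a
 * verbatim excerpt; `my_class` and `my_locks[]` are hypothetical): a
 * transaction initializes one acquire context, locks every ww_mutex it
 * needs with that context, marks the acquisition phase done, and releases
 * everything at the end:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ww_mutex_lock(&my_locks[0], &ctx);	// repeat for each lock;
 *	ww_mutex_lock(&my_locks[1], &ctx);	// back off on -EDEADLK
 *	ww_acquire_done(&ctx);
 *	// ... use the locked resources ...
 *	ww_mutex_unlock(&my_locks[1]);
 *	ww_mutex_unlock(&my_locks[0]);
 *	ww_acquire_fini(&ctx);
 *
 * A full -EDEADLK backoff loop is sketched after the locking functions
 * below.
 */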

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <stdbool.h>

struct ww_class {
	volatile u_long			stamp;
	const char			*name;
};

struct ww_acquire_ctx {
	u_long				stamp;
	struct ww_class			*ww_class;
};

struct ww_mutex {
	struct spinlock			lock;
	volatile int			acquired;
	volatile struct ww_acquire_ctx	*ctx;
	volatile struct thread		*owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}
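
/*
 * All ww_mutexes that can be taken inside the same acquire context must
 * share one ww_class; the class hands out the monotonically increasing
 * stamps that decide which contender waits and which one is wounded.
 * A hypothetical example:
 *
 *	DEFINE_WW_CLASS(my_resv_class);
 */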

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
	spin_init(&lock->lock, ww_class->name);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}
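
/*
 * A mutex is bound to its class at init time. A minimal sketch, with a
 * hypothetical container object:
 *
 *	struct my_obj { struct ww_mutex lock; };
 *
 *	ww_mutex_init(&obj->lock, &my_resv_class);
 */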

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;
	spin_lock(&lock->lock);
	if (lock->acquired > 0) res = true;
	spin_unlock(&lock->lock);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 1;
	KKASSERT(curthread);

	spin_lock(&lock->lock);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KKASSERT(lock->ctx == NULL);
		lock->acquired += 1;
		lock->owner = curthread;
	}
	/*
	 * In case we already hold the ww_mutex, increment the
	 * recursion count.
	 */
	else if (lock->owner == curthread) {
		lock->acquired += 1;
	}
	else {
		res = 0;
	}
	spin_unlock(&lock->lock);
	return res;
}
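
/*
 * A trylock never sleeps and takes no acquire context, so it cannot
 * deadlock; on success it is paired with a plain ww_mutex_unlock().
 * A minimal sketch (`obj` is hypothetical):
 *
 *	if (ww_mutex_trylock(&obj->lock)) {
 *		// ... access obj ...
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */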

/*
 * When `slow` is `true`, always block if the ww_mutex is contended.
 * The caller is assumed to hold no other (ww_mutex) resources when it
 * takes the slow path, as that could lead to deadlocks.
 *
 * When `intr` is `true`, the ssleep is interruptible.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	KKASSERT(curthread);

	spin_lock(&lock->lock);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KKASSERT(lock->ctx == NULL);
			lock->acquired += 1;
			lock->ctx = ctx;
			lock->owner = curthread;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, increment the
		 * recursion count and return -EALREADY.
		 */
		else if (lock->owner == curthread) {
			KKASSERT(lock->ctx == ctx);
			lock->acquired += 1;
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Four cases in which we sleep:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified. We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - The current holder acquired the lock without a
			 *   context (e.g. via ww_mutex_trylock()), so the
			 *   stamps cannot be compared.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all its
			 *   resources.
			 */
			if (slow || ctx == NULL || lock->ctx == NULL ||
			    ctx->stamp < lock->ctx->stamp) {
				int s = ssleep(lock, &lock->lock,
					       intr ? PCATCH : 0,
					       ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
				if (intr && (s == EINTR || s == ERESTART)) {
					/* XXX: Should we handle ERESTART? */
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK because there is a potential
			 * risk of a deadlock. The `younger` process should
			 * then give up all its resources and try again to
			 * acquire the lock in question, this time in a
			 * blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}

	} /* for */
	spin_unlock(&lock->lock);
	return err;
}

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

static inline void
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, true);
}
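
/*
 * A sketch of the -EDEADLK backoff protocol for two mutexes of the same
 * class (`lock_pair` is a hypothetical helper, not part of this API):
 * when wounded, drop everything we hold, block on the contended mutex
 * via the slow path, and then retry the remaining lock.
 *
 *	static int
 *	lock_pair(struct ww_mutex *a, struct ww_mutex *b,
 *		  struct ww_acquire_ctx *ctx)
 *	{
 *		struct ww_mutex *tmp;
 *		int err;
 *
 *		err = ww_mutex_lock(a, ctx);
 *		if (err == -EDEADLK) {
 *			// We hold nothing yet, so it is safe to block.
 *			ww_mutex_lock_slow(a, ctx);
 *			err = 0;
 *		}
 *		if (err != 0)
 *			return err;
 *		for (;;) {
 *			err = ww_mutex_lock(b, ctx);
 *			if (err != -EDEADLK)
 *				return err;	// 0 on success
 *			// Wounded while holding `a`: back off, block on
 *			// `b` via the slow path, re-take the other lock.
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, ctx);
 *			tmp = a; a = b; b = tmp;
 *		}
 *	}
 */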

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	spin_lock(&lock->lock);
	KKASSERT(lock->owner == curthread);
	KKASSERT(lock->acquired > 0);

	--lock->acquired;
	if (lock->acquired > 0) {
		spin_unlock(&lock->lock);
		return;
	}

	KKASSERT(lock->acquired == 0);
	lock->ctx = NULL;
	lock->owner = NULL;
	spin_unlock(&lock->lock);
	wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KKASSERT(lock->acquired == 0);
	KKASSERT(lock->ctx == NULL);
	KKASSERT(lock->owner == NULL);
	spin_uninit(&lock->lock);
}

#endif	/* _LINUX_WW_MUTEX_H_ */