/*-
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 *    Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>

#include <linux/ww_mutex.h>

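/*
 * Start a new acquire context.  The per-class stamp orders contexts
 * globally: a lower (older) stamp wins any wait/wound conflict, which is
 * what keeps the -EDEADLK backoff protocol deadlock-free.
 */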
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
{
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->acquired = 0;
	ctx->ww_class = ww_class;
}

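/*
 * ww_acquire_done() and ww_acquire_fini() are no-ops in this
 * implementation; stamps handed out by ww_acquire_init() are never
 * recycled, so there is no per-context state to retire.
 */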
void
ww_acquire_done(struct ww_acquire_ctx *ctx __unused)
{
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx __unused)
{
}

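/*
 * An unowned ww_mutex carries the maximum possible stamp, so a contending
 * context never sees itself as younger than a holder that has no acquire
 * context and will wait instead of unwinding.
 */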
void
ww_mutex_init(struct ww_mutex *ww, struct ww_class *ww_class)
{
	lockinit(&ww->base, ww_class->name, 0, LK_CANRECURSE);
	ww->ctx = NULL;
	ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	ww->blocked = 0;
}

void
ww_mutex_destroy(struct ww_mutex *ww)
{
	lockuninit(&ww->base);
}

/*
 * Optimized lock path.
 *
 * The (slow) variant is optional as long as we block normally on the
 * initial lock.  It is currently not implemented separately.
 */
static __inline
int
__wwlock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx,
	 bool slow __unused, bool intr)
{
	int flags = LK_EXCLUSIVE;
	int error;

	if (intr)
		flags |= LK_PCATCH;

	/*
	 * Behave as a normal mutex when ctx is NULL.
	 */
	if (ctx == NULL) {
		error = lockmgr(&ww->base, flags);
		if (error)
			error = -EINTR;
		return error;
	}

	/*
	 * A normal blocking lock can be used when ctx->acquired is 0 (no
	 * prior locks are held).  If prior locks are held then we cannot
	 * block here.
	 *
	 * In the non-blocking case, set up our tsleep interlock prior to
	 * attempting to acquire the lock.
	 */
	for (;;) {
		if (ctx->acquired != 0) {
			flags |= LK_NOWAIT;
			tsleep_interlock(ww, (intr ? PCATCH : 0));
		}
		error = lockmgr(&ww->base, flags);
		if (error == 0) {
			ww->ctx = ctx;
			ww->stamp = ctx->stamp;
			++ctx->acquired;
			return 0;
		}

		/*
		 * EINTR or ERESTART returns -EINTR.  ENOLCK and EWOULDBLOCK
		 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
		 */
		if (error != EBUSY)
			return -EINTR;

		/*
		 * ctx->acquired can only be non-zero in this path.
		 * NOTE: ww->ctx is not MPSAFE.
		 * NOTE: ww->stamp is heuristic, a race is possible.
		 */
		KKASSERT(ctx->acquired > 0);

		/*
		 * Unwind if we aren't the oldest.
		 */
		if (ctx->stamp > ww->stamp)
			return -EDEADLK;

		/*
		 * We have priority over the currently held lock.  Tell
		 * the remote lock holder that we want them to unwind.
		 *
		 * error is zero if woken up
		 *	    EINTR / ERESTART - signal
		 *	    EWOULDBLOCK	     - timeout expired (if not 0)
		 */
		atomic_swap_int(&ww->blocked, 1);
		error = tsleep(ww, PINTERLOCKED | (intr ? PCATCH : 0),
			       ctx->ww_class->name, 0);
		if (intr && (error == EINTR || error == ERESTART))
			return -EINTR;
		/* retry */
	}
}

int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}

int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}

int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
				 struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}

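/*
 * Release the mutex.  The owner clears ww->ctx and restores the sentinel
 * stamp before dropping the lockmgr lock, then wakes up any contender
 * that parked itself on the tsleep interlock in __wwlock().
 */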
void
ww_mutex_unlock(struct ww_mutex *ww)
{
	struct ww_acquire_ctx *ctx;

	ctx = ww->ctx;
	if (ctx) {
		KKASSERT(ctx->acquired > 0);
		--ctx->acquired;
		ww->ctx = NULL;
		ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	}
	lockmgr(&ww->base, LK_RELEASE);
	if (atomic_swap_int(&ww->blocked, 0))
		wakeup(ww);
}
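
/*
 * Usage sketch (illustration only, compiled out): the canonical
 * wait/wound pattern a caller would use to take two ww_mutexes in an
 * arbitrary order.  The function and variable names below are
 * hypothetical; only the ww_* calls are real.  With the non-interruptible
 * entry points used here, -EDEADLK is the only error the retry loop has
 * to handle, and on backoff the contended mutex is taken first so the
 * younger context sleeps instead of spinning.
 */
#if 0
static int
example_lock_pair(struct ww_mutex *mtx1, struct ww_mutex *mtx2,
		  struct ww_class *ww_class)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *first;
	struct ww_mutex *second;
	struct ww_mutex *tmp;
	int error;

	first = mtx1;
	second = mtx2;
	ww_acquire_init(&ctx, ww_class);

	for (;;) {
		/*
		 * No locks are held at the top of the loop, so this call
		 * blocks normally and cannot return -EDEADLK.
		 */
		ww_mutex_lock(first, &ctx);
		error = ww_mutex_lock(second, &ctx);
		if (error != -EDEADLK)
			break;

		/*
		 * An older context owns 'second'.  Back off so it can
		 * make progress, then retry with the contended mutex
		 * taken first.
		 */
		ww_mutex_unlock(first);
		tmp = first;
		first = second;
		second = tmp;
	}

	ww_acquire_done(&ctx);
	/* ... both objects are now locked; do the real work here ... */
	ww_mutex_unlock(second);
	ww_mutex_unlock(first);
	ww_acquire_fini(&ctx);

	return error;
}
#endif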