/*	$NetBSD: linux_ww_mutex.c,v 1.16 2023/07/29 23:50:03 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.16 2023/07/29 23:50:03 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

#define	WW_WANTLOCK(WW)							      \
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			      \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW)							      \
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			      \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW)							      \
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			      \
	    (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

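/*
 * Note (added commentary): acquire contexts are keyed in this rb tree by
 * wwx_ticket.  Tickets increase monotonically, so a lower ticket means an
 * older context.  ww_mutex_unlock hands the mutex to RB_TREE_MIN, i.e. the
 * oldest waiting context, and ww_mutex_lock backs off with -EDEADLK when
 * the current owner's ticket is lower than the caller's.
 */
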
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

static void
ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * If caller has invoked ww_acquire_done, we must already hold
	 * this mutex.
	 */
	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERTMSG((!ctx->wwx_acquire_done ||
		(mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
	    "ctx %p done acquiring locks, refusing to acquire %p",
	    ctx, mutex);
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

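/*
 * Illustrative sketch (not part of this file): the intended acquire
 * context life cycle, for hypothetical mutexes a and b in class c.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &c);
 *	ww_mutex_lock(&a, &ctx);
 *	ww_mutex_lock(&b, &ctx);
 *	ww_acquire_done(&ctx);		// optional: no more locks follow
 *	...				// use the locked objects
 *	ww_mutex_unlock(&b);
 *	ww_mutex_unlock(&a);
 *	ww_acquire_fini(&ctx);		// requires all locks released
 *
 * Error handling for -EDEADLK/-EALREADY is omitted here; see the sketch
 * after ww_mutex_lock below.
 */
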
#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif

/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize mutex in the given class.  Must precede any other
 *	ww_mutex_* operations.  When done, mutex must be destroyed
 *	with ww_mutex_destroy.
 */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}

/*
 * ww_mutex_destroy(mutex)
 *
 *	Destroy mutex initialized by ww_mutex_init.  Caller must not
 *	use mutex with any other ww_mutex_* operations afterward,
 *	except after reinitializing it with ww_mutex_init.
 */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

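/*
 * Illustrative sketch (not part of this file): typical initialization and
 * teardown.  my_class and my_mutex are hypothetical names; how the class
 * object itself is declared (e.g. Linux's DEFINE_WW_CLASS) is up to the
 * consumer.
 *
 *	ww_mutex_init(&my_mutex, &my_class);
 *	...
 *	ww_mutex_destroy(&my_mutex);	// must be unlocked, per the
 *					// KASSERT above
 *
 * Reuse after ww_mutex_destroy requires another ww_mutex_init.
 */
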
/*
 * ww_mutex_is_locked(mutex)
 *
 *	True if anyone holds mutex locked at the moment, false if not.
 *	Answer is stale as soon as returned unless mutex is held by
 *	caller.
 *
 *	XXX WARNING: This returns true if it is locked by ANYONE.  Does
 *	not mean `Do I hold this lock?' (answering which really
 *	requires an acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

/*
 * ww_mutex_state_wait(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state.  Uninterruptible; never fails.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);

	for (;;) {
		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
		if (mutex->wwm_state != state)
			break;
	}

	KASSERT(mutex->wwm_state != state);
}

/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state, or fail if interrupted by a signal.  Return 0
 *	on success, -EINTR if interrupted by a signal.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (mutex->wwm_state != state) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			break;
		}
	}

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERTMSG(ret != 0 || mutex->wwm_state != state,
	    "ret=%d mutex=%p mutex->wwm_state=%d state=%d",
	    ret, mutex, mutex->wwm_state, state);
	return ret;
}

/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex.  While waiting, record ctx in the tree of waiters.  Does
 *	not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	for (;;) {
		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx)
			break;
	}

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERT(mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(mutex->wwm_u.ctx == ctx);
}

/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex and return 0, or return -EINTR if interrupted by a
 *	signal.  While waiting, record ctx in the tree of waiters.
 *	Does not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			break;
		}
	}

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERT(ret != 0 ||
	    mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(ret != 0 || mutex->wwm_u.ctx == ctx);
	return ret;
}

/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire mutex without an acquire context.  Caller must not
 *	already hold the mutex.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
 */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context and return 0, or fail
 *	and return -EINTR if interrupted by a signal.  Caller must not
 *	already hold the mutex.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing
 *	ww_mutex_lock_interruptible(..., NULL).
 */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *
 *	May sleep.
 */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		ret = 0;
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
	    "ret=%d", ret);
	return ret;
}

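/*
 * Illustrative sketch (not part of this file): the backoff protocol the
 * return values above imply, for hypothetical mutexes a and b in class c.
 *
 *	struct ww_acquire_ctx ctx;
 *	int error;
 *
 *	ww_acquire_init(&ctx, &c);
 *	error = ww_mutex_lock(&a, &ctx);	// assume 0 here
 *	error = ww_mutex_lock(&b, &ctx);
 *	if (error == -EDEADLK) {
 *		// Wounded: drop everything, then wait for the winner.
 *		ww_mutex_unlock(&a);
 *		ww_mutex_lock_slow(&b, &ctx);
 *		error = ww_mutex_lock(&a, &ctx);	// may wound again
 *	}
 *	...
 *
 * -EALREADY only reports that this context already holds the mutex; the
 * caller skips that lock and must not unlock it an extra time.
 */
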
/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible or
 *	interrupted.
 *
 *	- If ctx is null, caller must not hold mutex; succeed and
 *	  return 0, or fail with -EINTR if interrupted by a signal.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *	  . Fail with -EINTR if interrupted by a signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
		ret == -EINTR), "ret=%d", ret);
	return ret;
}

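/*
 * Illustrative note (not part of this file): on -EINTR the caller is
 * expected to unwind much as for -EDEADLK, i.e. release whatever it has
 * already locked under this context before bailing out, since
 * ww_acquire_fini asserts that no locks are still held.  With the
 * hypothetical names from the earlier sketches:
 *
 *	error = ww_mutex_lock_interruptible(&b, &ctx);
 *	if (error == -EINTR) {
 *		ww_mutex_unlock(&a);	// release locks already held
 *		ww_acquire_fini(&ctx);
 *		return error;
 *	}
 */
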
/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again.
 *
 *	Uninterruptible; never fails.
 *
 *	May sleep.
 */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again, or fail with -EINTR if interrupted by a
 *	signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex and return 1, but if it can't be done
 *	immediately, return 0.
 */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		/*
		 * It is tempting to assert that we do not hold the
		 * mutex here, because trylock when we hold the lock
		 * already generally indicates a bug in the design of
		 * the code.  However, it seems that Linux relies on
		 * this deep in ttm buffer reservation logic, so these
		 * assertions are disabled until we find another way to
		 * work around that or fix the bug that leads to it.
		 *
		 * That said: we should not be in the WW_WANTOWN state,
		 * which happens only while we're in the ww mutex logic
		 * waiting to acquire the lock.
		 */
#if 0
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
		    (mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
#endif
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

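/*
 * Illustrative sketch (not part of this file): trylock takes the mutex
 * without an acquire context, so a successful trylock is released with a
 * plain ww_mutex_unlock and is never counted against any ctx.
 *
 *	if (ww_mutex_trylock(&a)) {
 *		...			// a is held, state WW_OWNED
 *		ww_mutex_unlock(&a);
 *	} else {
 *		...			// fall back to the ctx protocol
 *	}
 */
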
/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Decrement the number of mutexes acquired in the current locking
 *	context of mutex, which must be held by the caller and in
 *	WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 *	Caller must hold the internal lock of mutex, and is responsible
 *	for notifying waiters.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex and wake the next caller waiting, if any.
 */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	WW_UNLOCKED(mutex);
	KASSERTMSG(mutex->wwm_state != WW_UNLOCKED, "mutex %p", mutex);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_locking_ctx(mutex)
 *
 *	Return the current acquire context of mutex.  Answer is stale
 *	as soon as returned unless mutex is held by caller.
 */
struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
	case WW_OWNED:
		ctx = NULL;
		break;
	case WW_CTX:
	case WW_WANTOWN:
		ctx = mutex->wwm_u.ctx;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return ctx;
}