/*	$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>

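/*
 * Wait/wound ("ww") mutexes let a thread take an arbitrary set of
 * related locks without deadlocking: every ww_acquire_ctx is stamped
 * with a ticket, and when two contexts contend for the same mutex the
 * younger one (larger ticket) is asked to back off.  All locks taken
 * under one context must belong to the same ww_class.  Each ww_mutex
 * moves between four states, tracked under wwm_lock:
 *
 *	WW_UNLOCKED	nobody holds it
 *	WW_OWNED	held by an lwp with no acquire context
 *	WW_CTX		held on behalf of an acquire context
 *	WW_WANTOWN	held on behalf of a context, with a no-context
 *			locker waiting for it
 *
 * A minimal sketch of the intended usage pattern, assuming two
 * hypothetical objects a and b that each embed a ww_mutex named
 * `lock', and a shared ww_class `example_class' (these names are
 * illustrative only, not part of this file):
 *
 *	struct ww_acquire_ctx ctx;
 *	int error;
 *
 *	ww_acquire_init(&ctx, &example_class);
 *	error = ww_mutex_lock(&a->lock, &ctx);
 *	...
 *	error = ww_mutex_lock(&b->lock, &ctx);
 *	if (error == -EDEADLK) {
 *		// Back off: drop every lock held under ctx...
 *		ww_mutex_unlock(&a->lock);
 *		// ...sleep until the contended lock is ours...
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		// ...and retry the others (we may lose again).
 *		error = ww_mutex_lock(&a->lock, &ctx);
 *		...
 *	}
 *	ww_acquire_done(&ctx);
 *	// ...use the objects...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */
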
static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

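/*
 * ww_acquire_init(ctx, class)
 *
 *	Prepare ctx for a sequence of lock acquisitions under class,
 *	stamping it with the next ticket from the class; older tickets
 *	win contests.  Must be matched by ww_acquire_fini on the same
 *	lwp once every lock acquired under ctx has been released.
 */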
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

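/*
 * ww_mutex_state_wait(mutex, state)
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Sleep on wwm_cv until the mutex leaves the given state.  The
 *	caller must hold wwm_lock, which cv_wait/cv_wait_sig drops
 *	while asleep and reacquires before returning.  The _sig
 *	variant returns zero on success or a negative (Linux-style)
 *	error if interrupted by a signal.
 */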
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

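/*
 * ww_mutex_lock_wait(mutex, ctx)
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	Enter ctx into the mutex's tree of waiters, ordered by ticket,
 *	and sleep until ww_mutex_unlock hands the mutex to ctx (the
 *	waiter with the lowest ticket wins).  The caller must hold
 *	wwm_lock, and the mutex must currently be owned by some other
 *	context.  The _sig variant returns zero on success or a
 *	negative error if interrupted.
 */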
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		 (mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

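/*
 * ww_mutex_lock_noctx(mutex)
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context, as an ordinary
 *	sleeping lock tied to curlwp; no deadlock avoidance applies.
 *	The _sig variant returns zero on success or a negative error
 *	if interrupted.
 */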
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

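/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Acquire mutex under ctx, or with no deadlock avoidance if ctx
 *	is null.  Returns 0 on success, -EALREADY if ctx already owns
 *	mutex, or -EDEADLK if a context with an older ticket owns it,
 *	in which case the caller must release every lock held under
 *	ctx and start over (see ww_mutex_lock_slow).  May sleep.
 */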
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}

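/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Like ww_mutex_lock, but the sleeps can be interrupted by a
 *	signal, in which case a negative error is returned.
 */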
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

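/*
 * ww_mutex_lock_slow(mutex, ctx)
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Acquire mutex after losing a contest: the caller must first
 *	have released every lock it held under ctx, and then it waits
 *	here for the contended mutex regardless of ticket order.  The
 *	interruptible variant returns zero on success or a negative
 *	error if interrupted.
 */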
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

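/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex without an acquire context and without
 *	sleeping.  Returns 1 on success, 0 if it is already held.
 */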
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
		    (mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

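/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex.  If any contexts are waiting, hand it directly
 *	to the one with the lowest ticket; otherwise leave it unlocked
 *	and wake everybody so the remaining lockers can fight over it.
 */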
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}