/*	$NetBSD: linux_ww_mutex.c,v 1.16 2023/07/29 23:50:03 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.16 2023/07/29 23:50:03 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

#define	WW_WANTLOCK(WW)							\
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW)							\
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW)							\
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			\
	    (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};
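
/*
 * Note: waiters are kept sorted by ticket number, so RB_TREE_MIN over
 * wwm_waiters yields the oldest waiter, i.e. the one with the lowest
 * ticket and hence the highest priority; ww_mutex_unlock hands the
 * lock to that waiter first.
 */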

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

static void
ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * If caller has invoked ww_acquire_done, we must already hold
	 * this mutex.
	 */
	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERTMSG((!ctx->wwx_acquire_done ||
		(mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
	    "ctx %p done acquiring locks, refusing to acquire %p",
	    ctx, mutex);
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif

/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize mutex in the given class.  Must precede any other
 *	ww_mutex_* operations.  When done with mutex, it must be
 *	destroyed with ww_mutex_destroy.
 */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}
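
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * init/destroy lifecycle.  The names example_class, example_obj,
 * example_obj_init, and example_obj_fini are hypothetical.
 */
#if 0
static struct ww_class example_class;

struct example_obj {
	struct ww_mutex	eo_lock;
};

static void
example_obj_init(struct example_obj *obj)
{

	ww_mutex_init(&obj->eo_lock, &example_class);
}

static void
example_obj_fini(struct example_obj *obj)
{

	/* The lock must be unlocked before it is destroyed. */
	ww_mutex_destroy(&obj->eo_lock);
}
#endif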

/*
 * ww_mutex_destroy(mutex)
 *
 *	Destroy mutex initialized by ww_mutex_init.  Caller must not
 *	use mutex with any other ww_mutex_* operations afterward,
 *	except after reinitializing with ww_mutex_init.
 */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * ww_mutex_is_locked(mutex)
 *
 *	True if anyone holds mutex locked at the moment, false if not.
 *	Answer is stale as soon as it is returned, unless mutex is held
 *	by caller.
 *
 *	XXX WARNING: This returns true if it is locked by ANYONE.  Does
 *	not mean `Do I hold this lock?' (answering which really
 *	requires an acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}
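
/*
 * Illustrative sketch (hypothetical caller, reusing the example_obj
 * sketch above): ww_mutex_is_locked is only safe as a diagnostic,
 * e.g. asserting that an object is locked by *somebody* before
 * touching it; testing self-ownership requires comparing acquire
 * contexts instead (see ww_mutex_locking_ctx below).
 */
#if 0
	KASSERT(ww_mutex_is_locked(&obj->eo_lock));		/* anyone */
	KASSERT(ww_mutex_locking_ctx(&obj->eo_lock) == &ctx);	/* self */
#endif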

/*
 * ww_mutex_state_wait(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state.  Uninterruptible; never fails.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);

	for (;;) {
		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
		if (mutex->wwm_state != state)
			break;
	}

	KASSERT(mutex->wwm_state != state);
}

/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state, or fail if interrupted by a signal.  Return 0
 *	on success, -EINTR if interrupted by a signal.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (mutex->wwm_state != state) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			break;
		}
	}

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERTMSG(ret != 0 || mutex->wwm_state != state,
	    "ret=%d mutex=%p mutex->wwm_state=%d state=%d",
	    ret, mutex, mutex->wwm_state, state);
	return ret;
}

/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex.  While waiting, record ctx in the tree of waiters.  Does
 *	not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	for (;;) {
		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx)
			break;
	}

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERT(mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(mutex->wwm_u.ctx == ctx);
}

/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex and return 0, or return -EINTR if interrupted by a
 *	signal.  While waiting, record ctx in the tree of waiters.
 *	Does not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			break;
		}
	}

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERT(ret != 0 ||
	    mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(ret != 0 || mutex->wwm_u.ctx == ctx);
	return ret;
}

/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire mutex without an acquire context.  Caller must not
 *	already hold the mutex.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
 */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context and return 0, or fail
 *	and return -EINTR if interrupted by a signal.  Caller must not
 *	already hold the mutex.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing
 *	ww_mutex_lock_interruptible(..., NULL).
 */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *
 *	May sleep.
 */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		ret = 0;
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
	    "ret=%d", ret);
	return ret;
}
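
/*
 * Example (illustrative sketch, not part of this file): the intended
 * deadlock-avoidance pattern for taking two ww mutexes in an order
 * chosen by the caller.  On -EDEADLK, drop everything, wait for the
 * winner with ww_mutex_lock_slow (legal only once the context holds
 * zero locks), and start over with the contended lock held first.
 * The names example_lock_pair, ma, and mb are hypothetical.
 */
#if 0
static void
example_lock_pair(struct ww_mutex *ma, struct ww_mutex *mb,
    struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *tmp;
	int ret;

	ww_acquire_init(&ctx, class);
	ret = ww_mutex_lock(ma, &ctx);
	KASSERT(ret == 0 || ret == -EDEADLK);
	if (ret == -EDEADLK)
		ww_mutex_lock_slow(ma, &ctx);	/* no locks held yet */
retry:	ret = ww_mutex_lock(mb, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * An older context owns mb: ditch everything, wait
		 * for it, then start over with mb held first.
		 */
		ww_mutex_unlock(ma);
		ww_mutex_lock_slow(mb, &ctx);
		tmp = ma; ma = mb; mb = tmp;
		goto retry;
	}
	KASSERT(ret == 0);
	ww_acquire_done(&ctx);

	/* ... both locks held: do the work ... */

	ww_mutex_unlock(ma);
	ww_mutex_unlock(mb);
	ww_acquire_fini(&ctx);
}
#endif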

/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible or
 *	interrupted.
 *
 *	- If ctx is null, caller must not hold mutex;
 *	  ww_mutex_lock_interruptible succeeds and returns 0 unless
 *	  interrupted by a signal, in which case it fails with -EINTR.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *	  . Fail with -EINTR if interrupted by a signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
		ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again.
 *
 *	Uninterruptible; never fails.
 *
 *	May sleep.
 */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here. */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again, or fail with -EINTR if interrupted by a
 *	signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}
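
/*
 * Example (illustrative sketch, hypothetical names): the interruptible
 * flavor of the two-lock pattern above.  The extra wrinkle is that a
 * -EINTR may arrive while locks are still held, so everything must be
 * released before bailing out so that ww_acquire_fini's zero-locks
 * assertion holds.
 */
#if 0
static int
example_lock_pair_intr(struct ww_mutex *ma, struct ww_mutex *mb,
    struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *tmp;
	int ret;

	ww_acquire_init(&ctx, class);
	ret = ww_mutex_lock_interruptible(ma, &ctx);
	if (ret == -EDEADLK)
		ret = ww_mutex_lock_slow_interruptible(ma, &ctx);
	if (ret)
		goto fail;		/* -EINTR, nothing held */
retry:	ret = ww_mutex_lock_interruptible(mb, &ctx);
	if (ret == -EDEADLK) {
		ww_mutex_unlock(ma);
		ret = ww_mutex_lock_slow_interruptible(mb, &ctx);
		if (ret)
			goto fail;	/* -EINTR, nothing held */
		tmp = ma; ma = mb; mb = tmp;
		goto retry;
	}
	if (ret) {
		ww_mutex_unlock(ma);	/* -EINTR while holding ma */
		goto fail;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held: do the work ... */

	ww_mutex_unlock(ma);
	ww_mutex_unlock(mb);
	ww_acquire_fini(&ctx);
	return 0;

fail:	KASSERT(ret == -EINTR);
	ww_acquire_fini(&ctx);
	return ret;
}
#endif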

/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex and return 1, but if it can't be done
 *	immediately, return 0.
 */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		/*
		 * It is tempting to assert that we do not hold the
		 * mutex here, because trylock when we hold the lock
		 * already generally indicates a bug in the design of
		 * the code.  However, it seems that Linux relies on
		 * this deep in ttm buffer reservation logic, so these
		 * assertions are disabled until we find another way to
		 * work around that or fix the bug that leads to it.
		 *
		 * That said: we should not be in the WW_WANTOWN state,
		 * which happens only while we're in the ww mutex logic
		 * waiting to acquire the lock.
		 */
#if 0
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
#endif
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
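
/*
 * Illustrative sketch (hypothetical caller, reusing the example_obj
 * sketch above): trylock takes the lock in the no-context (WW_OWNED)
 * state, so a successful trylock is paired with a plain
 * ww_mutex_unlock and involves no acquire context.
 */
#if 0
	if (ww_mutex_trylock(&obj->eo_lock)) {
		/* ... got it without blocking; do quick work ... */
		ww_mutex_unlock(&obj->eo_lock);
	}
#endif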

/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Decrement the number of mutexes acquired in the current locking
 *	context of mutex, which must be held by the caller and in
 *	WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 *	Caller must hold the internal lock of mutex, and is responsible
 *	for notifying waiters.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex and wake the next caller waiting, if any.
 */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	WW_UNLOCKED(mutex);
	KASSERTMSG(mutex->wwm_state != WW_UNLOCKED, "mutex %p", mutex);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it. */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it. */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_locking_ctx(mutex)
 *
 *	Return the current acquire context of mutex.  Answer is stale
 *	as soon as returned unless mutex is held by caller.
 */
struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
	case WW_OWNED:
		ctx = NULL;
		break;
	case WW_CTX:
	case WW_WANTOWN:
		ctx = mutex->wwm_u.ctx;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return ctx;
}
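
/*
 * Illustrative sketch (hypothetical caller, reusing the example_obj
 * sketch above): unlike ww_mutex_is_locked, comparing the locking
 * context against our own answers `do I hold this lock?', and the
 * answer is stable as long as we do in fact hold it via ctx.
 */
#if 0
	if (ww_mutex_locking_ctx(&obj->eo_lock) == &ctx) {
		/* ... we own obj via ctx ... */
	}
#endif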