xref: /netbsd-src/sys/kern/sys_futex.c (revision 9218bab4ab6beb132586eb64ad18d80e19b39f07)
1 /*	$NetBSD: sys_futex.c,v 1.22 2025/01/18 07:26:21 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell and Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.22 2025/01/18 07:26:21 riastradh Exp $");
34 
35 /*
36  * Futexes
37  *
38  *	The futex system call coordinates notifying threads waiting for
39  *	changes on a 32-bit word of memory.  The word can be managed by
40  *	CPU atomic operations in userland, without system calls, as long
41  *	as there is no contention.
42  *
43  *	The simplest use case demonstrating the utility is:
44  *
45  *		// 32-bit word of memory shared among threads or
46  *		// processes in userland.  lock & 1 means owned;
47  *		// lock & 2 means there are waiters waiting.
48  *		volatile int lock = 0;
49  *
50  *		int v;
51  *
52  *		// Acquire a lock.
53  *		do {
54  *			v = lock;
55  *			if (v & 1) {
56  *				// Lock is held.  Set a bit to say that
57  *				// there are waiters, and wait for lock
58  *				// to change to anything other than v;
59  *				// then retry.
60  *				if (atomic_cas_uint(&lock, v, v | 2) != v)
61  *					continue;
62  *				futex(FUTEX_WAIT, &lock, v | 2, NULL, NULL, 0);
63  *				continue;
64  *			}
65  *		} while (atomic_cas_uint(&lock, v, v | 1) != v);
66  *		membar_acquire();
67  *
68  *		...
69  *
70  *		// Release the lock.  Optimistically assume there are
71  *		// no waiters first until demonstrated otherwise.
72  *		membar_release();
73  *		if (atomic_cas_uint(&lock, 1, 0) != 1) {
74  *			// There may be waiters.
75  *			v = atomic_swap_uint(&lock, 0);
76  *			// If there are still waiters, wake one.
77  *			if (v & 2)
78  *				futex(FUTEX_WAKE, &lock, 1, NULL, NULL, 0);
79  *		}
80  *
81  *	The goal is to avoid the futex system call unless there is
82  *	contention; then if there is contention, to guarantee no missed
83  *	wakeups.
84  *
85  *	For a simple implementation, futex(FUTEX_WAIT) could queue
86  *	itself to be woken, double-check the lock word, and then sleep;
87  *	spurious wakeups are generally a fact of life, so any
88  *	FUTEX_WAKE could just wake every FUTEX_WAIT in the system.
89  *
90  *	If this were all there is to it, we could then increase
91  *	parallelism by refining the approximation: partition the
92  *	waiters into buckets by hashing the lock addresses to reduce
93  *	the incidence of spurious wakeups.  But this is not all.
94  *
95  *	The futex(FUTEX_CMP_REQUEUE, &lock, n, &lock2, m, val)
96  *	operation not only wakes n waiters on lock if lock == val, but
97  *	also _transfers_ m additional waiters to lock2.  Unless wakeups
98  *	on lock2 also trigger wakeups on lock, we cannot move waiters
99  *	to lock2 if they merely share the same hash as waiters on lock.
100  *	Thus, we can't approximately distribute waiters into queues by
101  *	a hash function; we must distinguish futex queues exactly by
102  *	lock address.
103  *
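 *	For example (an illustrative sketch only, in the same pseudo-API
 *	as the lock example above; lock()/unlock() stand for the acquire
 *	and release sequences shown there, and a real condition variable
 *	needs more care with wraparound and races):
 *
 *		// cond is a 32-bit sequence word; mutex is a lock word
 *		// as in the example above.
 *
 *		// Wait: sample the sequence, drop the mutex, sleep until
 *		// the sequence changes, then retake the mutex.
 *		unsigned seq = cond;
 *		unlock(&mutex);
 *		futex(FUTEX_WAIT, &cond, seq, NULL, NULL, 0);
 *		lock(&mutex);
 *
 *		// Broadcast: bump the sequence, wake one waiter, and
 *		// requeue the rest onto the mutex so they don't all
 *		// stampede for it at once.
 *		seq = atomic_add_int_nv(&cond, 1);
 *		futex(FUTEX_CMP_REQUEUE, &cond, 1, &mutex, INT_MAX, seq);
 *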
104  *	For now, we use a global red/black tree to index futexes.  This
105  *	should be replaced by a lockless radix tree with a thread to
106  *	free entries no longer in use once all lookups on all CPUs have
107  *	completed.
108  *
109  *	Specifically, we maintain two maps:
110  *
111  *	futex_tab.va[vmspace, va] for private futexes
112  *	futex_tab.oa[uvm_voaddr] for shared futexes
113  *
114  *	This implementation does not support priority inheritance.
115  */
116 
117 #include <sys/param.h>
118 #include <sys/types.h>
119 #include <sys/atomic.h>
120 #include <sys/condvar.h>
121 #include <sys/futex.h>
122 #include <sys/mutex.h>
123 #include <sys/rbtree.h>
124 #include <sys/queue.h>
125 
126 #include <sys/syscall.h>
127 #include <sys/syscallargs.h>
128 #include <sys/syscallvar.h>
129 
130 #include <uvm/uvm_extern.h>
131 
132 /*
133  * Lock order:
134  *
135  *	futex_tab.lock
136  *	futex::fx_qlock			ordered by kva of struct futex
137  *	 -> futex_wait::fw_lock		only one at a time
138  *	futex_wait::fw_lock		only one at a time
139  *	 -> futex::fx_abortlock		only one at a time
140  */
141 
142 /*
143  * union futex_key
144  *
145  *	A futex is addressed either by a vmspace+va (private) or by
146  *	a uvm_voaddr (shared).
147  */
148 union futex_key {
149 	struct {
150 		struct vmspace			*vmspace;
151 		vaddr_t				va;
152 	}			fk_private;
153 	struct uvm_voaddr	fk_shared;
154 };
155 
156 /*
157  * struct futex
158  *
159  *	Kernel state for a futex located at a particular address in a
160  *	particular virtual address space.
161  *
162  *	N.B. fx_refcnt is an unsigned long because we need to be able
163  *	to operate on it atomically on all systems while at the same
164  *	time rendering practically impossible the chance of it reaching
165  *	its max value.  In practice, we're limited by the number of LWPs
166  *	that can be present on the system at any given time, and the
167  *	assumption is that limit will be good enough on a 32-bit platform.
168  *	See futex_wake() for why overflow needs to be avoided.
169  */
170 struct futex {
171 	union futex_key		fx_key;
172 	unsigned long		fx_refcnt;
173 	bool			fx_shared;
174 	bool			fx_on_tree;
175 	struct rb_node		fx_node;
176 
177 	kmutex_t			fx_qlock;
178 	TAILQ_HEAD(, futex_wait)	fx_queue;
179 
180 	kmutex_t			fx_abortlock;
181 	LIST_HEAD(, futex_wait)		fx_abortlist;
182 	kcondvar_t			fx_abortcv;
183 };
184 
185 /*
186  * struct futex_wait
187  *
188  *	State for a thread to wait on a futex.  Threads wait on fw_cv
189  *	for fw_bitset to be set to zero.  The thread may transition to
190  *	a different futex queue at any time under the futex's lock.
191  */
192 struct futex_wait {
193 	kmutex_t		fw_lock;
194 	kcondvar_t		fw_cv;
195 	struct futex		*fw_futex;
196 	TAILQ_ENTRY(futex_wait)	fw_entry;	/* queue lock */
197 	LIST_ENTRY(futex_wait)	fw_abort;	/* queue abortlock */
198 	int			fw_bitset;
199 	bool			fw_aborting;	/* fw_lock */
200 };
201 
202 /*
203  * futex_tab
204  *
205  *	Global trees of futexes by vmspace/va and VM object address.
206  *
207  *	XXX This obviously doesn't scale in parallel.  We could use a
208  *	pserialize-safe data structure, but there may be a high cost to
209  *	frequent deletion since we don't cache futexes after we're done
210  *	with them.  We could use hashed locks.  But for now, just make
211  *	sure userland can't DoS the serial performance, by using a
212  *	balanced binary tree for lookup.
213  *
214  *	XXX We could use a per-process tree for the table indexed by
215  *	virtual address to reduce contention between processes.
216  */
217 static struct {
218 	kmutex_t	lock;
219 	struct rb_tree	va;
220 	struct rb_tree	oa;
221 } futex_tab __cacheline_aligned;
222 
223 static int
224 compare_futex_key(void *cookie, const void *n, const void *k)
225 {
226 	const struct futex *fa = n;
227 	const union futex_key *fka = &fa->fx_key;
228 	const union futex_key *fkb = k;
229 
230 	if ((uintptr_t)fka->fk_private.vmspace <
231 	    (uintptr_t)fkb->fk_private.vmspace)
232 		return -1;
233 	if ((uintptr_t)fka->fk_private.vmspace >
234 	    (uintptr_t)fkb->fk_private.vmspace)
235 		return +1;
236 	if (fka->fk_private.va < fkb->fk_private.va)
237 		return -1;
238 	if (fka->fk_private.va > fkb->fk_private.va)
239 		return +1;
240 	return 0;
241 }
242 
243 static int
244 compare_futex(void *cookie, const void *na, const void *nb)
245 {
246 	const struct futex *fa = na;
247 	const struct futex *fb = nb;
248 
249 	return compare_futex_key(cookie, fa, &fb->fx_key);
250 }
251 
252 static const rb_tree_ops_t futex_rb_ops = {
253 	.rbto_compare_nodes = compare_futex,
254 	.rbto_compare_key = compare_futex_key,
255 	.rbto_node_offset = offsetof(struct futex, fx_node),
256 };
257 
258 static int
259 compare_futex_shared_key(void *cookie, const void *n, const void *k)
260 {
261 	const struct futex *fa = n;
262 	const union futex_key *fka = &fa->fx_key;
263 	const union futex_key *fkb = k;
264 
265 	return uvm_voaddr_compare(&fka->fk_shared, &fkb->fk_shared);
266 }
267 
268 static int
269 compare_futex_shared(void *cookie, const void *na, const void *nb)
270 {
271 	const struct futex *fa = na;
272 	const struct futex *fb = nb;
273 
274 	return compare_futex_shared_key(cookie, fa, &fb->fx_key);
275 }
276 
277 static const rb_tree_ops_t futex_shared_rb_ops = {
278 	.rbto_compare_nodes = compare_futex_shared,
279 	.rbto_compare_key = compare_futex_shared_key,
280 	.rbto_node_offset = offsetof(struct futex, fx_node),
281 };
282 
283 static void	futex_wait_dequeue(struct futex_wait *, struct futex *);
284 
285 /*
286  * futex_load(uaddr, kaddr)
287  *
288  *	Perform a single atomic load to read *uaddr, and return the
289  *	result in *kaddr.  Return 0 on success, EFAULT if uaddr is not
290  *	mapped.
291  */
292 static inline int
293 futex_load(int *uaddr, int *kaddr)
294 {
295 	return ufetch_int((u_int *)uaddr, (u_int *)kaddr);
296 }
297 
298 /*
299  * futex_test(uaddr, expected)
300  *
301  *	True if *uaddr == expected.  False if *uaddr != expected, or if
302  *	uaddr is not mapped.
303  */
304 static bool
305 futex_test(int *uaddr, int expected)
306 {
307 	int val;
308 	int error;
309 
310 	error = futex_load(uaddr, &val);
311 	if (error)
312 		return false;
313 	return val == expected;
314 }
315 
316 /*
317  * futex_sys_init()
318  *
319  *	Initialize the futex subsystem.
320  */
321 void
322 futex_sys_init(void)
323 {
324 
325 	mutex_init(&futex_tab.lock, MUTEX_DEFAULT, IPL_NONE);
326 	rb_tree_init(&futex_tab.va, &futex_rb_ops);
327 	rb_tree_init(&futex_tab.oa, &futex_shared_rb_ops);
328 }
329 
330 /*
331  * futex_sys_fini()
332  *
333  *	Finalize the futex subsystem.
334  */
335 void
336 futex_sys_fini(void)
337 {
338 
339 	KASSERT(RB_TREE_MIN(&futex_tab.oa) == NULL);
340 	KASSERT(RB_TREE_MIN(&futex_tab.va) == NULL);
341 	mutex_destroy(&futex_tab.lock);
342 }
343 
344 /*
345  * futex_queue_init(f)
346  *
347  *	Initialize the futex queue.  Caller must call futex_queue_fini
348  *	when done.
349  *
350  *	Never sleeps.
351  */
352 static void
353 futex_queue_init(struct futex *f)
354 {
355 
356 	mutex_init(&f->fx_qlock, MUTEX_DEFAULT, IPL_NONE);
357 	mutex_init(&f->fx_abortlock, MUTEX_DEFAULT, IPL_NONE);
358 	cv_init(&f->fx_abortcv, "fqabort");
359 	LIST_INIT(&f->fx_abortlist);
360 	TAILQ_INIT(&f->fx_queue);
361 }
362 
363 /*
364  * futex_queue_drain(f)
365  *
366  *	Wait for any aborting waiters in f; then empty the queue of
367  *	any stragglers and wake them.  Caller must guarantee no new
368  *	references to f.
369  *
370  *	May sleep.
371  */
372 static void
373 futex_queue_drain(struct futex *f)
374 {
375 	struct futex_wait *fw, *fw_next;
376 
377 	mutex_enter(&f->fx_abortlock);
378 	while (!LIST_EMPTY(&f->fx_abortlist))
379 		cv_wait(&f->fx_abortcv, &f->fx_abortlock);
380 	mutex_exit(&f->fx_abortlock);
381 
382 	mutex_enter(&f->fx_qlock);
383 	TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
384 		mutex_enter(&fw->fw_lock);
385 		futex_wait_dequeue(fw, f);
386 		cv_broadcast(&fw->fw_cv);
387 		mutex_exit(&fw->fw_lock);
388 	}
389 	mutex_exit(&f->fx_qlock);
390 }
391 
392 /*
393  * futex_queue_fini(f)
394  *
395  *	Finalize the futex queue initialized by futex_queue_init.  Queue
396  *	must be empty.  Caller must not use f again until a subsequent
397  *	futex_queue_init.
398  */
399 static void
400 futex_queue_fini(struct futex *f)
401 {
402 
403 	KASSERT(TAILQ_EMPTY(&f->fx_queue));
404 	KASSERT(LIST_EMPTY(&f->fx_abortlist));
405 	mutex_destroy(&f->fx_qlock);
406 	mutex_destroy(&f->fx_abortlock);
407 	cv_destroy(&f->fx_abortcv);
408 }
409 
410 /*
411  * futex_key_init(key, vm, va, shared)
412  *
413  *	Initialize a futex key for lookup, etc.
414  */
415 static int
416 futex_key_init(union futex_key *fk, struct vmspace *vm, vaddr_t va, bool shared)
417 {
418 	int error = 0;
419 
420 	if (__predict_false(shared)) {
421 		if (!uvm_voaddr_acquire(&vm->vm_map, va, &fk->fk_shared))
422 			error = EFAULT;
423 	} else {
424 		fk->fk_private.vmspace = vm;
425 		fk->fk_private.va = va;
426 	}
427 
428 	return error;
429 }
430 
431 /*
432  * futex_key_fini(key, shared)
433  *
434  *	Release a futex key.
435  */
436 static void
437 futex_key_fini(union futex_key *fk, bool shared)
438 {
439 	if (__predict_false(shared))
440 		uvm_voaddr_release(&fk->fk_shared);
441 	memset(fk, 0, sizeof(*fk));
442 }
443 
444 /*
445  * futex_create(fk, shared)
446  *
447  *	Create a futex.  Initial reference count is 1, representing the
448  *	caller.  Returns NULL on failure.  Always takes ownership of the
449  *	key, either transferring it to the newly-created futex, or releasing
450  *	the key if creation fails.
451  *
452  *	Never sleeps for memory, but may sleep to acquire a lock.
453  */
454 static struct futex *
455 futex_create(union futex_key *fk, bool shared)
456 {
457 	struct futex *f;
458 
459 	f = kmem_alloc(sizeof(*f), KM_NOSLEEP);
460 	if (f == NULL) {
461 		futex_key_fini(fk, shared);
462 		return NULL;
463 	}
464 	f->fx_key = *fk;
465 	f->fx_refcnt = 1;
466 	f->fx_shared = shared;
467 	f->fx_on_tree = false;
468 	futex_queue_init(f);
469 
470 	return f;
471 }
472 
473 /*
474  * futex_destroy(f)
475  *
476  *	Destroy a futex created with futex_create.  Reference count
477  *	must be zero.
478  *
479  *	May sleep.
480  */
481 static void
482 futex_destroy(struct futex *f)
483 {
484 
485 	ASSERT_SLEEPABLE();
486 
487 	KASSERT(atomic_load_relaxed(&f->fx_refcnt) == 0);
488 	KASSERT(!f->fx_on_tree);
489 
490 	/* Drain and destroy the private queue.  */
491 	futex_queue_drain(f);
492 	futex_queue_fini(f);
493 
494 	futex_key_fini(&f->fx_key, f->fx_shared);
495 
496 	kmem_free(f, sizeof(*f));
497 }
498 
499 /*
500  * futex_hold(f)
501  *
502  *	Attempt to acquire a reference to f.  Return 0 on success,
503  *	ENFILE on too many references.
504  *
505  *	Never sleeps.
506  */
507 static int
508 futex_hold(struct futex *f)
509 {
510 	unsigned long refcnt;
511 
512 	do {
513 		refcnt = atomic_load_relaxed(&f->fx_refcnt);
514 		if (refcnt == ULONG_MAX)
515 			return ENFILE;
516 	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt + 1) != refcnt);
517 
518 	return 0;
519 }
520 
521 /*
522  * futex_rele(f)
523  *
524  *	Release a reference to f acquired with futex_create or
525  *	futex_hold.
526  *
527  *	May sleep to free f.
528  */
529 static void
530 futex_rele(struct futex *f)
531 {
532 	unsigned long refcnt;
533 
534 	ASSERT_SLEEPABLE();
535 
536 	do {
537 		refcnt = atomic_load_relaxed(&f->fx_refcnt);
538 		if (refcnt == 1)
539 			goto trylast;
540 		membar_release();
541 	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
542 	return;
543 
544 trylast:
545 	mutex_enter(&futex_tab.lock);
546 	if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) {
547 		membar_acquire();
548 		if (f->fx_on_tree) {
549 			if (__predict_false(f->fx_shared))
550 				rb_tree_remove_node(&futex_tab.oa, f);
551 			else
552 				rb_tree_remove_node(&futex_tab.va, f);
553 			f->fx_on_tree = false;
554 		}
555 	} else {
556 		/* References remain -- don't destroy it.  */
557 		f = NULL;
558 	}
559 	mutex_exit(&futex_tab.lock);
560 	if (f != NULL)
561 		futex_destroy(f);
562 }
563 
564 /*
565  * futex_rele_not_last(f)
566  *
567  *	Release a reference to f acquired with futex_create or
568  *	futex_hold.
569  *
570  *	This version asserts that we are not dropping the last
571  *	reference to f.
572  */
573 static void
574 futex_rele_not_last(struct futex *f)
575 {
576 	unsigned long refcnt;
577 
578 	do {
579 		refcnt = atomic_load_relaxed(&f->fx_refcnt);
580 		KASSERT(refcnt > 1);
581 	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
582 }
583 
584 /*
585  * futex_lookup_by_key(key, shared, &f)
586  *
587  *	Try to find an existing futex for the specified key.
588  *	On success, return 0, set f to the futex found or to NULL if not found,
589  *	and increment f's reference count if found.
590  *
591  *	Return ENFILE if reference count too high.
592  *
593  *	Internal lookup routine shared by futex_lookup() and
594  *	futex_lookup_create().
595  */
596 static int
597 futex_lookup_by_key(union futex_key *fk, bool shared, struct futex **fp)
598 {
599 	struct futex *f;
600 	int error = 0;
601 
602 	mutex_enter(&futex_tab.lock);
603 	if (__predict_false(shared)) {
604 		f = rb_tree_find_node(&futex_tab.oa, fk);
605 	} else {
606 		f = rb_tree_find_node(&futex_tab.va, fk);
607 	}
608 	if (f) {
609 		error = futex_hold(f);
610 		if (error)
611 			f = NULL;
612 	}
613  	*fp = f;
614 	mutex_exit(&futex_tab.lock);
615 
616 	return error;
617 }
618 
619 /*
620  * futex_insert(f, fp)
621  *
622  *	Try to insert the futex f into the tree by va.  If there
623  *	already is a futex for its va, acquire a reference to it, and
624  *	store it in *fp; otherwise store f in *fp.
625  *
626  *	Return 0 on success, ENFILE if there already is a futex but its
627  *	reference count is too high.
628  */
629 static int
630 futex_insert(struct futex *f, struct futex **fp)
631 {
632 	struct futex *f0;
633 	int error;
634 
635 	KASSERT(atomic_load_relaxed(&f->fx_refcnt) != 0);
636 	KASSERT(!f->fx_on_tree);
637 
638 	mutex_enter(&futex_tab.lock);
639 	if (__predict_false(f->fx_shared))
640 		f0 = rb_tree_insert_node(&futex_tab.oa, f);
641 	else
642 		f0 = rb_tree_insert_node(&futex_tab.va, f);
643 	if (f0 == f) {
644 		f->fx_on_tree = true;
645 		error = 0;
646 	} else {
647 		KASSERT(atomic_load_relaxed(&f0->fx_refcnt) != 0);
648 		KASSERT(f0->fx_on_tree);
649 		error = futex_hold(f0);
650 		if (error)
651 			goto out;
652 	}
653 	*fp = f0;
654 out:	mutex_exit(&futex_tab.lock);
655 
656 	return error;
657 }
658 
659 /*
660  * futex_lookup(uaddr, shared, &f)
661  *
662  *	Find a futex at the userland pointer uaddr in the current
663  *	process's VM space.  On success, return the futex in f and
664  *	increment its reference count.
665  *
666  *	Caller must call futex_rele when done.
667  */
668 static int
669 futex_lookup(int *uaddr, bool shared, struct futex **fp)
670 {
671 	union futex_key fk;
672 	struct vmspace *vm = curproc->p_vmspace;
673 	vaddr_t va = (vaddr_t)uaddr;
674 	int error;
675 
676 	/*
677 	 * Reject unaligned user pointers so we don't cross page
678 	 * boundaries and so atomics will work.
679 	 */
680 	if ((va & 3) != 0)
681 		return EINVAL;
682 
683 	/* Look it up. */
684 	error = futex_key_init(&fk, vm, va, shared);
685 	if (error)
686 		return error;
687 
688 	error = futex_lookup_by_key(&fk, shared, fp);
689 	futex_key_fini(&fk, shared);
690 	if (error)
691 		return error;
692 
693 	KASSERT(*fp == NULL || (*fp)->fx_shared == shared);
694 	KASSERT(*fp == NULL || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
695 
696 	/*
697 	 * Success!  (Caller must still check whether we found
698 	 * anything, but nothing went _wrong_ like trying to use
699 	 * unmapped memory.)
700 	 */
701 	KASSERT(error == 0);
702 
703 	return error;
704 }
705 
706 /*
707  * futex_lookup_create(uaddr, shared, &f)
708  *
709  *	Find or create a futex at the userland pointer uaddr in the
710  *	current process's VM space.  On success, return the futex in f
711  *	and increment its reference count.
712  *
713  *	Caller must call futex_rele when done.
714  */
715 static int
716 futex_lookup_create(int *uaddr, bool shared, struct futex **fp)
717 {
718 	union futex_key fk;
719 	struct vmspace *vm = curproc->p_vmspace;
720 	struct futex *f = NULL;
721 	vaddr_t va = (vaddr_t)uaddr;
722 	int error;
723 
724 	/*
725 	 * Reject unaligned user pointers so we don't cross page
726 	 * boundaries and so atomics will work.
727 	 */
728 	if ((va & 3) != 0)
729 		return EINVAL;
730 
731 	error = futex_key_init(&fk, vm, va, shared);
732 	if (error)
733 		return error;
734 
735 	/*
736 	 * Optimistically assume there already is one, and try to find
737 	 * it.
738 	 */
739 	error = futex_lookup_by_key(&fk, shared, fp);
740 	if (error || *fp != NULL) {
741 		/*
742 		 * We either found one, or there was an error.
743 		 * In either case, we are done with the key.
744 		 */
745 		futex_key_fini(&fk, shared);
746 		goto out;
747 	}
748 
749 	/*
750 	 * Create a futex record.  This transfers ownership of the key
751 	 * in all cases.
752 	 */
753 	f = futex_create(&fk, shared);
754 	if (f == NULL) {
755 		error = ENOMEM;
756 		goto out;
757 	}
758 
759 	/*
760 	 * Insert our new futex, or use existing if someone else beat
761 	 * us to it.
762 	 */
763 	error = futex_insert(f, fp);
764 	if (error)
765 		goto out;
766 	if (*fp == f)
767 		f = NULL;	/* don't release on exit */
768 
769 	/* Success!  */
770 	KASSERT(error == 0);
771 
772 out:	if (f != NULL)
773 		futex_rele(f);
774 	KASSERT(error || *fp != NULL);
775 	KASSERT(error || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
776 	return error;
777 }
778 
779 /*
780  * futex_wait_init(fw, bitset)
781  *
782  *	Initialize a record for a thread to wait on a futex matching
783  *	the specified bit set.  Should be passed to futex_wait_enqueue
784  *	before futex_wait, and should be passed to futex_wait_fini when
785  *	done.
786  */
787 static void
788 futex_wait_init(struct futex_wait *fw, int bitset)
789 {
790 
791 	KASSERT(bitset);
792 
793 	mutex_init(&fw->fw_lock, MUTEX_DEFAULT, IPL_NONE);
794 	cv_init(&fw->fw_cv, "futex");
795 	fw->fw_futex = NULL;
796 	fw->fw_bitset = bitset;
797 	fw->fw_aborting = false;
798 }
799 
800 /*
801  * futex_wait_fini(fw)
802  *
803  *	Finalize a record for a futex waiter.  Must not be on any
804  *	futex's queue.
805  */
806 static void
807 futex_wait_fini(struct futex_wait *fw)
808 {
809 
810 	KASSERT(fw->fw_futex == NULL);
811 
812 	cv_destroy(&fw->fw_cv);
813 	mutex_destroy(&fw->fw_lock);
814 }
815 
816 /*
817  * futex_wait_enqueue(fw, f)
818  *
819  *	Put fw on the futex queue.  Must be done before futex_wait.
820  *	Caller must hold fw's lock and f's lock, and fw must not be on
821  *	any existing futex's waiter list.
822  */
823 static void
824 futex_wait_enqueue(struct futex_wait *fw, struct futex *f)
825 {
826 
827 	KASSERT(mutex_owned(&f->fx_qlock));
828 	KASSERT(mutex_owned(&fw->fw_lock));
829 	KASSERT(fw->fw_futex == NULL);
830 	KASSERT(!fw->fw_aborting);
831 
832 	fw->fw_futex = f;
833 	TAILQ_INSERT_TAIL(&f->fx_queue, fw, fw_entry);
834 }
835 
836 /*
837  * futex_wait_dequeue(fw, f)
838  *
839  *	Remove fw from the futex queue.  Precludes subsequent
840  *	futex_wait until a futex_wait_enqueue.  Caller must hold fw's
841  *	lock and f's lock, and fw must be on f.
842  */
843 static void
844 futex_wait_dequeue(struct futex_wait *fw, struct futex *f)
845 {
846 
847 	KASSERT(mutex_owned(&f->fx_qlock));
848 	KASSERT(mutex_owned(&fw->fw_lock));
849 	KASSERT(fw->fw_futex == f);
850 
851 	TAILQ_REMOVE(&f->fx_queue, fw, fw_entry);
852 	fw->fw_futex = NULL;
853 }
854 
855 /*
856  * futex_wait_abort(fw)
857  *
858  *	Caller is no longer waiting for fw.  Remove it from any queue
859  *	if it was on one.  Caller must hold fw->fw_lock.
860  */
861 static void
862 futex_wait_abort(struct futex_wait *fw)
863 {
864 	struct futex *f;
865 
866 	KASSERT(mutex_owned(&fw->fw_lock));
867 
868 	/*
869 	 * Grab the futex queue.  It can't go away as long as we hold
870 	 * fw_lock.  However, we can't take the queue lock because
871 	 * that's a lock order reversal.
872 	 */
873 	f = fw->fw_futex;
874 
875 	/* Put us on the abort list so that f won't go away.  */
876 	mutex_enter(&f->fx_abortlock);
877 	LIST_INSERT_HEAD(&f->fx_abortlist, fw, fw_abort);
878 	mutex_exit(&f->fx_abortlock);
879 
880 	/*
881 	 * Mark fw as aborting so it won't lose wakeups and won't be
882 	 * transferred to any other queue.
883 	 */
884 	fw->fw_aborting = true;
885 
886 	/* f is now stable, so we can release fw_lock.  */
887 	mutex_exit(&fw->fw_lock);
888 
889 	/* Now we can remove fw under the queue lock.  */
890 	mutex_enter(&f->fx_qlock);
891 	mutex_enter(&fw->fw_lock);
892 	futex_wait_dequeue(fw, f);
893 	mutex_exit(&fw->fw_lock);
894 	mutex_exit(&f->fx_qlock);
895 
896 	/*
897 	 * Finally, remove us from the abort list and notify anyone
898 	 * waiting for the abort to complete if we were the last to go.
899 	 */
900 	mutex_enter(&f->fx_abortlock);
901 	LIST_REMOVE(fw, fw_abort);
902 	if (LIST_EMPTY(&f->fx_abortlist))
903 		cv_broadcast(&f->fx_abortcv);
904 	mutex_exit(&f->fx_abortlock);
905 
906 	/*
907 	 * Release our reference to the futex now that we are not
908 	 * waiting for it.
909 	 */
910 	futex_rele(f);
911 
912 	/*
913 	 * Reacquire the fw lock as caller expects.  Verify that we're
914 	 * aborting and no longer associated with a futex.
915 	 */
916 	mutex_enter(&fw->fw_lock);
917 	KASSERT(fw->fw_aborting);
918 	KASSERT(fw->fw_futex == NULL);
919 }
920 
921 /*
922  * futex_wait(fw, deadline, clkid)
923  *
924  *	fw must be a waiter on a futex's queue.  Wait until deadline on
925  *	the clock clkid, or forever if deadline is NULL, for a futex
926  *	wakeup.  Return 0 on explicit wakeup or destruction of futex,
927  *	ETIMEDOUT on timeout, EINTR/ERESTART on signal.  Either way, fw
928  *	will no longer be on a futex queue on return.
929  */
930 static int
931 futex_wait(struct futex_wait *fw, const struct timespec *deadline,
932     clockid_t clkid)
933 {
934 	int error = 0;
935 
936 	/* Test and wait under the wait lock.  */
937 	mutex_enter(&fw->fw_lock);
938 
939 	for (;;) {
940 		/* If we're done, stop and report success.  */
941 		if (fw->fw_bitset == 0 || fw->fw_futex == NULL) {
942 			error = 0;
943 			break;
944 		}
945 
946 		/* If anything went wrong in the last iteration, stop.  */
947 		if (error)
948 			break;
949 
950 		/* Not done yet.  Wait.  */
951 		if (deadline) {
952 			struct timespec ts;
953 
954 			/* Check our watch.  */
955 			error = clock_gettime1(clkid, &ts);
956 			if (error)
957 				break;
958 
959 			/* If we're past the deadline, ETIMEDOUT.  */
960 			if (timespeccmp(deadline, &ts, <=)) {
961 				error = ETIMEDOUT;
962 				break;
963 			}
964 
965 			/* Count how much time is left.  */
966 			timespecsub(deadline, &ts, &ts);
967 
968 			/* Wait for that much time, allowing signals.  */
969 			error = cv_timedwait_sig(&fw->fw_cv, &fw->fw_lock,
970 			    tstohz(&ts));
971 		} else {
972 			/* Wait indefinitely, allowing signals. */
973 			error = cv_wait_sig(&fw->fw_cv, &fw->fw_lock);
974 		}
975 	}
976 
977 	/*
978 	 * If we were woken up, the waker will have removed fw from the
979 	 * queue.  But if anything went wrong, we must remove fw from
980 	 * the queue ourselves.  While here, convert EWOULDBLOCK to
981 	 * ETIMEDOUT.
982 	 */
983 	if (error) {
984 		futex_wait_abort(fw);
985 		if (error == EWOULDBLOCK)
986 			error = ETIMEDOUT;
987 	}
988 
989 	mutex_exit(&fw->fw_lock);
990 
991 	return error;
992 }
993 
994 /*
995  * futex_wake(f, nwake, f2, nrequeue, bitset)
996  *
997  *	Wake up to nwake waiters on f matching bitset; then, if f2 is
998  *	provided, move up to nrequeue remaining waiters on f matching
999  *	bitset to f2.  Return the number of waiters actually woken or
1000  *	requeued.  Caller must hold the locks of f and f2, if provided.
1001  */
1002 static unsigned
1003 futex_wake(struct futex *f, unsigned nwake, struct futex *f2,
1004     unsigned nrequeue, int bitset)
1005 {
1006 	struct futex_wait *fw, *fw_next;
1007 	unsigned nwoken_or_requeued = 0;
1008 	int hold_error __diagused;
1009 
1010 	KASSERT(mutex_owned(&f->fx_qlock));
1011 	KASSERT(f2 == NULL || mutex_owned(&f2->fx_qlock));
1012 
1013 	/* Wake up to nwake waiters, and count the number woken.  */
1014 	TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
1015 		if ((fw->fw_bitset & bitset) == 0)
1016 			continue;
1017 		if (nwake > 0) {
1018 			mutex_enter(&fw->fw_lock);
1019 			if (__predict_false(fw->fw_aborting)) {
1020 				mutex_exit(&fw->fw_lock);
1021 				continue;
1022 			}
1023 			futex_wait_dequeue(fw, f);
1024 			fw->fw_bitset = 0;
1025 			cv_broadcast(&fw->fw_cv);
1026 			mutex_exit(&fw->fw_lock);
1027 			nwake--;
1028 			nwoken_or_requeued++;
1029 			/*
1030 			 * Drop the futex reference on behalf of the
1031 			 * waiter.  We assert this is not the last
1032 			 * reference on the futex (our caller should
1033 			 * also have one).
1034 			 */
1035 			futex_rele_not_last(f);
1036 		} else {
1037 			break;
1038 		}
1039 	}
1040 
1041 	if (f2) {
1042 		/* Move up to nrequeue waiters from f's queue to f2's queue. */
1043 		TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
1044 			if ((fw->fw_bitset & bitset) == 0)
1045 				continue;
1046 			if (nrequeue > 0) {
1047 				mutex_enter(&fw->fw_lock);
1048 				if (__predict_false(fw->fw_aborting)) {
1049 					mutex_exit(&fw->fw_lock);
1050 					continue;
1051 				}
1052 				futex_wait_dequeue(fw, f);
1053 				futex_wait_enqueue(fw, f2);
1054 				mutex_exit(&fw->fw_lock);
1055 				nrequeue--;
1056 				/*
1057 				 * PR kern/59004: Missing constant for upper
1058 				 * bound on systemwide number of lwps
1059 				 */
1060 				KASSERT(nwoken_or_requeued <
1061 				    MIN(PID_MAX*MAXMAXLWP, FUTEX_TID_MASK));
1062 				__CTASSERT(UINT_MAX >=
1063 				    MIN(PID_MAX*MAXMAXLWP, FUTEX_TID_MASK));
1064 				if (++nwoken_or_requeued == 0) /* paranoia */
1065 					nwoken_or_requeued = UINT_MAX;
1066 				/*
1067 				 * Transfer the reference from f to f2.
1068 				 * As above, we assert that we are not
1069 				 * dropping the last reference to f here.
1070 				 *
1071 				 * XXX futex_hold() could theoretically
1072 				 * XXX fail here.
1073 				 */
1074 				futex_rele_not_last(f);
1075 				hold_error = futex_hold(f2);
1076 				KASSERT(hold_error == 0);
1077 			} else {
1078 				break;
1079 			}
1080 		}
1081 	} else {
1082 		KASSERT(nrequeue == 0);
1083 	}
1084 
1085 	/* Return the number of waiters woken or requeued.  */
1086 	return nwoken_or_requeued;
1087 }
1088 
1089 /*
1090  * futex_queue_lock(f)
1091  *
1092  *	Acquire the queue lock of f.  Pair with futex_queue_unlock.  Do
1093  *	not use if caller needs to acquire two locks; use
1094  *	futex_queue_lock2 instead.
1095  */
1096 static void
1097 futex_queue_lock(struct futex *f)
1098 {
1099 	mutex_enter(&f->fx_qlock);
1100 }
1101 
1102 /*
1103  * futex_queue_unlock(f)
1104  *
1105  *	Release the queue lock of f.
1106  */
1107 static void
1108 futex_queue_unlock(struct futex *f)
1109 {
1110 	mutex_exit(&f->fx_qlock);
1111 }
1112 
1113 /*
1114  * futex_queue_lock2(f, f2)
1115  *
1116  *	Acquire the queue locks of both f and f2, which may be null, or
1117  *	which may have the same underlying queue.  If they are
1118  *	distinct, an arbitrary total order is chosen on the locks.
1119  *
1120  *	Callers should only ever acquire multiple queue locks
1121  *	simultaneously using futex_queue_lock2.
1122  */
1123 static void
1124 futex_queue_lock2(struct futex *f, struct futex *f2)
1125 {
1126 
1127 	/*
1128 	 * If both are null, do nothing; if one is null and the other
1129 	 * is not, lock the other and be done with it.
1130 	 */
1131 	if (f == NULL && f2 == NULL) {
1132 		return;
1133 	} else if (f == NULL) {
1134 		mutex_enter(&f2->fx_qlock);
1135 		return;
1136 	} else if (f2 == NULL) {
1137 		mutex_enter(&f->fx_qlock);
1138 		return;
1139 	}
1140 
1141 	/* If both futexes are the same, acquire only one. */
1142 	if (f == f2) {
1143 		mutex_enter(&f->fx_qlock);
1144 		return;
1145 	}
1146 
1147 	/* Otherwise, use the ordering on the kva of the futex pointer.  */
1148 	if ((uintptr_t)f < (uintptr_t)f2) {
1149 		mutex_enter(&f->fx_qlock);
1150 		mutex_enter(&f2->fx_qlock);
1151 	} else {
1152 		mutex_enter(&f2->fx_qlock);
1153 		mutex_enter(&f->fx_qlock);
1154 	}
1155 }
1156 
1157 /*
1158  * futex_queue_unlock2(f, f2)
1159  *
1160  *	Release the queue locks of both f and f2, which may be null, or
1161  *	which may have the same underlying queue.
1162  */
1163 static void
1164 futex_queue_unlock2(struct futex *f, struct futex *f2)
1165 {
1166 
1167 	/*
1168 	 * If both are null, do nothing; if one is null and the other
1169 	 * is not, unlock the other and be done with it.
1170 	 */
1171 	if (f == NULL && f2 == NULL) {
1172 		return;
1173 	} else if (f == NULL) {
1174 		mutex_exit(&f2->fx_qlock);
1175 		return;
1176 	} else if (f2 == NULL) {
1177 		mutex_exit(&f->fx_qlock);
1178 		return;
1179 	}
1180 
1181 	/* If both futexes are the same, release only one. */
1182 	if (f == f2) {
1183 		mutex_exit(&f->fx_qlock);
1184 		return;
1185 	}
1186 
1187 	/* Otherwise, use the ordering on the kva of the futex pointer.  */
1188 	if ((uintptr_t)f < (uintptr_t)f2) {
1189 		mutex_exit(&f2->fx_qlock);
1190 		mutex_exit(&f->fx_qlock);
1191 	} else {
1192 		mutex_exit(&f->fx_qlock);
1193 		mutex_exit(&f2->fx_qlock);
1194 	}
1195 }
1196 
1197 /*
1198  * futex_func_wait(uaddr, val, val3, timeout, clkid, clkflags, retval)
1199  *
1200  *	Implement futex(FUTEX_WAIT).
1201  */
1202 static int
1203 futex_func_wait(bool shared, int *uaddr, int val, int val3,
1204     const struct timespec *timeout, clockid_t clkid, int clkflags,
1205     register_t *retval)
1206 {
1207 	struct futex *f;
1208 	struct futex_wait wait, *fw = &wait;
1209 	struct timespec ts;
1210 	const struct timespec *deadline;
1211 	int error;
1212 
1213 	/*
1214 	 * If there's nothing to wait for, and nobody will ever wake
1215 	 * us, then don't set anything up to wait -- just stop here.
1216 	 */
1217 	if (val3 == 0)
1218 		return EINVAL;
1219 
1220 	/* Optimistically test before anything else.  */
1221 	if (!futex_test(uaddr, val))
1222 		return EAGAIN;
1223 
1224 	/* Determine a deadline on the specified clock.  */
1225 	if (timeout == NULL || (clkflags & TIMER_ABSTIME) == TIMER_ABSTIME) {
1226 		deadline = timeout;
1227 	} else {
1228 		error = clock_gettime1(clkid, &ts);
1229 		if (error)
1230 			return error;
1231 		timespecadd(&ts, timeout, &ts);
1232 		deadline = &ts;
1233 	}
1234 
1235 	/* Get the futex, creating it if necessary.  */
1236 	error = futex_lookup_create(uaddr, shared, &f);
1237 	if (error)
1238 		return error;
1239 	KASSERT(f);
1240 
1241 	/* Get ready to wait.  */
1242 	futex_wait_init(fw, val3);
1243 
1244 	/*
1245 	 * Under the queue lock, check the value again: if it has
1246 	 * already changed, EAGAIN; otherwise enqueue the waiter.
1247 	 * Since FUTEX_WAKE will use the same lock and be done after
1248 	 * modifying the value, the order in which we check and enqueue
1249 	 * is immaterial.
1250 	 */
1251 	futex_queue_lock(f);
1252 	if (!futex_test(uaddr, val)) {
1253 		futex_queue_unlock(f);
1254 		error = EAGAIN;
1255 		goto out;
1256 	}
1257 	mutex_enter(&fw->fw_lock);
1258 	futex_wait_enqueue(fw, f);
1259 	mutex_exit(&fw->fw_lock);
1260 	futex_queue_unlock(f);
1261 
1262 	/*
1263 	 * We cannot drop our reference to the futex here, because
1264 	 * we might be enqueued on a different one when we are awakened.
1265 	 * The references will be managed on our behalf in the requeue
1266 	 * and wake cases.
1267 	 */
1268 	f = NULL;
1269 
1270 	/* Wait. */
1271 	error = futex_wait(fw, deadline, clkid);
1272 	if (error)
1273 		goto out;
1274 
1275 	/* Return 0 on success, error on failure. */
1276 	*retval = 0;
1277 
1278 out:	if (f != NULL)
1279 		futex_rele(f);
1280 	futex_wait_fini(fw);
1281 	return error;
1282 }
1283 
1284 /*
1285  * futex_func_wake(uaddr, val, val3, retval)
1286  *
1287  *	Implement futex(FUTEX_WAKE) and futex(FUTEX_WAKE_BITSET).
1288  */
1289 static int
1290 futex_func_wake(bool shared, int *uaddr, int val, int val3, register_t *retval)
1291 {
1292 	struct futex *f;
1293 	unsigned int nwoken = 0;
1294 	int error = 0;
1295 
1296 	/* Reject negative number of wakeups.  */
1297 	if (val < 0) {
1298 		error = EINVAL;
1299 		goto out;
1300 	}
1301 
1302 	/* Look up the futex, if any.  */
1303 	error = futex_lookup(uaddr, shared, &f);
1304 	if (error)
1305 		goto out;
1306 
1307 	/* If there's no futex, there are no waiters to wake.  */
1308 	if (f == NULL)
1309 		goto out;
1310 
1311 	/*
1312 	 * Under f's queue lock, wake the waiters and remember the
1313 	 * number woken.
1314 	 */
1315 	futex_queue_lock(f);
1316 	nwoken = futex_wake(f, /*nwake*/val, NULL, /*nrequeue*/0,
1317 	    /*bitset*/val3);
1318 	futex_queue_unlock(f);
1319 
1320 	/* Release the futex.  */
1321 	futex_rele(f);
1322 
1323 out:
1324 	/* Return the number of waiters woken.  */
1325 	*retval = nwoken;
1326 
1327 	/* Success!  */
1328 	return error;
1329 }
1330 
1331 /*
1332  * futex_func_requeue(op, uaddr, val, uaddr2, val2, val3, retval)
1333  *
1334  *	Implement futex(FUTEX_REQUEUE) and futex(FUTEX_CMP_REQUEUE).
1335  */
1336 static int
1337 futex_func_requeue(bool shared, int op, int *uaddr, int val, int *uaddr2,
1338     int val2, int val3, register_t *retval)
1339 {
1340 	struct futex *f = NULL, *f2 = NULL;
1341 	unsigned nwoken_or_requeued = 0; /* default to zero on early return */
1342 	int error;
1343 
1344 	/* Reject negative number of wakeups or requeues. */
1345 	if (val < 0 || val2 < 0) {
1346 		error = EINVAL;
1347 		goto out;
1348 	}
1349 
1350 	/*
1351 	 * Look up or create the source futex.  For FUTEX_CMP_REQUEUE,
1352 	 * we always create it, rather than bail if it has no waiters,
1353 	 * because FUTEX_CMP_REQUEUE always tests the futex word in
1354 	 * order to report EAGAIN.
1355 	 */
1356 	error = (op == FUTEX_CMP_REQUEUE
1357 	    ? futex_lookup_create(uaddr, shared, &f)
1358 	    : futex_lookup(uaddr, shared, &f));
1359 	if (error)
1360 		goto out;
1361 
1362 	/* If there is none for FUTEX_REQUEUE, nothing to do. */
1363 	if (f == NULL) {
1364 		KASSERT(op != FUTEX_CMP_REQUEUE);
1365 		goto out;
1366 	}
1367 
1368 	/*
1369 	 * We may need to create the destination futex because it's
1370 	 * entirely possible it does not currently have any waiters.
1371 	 */
1372 	error = futex_lookup_create(uaddr2, shared, &f2);
1373 	if (error)
1374 		goto out;
1375 
1376 	/*
1377 	 * Under the futexes' queue locks, check the value; if
1378 	 * unchanged from val3, wake the waiters.
1379 	 */
1380 	futex_queue_lock2(f, f2);
1381 	if (op == FUTEX_CMP_REQUEUE && !futex_test(uaddr, val3)) {
1382 		error = EAGAIN;
1383 	} else {
1384 		error = 0;
1385 		nwoken_or_requeued = futex_wake(f, /*nwake*/val,
1386 		    f2, /*nrequeue*/val2,
1387 		    FUTEX_BITSET_MATCH_ANY);
1388 	}
1389 	futex_queue_unlock2(f, f2);
1390 
1391 out:
1392 	/* Return the number of waiters woken or requeued.  */
1393 	*retval = nwoken_or_requeued;
1394 
1395 	/* Release the futexes if we got them.  */
1396 	if (f2)
1397 		futex_rele(f2);
1398 	if (f)
1399 		futex_rele(f);
1400 	return error;
1401 }
1402 
1403 /*
1404  * futex_validate_op_cmp(val3)
1405  *
1406  *	Validate an op/cmp argument for FUTEX_WAKE_OP.
1407  */
1408 static int
1409 futex_validate_op_cmp(int val3)
1410 {
1411 	int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
1412 	int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);
1413 
1414 	if (op & FUTEX_OP_OPARG_SHIFT) {
1415 		int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK);
1416 		if (oparg < 0)
1417 			return EINVAL;
1418 		if (oparg >= 32)
1419 			return EINVAL;
1420 		op &= ~FUTEX_OP_OPARG_SHIFT;
1421 	}
1422 
1423 	switch (op) {
1424 	case FUTEX_OP_SET:
1425 	case FUTEX_OP_ADD:
1426 	case FUTEX_OP_OR:
1427 	case FUTEX_OP_ANDN:
1428 	case FUTEX_OP_XOR:
1429 		break;
1430 	default:
1431 		return EINVAL;
1432 	}
1433 
1434 	switch (cmp) {
1435 	case FUTEX_OP_CMP_EQ:
1436 	case FUTEX_OP_CMP_NE:
1437 	case FUTEX_OP_CMP_LT:
1438 	case FUTEX_OP_CMP_LE:
1439 	case FUTEX_OP_CMP_GT:
1440 	case FUTEX_OP_CMP_GE:
1441 		break;
1442 	default:
1443 		return EINVAL;
1444 	}
1445 
1446 	return 0;
1447 }
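
/*
 * For illustration: the val3 encoding these helpers assume (a sketch
 * based on the Linux-compatible layout; the FUTEX_OP_*_MASK constants
 * in <sys/futex.h> are authoritative):
 *
 *	bits 28-31	op	(FUTEX_OP_OP_MASK)
 *	bits 24-27	cmp	(FUTEX_OP_CMP_MASK)
 *	bits 12-23	oparg	(FUTEX_OP_OPARG_MASK)
 *	bits  0-11	cmparg	(FUTEX_OP_CMPARG_MASK)
 *
 * so, hypothetically, "set *uaddr2 to 1, and wake uaddr2 if its old
 * value was greater than 0" would be packed as
 *
 *	val3 = (FUTEX_OP_SET << 28) | (FUTEX_OP_CMP_GT << 24) |
 *	    (1 << 12) | 0;
 */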
1448 
1449 /*
1450  * futex_compute_op(oldval, val3)
1451  *
1452  *	Apply a FUTEX_WAKE_OP operation to oldval.
1453  */
1454 static int
1455 futex_compute_op(int oldval, int val3)
1456 {
1457 	int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
1458 	int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK);
1459 
1460 	if (op & FUTEX_OP_OPARG_SHIFT) {
1461 		KASSERT(oparg >= 0);
1462 		KASSERT(oparg < 32);
1463 		oparg = 1u << oparg;
1464 		op &= ~FUTEX_OP_OPARG_SHIFT;
1465 	}
1466 
1467 	switch (op) {
1468 	case FUTEX_OP_SET:
1469 		return oparg;
1470 
1471 	case FUTEX_OP_ADD:
1472 		/*
1473 		 * Avoid signed arithmetic overflow by doing
1474 		 * arithmetic unsigned and converting back to signed
1475 		 * at the end.
1476 		 */
1477 		return (int)((unsigned)oldval + (unsigned)oparg);
1478 
1479 	case FUTEX_OP_OR:
1480 		return oldval | oparg;
1481 
1482 	case FUTEX_OP_ANDN:
1483 		return oldval & ~oparg;
1484 
1485 	case FUTEX_OP_XOR:
1486 		return oldval ^ oparg;
1487 
1488 	default:
1489 		panic("invalid futex op");
1490 	}
1491 }
1492 
1493 /*
1494  * futex_compute_cmp(oldval, val3)
1495  *
1496  *	Apply a FUTEX_WAKE_OP comparison to oldval.
1497  */
1498 static bool
1499 futex_compute_cmp(int oldval, int val3)
1500 {
1501 	int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);
1502 	int cmparg = __SHIFTOUT(val3, FUTEX_OP_CMPARG_MASK);
1503 
1504 	switch (cmp) {
1505 	case FUTEX_OP_CMP_EQ:
1506 		return (oldval == cmparg);
1507 
1508 	case FUTEX_OP_CMP_NE:
1509 		return (oldval != cmparg);
1510 
1511 	case FUTEX_OP_CMP_LT:
1512 		return (oldval < cmparg);
1513 
1514 	case FUTEX_OP_CMP_LE:
1515 		return (oldval <= cmparg);
1516 
1517 	case FUTEX_OP_CMP_GT:
1518 		return (oldval > cmparg);
1519 
1520 	case FUTEX_OP_CMP_GE:
1521 		return (oldval >= cmparg);
1522 
1523 	default:
1524 		panic("invalid futex cmp operation");
1525 	}
1526 }
1527 
1528 /*
1529  * futex_func_wake_op(uaddr, val, uaddr2, val2, val3, retval)
1530  *
1531  *	Implement futex(FUTEX_WAKE_OP).
1532  */
1533 static int
1534 futex_func_wake_op(bool shared, int *uaddr, int val, int *uaddr2, int val2,
1535     int val3, register_t *retval)
1536 {
1537 	struct futex *f = NULL, *f2 = NULL;
1538 	int oldval, newval, actual;
1539 	unsigned nwoken = 0;
1540 	int error;
1541 
1542 	/* Reject negative number of wakeups.  */
1543 	if (val < 0 || val2 < 0) {
1544 		error = EINVAL;
1545 		goto out;
1546 	}
1547 
1548 	/* Reject invalid operations before we start doing things.  */
1549 	if ((error = futex_validate_op_cmp(val3)) != 0)
1550 		goto out;
1551 
1552 	/* Look up the first futex, if any.  */
1553 	error = futex_lookup(uaddr, shared, &f);
1554 	if (error)
1555 		goto out;
1556 
1557 	/* Look up the second futex, if any.  */
1558 	error = futex_lookup(uaddr2, shared, &f2);
1559 	if (error)
1560 		goto out;
1561 
1562 	/*
1563 	 * Under the queue locks:
1564 	 *
1565 	 * 1. Read/modify/write: *uaddr2 op= oparg.
1566 	 * 2. Unconditionally wake uaddr.
1567 	 * 3. Conditionally wake uaddr2, if it previously matched val2.
1568 	 */
1569 	futex_queue_lock2(f, f2);
1570 	do {
1571 		error = futex_load(uaddr2, &oldval);
1572 		if (error)
1573 			goto out_unlock;
1574 		newval = futex_compute_op(oldval, val3);
1575 		error = ucas_int(uaddr2, oldval, newval, &actual);
1576 		if (error)
1577 			goto out_unlock;
1578 	} while (actual != oldval);
1579 	if (f == NULL) {
1580 		nwoken = 0;
1581 	} else {
1582 		nwoken = futex_wake(f, /*nwake*/val, NULL, /*nrequeue*/0,
1583 		    FUTEX_BITSET_MATCH_ANY);
1584 	}
1585 	if (f2 && futex_compute_cmp(oldval, val3)) {
1586 		nwoken += futex_wake(f2, /*nwake*/val2, NULL, /*nrequeue*/0,
1587 		    FUTEX_BITSET_MATCH_ANY);
1588 	}
1589 
1590 	/* Success! */
1591 	error = 0;
1592 out_unlock:
1593 	futex_queue_unlock2(f, f2);
1594 
1595 out:
1596 	/* Return the number of waiters woken. */
1597 	*retval = nwoken;
1598 
1599 	/* Release the futexes, if we got them. */
1600 	if (f2)
1601 		futex_rele(f2);
1602 	if (f)
1603 		futex_rele(f);
1604 	return error;
1605 }
1606 
1607 /*
1608  * do_futex(uaddr, op, val, timeout, uaddr2, val2, val3)
1609  *
1610  *	Implement the futex system call with all the parameters
1611  *	parsed out.
1612  */
1613 int
1614 do_futex(int *uaddr, int op, int val, const struct timespec *timeout,
1615     int *uaddr2, int val2, int val3, register_t *retval)
1616 {
1617 	const bool shared = (op & FUTEX_PRIVATE_FLAG) ? false : true;
1618 	const clockid_t clkid = (op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME
1619 							    : CLOCK_MONOTONIC;
1620 
1621 	op &= FUTEX_CMD_MASK;
1622 
1623 	switch (op) {
1624 	case FUTEX_WAIT:
1625 		return futex_func_wait(shared, uaddr, val,
1626 		    FUTEX_BITSET_MATCH_ANY, timeout, clkid, TIMER_RELTIME,
1627 		    retval);
1628 
1629 	case FUTEX_WAKE:
1630 		val3 = FUTEX_BITSET_MATCH_ANY;
1631 		/* FALLTHROUGH */
1632 	case FUTEX_WAKE_BITSET:
1633 		return futex_func_wake(shared, uaddr, val, val3, retval);
1634 
1635 	case FUTEX_REQUEUE:
1636 	case FUTEX_CMP_REQUEUE:
1637 		return futex_func_requeue(shared, op, uaddr, val, uaddr2,
1638 		    val2, val3, retval);
1639 
1640 	case FUTEX_WAIT_BITSET:
1641 		return futex_func_wait(shared, uaddr, val, val3, timeout,
1642 		    clkid, TIMER_ABSTIME, retval);
1643 
1644 	case FUTEX_WAKE_OP:
1645 		return futex_func_wake_op(shared, uaddr, val, uaddr2, val2,
1646 		    val3, retval);
1647 
1648 	case FUTEX_FD:
1649 	default:
1650 		return ENOSYS;
1651 	}
1652 }
1653 
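/*
 * Illustrative userland invocation (a sketch, not a documented API):
 * the raw system call takes its arguments in the order of the
 * syscallarg block in sys___futex() below, e.g.
 *
 *	syscall(SYS___futex, &word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
 *	    expected, &ts, NULL, 0, 0);
 *
 * where SYS___futex is assumed to come from <sys/syscall.h> and &ts is
 * a relative timeout or NULL.
 */
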
1654 /*
1655  * sys___futex(l, uap, retval)
1656  *
1657  *	__futex(2) system call: generic futex operations.
1658  */
1659 int
1660 sys___futex(struct lwp *l, const struct sys___futex_args *uap,
1661     register_t *retval)
1662 {
1663 	/* {
1664 		syscallarg(int *) uaddr;
1665 		syscallarg(int) op;
1666 		syscallarg(int) val;
1667 		syscallarg(const struct timespec *) timeout;
1668 		syscallarg(int *) uaddr2;
1669 		syscallarg(int) val2;
1670 		syscallarg(int) val3;
1671 	} */
1672 	struct timespec ts, *tsp;
1673 	int error;
1674 
1675 	/*
1676 	 * Copy in the timeout argument, if specified.
1677 	 */
1678 	if (SCARG(uap, timeout)) {
1679 		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
1680 		if (error)
1681 			return error;
1682 		tsp = &ts;
1683 	} else {
1684 		tsp = NULL;
1685 	}
1686 
1687 	return do_futex(SCARG(uap, uaddr), SCARG(uap, op), SCARG(uap, val),
1688 	    tsp, SCARG(uap, uaddr2), SCARG(uap, val2), SCARG(uap, val3),
1689 	    retval);
1690 }
1691 
1692 /*
1693  * sys___futex_set_robust_list(l, uap, retval)
1694  *
1695  *	__futex_set_robust_list(2) system call for robust futexes.
1696  */
1697 int
1698 sys___futex_set_robust_list(struct lwp *l,
1699     const struct sys___futex_set_robust_list_args *uap, register_t *retval)
1700 {
1701 	/* {
1702 		syscallarg(void *) head;
1703 		syscallarg(size_t) len;
1704 	} */
1705 	void *head = SCARG(uap, head);
1706 
1707 	if (SCARG(uap, len) != _FUTEX_ROBUST_HEAD_SIZE)
1708 		return EINVAL;
1709 	if ((uintptr_t)head % sizeof(u_long))
1710 		return EINVAL;
1711 
1712 	l->l_robust_head = (uintptr_t)head;
1713 
1714 	return 0;
1715 }
1716 
1717 /*
1718  * sys___futex_get_robust_list(l, uap, retval)
1719  *
1720  *	__futex_get_robust_list(2) system call for robust futexes.
1721  */
1722 int
1723 sys___futex_get_robust_list(struct lwp *l,
1724     const struct sys___futex_get_robust_list_args *uap, register_t *retval)
1725 {
1726 	/* {
1727 		syscallarg(lwpid_t) lwpid;
1728 		syscallarg(void **) headp;
1729 		syscallarg(size_t *) lenp;
1730 	} */
1731 	void *head;
1732 	const size_t len = _FUTEX_ROBUST_HEAD_SIZE;
1733 	int error;
1734 
1735 	error = futex_robust_head_lookup(l, SCARG(uap, lwpid), &head);
1736 	if (error)
1737 		return error;
1738 
1739 	/* Copy out the head pointer and the head structure length. */
1740 	error = copyout(&head, SCARG(uap, headp), sizeof(head));
1741 	if (__predict_true(error == 0)) {
1742 		error = copyout(&len, SCARG(uap, lenp), sizeof(len));
1743 	}
1744 
1745 	return error;
1746 }
1747 
1748 /*
1749  * release_futex(uva, tid)
1750  *
1751  *	Try to release the robust futex at uva in the current process
1752  *	on lwp exit.  If anything goes wrong, silently fail.  It is the
1753  *	userland program's obligation to arrange correct behaviour.
1754  */
1755 static void
1756 release_futex(uintptr_t const uptr, lwpid_t const tid, bool const is_pi,
1757     bool const is_pending)
1758 {
1759 	int *uaddr;
1760 	struct futex *f;
1761 	int oldval, newval, actual;
1762 	int error;
1763 
1764 	/* If it's misaligned, tough.  */
1765 	if (__predict_false(uptr & 3))
1766 		return;
1767 	uaddr = (int *)uptr;
1768 
1769 	error = futex_load(uaddr, &oldval);
1770 	if (__predict_false(error))
1771 		return;
1772 
1773 	/*
1774 	 * There are two race conditions we need to handle here:
1775 	 *
1776 	 * 1. User space cleared the futex word but died before
1777 	 *    being able to issue the wakeup.  No wakeups will
1778 	 *    ever be issued, oops!
1779 	 *
1780 	 * 2. Awakened waiter died before being able to acquire
1781 	 *    the futex in user space.  Any other waiters are
1782 	 *    now stuck, oops!
1783 	 *
1784 	 * In both of these cases, the futex word will be 0 (because
1785 	 * it's updated before the wake is issued).  The best we can
1786 	 * do is detect this situation if it's the pending futex and
1787 	 * issue a wake without modifying the futex word.
1788 	 *
1789 	 * XXX eventual PI handling?
1790 	 */
1791 	if (__predict_false(is_pending && (oldval & ~FUTEX_WAITERS) == 0)) {
1792 		register_t retval;
1793 		(void) futex_func_wake(/*shared*/true, uaddr, 1,
1794 		    FUTEX_BITSET_MATCH_ANY, &retval);
1795 		return;
1796 	}
1797 
1798 	/* Optimistically test whether we need to do anything at all.  */
1799 	if ((oldval & FUTEX_TID_MASK) != tid)
1800 		return;
1801 
1802 	/*
1803 	 * We need to handle the case where this thread owned the futex,
1804 	 * but it was uncontended.  In this case, there won't be any
1805 	 * kernel state to look up.  All we can do is mark the futex
1806 	 * as a zombie to be mopped up the next time another thread
1807 	 * attempts to acquire it.
1808 	 *
1809 	 * N.B. It's important to ensure to set FUTEX_OWNER_DIED in
1810 	 * this loop, even if waiters appear while we're doing
1811 	 * so.  This is because FUTEX_WAITERS is set by user space
1812 	 * before calling __futex() to wait, and the futex needs
1813 	 * to be marked as a zombie when the new waiter gets into
1814 	 * the kernel.
1815 	 */
1816 	if ((oldval & FUTEX_WAITERS) == 0) {
1817 		do {
1818 			error = futex_load(uaddr, &oldval);
1819 			if (error)
1820 				return;
1821 			if ((oldval & FUTEX_TID_MASK) != tid)
1822 				return;
1823 			newval = oldval | FUTEX_OWNER_DIED;
1824 			error = ucas_int(uaddr, oldval, newval, &actual);
1825 			if (error)
1826 				return;
1827 		} while (actual != oldval);
1828 
1829 		/*
1830 		 * If there is still no indication of waiters, then there is
1831 		 * no more work for us to do.
1832 		 */
1833 		if ((oldval & FUTEX_WAITERS) == 0)
1834 			return;
1835 	}
1836 
1837 	/*
1838 	 * Look for a shared futex since we have no positive indication
1839 	 * it is private.  If we can't, tough.
1840 	 */
1841 	error = futex_lookup(uaddr, /*shared*/true, &f);
1842 	if (error)
1843 		return;
1844 
1845 	/*
1846 	 * If there's no kernel state for this futex, there's nothing to
1847 	 * release.
1848 	 */
1849 	if (f == NULL)
1850 		return;
1851 
1852 	/* Work under the futex queue lock.  */
1853 	futex_queue_lock(f);
1854 
1855 	/*
1856 	 * Fetch the word: if the tid doesn't match ours, skip;
1857 	 * otherwise, set the owner-died bit, atomically.
1858 	 */
1859 	do {
1860 		error = futex_load(uaddr, &oldval);
1861 		if (error)
1862 			goto out;
1863 		if ((oldval & FUTEX_TID_MASK) != tid)
1864 			goto out;
1865 		newval = oldval | FUTEX_OWNER_DIED;
1866 		error = ucas_int(uaddr, oldval, newval, &actual);
1867 		if (error)
1868 			goto out;
1869 	} while (actual != oldval);
1870 
1871 	/*
1872 	 * If there may be waiters, try to wake one.  If anything goes
1873 	 * wrong, tough.
1874 	 *
1875 	 * XXX eventual PI handling?
1876 	 */
1877 	if (oldval & FUTEX_WAITERS) {
1878 		(void)futex_wake(f, /*nwake*/1, NULL, /*nrequeue*/0,
1879 		    FUTEX_BITSET_MATCH_ANY);
1880 	}
1881 
1882 	/* Unlock the queue and release the futex.  */
1883 out:	futex_queue_unlock(f);
1884 	futex_rele(f);
1885 }
1886 
1887 /*
1888  * futex_robust_head_lookup(l, lwpid)
1889  *
1890  *	Helper function to look up a robust head by LWP ID.
1891  */
1892 int
1893 futex_robust_head_lookup(struct lwp *l, lwpid_t lwpid, void **headp)
1894 {
1895 	struct proc *p = l->l_proc;
1896 
1897 	/* Find the other lwp, if requested; otherwise use our robust head.  */
1898 	if (lwpid) {
1899 		mutex_enter(p->p_lock);
1900 		l = lwp_find(p, lwpid);
1901 		if (l == NULL) {
1902 			mutex_exit(p->p_lock);
1903 			return ESRCH;
1904 		}
1905 		*headp = (void *)l->l_robust_head;
1906 		mutex_exit(p->p_lock);
1907 	} else {
1908 		*headp = (void *)l->l_robust_head;
1909 	}
1910 	return 0;
1911 }
1912 
1913 /*
1914  * futex_fetch_robust_head(uaddr)
1915  *
1916  *	Helper routine to fetch the futex robust list head that
1917  *	handles 32-bit binaries running on 64-bit kernels.
1918  */
1919 static int
1920 futex_fetch_robust_head(uintptr_t uaddr, u_long *rhead)
1921 {
1922 #ifdef _LP64
1923 	if (curproc->p_flag & PK_32) {
1924 		uint32_t rhead32[_FUTEX_ROBUST_HEAD_NWORDS];
1925 		int error;
1926 
1927 		error = copyin((void *)uaddr, rhead32, sizeof(rhead32));
1928 		if (__predict_true(error == 0)) {
1929 			for (int i = 0; i < _FUTEX_ROBUST_HEAD_NWORDS; i++) {
1930 				if (i == _FUTEX_ROBUST_HEAD_OFFSET) {
1931 					/*
1932 					 * Make sure the offset is sign-
1933 					 * extended.
1934 					 */
1935 					rhead[i] = (int32_t)rhead32[i];
1936 				} else {
1937 					rhead[i] = rhead32[i];
1938 				}
1939 			}
1940 		}
1941 		return error;
1942 	}
1943 #endif /* _LP64 */
1944 
1945 	return copyin((void *)uaddr, rhead,
1946 	    sizeof(*rhead) * _FUTEX_ROBUST_HEAD_NWORDS);
1947 }
1948 
1949 /*
1950  * futex_decode_robust_word(word)
1951  *
1952  *	Decode a robust futex list word into the entry and entry
1953  *	properties.
1954  */
1955 static inline void
1956 futex_decode_robust_word(uintptr_t const word, uintptr_t * const entry,
1957     bool * const is_pi)
1958 {
1959 	*is_pi = (word & _FUTEX_ROBUST_ENTRY_PI) ? true : false;
1960 	*entry = word & ~_FUTEX_ROBUST_ENTRY_PI;
1961 }
1962 
1963 /*
1964  * futex_fetch_robust_entry(uaddr)
1965  *
1966  *	Helper routine to fetch and decode a robust futex entry
1967  *	that handles 32-bit binaries running on 64-bit kernels.
1968  */
1969 static int
1970 futex_fetch_robust_entry(uintptr_t const uaddr, uintptr_t * const valp,
1971     bool * const is_pi)
1972 {
1973 	uintptr_t val = 0;
1974 	int error = 0;
1975 
1976 #ifdef _LP64
1977 	if (curproc->p_flag & PK_32) {
1978 		uint32_t val32;
1979 
1980 		error = ufetch_32((uint32_t *)uaddr, &val32);
1981 		if (__predict_true(error == 0))
1982 			val = val32;
1983 	} else
1984 #endif /* _LP64 */
1985 		error = ufetch_long((u_long *)uaddr, (u_long *)&val);
1986 	if (__predict_false(error))
1987 		return error;
1988 
1989 	futex_decode_robust_word(val, valp, is_pi);
1990 	return 0;
1991 }
1992 
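/*
 * For reference, a sketch of the userland robust list head as consumed
 * below (the _FUTEX_ROBUST_HEAD_* indices in <sys/futex.h> are
 * authoritative; this just restates what the code reads):
 *
 *	head[_FUTEX_ROBUST_HEAD_LIST]		first entry of a circular
 *						list that terminates back at
 *						the head address itself
 *	head[_FUTEX_ROBUST_HEAD_OFFSET]		signed offset added to an
 *						entry address to locate its
 *						futex word
 *	head[_FUTEX_ROBUST_HEAD_PENDING]	entry being acquired or
 *						released when the LWP exited,
 *						or 0 if none
 */
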
1993 /*
1994  * futex_release_all_lwp(l)
1995  *
1996  *	Release all l's robust futexes.  If anything looks funny in
1997  *	the process, give up -- it's userland's responsibility to dot
1998  *	the i's and cross the t's.
1999  */
2000 void
2001 futex_release_all_lwp(struct lwp * const l)
2002 {
2003 	u_long rhead[_FUTEX_ROBUST_HEAD_NWORDS];
2004 	int limit = 1000000;
2005 	int error;
2006 
2007 	/* If there's no robust list there's nothing to do. */
2008 	if (l->l_robust_head == 0)
2009 		return;
2010 
2011 	KASSERT((l->l_lid & FUTEX_TID_MASK) == l->l_lid);
2012 
2013 	/* Read the final snapshot of the robust list head. */
2014 	error = futex_fetch_robust_head(l->l_robust_head, rhead);
2015 	if (error) {
2016 		printf("WARNING: pid %jd (%s) lwp %jd:"
2017 		    " unmapped robust futex list head\n",
2018 		    (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
2019 		    (uintmax_t)l->l_lid);
2020 		return;
2021 	}
2022 
2023 	const long offset = (long)rhead[_FUTEX_ROBUST_HEAD_OFFSET];
2024 
2025 	uintptr_t next, pending;
2026 	bool is_pi, pending_is_pi;
2027 
2028 	futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_LIST],
2029 	    &next, &is_pi);
2030 	futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_PENDING],
2031 	    &pending, &pending_is_pi);
2032 
2033 	/*
2034 	 * Walk down the list of locked futexes and release them, up
2035 	 * to one million of them before we give up.
2036 	 */
2037 
2038 	while (next != l->l_robust_head && limit-- > 0) {
2039 		/* pending handled below. */
2040 		if (next != pending)
2041 			release_futex(next + offset, l->l_lid, is_pi, false);
2042 		error = futex_fetch_robust_entry(next, &next, &is_pi);
2043 		if (error)
2044 			break;
2045 		preempt_point();
2046 	}
2047 	if (limit <= 0) {
2048 		printf("WARNING: pid %jd (%s) lwp %jd:"
2049 		    " exhausted robust futex limit\n",
2050 		    (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
2051 		    (uintmax_t)l->l_lid);
2052 	}
2053 
2054 	/* If there's a pending futex, it may need to be released too. */
2055 	if (pending != 0) {
2056 		release_futex(pending + offset, l->l_lid, pending_is_pi, true);
2057 	}
2058 }
2059