/*	$OpenBSD: rthread_sync.c,v 1.4 2017/09/05 02:40:54 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Mutexes and conditions - synchronization functions.
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>	/* INT_MAX, for the recursion-count overflow check */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"		/* in libc/include */

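/*
 * Guards the lazy setup of statically initialized mutexes:
 * _rthread_mutex_lock() promotes a NULL *mutexp to a real mutex under
 * this lock, so two threads taking a fresh PTHREAD_MUTEX_INITIALIZER
 * mutex at the same time don't both run pthread_mutex_init().
 */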
static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

/*
 * mutexen
 */
int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
	struct pthread_mutex *mutex;

	mutex = calloc(1, sizeof(*mutex));
	if (!mutex)
		return (errno);
	mutex->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&mutex->lockers);
	if (attr == NULL) {
		mutex->type = PTHREAD_MUTEX_DEFAULT;
		mutex->prioceiling = -1;
	} else {
		mutex->type = (*attr)->ma_type;
		mutex->prioceiling = (*attr)->ma_protocol ==
		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
	}
	*mutexp = mutex;

	return (0);
}
DEF_STRONG(pthread_mutex_init);

int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
	struct pthread_mutex *mutex;

	assert(mutexp);
	mutex = (struct pthread_mutex *)*mutexp;
	if (mutex) {
		if (mutex->count || mutex->owner != NULL ||
		    !TAILQ_EMPTY(&mutex->lockers)) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(mutex);
		*mutexp = NULL;
	}
	return (0);
}
DEF_STRONG(pthread_mutex_destroy);

static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE) {
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

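	/*
	 * Every path that falls through to here still holds mutex->lock
	 * and has made the calling thread the owner; bump the (possibly
	 * recursive) hold count before dropping the spinlock.
	 */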
	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}

int
pthread_mutex_lock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 0, NULL));
}
DEF_STRONG(pthread_mutex_lock);

int
pthread_mutex_trylock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 1, NULL));
}

int
pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
{
	return (_rthread_mutex_lock(p, 0, abstime));
}
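
/*
 * Illustrative usage sketch (not part of this file; the names m and ts
 * are assumed): the absolute timeout is measured against CLOCK_REALTIME,
 * so a caller typically builds it like
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	if (pthread_mutex_timedlock(&m, &ts) == ETIMEDOUT)
 *		... give up after roughly five seconds ...
 */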

int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return (0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

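	/*
	 * Releasing the last (possibly recursive) hold hands ownership
	 * directly to the first queued waiter, if any, and wakes it; the
	 * mutex never passes through an unowned state while threads are
	 * queued on it.
	 */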
	if (--mutex->count == 0) {
		pthread_t next;

		_spinlock(&mutex->lock);
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);

/*
 * condition variables
 */
int
pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
{
	pthread_cond_t cond;

	cond = calloc(1, sizeof(*cond));
	if (!cond)
		return (errno);
	cond->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&cond->waiters);
	if (attr == NULL)
		cond->clock = CLOCK_REALTIME;
	else
		cond->clock = (*attr)->ca_clock;
	*condp = cond;

	return (0);
}
DEF_STRONG(pthread_cond_init);

int
pthread_cond_destroy(pthread_cond_t *condp)
{
	pthread_cond_t cond;

	assert(condp);
	cond = *condp;
	if (cond) {
		if (!TAILQ_EMPTY(&cond->waiters)) {
#define MSG "pthread_cond_destroy on condvar with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(cond);
	}
	*condp = NULL;

	return (0);
}

int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

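	/*
	 * Cancellation is handled in delayed mode: a cancel request only
	 * interrupts the __thrsleep() calls below (via &self->delayed_cancel);
	 * the thread still requeues itself and reacquires the mutex before
	 * the cancellation is acted on in LEAVE_CANCEL_POINT_INNER().
	 */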
	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}
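
	/*
	 * Note that mutex->lock is still held here: __thrsleep() has the
	 * kernel release it only once this thread is committed to sleeping,
	 * so a thread taking mutex->lock to hand over the mutex cannot issue
	 * its __thrwakeup() before this thread is actually asleep.
	 */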

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, cond->clock, abstime,
		    &mutex->lock, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (rv);
}
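
/*
 * Illustrative usage sketch (not part of this file; the names a, c, m,
 * ts, ready and error are assumed): the timeout is interpreted against
 * the condvar's clock, so a CLOCK_MONOTONIC condvar needs an absolute
 * time taken from the same clock:
 *
 *	pthread_condattr_init(&a);
 *	pthread_condattr_setclock(&a, CLOCK_MONOTONIC);
 *	pthread_cond_init(&c, &a);
 *	...
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	ts.tv_sec += 2;
 *	error = 0;
 *	while (!ready && error != ETIMEDOUT)
 *		error = pthread_cond_timedwait(&c, &m, &ts);
 */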

int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, 0, NULL, &mutex->lock,
		    &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
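
/*
 * Illustrative usage sketch (not part of this file; the names m, c and
 * ready are assumed): callers re-check their predicate in a loop, since
 * a woken waiter can still find the condition false, e.g. when another
 * thread acquired the mutex first and consumed the state:
 *
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&c, &m);
 *	... use the protected state ...
 *	pthread_mutex_unlock(&m);
 */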

int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return (0);

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

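	/*
	 * If the mutex is currently free, hand it straight to the signaled
	 * thread and wake it now; otherwise just append the thread to the
	 * mutex's waiter queue and let pthread_mutex_unlock() wake it when
	 * the mutex is handed over.
	 */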
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}

int
pthread_cond_broadcast(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	pthread_t p;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return (0);

	cond = *condp;
	_rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	mutex = cond->mutex;
	assert(mutex != NULL);

	/* walk the list, clearing the "blocked on condvar" pointer */
	p = thread;
	do
		p->blocking_cond = NULL;
	while ((p = TAILQ_NEXT(p, waiting)) != NULL);
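
	/*
	 * Clearing blocking_cond while cond->lock is held lets a waiter
	 * whose sleep ends concurrently observe, under that same lock, that
	 * broadcast has already claimed it and that it should simply wait
	 * for mutex ownership rather than touch the condvar queue.
	 */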

	/*
	 * We want to transfer all the threads from the condvar's list
	 * to the mutex's list.  The TAILQ_* macros don't let us do that
	 * efficiently, so this is direct list surgery.  Pay attention!
	 */

	/* 1) attach the first thread to the end of the mutex's list */
	_spinlock(&mutex->lock);
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	thread->waiting.tqe_prev = mutex->lockers.tqh_last;
	*(mutex->lockers.tqh_last) = thread;

	/* 2) fix up the end pointer for the mutex's list */
	mutex->lockers.tqh_last = cond->waiters.tqh_last;

	if (wakeup) {
		TAILQ_REMOVE(&mutex->lockers, thread, waiting);
		mutex->owner = thread;
		_spinunlock(&mutex->lock);
		__thrwakeup(thread, 1);
	} else
		_spinunlock(&mutex->lock);

	/* 3) reset the condvar's list and mutex pointer */
	TAILQ_INIT(&cond->waiters);
	assert(cond->mutex != NULL);
	cond->mutex = NULL;
	_spinunlock(&cond->lock);

	return (0);
}