/*	$NetBSD: pthread_mutex.c,v 1.76 2020/02/16 17:45:11 kamil Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */

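/*
 * In outline (a sketch of the functions below): the uncontended paths
 * are a single compare-and-swap of ptm_owner between NULL and the
 * calling thread.  On contention, the slow path spins while the current
 * holder is running on a CPU, and otherwise pushes the caller onto the
 * lockless ptm_waiters list and parks in the kernel until the holder
 * wakes it on unlock.
 */
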
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.76 2020/02/16 17:45:11 kamil Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

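/*
 * A mutex's owner word (ptm_owner) packs state flags into the low bits
 * of the owning thread pointer; pthread_t values handed out by the
 * library are assumed to be aligned well enough to leave the low four
 * bits free:
 *
 *	MUTEX_WAITERS_BIT	waiters may be present on ptm_waiters
 *	MUTEX_RECURSIVE_BIT	the mutex is of the recursive type
 *	MUTEX_DEFERRED_BIT	wakeups are deferred until unlock
 *	MUTEX_PROTECT_BIT	the mutex uses PTHREAD_PRIO_PROTECT
 */
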
#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_DEFERRED_BIT		((uintptr_t)0x04)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

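/*
 * The mutexattr private word (ptma_private) packs the attribute values
 * into byte-wide fields, roughly:
 *
 *	bits  0.. 7	mutex type (PTHREAD_MUTEX_*)
 *	bits  8..15	protocol (PTHREAD_PRIO_*)
 *	bits 16..23	priority ceiling
 */
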
#define	MUTEX_GET_TYPE(x)		\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)		\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)		\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static int	pthread__mutex_unlock_slow(pthread_mutex_t *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

#if 0
	/*
	 * Always initialize the mutex structure; it may be used later
	 * and the cost should be minimal.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);
#endif

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;

	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

int
pthread_mutex_timedlock(pthread_mutex_t *ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

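	/*
	 * Exponential backoff: the number of pauses per retry doubles,
	 * up to a cap of 128.
	 */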
	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

NOINLINE static bool
pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
{
	void *owner, *next;

	/*
	 * Note that the mutex can become unlocked before we set
	 * the waiters bit.  If that happens it's not safe to sleep
	 * as we may never be awoken: we must remove the current
	 * thread from the waiters list and try again.
	 *
	 * Because we are doing this atomically, we can't remove
	 * one waiter: we must remove all waiters and awaken them,
	 * then sleep in _lwp_park() until we have been awoken.
	 *
	 * Issue a memory barrier to ensure that we are reading
	 * the value of ptm_owner/pt_mutexwait after we have entered
	 * the waiters list (the CAS itself must be atomic).
	 */
	for (owner = ptm->ptm_owner;; owner = next) {
		if (MUTEX_OWNER(owner) == 0) {
			pthread__mutex_wakeup(self, ptm);
			return true;
		}
		if (MUTEX_HAS_WAITERS(owner)) {
			return false;
		}
		next = atomic_cas_ptr(&ptm->ptm_owner, owner,
		    (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
	}
}

NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *waiters, *new, *owner, *next;
	pthread_t self;
	int serrno;
	int error;

	owner = ptm->ptm_owner;
	self = pthread__self();

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		return errno;
	}
	serrno = errno;
	for (;; owner = ptm->ptm_owner) {
		/* Spin while the owner is running. */
		if (MUTEX_OWNER(owner) != (uintptr_t)self)
			owner = pthread__mutex_spin(ptm, owner);

		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			do {
				new = (void *)
				    ((uintptr_t)self | (uintptr_t)owner);
				next = atomic_cas_ptr(&ptm->ptm_owner, owner,
				    new);
				if (next == owner) {
					errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
					membar_enter();
#endif
					return 0;
				}
				owner = next;
			} while (MUTEX_OWNER(owner) == 0);
			/*
			 * We have lost the race to acquire the mutex.
			 * The new owner could be running on another
			 * CPU, in which case we should spin and avoid
			 * the overhead of blocking.
			 */
			continue;
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure mutexwait/mutexnext
		 * are visible before we enter the waiters list.
		 */
		self->pt_mutexwait = 1;
		for (waiters = ptm->ptm_waiters;; waiters = next) {
			self->pt_mutexnext = waiters;
			membar_producer();
			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
			if (next == waiters)
				break;
		}

		/* Set the waiters bit and block. */
		membar_sync();
		if (pthread__mutex_setwaiters(self, ptm)) {
			continue;
		}

		/*
		 * We may have been awoken by the current thread above,
		 * or will be awoken by the current holder of the mutex.
		 * The key requirement is that we must not proceed until
		 * told that we are no longer waiting (via pt_mutexwait
		 * being set to zero).  Otherwise it is unsafe to re-enter
		 * the thread onto the waiters list.
		 */
		membar_sync();
		while (self->pt_mutexwait) {
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
			    __UNCONST(ts), self->pt_unpark,
			    __UNVOLATILE(&ptm->ptm_waiters),
			    __UNVOLATILE(&ptm->ptm_waiters));
			self->pt_unpark = 0;
			if (__predict_true(error != -1)) {
				continue;
			}
			if (errno == ETIMEDOUT && self->pt_mutexwait) {
				/* Remove self from waiters list */
				pthread__mutex_wakeup(self, ptm);
				/* priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				return ETIMEDOUT;
			}
		}
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

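	/*
	 * A recursive mutex keeps MUTEX_RECURSIVE_BIT set in ptm_owner even
	 * while unowned, so the CAS against NULL above cannot succeed for
	 * it: retry with the bit preserved, or bump the recursion count if
	 * this thread already owns the mutex.
	 */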
	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *value;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

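	/*
	 * Fast path: issue a release barrier (where the atomic op does not
	 * already imply one), then hand ownership back from this thread to
	 * NULL with a single compare-and-swap.  Anything else goes through
	 * the slow path.
	 */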
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	self = pthread__self();
	value = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
	if (__predict_true(value == self)) {
		pthread__smt_wake();
		return 0;
	}
	return pthread__mutex_unlock_slow(ptm);
}

NOINLINE static int
pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
{
	pthread_t self, owner, new;
	int weown, error;

	self = pthread__self();
	owner = ptm->ptm_owner;
	weown = (MUTEX_OWNER(owner) == (uintptr_t)self);
	error = 0;

	if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else {
			new = NULL;
		}
	} else if (MUTEX_RECURSIVE(owner)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else if (ptm->ptm_recursed) {
			ptm->ptm_recursed--;
			new = owner;
		} else {
			new = (pthread_t)MUTEX_RECURSIVE_BIT;
		}
	} else {
		pthread__error(EPERM,
		    "Unlocking unlocked mutex", (owner != NULL));
		pthread__error(EPERM,
		    "Unlocking mutex owned by another thread", weown);
		new = NULL;
	}

	/*
	 * Release the mutex.  If there appear to be waiters, then
	 * wake them up.
	 */
	if (new != owner) {
		owner = atomic_swap_ptr(&ptm->ptm_owner, new);
		if (__predict_false(MUTEX_PROTECT(owner))) {
			/* restore elevated priority */
			(void)_sched_protect(-1);
		}
		if (MUTEX_HAS_WAITERS(owner) != 0) {
			pthread__mutex_wakeup(self, ptm);
			return 0;
		}
		error = 0;
	}

	if (self->pt_nwaiters == 1) {
		/*
		 * If the calling thread is about to block, defer
		 * unparking the target until _lwp_park() is called.
		 */
		if (self->pt_willpark && self->pt_unpark == 0) {
			self->pt_unpark = self->pt_waiters[0];
		} else {
			(void)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
		}
	} else if (self->pt_nwaiters > 0) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
	}
	self->pt_nwaiters = 0;

	return error;
}

/*
 * pthread__mutex_wakeup: unpark threads waiting for us
 *
 * Unpark the threads collected on the ptm->ptm_waiters list and in
 * self->pt_waiters.
 */

static void
pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
{
	pthread_t thread, next;
	ssize_t n, rv;

	/* Take ownership of the current set of waiters. */
	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
	membar_datadep_consumer(); /* for alpha */
	pthread__smt_wake();

	for (;;) {
		/*
		 * Pull waiters from the queue and add to our list.
		 * Use a memory barrier to ensure that we safely
		 * read the value of pt_mutexnext before 'thread'
		 * sees pt_mutexwait being cleared.
		 */
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			next = thread->pt_mutexnext;
			if (thread != self) {
				self->pt_waiters[n++] = thread->pt_lid;
				membar_sync();
			}
			thread->pt_mutexwait = 0;
			/* No longer safe to touch 'thread' */
		}

		switch (n) {
		case 0:
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EALREADY && errno != EINTR &&
			    errno != ESRCH) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark failed");
			}
			return;
		default:
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EINTR) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark_all failed");
			}
			break;
		}
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
#if 0
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);
#endif

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}

#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters
 *
 * In order to avoid unnecessary contention on the interlocking mutex,
 * we defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases the first mutex with
 * MUTEX_DEFERRED_BIT set.  That will likely be the mutex 'ptm', but it is
 * not a problem even if it isn't.
 */

void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
{

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
		self->pt_nwaiters = 0;
	} else {
		atomic_or_ulong((volatile unsigned long *)
		    (uintptr_t)&ptm->ptm_owner,
		    (unsigned long)MUTEX_DEFERRED_BIT);
	}
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* check range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}