/*	$NetBSD: pthread_rwlock.c,v 1.34 2016/07/03 14:24:58 christos Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.34 2016/07/03 14:24:58 christos Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

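/*
 * A note on state (believed layout, following the rwlock flag bits
 * from <sys/rwlock.h>): the lock word (ptr_owner) holds either the
 * owning writer's pthread_t or the count of read holds (in units of
 * RW_READ_INCR) in its RW_THREAD field, with RW_WRITE_LOCKED,
 * RW_WRITE_WANTED and RW_HAS_WAITERS in the low bits.  The _RW_*
 * values above are different beasts: stored in pt_rwlocked, they
 * record what a parked thread is waiting for and whether the lock
 * was handed to it.
 */
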
#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
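
/*
 * The __libc_rwlock_* strong aliases are believed to give libc's
 * internal locking a direct path to these implementations when a
 * program is linked against libpthread; the __uselibcstub checks
 * below route back to libc's stubs in the single-threaded case.
 */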

static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
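
/*
 * rw_cas() returns the owner word that was actually observed, so
 * comparing the result with the expected value tests for success.
 * Every update of the lock word below is an optimistic loop of this
 * shape (illustrative sketch only):
 *
 *	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
 *		next = rw_cas(ptr, owner, desired);
 *		if (owner == next)
 *			break;		(CAS succeeded)
 *	}
 */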

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
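
/*
 * Minimal usage sketch (illustrative only, not part of the library):
 *
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlock_init(&lock, NULL);	(NULL: default attributes)
 *	pthread_rwlock_rdlock(&lock);		(shared: many readers at once)
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);
 */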


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/*
 * Deliberately out of line: the function call overhead, together
 * with pthread__smt_pause(), throttles the spin loop in
 * pthread__rwlock_spin() to a polite rate.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
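
/*
 * The heuristic above is adaptive: it asks for spinning only when the
 * lock is write held with no waiter bits set, and the owning thread
 * is running on a CPU (lc_curcpu != LWPCTL_CPU_NONE) and not blocked
 * in the kernel, i.e. when the hold is likely to end soon.  Otherwise
 * the caller proceeds to sleep.
 */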

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
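
/*
 * Note on the sleep path above: ownership moves by direct handoff.
 * pthread_rwlock_unlock() installs the new owner word and sets each
 * woken thread's pt_rwlocked to _RW_LOCKED before waking it, so a
 * thread returning from pthread__park() checks pt_rwlocked rather
 * than re-contending for the lock.  pt_early points at
 * pthread__rwlock_early() so that a sleeper waking early (e.g. on
 * timeout) can repair the waiter bits on its way out.
 */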


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
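
/*
 * Illustrative caller pattern (not part of the library): the try
 * variants fail fast with EBUSY instead of sleeping.
 *
 *	if (pthread_rwlock_tryrdlock(&lock) == EBUSY) {
 *		(contended: do other work, or fall back to the
 *		blocking pthread_rwlock_rdlock(&lock))
 *	}
 */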

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}
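
/*
 * The timeout in the timed variants is an absolute CLOCK_REALTIME
 * deadline, as POSIX specifies.  Illustrative sketch of building one
 * (not part of the library):
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;	(give up roughly 5 seconds from now)
 *	if (pthread_rwlock_timedwrlock(&lock, &deadline) == ETIMEDOUT)
 *		(handle the timeout)
 */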


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
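
/*
 * The *_held_np predicates are non-portable NetBSD extensions meant
 * for assertions, e.g. (illustrative):
 *
 *	assert(pthread_rwlock_wrheld_np(&lock));
 *
 * The write-held tests check that the caller is the owner; the
 * read-held test only reports that some thread holds a read lock,
 * since individual readers are not tracked in the lock word.
 */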

#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}