/*	$NetBSD: pthread_rwlock.c,v 1.42 2020/06/02 00:29:53 joerg Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.42 2020/06/02 00:29:53 joerg Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2
#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif
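
/*
 * On platforms whose atomic operations imply the needed memory
 * barriers, PTHREAD__ATOMIC_IS_MEMBAR is defined and the explicit
 * membar_enter() (acquire) and membar_exit() (release) calls below
 * are compiled out.
 */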

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(pthread_t, pthread_rwlock_t *,
    pthread_mutex_t *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

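/*
 * rw_cas() returns the value of ptr_owner observed by the compare-and-
 * swap; the exchange succeeded iff that value equals the expected 'o'.
 * Every acquire and release path below is built on the same retry idiom:
 *
 *	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
 *		next = rw_cas(ptr, owner, <desired new value>);
 *		if (owner == next)
 *			break;
 *	}
 */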
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
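
/*
 * Statically allocated rwlocks need no init call: the
 * PTHREAD_RWLOCK_INITIALIZER macro from <pthread.h> establishes the
 * equivalent state to pthread_rwlock_init(ptr, NULL).
 */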


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/*
 * Deliberately out of line: the call overhead itself provides a small,
 * uniform delay in the spin loop, even where pthread__smt_pause()
 * expands to nothing.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

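/*
 * Adaptive spinning: it only pays to busy-wait while the holder is a
 * writer currently running on a CPU, since it is then likely to release
 * the lock shortly.  Returns nonzero if the caller should re-read the
 * owner field and retry instead of sleeping.
 */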
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0);

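		/*
		 * If pt_sleepobj is still set, the lock was not handed
		 * to us directly: we woke early (e.g. the timeout
		 * expired), so take ourselves back off the sleep queue
		 * and repair the waiter bits before checking the
		 * outcome.
		 */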
		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure: %d", errno);
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
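
/*
 * Note the writer preference above: if the lock is merely read-held
 * but a writer is queued (RW_WRITE_WANTED set), tryrdlock fails with
 * EBUSY rather than admitting another reader.
 */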

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0);

		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure: %d", errno);
	}
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
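
/*
 * The timed variants take an absolute CLOCK_REALTIME timeout, per
 * POSIX.  For example, to wait at most five seconds for a read lock:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	error = pthread_rwlock_timedrdlock(&lock, &ts);
 *
 * ETIMEDOUT results if the lock cannot be acquired by that time.
 */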

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
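
/*
 * A note on direct handoff: the releasing thread chooses the next
 * owner(s), installs the new owner word itself, and sets each chosen
 * waiter's pt_rwlocked to _RW_LOCKED before waking it.  A woken thread
 * therefore never competes to acquire the lock; it merely inspects
 * pt_rwlocked to learn that it already owns it.
 */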

/*
 * Called when a timedlock awakens early, to adjust the waiter bits.
 * The rwlock's interlock is not held on entry: this routine takes it,
 * removes the caller from the sleep queue, and fixes the waiter bits
 * to reflect the remaining sleepers.
 */
static void
pthread__rwlock_early(pthread_t self, pthread_rwlock_t *ptr,
    pthread_mutex_t *interlock)
{
	uintptr_t owner, set, newval, next;
	pthread_queue_t *queue;

	pthread_mutex_lock(interlock);
	if ((queue = self->pt_sleepobj) == NULL) {
		pthread_mutex_unlock(interlock);
		return;
	}
	PTQ_REMOVE(queue, self, pt_sleep);
	self->pt_sleepobj = NULL;
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		newval = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, newval);
		if (owner == next)
			break;
	}
	pthread_mutex_unlock(interlock);
}

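/*
 * Diagnostic predicates (_np): a write lock is reported as held only if
 * the calling thread owns it; a read lock is reported as held if any
 * thread does, since readers are anonymous in the owner word.
 */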
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}