/*	$NetBSD: pthread_rwlock.c,v 1.39 2020/02/05 11:05:10 kamil Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.39 2020/02/05 11:05:10 kamil Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

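/*
 * Values for a thread's pt_rwlocked field.  The lock is passed to a
 * sleeping waiter by direct handoff: the waker stores _RW_LOCKED in
 * pt_rwlocked before waking the thread, so on wakeup the waiter can
 * tell whether it was given the lock or merely timed out.
 */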
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

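/*
 * All lock state lives in the single word ptr_owner: when write held
 * it holds the owning thread pointer plus RW_WRITE_LOCKED; when read
 * held it holds the reader count scaled by RW_READ_INCR.  The
 * RW_HAS_WAITERS and RW_WRITE_WANTED bits record sleeping waiters.
 * Every state transition goes through this compare-and-swap; on
 * platforms where atomic ops do not imply a memory barrier
 * (PTHREAD__ATOMIC_IS_MEMBAR undefined), the acquire and release
 * paths issue explicit membar_enter()/membar_exit() calls.
 */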
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

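/*
 * Spin briefly instead of sleeping, but only while the lock is write
 * held and the owner is known (via its lwpctl block) to be running
 * on a CPU; otherwise sleeping right away is the better bet.
 */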
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
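		/* Fix up the waiter bits if we wake early, e.g. on timeout. */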
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
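		/*
		 * Writers queue FIFO (insert at tail, wake the head) so
		 * the longest waiting writer gets the lock first; readers
		 * are all woken together, so their queue order does not
		 * matter.
		 */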
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

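/*
 * Diagnostic helpers.  These read ptr_owner without taking the
 * interlock, so the answer may already be stale by the time it is
 * returned; they are intended for assertions rather than for
 * synchronization decisions.
 */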
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

#ifdef _PTHREAD_PSHARED
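/*
 * Process-shared rwlocks are not supported: the pshared attribute
 * accepts only PTHREAD_PROCESS_PRIVATE.
 */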
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
694