xref: /netbsd-src/lib/libpthread/pthread_rwlock.c (revision ecf6466c633518f478c293c388551b29e46729cc)
/*	$NetBSD: pthread_rwlock.c,v 1.37 2020/01/13 18:22:56 ad Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.37 2020/01/13 18:22:56 ad Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
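
/*
 * A note on the lock word (a sketch inferred from how ptr_owner is
 * manipulated in this file; the RW_* constants themselves are defined
 * in a shared header, not here).  The pointer-sized ptr_owner field
 * packs everything the fast path needs:
 *
 *	- RW_WRITE_LOCKED, RW_WRITE_WANTED and RW_HAS_WAITERS are flag
 *	  bits in the low-order bits, which aligned thread structures
 *	  never use (hence the _DIAGASSERT()s on RW_FLAGMASK below).
 *	- When write locked, the RW_THREAD portion holds the owning
 *	  pthread_t; when read locked, it holds the reader count, which
 *	  is advanced one reader at a time via "owner + RW_READ_INCR".
 *
 * All state transitions go through rw_cas(): on failure, callers take
 * the value it returns as the freshly observed owner word and loop,
 * the usual compare-and-swap retry idiom used throughout this file.
 */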

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}
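
/*
 * Illustrative caller-side sketch (not part of this file): dynamic
 * initialization with default attributes, paired with destruction.
 * Passing NULL for the attribute is valid, and destroying a lock that
 * is still held or has waiters returns EINVAL, per the checks above.
 *
 *	pthread_rwlock_t lock;
 *	int error;
 *
 *	error = pthread_rwlock_init(&lock, NULL);
 *	if (error != 0)
 *		return error;
 *	// ... use the lock ...
 *	error = pthread_rwlock_destroy(&lock);
 *	// error == EINVAL if the lock is still in use
 */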

/* We want function call overhead: the call itself spaces out the spins. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
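
/*
 * Rationale, as far as it can be read from the code above: spinning is
 * worthwhile only when the lock is write locked with no waiter bits
 * set (the "owner & ~RW_THREAD == RW_WRITE_LOCKED" test) and the
 * owning thread is currently running on a CPU (lc_curcpu !=
 * LWPCTL_CPU_NONE), so the hold is likely to be short.  Each round of
 * 128 pauses is a bounded backoff; callers re-read the owner between
 * rounds and fall through to the sleep path once spinning stops
 * looking profitable.
 */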

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
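
/*
 * Slow-path summary (descriptive only).  Acquisition proceeds in three
 * stages: an uncontended CAS, a bounded spin while the writer is
 * on-CPU, and finally a sleep.  Before sleeping, the thread takes the
 * hashed interlock, CASes RW_HAS_WAITERS into the lock word, queues
 * itself and parks.  A releasing thread hands the lock over directly
 * and sets pt_rwlocked to _RW_LOCKED, so a normal wakeup means the
 * lock is already ours; waking without it and without an error (such
 * as ETIMEDOUT from a timed wait) indicates a protocol bug, hence the
 * pthread__errorfunc() call.
 */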


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a read lock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
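
/*
 * Illustrative caller-side sketch (not part of this file): the
 * non-blocking variant either succeeds immediately or fails with
 * EBUSY; it never sleeps.
 *
 *	if (pthread_rwlock_tryrdlock(&lock) == 0) {
 *		// ... read shared data ...
 *		pthread_rwlock_unlock(&lock);
 *	} else {
 *		// EBUSY: a writer holds or is waiting for the lock
 *	}
 */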

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
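
/*
 * The write path mirrors the read path above, with two deliberate
 * differences visible in the code: writers enqueue at the tail of
 * ptr_wblocked (FIFO among writers, where readers push onto the head
 * of their queue), and they set RW_WRITE_WANTED alongside
 * RW_HAS_WAITERS so that incoming readers back off, preserving the
 * writer preference required by SUSv3.
 */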


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
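
/*
 * Illustrative caller-side sketch (not part of this file): trywrlock
 * succeeds only when the lock word is completely unowned (owner == 0),
 * i.e. no readers, no writer and no waiters; anything else is EBUSY.
 *
 *	if (pthread_rwlock_trywrlock(&lock) == 0) {
 *		// ... modify shared data ...
 *		pthread_rwlock_unlock(&lock);
 *	}
 */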

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
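
/*
 * Illustrative caller-side sketch (not part of this file): the timeout
 * is an absolute time, which POSIX measures against CLOCK_REALTIME, so
 * a relative wait is built by adding to "now".  The nanosecond and
 * sign checks above reject malformed timespecs with EINVAL.
 * pthread_rwlock_timedwrlock() below is used the same way.
 *
 *	struct timespec abs;
 *
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;	// give up after roughly five seconds
 *	switch (pthread_rwlock_timedrdlock(&lock, &abs)) {
 *	case 0:
 *		// ... read, then pthread_rwlock_unlock(&lock) ...
 *		break;
 *	case ETIMEDOUT:
 *		// the deadline passed without acquiring the lock
 *		break;
 *	}
 */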

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
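
/*
 * Release summary (descriptive only).  The common case is a single
 * subtract-and-CAS: because read acquisition added RW_READ_INCR and
 * write acquisition added the thread pointer plus RW_WRITE_LOCKED, one
 * "owner - decr" covers both.  The slow path runs only when dropping
 * the last reference while RW_HAS_WAITERS is set; under the interlock
 * it hands the lock directly to the first queued writer, or failing
 * that to all queued readers at once, writing the new owner word with
 * atomic_swap_ptr() and marking each beneficiary _RW_LOCKED before
 * waking it.
 */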

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
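
/*
 * Illustrative caller-side sketch (not part of this file): the *_np
 * predicates are non-portable NetBSD extensions, exported above via
 * weak aliases, and are handy for asserting lock-state invariants in
 * debug builds.
 *
 *	assert(pthread_rwlock_wrheld_np(&lock));  // we hold it for writing
 *	assert(pthread_rwlock_rdheld_np(&lock));  // held for reading by someone
 */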

#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
694