/*	$NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

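/*
 * Values for pt_rwlocked.  They record how a thread parked in the
 * rdlock/wrlock slow paths was awoken: the releasing thread hands the
 * lock off directly and stores _RW_LOCKED before waking the sleeper,
 * so a woken thread that still sees _RW_WANT_READ/_RW_WANT_WRITE knows
 * it was not given the lock (for example, its timeout expired).
 */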
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(pthread_t, pthread_rwlock_t *,
    pthread_mutex_t *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

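/*
 * libc refers to its internal locks through the __libc_rwlock_* names;
 * the strong aliases below make those names resolve to the real
 * implementations in this file when a program is linked against
 * libpthread.
 */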
__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

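/*
 * Compare-and-swap on the owner word.  Returns the value observed in
 * ptr_owner: equal to 'o' if the swap succeeded, otherwise the current
 * value, which callers feed back into their retry loops.
 */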
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

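/*
 * Illustrative only (not part of this file): the functions below are
 * consumed through the usual POSIX interface, e.g.
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&lock);	// shared; many readers may hold it
 *	... read shared data ...
 *	pthread_rwlock_unlock(&lock);
 *
 *	pthread_rwlock_wrlock(&lock);	// exclusive; a single writer
 *	... modify shared data ...
 *	pthread_rwlock_unlock(&lock);
 */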
int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

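/*
 * Adaptive spin heuristic: it is only worth busy-waiting when the lock
 * is write held with no other flags set, and the owning thread is
 * currently running on a CPU (per its shared lwpctl block).  In that
 * case spin for a short, bounded number of pauses and return 1 so the
 * caller rereads the owner word; otherwise return 0 and let the caller
 * go to sleep.
 */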
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

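/*
 * Slow path shared by pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock() (ts == NULL means wait forever).
 * Fast path: CAS the reader count up while no writer holds or wants
 * the lock.  Otherwise spin briefly if the write holder is running,
 * then set RW_HAS_WAITERS under the interlock and park; on wakeup the
 * lock has either been handed to us directly or the wait timed out.
 */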
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0);

		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

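/*
 * Slow path shared by pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  It mirrors the read-lock path: CAS in
 * our thread pointer with RW_WRITE_LOCKED while the lock is unowned,
 * otherwise advertise RW_HAS_WAITERS | RW_WRITE_WANTED under the
 * interlock and park until the lock is handed off to us or the wait
 * times out.
 */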
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0);

		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure: %d", errno);
	}
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

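/*
 * The timed variants validate the absolute timeout up front (EINVAL
 * for a NULL pointer or an out-of-range timespec), since the slow
 * paths treat a NULL timeout as "wait forever".
 */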
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


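/*
 * Release the lock.  Because acquisition added either RW_READ_INCR or
 * our thread pointer | RW_WRITE_LOCKED into the owner word, release is
 * a single subtraction in both cases.  If the lock would become
 * unowned while waiters remain, hand it off directly: to the first
 * queued writer if there is one (SUSv3 writer preference), otherwise
 * to all queued readers at once.
 */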
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_exit();
#endif

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				membar_exit();
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early.  Take the interlock, remove
 * the caller from the sleep queue if it has not already been granted
 * the lock, and then recompute the waiter bits to match the sleepers
 * that remain.
 */
static void
pthread__rwlock_early(pthread_t self, pthread_rwlock_t *ptr,
    pthread_mutex_t *interlock)
{
	uintptr_t owner, set, newval, next;
	pthread_queue_t *queue;

	pthread_mutex_lock(interlock);
	if ((queue = self->pt_sleepobj) == NULL) {
		pthread_mutex_unlock(interlock);
		return;
	}
	PTQ_REMOVE(queue, self, pt_sleep);
	self->pt_sleepobj = NULL;
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		newval = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, newval);
		if (owner == next)
			break;
	}
	pthread_mutex_unlock(interlock);
}

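/*
 * Non-portable introspection helpers, intended for diagnostic and
 * assertion use.  Read holds are not tracked per thread, so the
 * "held" and "rdheld" checks can only report that some reader holds
 * the lock, not that the calling thread does.
 */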
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

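/*
 * Process-shared rwlocks are not supported: only
 * PTHREAD_PROCESS_PRIVATE is accepted, and requesting
 * PTHREAD_PROCESS_SHARED reports ENOSYS.  This block is compiled only
 * when _PTHREAD_PSHARED is defined.
 */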
#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}