/*	$OpenBSD: rthread_rwlock.c,v 1.13 2019/03/03 18:39:10 visa Exp $ */
/*
 * Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include <pthread.h>

#include "rthread.h"
#include "synch.h"

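/*
 * The lock word, rwlock->value, encodes the whole state of the lock:
 * the low 31 bits (COUNT()) hold the number of readers, or WRITER
 * (0x7fffffff) when a writer owns the lock, and the WAITING bit is
 * set once a thread has had to sleep in the kernel, so the unlocking
 * thread knows it must issue a wakeup.
 */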
#define UNLOCKED	0
#define MAXREADER	0x7ffffffe
#define WRITER		0x7fffffff
#define WAITING		0x80000000
#define COUNT(v)	((v) & WRITER)

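/*
 * Before sleeping in the kernel, contended lockers spin up to
 * SPIN_COUNT iterations in the hope that the current owner releases
 * the lock soon.  On x86 the "pause" instruction hints to the CPU
 * that this is a spin-wait loop.
 */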
#define SPIN_COUNT	128
#if defined(__i386__) || defined(__amd64__)
#define SPIN_WAIT()	asm volatile("pause": : : "memory")
#else
#define SPIN_WAIT()	do { } while (0)
#endif

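/*
 * Serializes the one-time dynamic initialization of rwlocks that were
 * statically initialized (see _rthread_rwlock_ensure_init()).
 */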
static _atomic_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED;

int
pthread_rwlock_init(pthread_rwlock_t *lockp,
    const pthread_rwlockattr_t *attrp __unused)
{
	pthread_rwlock_t rwlock;

	rwlock = calloc(1, sizeof(*rwlock));
	if (!rwlock)
		return (errno);

	*lockp = rwlock;

	return (0);
}
DEF_STD(pthread_rwlock_init);

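/*
 * Free the lock object.  If the lock is still locked (or has waiters),
 * warn on stderr and return EBUSY instead of freeing it.
 */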
int
pthread_rwlock_destroy(pthread_rwlock_t *lockp)
{
	pthread_rwlock_t rwlock;

	rwlock = *lockp;
	if (rwlock) {
		if (rwlock->value != UNLOCKED) {
#define MSG "pthread_rwlock_destroy on rwlock with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free((void *)rwlock);
		*lockp = NULL;
	}

	return (0);
}

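/*
 * Statically initialized rwlocks start out as a NULL pointer; allocate
 * the real lock object the first time one is used.  The check-lock-check
 * sequence under rwlock_init_lock keeps two racing threads from both
 * initializing the lock.
 */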
static int
_rthread_rwlock_ensure_init(pthread_rwlock_t *rwlockp)
{
	int ret = 0;

	/*
	 * If the rwlock is statically initialized, perform the dynamic
	 * initialization.
	 */
	if (*rwlockp == NULL) {
		_spinlock(&rwlock_init_lock);
		if (*rwlockp == NULL)
			ret = pthread_rwlock_init(rwlockp, NULL);
		_spinunlock(&rwlock_init_lock);
	}
	return (ret);
}

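/*
 * Fast path for read locking: atomically bump the reader count as long
 * as no writer holds the lock.  Returns EBUSY if a writer owns the
 * lock, EAGAIN if the reader count would overflow MAXREADER, and 0
 * with acquire ordering (membar_enter_after_atomic()) on success.
 */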
static int
_rthread_rwlock_tryrdlock(pthread_rwlock_t rwlock)
{
	unsigned int val;

	do {
		val = rwlock->value;
		if (COUNT(val) == WRITER)
			return (EBUSY);
		if (COUNT(val) == MAXREADER)
			return (EAGAIN);
	} while (atomic_cas_uint(&rwlock->value, val, val + 1) != val);

	membar_enter_after_atomic();
	return (0);
}

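/*
 * Common backend for pthread_rwlock_rdlock(), tryrdlock() and
 * timedrdlock().  After a failed fast path (unless only a try was
 * requested), spin briefly, then mark the lock word WAITING and sleep
 * on it in the kernel until the writer goes away or the absolute
 * timeout expires.
 */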
static int
_rthread_rwlock_timedrdlock(pthread_rwlock_t *rwlockp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;
	int i, error;

	if ((error = _rthread_rwlock_ensure_init(rwlockp)))
		return (error);

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_%srdlock %p (%u)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)rwlock,
	    rwlock->value);

	error = _rthread_rwlock_tryrdlock(rwlock);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		val = rwlock->value;
		if (val == UNLOCKED || (val & WAITING))
			break;

		SPIN_WAIT();
	}

	while ((error = _rthread_rwlock_tryrdlock(rwlock)) == EBUSY) {
		val = rwlock->value;
		if (val == UNLOCKED || (COUNT(val)) != WRITER)
			continue;
		new = val | WAITING;
		if (atomic_cas_uint(&rwlock->value, val, new) == val) {
			error = _twait(&rwlock->value, new, CLOCK_REALTIME,
			    abs);
		}
		if (error == ETIMEDOUT)
			break;
	}

	return (error);
}

int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 1, NULL, 0));
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlockp,
    const struct timespec *abs)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 0, abs, 1));
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedrdlock(rwlockp, 0, NULL, 0));
}

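/*
 * Fast path for write locking: a single compare-and-swap takes the
 * lock from UNLOCKED to WRITER.  Returns EBUSY if the lock is held by
 * anyone, and 0 with acquire ordering on success.
 */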
static int
_rthread_rwlock_tryrwlock(pthread_rwlock_t rwlock)
{
	if (atomic_cas_uint(&rwlock->value, UNLOCKED, WRITER) != UNLOCKED)
		return (EBUSY);

	membar_enter_after_atomic();
	return (0);
}

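/*
 * Common backend for pthread_rwlock_wrlock(), trywrlock() and
 * timedwrlock().  Mirrors the read-lock slow path: spin briefly, then
 * set WAITING and sleep on the lock word whenever the lock is held,
 * whether by readers or by another writer, until it is released or
 * the absolute timeout expires.
 */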
static int
_rthread_rwlock_timedwrlock(pthread_rwlock_t *rwlockp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;
	int i, error;

	if ((error = _rthread_rwlock_ensure_init(rwlockp)))
		return (error);

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_%swrlock %p (%u)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)rwlock,
	    rwlock->value);

	error = _rthread_rwlock_tryrwlock(rwlock);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		val = rwlock->value;
		if (val == UNLOCKED || (val & WAITING))
			break;

		SPIN_WAIT();
	}

	while ((error = _rthread_rwlock_tryrwlock(rwlock)) == EBUSY) {
		val = rwlock->value;
		if (val == UNLOCKED)
			continue;
		new = val | WAITING;
		if (atomic_cas_uint(&rwlock->value, val, new) == val) {
			error = _twait(&rwlock->value, new, CLOCK_REALTIME,
			    abs);
		}
		if (error == ETIMEDOUT)
			break;
	}

	return (error);
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 1, NULL, 0));
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlockp,
    const struct timespec *abs)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 0, abs, 1));
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlockp)
{
	return (_rthread_rwlock_timedwrlock(rwlockp, 0, NULL, 0));
}

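/*
 * Release either kind of lock.  The writer, or the last remaining
 * reader, resets the lock word to UNLOCKED; otherwise the reader count
 * is simply decremented.  membar_exit_before_atomic() provides release
 * ordering, and if the lock became free while the WAITING bit was set,
 * every sleeping thread is woken so readers and writers can race for
 * the lock again.
 */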
int
pthread_rwlock_unlock(pthread_rwlock_t *rwlockp)
{
	pthread_t self = pthread_self();
	pthread_rwlock_t rwlock;
	unsigned int val, new;

	rwlock = *rwlockp;
	_rthread_debug(5, "%p: rwlock_unlock %p\n", self, (void *)rwlock);

	membar_exit_before_atomic();
	do {
		val = rwlock->value;
		if (COUNT(val) == WRITER || COUNT(val) == 1)
			new = UNLOCKED;
		else
			new = val - 1;
	} while (atomic_cas_uint(&rwlock->value, val, new) != val);

	if (new == UNLOCKED && (val & WAITING))
		_wake(&rwlock->value, INT_MAX);

	return (0);
}