/*	$NetBSD: pthread_rwlock.c,v 1.32 2008/10/25 14:14:11 yamt Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.32 2008/10/25 14:14:11 yamt Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
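
/*
 * A sketch of how the owner word is laid out (the RW_* constants come
 * from the headers included above and are the authority):
 *
 *	RW_WRITE_LOCKED set	RW_THREAD bits hold the owning pthread_t
 *	RW_WRITE_LOCKED clear	RW_THREAD bits hold the reader count,
 *				in units of RW_READ_INCR
 *	RW_HAS_WAITERS		sleep queues must be checked on release
 *	RW_WRITE_WANTED		a writer is queued, so new readers must
 *				block
 *
 * Everything below is built from rw_cas() loops over this one word.
 */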

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
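
/*
 * Typical application-level use of the interfaces below (an
 * illustrative sketch only; error handling omitted):
 *
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlock_init(&lock, NULL);
 *	pthread_rwlock_rdlock(&lock);
 *	... read shared state ...
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_wrlock(&lock);
 *	... modify shared state ...
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);
 */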


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
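
/*
 * The spin above is adaptive: busy-waiting only pays while the lock is
 * write held and its owner is running on another CPU (lc_curcpu !=
 * LWPCTL_CPU_NONE) and not itself blocked, since only then is the hold
 * likely to end soon.  Read held locks are never spun on, as there is
 * no single owner to watch.
 */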

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
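
/*
 * Note the direct handoff protocol above: when the lock is granted to
 * a sleeper, pthread_rwlock_unlock() stores _RW_LOCKED into
 * pt_rwlocked and installs the new owner word on the sleeper's behalf
 * before waking it.  A thread that wakes to find pt_rwlocked ==
 * _RW_LOCKED therefore already owns the lock and never re-contends
 * for it.
 */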


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
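
/*
 * A common caller pattern for the try-lock variants (illustrative
 * sketch only):
 *
 *	if (pthread_rwlock_tryrdlock(&lock) == 0) {
 *		... read shared state ...
 *		pthread_rwlock_unlock(&lock);
 *	} else {
 *		... EBUSY: do other work, or fall back to the
 *		blocking pthread_rwlock_rdlock() ...
 *	}
 */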

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
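
/*
 * The write path mirrors the read path, with two differences: the
 * acquire test requires the entire RW_THREAD field to be clear (no
 * readers at all), and writers queue at the tail of ptr_wblocked
 * (FIFO among writers) while readers are pushed onto the head of
 * ptr_rblocked, since all blocked readers are released in one batch
 * anyway.
 */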


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
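
/*
 * The timeout is an absolute time on the realtime clock, as POSIX
 * specifies.  A caller willing to wait at most five seconds would do
 * something like this (illustrative sketch):
 *
 *	struct timespec ts;
 *	int error;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	error = pthread_rwlock_timedrdlock(&lock, &ts);
 *	if (error == ETIMEDOUT)
 *		... give up ...
 */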

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release paths similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
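
/*
 * To summarize the release path above: the common case is the single
 * CAS that subtracts this thread's contribution from the owner word.
 * Direct handoff is needed only when the lock would go unowned while
 * RW_HAS_WAITERS is set.  In that case the first queued writer, if
 * any, becomes the owner outright; otherwise every queued reader is
 * granted the lock in one batch and the new owner word is the sum of
 * their RW_READ_INCR contributions.
 */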

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
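
/*
 * The predicates above back "is this lock held?" assertions, e.g.
 * guarding a writers-only code path (illustrative sketch):
 *
 *	assert(_pthread_rwlock_wrheld_np(&lock));
 *
 * Note that unless the caller itself holds the lock, the answer may
 * be stale by the time it is returned.
 */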

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
648