/*	$NetBSD: pthread_rwlock.c,v 1.11 2005/01/09 01:57:38 nathanw Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.11 2005/01/09 01:57:38 nathanw Exp $");

#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

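/*
 * Initialize a read/write lock.  The attribute object is only validated
 * (under ERRORCHECK); no attributes currently affect the lock.
 */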
int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
	    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}


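/*
 * Destroy a read/write lock.  With ERRORCHECK, refuse if the lock is
 * still held or has waiters; otherwise just mark the magic as dead.
 */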
int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}


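/*
 * Acquire a read lock, sleeping on ptr_rblocked while a writer holds
 * the lock or writers are waiting.
 */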
int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


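/*
 * Try to acquire a read lock without blocking; fail with EBUSY if a
 * writer holds the lock or writers are waiting.
 */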
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


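/*
 * Acquire a write lock, sleeping on ptr_wblocked while the lock is
 * held by readers or by another writer.
 */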
int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


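/*
 * Try to acquire a write lock without blocking; fail with EBUSY if the
 * lock is held by readers or by another writer.
 */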
int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


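/*
 * Argument passed to the timeout callback for the timed lock
 * operations: the waiting thread, the lock, and the queue it sleeps on.
 */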
struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

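/*
 * Acquire a read lock as in pthread_rwlock_rdlock(), but give up and
 * return ETIMEDOUT once the absolute timeout expires.
 */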
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	/*
	 * One last chance to get the lock, in case it was released between
	 * the alarm firing and when this thread got rescheduled, or in case
	 * a signal handler kept it busy.
	 */
	if ((rwlock->ptr_writer == NULL) &&
	    (PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		rwlock->ptr_nreaders++;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


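/*
 * Acquire a write lock as in pthread_rwlock_wrlock(), but give up and
 * return ETIMEDOUT once the absolute timeout expires.
 */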
int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if ((rwlock->ptr_nreaders == 0) && (rwlock->ptr_writer == NULL)) {
		rwlock->ptr_writer = self;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


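/*
 * Timeout callback, run when the alarm for a timed lock operation
 * fires: pull the timed-out thread off its sleep queue and make it
 * runnable again.
 */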
static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Don't dequeue and schedule the thread if it's already been
	 * queued up by a signal or broadcast (but hasn't yet run as far
	 * as pthread__alarm_del(), or we wouldn't be here, and hence can't
	 * have become blocked on some *other* queue).
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);

}


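/*
 * Release a read or write lock.  Wake the first waiting writer if
 * there is one; when a write lock is released with no writers waiting,
 * wake all blocked readers instead.
 */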
int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else
#ifdef ERRORCHECK
	if (rwlock->ptr_nreaders > 0)
#endif
	{
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
#ifdef ERRORCHECK
	} else {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EPERM;
#endif
	}

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		pthread__sched_sleepers(self, &blockedq);

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


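/*
 * Initialize a read/write lock attribute object.  No attributes are
 * currently defined; only the magic number is set.
 */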
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


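/*
 * Destroy a read/write lock attribute object by marking its magic as
 * dead.
 */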
int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}