/*	$NetBSD: pthread_rwlock.c,v 1.9 2004/08/03 11:50:45 yamt Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.9 2004/08/03 11:50:45 yamt Exp $");

#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
	    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}
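
/*
 * Illustrative sketch (not part of the original file): typical creation and
 * teardown of a lock through the interface above, passing NULL to request
 * the default attributes.  The identifiers db_lock, db_open and db_close are
 * hypothetical example names.
 *
 *	#include <err.h>
 *	#include <pthread.h>
 *	#include <string.h>
 *
 *	static pthread_rwlock_t db_lock;
 *
 *	void
 *	db_open(void)
 *	{
 *		int error;
 *
 *		// NULL selects the default attributes; an attribute object
 *		// set up with pthread_rwlockattr_init() may be passed instead.
 *		error = pthread_rwlock_init(&db_lock, NULL);
 *		if (error != 0)
 *			errx(1, "pthread_rwlock_init: %s", strerror(error));
 *	}
 *
 *	void
 *	db_close(void)
 *	{
 *		// Destroying a lock that is still held or has waiters is an error.
 *		pthread_rwlock_destroy(&db_lock);
 *	}
 */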


int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}


int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
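
/*
 * Illustrative sketch (not part of the original file): a read-side critical
 * section as seen by a caller of pthread_rwlock_rdlock() above.  Any number
 * of threads may hold the read lock at once, but new readers queue behind a
 * writer or behind waiting writers, per the writer-preference policy noted in
 * the comment.  The identifiers counters_lock, counters and lookup_counter
 * are hypothetical, and the sketch assumes the standard
 * PTHREAD_RWLOCK_INITIALIZER static initializer from <pthread.h>.
 *
 *	#include <pthread.h>
 *
 *	static pthread_rwlock_t counters_lock = PTHREAD_RWLOCK_INITIALIZER;
 *	static int counters[16];
 *
 *	int
 *	lookup_counter(int i)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&counters_lock);
 *		v = counters[i];	// shared data read under the read lock
 *		pthread_rwlock_unlock(&counters_lock);
 *		return v;
 *	}
 */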


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
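
/*
 * Illustrative sketch (not part of the original file): the matching write-side
 * critical section, reusing the hypothetical counters_lock/counters
 * declarations from the reader sketch above.  A writer excludes all readers
 * and other writers; with ERRORCHECK enabled, a thread that already owns the
 * write lock gets EDEADLK back instead of deadlocking against itself.
 *
 *	void
 *	bump_counter(int i)
 *	{
 *		pthread_rwlock_wrlock(&counters_lock);
 *		counters[i]++;		// exclusive access to the shared data
 *		pthread_rwlock_unlock(&counters_lock);
 *	}
 */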


int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
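
/*
 * Illustrative sketch (not part of the original file): the try variants above
 * return EBUSY instead of sleeping, so callers typically fall back to other
 * work and retry later.  try_bump_counter is a hypothetical example name and
 * the counters_lock/counters declarations come from the earlier sketch.
 *
 *	#include <errno.h>
 *
 *	int
 *	try_bump_counter(int i)
 *	{
 *		int error;
 *
 *		error = pthread_rwlock_trywrlock(&counters_lock);
 *		if (error != 0)
 *			return 0;	// EBUSY (or another error): skip, retry later
 *		counters[i]++;
 *		pthread_rwlock_unlock(&counters_lock);
 *		return 1;
 *	}
 */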


struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}
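
/*
 * Illustrative sketch (not part of the original file): abs_timeout is an
 * absolute CLOCK_REALTIME time, so a caller builds it from the current time
 * plus the desired delay.  lookup_counter_timed is a hypothetical example
 * name; counters_lock/counters are from the earlier sketch.
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	int
 *	lookup_counter_timed(int i, int *vp)
 *	{
 *		struct timespec ts;
 *		int error;
 *
 *		clock_gettime(CLOCK_REALTIME, &ts);
 *		ts.tv_sec += 2;		// give up after roughly two seconds
 *		error = pthread_rwlock_timedrdlock(&counters_lock, &ts);
 *		if (error != 0)
 *			return error;	// commonly ETIMEDOUT: writers held us off
 *		*vp = counters[i];
 *		pthread_rwlock_unlock(&counters_lock);
 *		return 0;
 *	}
 */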


int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Only dequeue and schedule the thread if it is still blocked on
	 * our queue.  If an unlock has already woken it, it cannot have
	 * become blocked on some *other* queue yet: it has not run as far
	 * as pthread__alarm_del(), or we would not be here.
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else
#ifdef ERRORCHECK
	if (rwlock->ptr_nreaders > 0)
#endif
	{
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
#ifdef ERRORCHECK
	} else {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EPERM;
#endif
	}

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		pthread__sched_sleepers(self, &blockedq);

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
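
/*
 * Illustrative sketch (not part of the original file): there is a single
 * unlock entry point for both kinds of hold; the function above works out
 * whether a read or a write lock is being released.  With ERRORCHECK enabled,
 * releasing a lock the caller does not hold reports EPERM.  The identifiers
 * cfg_lock and reload_config are hypothetical.
 *
 *	#include <assert.h>
 *	#include <pthread.h>
 *
 *	static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	void
 *	reload_config(void)
 *	{
 *		int error;
 *
 *		pthread_rwlock_wrlock(&cfg_lock);
 *		// ... replace the configuration here ...
 *		error = pthread_rwlock_unlock(&cfg_lock);
 *		assert(error == 0);	// EPERM would mean we did not hold the lock
 *	}
 */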


int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
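
/*
 * Illustrative sketch (not part of the original file): the attribute object
 * accepted by pthread_rwlock_init().  In this implementation it carries no
 * settable properties beyond the magic number used by the ERRORCHECK
 * consistency checks, but the usual create/use/destroy pattern still applies.
 * request_lock and request_lock_setup are hypothetical example names.
 *
 *	#include <pthread.h>
 *
 *	static pthread_rwlock_t request_lock;
 *
 *	void
 *	request_lock_setup(void)
 *	{
 *		pthread_rwlockattr_t attr;
 *
 *		pthread_rwlockattr_init(&attr);
 *		pthread_rwlock_init(&request_lock, &attr);
 *		pthread_rwlockattr_destroy(&attr);	// the lock keeps no reference to attr
 *	}
 */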