/*	$NetBSD: pthread_rwlock.c,v 1.5 2003/03/08 08:03:35 lukem Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
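
/*
 * Read/write locks: any number of concurrent readers, or one writer.
 *
 * Illustrative usage sketch only (not part of this file): "lock" and
 * "counter" are hypothetical and the lock is assumed to have been set up
 * elsewhere with pthread_rwlock_init(&lock, NULL).
 *
 *	static pthread_rwlock_t lock;
 *	static int counter;
 *
 *	int
 *	read_counter(void)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&lock);
 *		v = counter;
 *		pthread_rwlock_unlock(&lock);
 *		return v;
 *	}
 *
 *	void
 *	bump_counter(void)
 *	{
 *		pthread_rwlock_wrlock(&lock);
 *		counter++;
 *		pthread_rwlock_unlock(&lock);
 *	}
 */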

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.5 2003/03/08 08:03:35 lukem Exp $");

#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
        if ((rwlock == NULL) ||
            (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
                return EINVAL;
#endif
        rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
        pthread_lockinit(&rwlock->ptr_interlock);
        PTQ_INIT(&rwlock->ptr_rblocked);
        PTQ_INIT(&rwlock->ptr_wblocked);
        rwlock->ptr_nreaders = 0;
        rwlock->ptr_writer = NULL;

        return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
        if ((rwlock == NULL) ||
            (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
            (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
            (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
            (rwlock->ptr_nreaders != 0) ||
            (rwlock->ptr_writer != NULL))
                return EINVAL;
#endif
        rwlock->ptr_magic = _PT_RWLOCK_DEAD;

        return 0;
}


int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
        pthread_t self;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
        if (rwlock->ptr_writer == self) {
                pthread_spinunlock(self, &rwlock->ptr_interlock);
                return EDEADLK;
        }
#endif
        /*
         * Don't get a readlock if there is a writer or if there are waiting
         * writers; i.e. prefer writers to readers. This strategy is dictated
         * by SUSv3.
         */
        while ((rwlock->ptr_writer != NULL) ||
            (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
                PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
                /* Locking a rwlock is not a cancellation point; don't check */
                pthread_spinlock(self, &self->pt_statelock);
                self->pt_state = PT_STATE_BLOCKED_QUEUE;
                self->pt_sleepobj = rwlock;
                self->pt_sleepq = &rwlock->ptr_rblocked;
                self->pt_sleeplock = &rwlock->ptr_interlock;
                pthread_spinunlock(self, &self->pt_statelock);
                pthread__block(self, &rwlock->ptr_interlock);
                /* interlock is not held when we return */
                pthread_spinlock(self, &rwlock->ptr_interlock);
        }

        rwlock->ptr_nreaders++;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return 0;
}

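
/*
 * The "try" variants below never block: if the lock cannot be taken
 * immediately they return EBUSY instead of queueing the caller.
 * Illustrative sketch only (the helper functions named here are
 * hypothetical):
 *
 *	switch (pthread_rwlock_tryrdlock(&lock)) {
 *	case 0:
 *		consult_shared_state();
 *		pthread_rwlock_unlock(&lock);
 *		break;
 *	case EBUSY:
 *		do_something_else_for_now();
 *		break;
 *	default:
 *		handle_error();
 *		break;
 *	}
 */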
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
        pthread_t self;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
        if (rwlock->ptr_writer == self) {
                pthread_spinunlock(self, &rwlock->ptr_interlock);
                return EDEADLK;
        }
#endif
        /*
         * Don't get a readlock if there is a writer or if there are waiting
         * writers; i.e. prefer writers to readers. This strategy is dictated
         * by SUSv3.
         */
        if ((rwlock->ptr_writer != NULL) ||
            (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
                pthread_spinunlock(self, &rwlock->ptr_interlock);
                return EBUSY;
        }

        rwlock->ptr_nreaders++;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return 0;
}


int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
        pthread_t self;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
        /*
         * Prefer writers to readers here; permit writers even if there are
         * waiting readers.
         */
        while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
                PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
                /* Locking a rwlock is not a cancellation point; don't check */
                pthread_spinlock(self, &self->pt_statelock);
                self->pt_state = PT_STATE_BLOCKED_QUEUE;
                self->pt_sleepobj = rwlock;
                self->pt_sleepq = &rwlock->ptr_wblocked;
                self->pt_sleeplock = &rwlock->ptr_interlock;
                pthread_spinunlock(self, &self->pt_statelock);
                pthread__block(self, &rwlock->ptr_interlock);
                /* interlock is not held when we return */
                pthread_spinlock(self, &rwlock->ptr_interlock);
        }

        rwlock->ptr_writer = self;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return 0;
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
        pthread_t self;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
        /*
         * Prefer writers to readers here; permit writers even if there are
         * waiting readers.
         */
        if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
                pthread_spinunlock(self, &rwlock->ptr_interlock);
                return EBUSY;
        }

        rwlock->ptr_writer = self;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return 0;
}

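
/*
 * The timed variants below take an *absolute* timeout, as POSIX specifies
 * for pthread_rwlock_timedrdlock()/pthread_rwlock_timedwrlock().
 * Illustrative sketch only, assuming clock_gettime() with CLOCK_REALTIME
 * (the basis POSIX uses for these timeouts); "lock" is hypothetical:
 *
 *	struct timespec ts;
 *	int error;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	error = pthread_rwlock_timedwrlock(&lock, &ts);
 *	if (error == 0) {
 *		update_shared_state();
 *		pthread_rwlock_unlock(&lock);
 *	} else if (error == ETIMEDOUT) {
 *		report_contention();
 *	}
 */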
struct pthread_rwlock__waitarg {
        pthread_t ptw_thread;
        pthread_rwlock_t *ptw_rwlock;
        struct pthread_queue_t *ptw_queue;
};

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abs_timeout)
{
        pthread_t self;
        struct pthread_rwlock__waitarg wait;
        struct pt_alarm_t alarm;
        int retval;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
        if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
        if (rwlock->ptr_writer == self) {
                pthread_spinunlock(self, &rwlock->ptr_interlock);
                return EDEADLK;
        }
#endif
        /*
         * Don't get a readlock if there is a writer or if there are waiting
         * writers; i.e. prefer writers to readers. This strategy is dictated
         * by SUSv3.
         */
        retval = 0;
        while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
            (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
                wait.ptw_thread = self;
                wait.ptw_rwlock = rwlock;
                wait.ptw_queue = &rwlock->ptr_rblocked;
                pthread__alarm_add(self, &alarm, abs_timeout,
                    pthread_rwlock__callback, &wait);
                PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
                /* Locking a rwlock is not a cancellation point; don't check */
                pthread_spinlock(self, &self->pt_statelock);
                self->pt_state = PT_STATE_BLOCKED_QUEUE;
                self->pt_sleepobj = rwlock;
                self->pt_sleepq = &rwlock->ptr_rblocked;
                self->pt_sleeplock = &rwlock->ptr_interlock;
                pthread_spinunlock(self, &self->pt_statelock);
                pthread__block(self, &rwlock->ptr_interlock);
                /* interlock is not held when we return */
                pthread__alarm_del(self, &alarm);
                if (pthread__alarm_fired(&alarm))
                        retval = ETIMEDOUT;
                pthread_spinlock(self, &rwlock->ptr_interlock);
        }

        if (retval == 0)
                rwlock->ptr_nreaders++;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return retval;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abs_timeout)
{
        struct pthread_rwlock__waitarg wait;
        struct pt_alarm_t alarm;
        int retval;
        pthread_t self;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
        if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
                return EINVAL;
#endif
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
        /*
         * Prefer writers to readers here; permit writers even if there are
         * waiting readers.
         */
        retval = 0;
        while (retval == 0 &&
            ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
                wait.ptw_thread = self;
                wait.ptw_rwlock = rwlock;
                wait.ptw_queue = &rwlock->ptr_wblocked;
                pthread__alarm_add(self, &alarm, abs_timeout,
                    pthread_rwlock__callback, &wait);
                PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
                /* Locking a rwlock is not a cancellation point; don't check */
                pthread_spinlock(self, &self->pt_statelock);
                self->pt_state = PT_STATE_BLOCKED_QUEUE;
                self->pt_sleepobj = rwlock;
                self->pt_sleepq = &rwlock->ptr_wblocked;
                self->pt_sleeplock = &rwlock->ptr_interlock;
                pthread_spinunlock(self, &self->pt_statelock);
                pthread__block(self, &rwlock->ptr_interlock);
                /* interlock is not held when we return */
                pthread__alarm_del(self, &alarm);
                if (pthread__alarm_fired(&alarm))
                        retval = ETIMEDOUT;
                pthread_spinlock(self, &rwlock->ptr_interlock);
        }

        if (retval == 0)
                rwlock->ptr_writer = self;
        pthread_spinunlock(self, &rwlock->ptr_interlock);

        return retval;
}

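
/*
 * Timeout callback, registered with pthread__alarm_add() by the timed
 * lock operations above.  If the alarm fires before the waiter has been
 * woken by an unlock, this pulls the waiting thread off the rwlock's
 * sleep queue and makes it runnable again; the waiter then sees via
 * pthread__alarm_fired() that it timed out.
 */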
static void
pthread_rwlock__callback(void *arg)
{
        struct pthread_rwlock__waitarg *a;
        pthread_t self;

        a = arg;
        self = pthread__self();

        pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
        /*
         * Don't dequeue and schedule the thread if it's already been
         * queued up by a signal or broadcast (but hasn't yet run as far
         * as pthread__alarm_del(), or we wouldn't be here, and hence can't
         * have become blocked on some *other* queue).
         */
        if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
                PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
                pthread__sched(self, a->ptw_thread);
        }
        pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
        pthread_t self, writer;
        struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
        if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
                return EINVAL;
#endif
        writer = NULL;
        PTQ_INIT(&blockedq);
        self = pthread__self();

        pthread_spinlock(self, &rwlock->ptr_interlock);
        if (rwlock->ptr_writer != NULL) {
                /* Releasing a write lock. */
#ifdef ERRORCHECK
                if (rwlock->ptr_writer != self) {
                        pthread_spinunlock(self, &rwlock->ptr_interlock);
                        return EPERM;
                }
#endif
                rwlock->ptr_writer = NULL;
                writer = PTQ_FIRST(&rwlock->ptr_wblocked);
                if (writer != NULL) {
                        PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
                } else {
                        blockedq = rwlock->ptr_rblocked;
                        PTQ_INIT(&rwlock->ptr_rblocked);
                }
        } else {
                /* Releasing a read lock. */
                rwlock->ptr_nreaders--;
                if (rwlock->ptr_nreaders == 0) {
                        writer = PTQ_FIRST(&rwlock->ptr_wblocked);
                        if (writer != NULL)
                                PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
                                    pt_sleep);
                }
        }

        pthread_spinunlock(self, &rwlock->ptr_interlock);

        if (writer != NULL)
                pthread__sched(self, writer);
        else
                pthread__sched_sleepers(self, &blockedq);

        return 0;
}


int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
        if (attr == NULL)
                return EINVAL;
#endif
        attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

        return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
        if ((attr == NULL) ||
            (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
                return EINVAL;
#endif
        attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

        return 0;
}
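
/*
 * Illustrative sketch of the attribute calls above (this implementation
 * interprets no attribute fields beyond the magic number, so the attribute
 * object only validates usage):
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlock_t rw;
 *
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlock_init(&rw, &attr);
 *	pthread_rwlockattr_destroy(&attr);
 *	...
 *	pthread_rwlock_destroy(&rw);
 */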