/*	$NetBSD: kern_rwlock.c,v 1.28 2008/07/29 16:13:39 thorpej Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel reader/writer lock implementation, modeled after those
 * found in Solaris, a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */
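/*
 * Overview of the lock word, summarized from the code below (see
 * sys/rwlock.h for the authoritative bit definitions):
 *
 * All lock state lives in the single word rw->rw_owner.  When write
 * held, the RW_THREAD field holds the owning LWP pointer and the
 * RW_WRITE_LOCKED bit is set.  When read held, the RW_THREAD field is
 * instead a count of readers, advanced in units of RW_READ_INCR.  The
 * low bits carry the RW_HAS_WAITERS, RW_WRITE_WANTED and (with
 * LOCKDEBUG) RW_DEBUG flags.
 *
 * On platforms where atomic operations do not imply memory barriers
 * (__HAVE_ATOMIC_AS_MEMBAR not defined), acquire paths issue
 * membar_enter() after a successful CAS and release paths issue
 * membar_exit() (or membar_producer()) before giving up the hold.
 */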

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.28 2008/07/29 16:13:39 thorpej Exp $");

#define	__RWLOCK_PRIVATE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <dev/lockstat.h>

/*
 * LOCKDEBUG
 */

#if defined(LOCKDEBUG)

#define	RW_WANTLOCK(rw, op, t)						\
	LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER, t);
#define	RW_LOCKED(rw, op)						\
	LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_UNLOCKED(rw, op)						\
	LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_DASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(rw, __func__, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0);

#else	/* LOCKDEBUG */

#define	RW_WANTLOCK(rw, op, t)	/* nothing */
#define	RW_LOCKED(rw, op)	/* nothing */
#define	RW_UNLOCKED(rw, op)	/* nothing */
#define	RW_DASSERT(rw, cond)	/* nothing */

#endif	/* LOCKDEBUG */

/*
 * DIAGNOSTIC
 */

#if defined(DIAGNOSTIC)

#define	RW_ASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(rw, __func__, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else

#define	RW_ASSERT(rw, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

#define	RW_SETDEBUG(rw, on)		((rw)->rw_owner |= (on) ? RW_DEBUG : 0)
#define	RW_DEBUG_P(rw)			(((rw)->rw_owner & RW_DEBUG) != 0)
#if defined(LOCKDEBUG)
#define	RW_INHERITDEBUG(new, old)	(new) |= (old) & RW_DEBUG
#else /* defined(LOCKDEBUG) */
#define	RW_INHERITDEBUG(new, old)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static void	rw_abort(krwlock_t *, const char *, const char *);
static void	rw_dump(volatile void *);
static lwp_t	*rw_owner(wchan_t);

/*
 * rw_cas:
 *
 *	Atomic compare-and-swap on the lock word, carrying the RW_DEBUG
 *	bit over from the old value.  Returns the value found in the
 *	lock word at the time of the operation.
 */
static inline uintptr_t
rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
	    (void *)o, (void *)n);
}

/*
 * rw_swap:
 *
 *	Unconditionally swap a new value into the lock word, carrying the
 *	RW_DEBUG bit over from the old value.  The caller must already
 *	know the value being replaced, which is asserted on.
 */
static inline void
rw_swap(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	n = (uintptr_t)atomic_swap_ptr((volatile void *)&rw->rw_owner,
	    (void *)n);
	RW_DASSERT(rw, n == o);
}

/*
 * For platforms that do not provide stubs, or for the LOCKDEBUG case.
 */
#ifdef LOCKDEBUG
#undef	__HAVE_RW_STUBS
#endif

#ifndef __HAVE_RW_STUBS
__strong_alias(rw_enter,rw_vector_enter);
__strong_alias(rw_exit,rw_vector_exit);
__strong_alias(rw_tryenter,rw_vector_tryenter);
#endif

lockops_t rwlock_lockops = {
	"Reader / writer lock",
	LOCKOPS_SLEEP,
	rw_dump
};

syncobj_t rw_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	rw_owner,
};

/*
 * rw_dump:
 *
 *	Dump the contents of a rwlock structure.
 */
static void
rw_dump(volatile void *cookie)
{
	volatile krwlock_t *rw = cookie;

	printf_nolog("owner/count : %#018lx flags : %#018x\n",
	    (long)RW_OWNER(rw), (int)RW_FLAGS(rw));
}

/*
 * rw_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
static void __noinline
rw_abort(krwlock_t *rw, const char *func, const char *msg)
{

	if (panicstr != NULL)
		return;

	LOCKDEBUG_ABORT(rw, &rwlock_lockops, func, msg);
}

/*
 * rw_init:
 *
 *	Initialize a rwlock for use.
 */
void
rw_init(krwlock_t *rw)
{
	bool dodebug;

	memset(rw, 0, sizeof(*rw));

	dodebug = LOCKDEBUG_ALLOC(rw, &rwlock_lockops,
	    (uintptr_t)__builtin_return_address(0));
	RW_SETDEBUG(rw, dodebug);
}

/*
 * rw_destroy:
 *
 *	Tear down a rwlock.
 */
void
rw_destroy(krwlock_t *rw)
{

	RW_ASSERT(rw, (rw->rw_owner & ~RW_DEBUG) == 0);
	LOCKDEBUG_FREE(RW_DEBUG_P(rw), rw);
}

/*
 * rw_onproc:
 *
 *	Return true if an rwlock owner is running on a CPU in the system.
 *	If the target is waiting on the kernel big lock, then we must
 *	release it.  This is necessary to avoid deadlock.
 *
 *	Note that we can't use the rwlock owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
static int
rw_onproc(uintptr_t owner, struct cpu_info **cip)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lwp_t *l;

	if ((owner & (RW_WRITE_LOCKED|RW_HAS_WAITERS)) != RW_WRITE_LOCKED)
		return 0;
	l = (lwp_t *)(owner & RW_THREAD);

	/* See if the target is running on a CPU somewhere. */
	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		goto run;
	for (CPU_INFO_FOREACH(cii, ci))
		if (ci->ci_curlwp == l)
			goto run;

	/* No: it may be safe to block now. */
	*cip = NULL;
	return 0;

 run:
	/* Target is running; do we need to block? */
	*cip = ci;
	return ci->ci_biglock_wanted != l;
#else
	return 0;
#endif	/* MULTIPROCESSOR */
}

/*
 * rw_vector_enter:
 *
 *	Acquire a rwlock.
 */
void
rw_vector_enter(krwlock_t *rw, const krw_t op)
{
	uintptr_t owner, incr, need_wait, set_wait, curthread, next;
	struct cpu_info *ci;
	turnstile_t *ts;
	int queue;
	lwp_t *l;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_FLAG(lsflag);

	l = curlwp;
	curthread = (uintptr_t)l;

	RW_ASSERT(rw, !cpu_intr_p());
	RW_ASSERT(rw, curthread != 0);
	RW_WANTLOCK(rw, op, false);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	/*
	 * We play a slight trick here.  If we're a reader, we want to
	 * increment the read count.  If we're a writer, we want to
	 * set the owner field and the WRITE_LOCKED bit.
	 *
	 * In the latter case, we expect those bits to be zero,
	 * therefore we can use an add operation to set them, which
	 * means an add operation for both cases.
	 */
	if (__predict_true(op == RW_READER)) {
		incr = RW_READ_INCR;
		set_wait = RW_HAS_WAITERS;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
		queue = TS_READER_Q;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		set_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
		queue = TS_WRITER_Q;
	}

	LOCKSTAT_ENTER(lsflag);

	for (ci = NULL, owner = rw->rw_owner;;) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & need_wait) == 0) {
			next = rw_cas(rw, owner, (owner + incr) &
			    ~RW_WRITE_WANTED);
			if (__predict_true(next == owner)) {
				/* Got it! */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
				membar_enter();
#endif
				break;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			owner = next;
			continue;
		}

		if (__predict_false(panicstr != NULL))
			return;
		if (__predict_false(RW_OWNER(rw) == curthread))
			rw_abort(rw, __func__, "locking against myself");

		/*
		 * If the lock owner is running on another CPU, and
		 * there are no existing waiters, then spin.
		 */
		if (rw_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			u_int count = SPINLOCK_BACKOFF_MIN;
			do {
				SPINLOCK_BACKOFF(count);
				owner = rw->rw_owner;
			} while (rw_onproc(owner, &ci));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if ((owner & need_wait) == 0)
				continue;
		}

		/*
		 * Grab the turnstile chain lock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		ts = turnstile_lookup(rw);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 * Reload rw_owner because turnstile_lookup() may have
		 * spun on the turnstile chain lock.
		 */
		owner = rw->rw_owner;
		if ((owner & need_wait) == 0 || rw_onproc(owner, &ci)) {
			turnstile_exit(rw);
			continue;
		}
		next = rw_cas(rw, owner, owner | set_wait);
		if (__predict_false(next != owner)) {
			turnstile_exit(rw);
			owner = next;
			continue;
		}

		LOCKSTAT_START_TIMER(lsflag, slptime);
		turnstile_block(ts, queue, rw, &rw_syncobj);
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		/*
		 * No need for a memory barrier because of context switch.
		 * If not handed the lock, then spin again.
		 */
		if (op == RW_READER || (rw->rw_owner & RW_THREAD) == curthread)
			break;
	}

	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK |
	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));
	RW_LOCKED(rw, op);
}

/*
 * rw_vector_exit:
 *
 *	Release a rwlock.
 */
void
rw_vector_exit(krwlock_t *rw)
{
	uintptr_t curthread, owner, decr, new, next;
	turnstile_t *ts;
	int rcnt, wcnt;
	lwp_t *l;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);

	if (__predict_false(panicstr != NULL))
		return;

	/*
	 * Again, we use a trick.  Since we used an add operation to
	 * set the required lock bits, we can use a subtract to clear
	 * them, which makes the read-release and write-release path
	 * the same.
	 */
	owner = rw->rw_owner;
	if (__predict_false((owner & RW_WRITE_LOCKED) != 0)) {
		RW_UNLOCKED(rw, RW_WRITER);
		RW_ASSERT(rw, RW_OWNER(rw) == curthread);
		decr = curthread | RW_WRITE_LOCKED;
	} else {
		RW_UNLOCKED(rw, RW_READER);
		RW_ASSERT(rw, RW_COUNT(rw) != 0);
		decr = RW_READ_INCR;
	}

	/*
	 * Compute what we expect the new value of the lock to be.  Only
	 * proceed to do direct handoff if there are waiters, and if the
	 * lock would become unowned.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	for (;;) {
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) == RW_HAS_WAITERS)
			break;
		next = rw_cas(rw, owner, new);
		if (__predict_true(next == owner))
			return;
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	ts = turnstile_lookup(rw);
	owner = rw->rw_owner;
	RW_DASSERT(rw, ts != NULL);
	RW_DASSERT(rw, (owner & RW_HAS_WAITERS) != 0);

	wcnt = TS_WAITERS(ts, TS_WRITER_Q);
	rcnt = TS_WAITERS(ts, TS_READER_Q);

	/*
	 * Give the lock away.
	 *
	 * If we are releasing a write lock, then prefer to wake all
	 * outstanding readers.  Otherwise, wake one writer if there
	 * are outstanding readers, or all writers if there are no
	 * pending readers.  If waking one specific writer, the writer
	 * is handed the lock here.  If waking multiple writers, we
	 * set WRITE_WANTED to block out new readers, and let them
	 * do the work of acquiring the lock in rw_vector_enter().
	 */
	if (rcnt == 0 || (decr == RW_READ_INCR && wcnt != 0)) {
		RW_DASSERT(rw, wcnt != 0);
		RW_DASSERT(rw, (owner & RW_WRITE_WANTED) != 0);

		if (rcnt != 0) {
			/* Give the lock to the longest waiting writer. */
			l = TS_FIRST(ts, TS_WRITER_Q);
			new = (uintptr_t)l | RW_WRITE_LOCKED | RW_HAS_WAITERS;
			if (wcnt > 1)
				new |= RW_WRITE_WANTED;
			rw_swap(rw, owner, new);
			turnstile_wakeup(ts, TS_WRITER_Q, 1, l);
		} else {
			/* Wake all writers and let them fight it out. */
			rw_swap(rw, owner, RW_WRITE_WANTED);
			turnstile_wakeup(ts, TS_WRITER_Q, wcnt, NULL);
		}
	} else {
		RW_DASSERT(rw, rcnt != 0);

		/*
		 * Give the lock to all blocked readers.  If there
		 * is a writer waiting, new readers that arrive
		 * after the release will be blocked out.
		 */
		new = rcnt << RW_READ_COUNT_SHIFT;
		if (wcnt != 0)
			new |= RW_HAS_WAITERS | RW_WRITE_WANTED;

		/* Wake up all sleeping readers. */
		rw_swap(rw, owner, new);
		turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
	}
}

/*
 * rw_vector_tryenter:
 *
 *	Try to acquire a rwlock.
 */
int
rw_vector_tryenter(krwlock_t *rw, const krw_t op)
{
	uintptr_t curthread, owner, incr, need_wait, next;

	curthread = (uintptr_t)curlwp;

	RW_ASSERT(rw, curthread != 0);

	if (op == RW_READER) {
		incr = RW_READ_INCR;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
	}

	for (owner = rw->rw_owner;; owner = next) {
		if (__predict_false((owner & need_wait) != 0))
			return 0;
		next = rw_cas(rw, owner, owner + incr);
		if (__predict_true(next == owner)) {
			/* Got it! */
			break;
		}
	}

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
	RW_WANTLOCK(rw, op, true);
	RW_LOCKED(rw, op);
	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));

	return 1;
}

/*
 * rw_downgrade:
 *
 *	Downgrade a write lock to a read lock.
 */
void
rw_downgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, new, next;
	turnstile_t *ts;
	int rcnt, wcnt;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) != 0);
	RW_ASSERT(rw, RW_OWNER(rw) == curthread);
	RW_UNLOCKED(rw, RW_WRITER);

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_producer();
#endif

	owner = rw->rw_owner;
	if ((owner & RW_HAS_WAITERS) == 0) {
		/*
		 * There are no waiters, so we can do this the easy way.
		 * Try swapping us down to one read hold.  If it fails, the
		 * lock condition has changed and we most likely now have
		 * waiters.
		 */
		next = rw_cas(rw, owner, RW_READ_INCR);
		if (__predict_true(next == owner)) {
			RW_LOCKED(rw, RW_READER);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
			RW_DASSERT(rw, RW_COUNT(rw) != 0);
			return;
		}
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	for (;; owner = next) {
		ts = turnstile_lookup(rw);
		RW_DASSERT(rw, ts != NULL);

		rcnt = TS_WAITERS(ts, TS_READER_Q);
		wcnt = TS_WAITERS(ts, TS_WRITER_Q);

		/*
		 * If there are no readers, just preserve the waiters
		 * bits, swap us down to one read hold and return.
		 */
		if (rcnt == 0) {
			RW_DASSERT(rw, wcnt != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_WANTED) != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_HAS_WAITERS) != 0);

			new = RW_READ_INCR | RW_HAS_WAITERS | RW_WRITE_WANTED;
			next = rw_cas(rw, owner, new);
			turnstile_exit(rw);
			if (__predict_true(next == owner))
				break;
		} else {
			/*
			 * Give the lock to all blocked readers.  We may
			 * retain one read hold if downgrading.  If there
			 * is a writer waiting, new readers will be blocked
			 * out.
			 */
			new = (rcnt << RW_READ_COUNT_SHIFT) + RW_READ_INCR;
			if (wcnt != 0)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;

			next = rw_cas(rw, owner, new);
			if (__predict_true(next == owner)) {
				/* Wake up all sleeping readers. */
				turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
				break;
			}
			turnstile_exit(rw);
		}
	}

	RW_LOCKED(rw, RW_READER);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
	RW_DASSERT(rw, RW_COUNT(rw) != 0);
}

/*
 * rw_tryupgrade:
 *
 *	Try to upgrade a read lock to a write lock.  We must be the
 *	only reader.
 */
int
rw_tryupgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, new, next;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_WANTLOCK(rw, RW_WRITER, true);

	for (owner = rw->rw_owner;; owner = next) {
		RW_ASSERT(rw, (owner & RW_WRITE_LOCKED) == 0);
		if (__predict_false((owner & RW_THREAD) != RW_READ_INCR)) {
			RW_ASSERT(rw, (owner & RW_THREAD) != 0);
			return 0;
		}
		new = curthread | RW_WRITE_LOCKED | (owner & ~RW_THREAD);
		next = rw_cas(rw, owner, new);
		if (__predict_true(next == owner))
			break;
	}

	RW_UNLOCKED(rw, RW_READER);
	RW_LOCKED(rw, RW_WRITER);
	RW_DASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
	RW_DASSERT(rw, RW_OWNER(rw) == curthread);

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_producer();
#endif

	return 1;
}

/*
 * rw_read_held:
 *
 *	Returns true if the rwlock is held for reading.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_read_held(krwlock_t *rw)
{
	uintptr_t owner;

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	owner = rw->rw_owner;
	return (owner & RW_WRITE_LOCKED) == 0 && (owner & RW_THREAD) != 0;
}

/*
 * rw_write_held:
 *
 *	Returns true if the rwlock is held for writing.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_write_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & (RW_WRITE_LOCKED | RW_THREAD)) ==
	    (RW_WRITE_LOCKED | (uintptr_t)curlwp);
}

/*
 * rw_lock_held:
 *
 *	Returns true if the rwlock is held for reading or writing.  Must
 *	only be used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_lock_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & RW_THREAD) != 0;
}

/*
 * rw_owner:
 *
 *	Return the current owner of an RW lock, but only if it is write
 *	held.  Used for priority inheritance.
 */
static lwp_t *
rw_owner(wchan_t obj)
{
	krwlock_t *rw = (void *)(uintptr_t)obj;	/* discard qualifiers */
	uintptr_t owner = rw->rw_owner;

	if ((owner & RW_WRITE_LOCKED) == 0)
		return NULL;

	return (void *)(owner & RW_THREAD);
}
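
/*
 * Typical use of the interface implemented above, as an illustrative
 * sketch only; the canonical contract is documented in rwlock(9):
 *
 *	krwlock_t lk;
 *
 *	rw_init(&lk);
 *
 *	rw_enter(&lk, RW_READER);		shared hold; may sleep
 *	...read the protected data...
 *	rw_exit(&lk);
 *
 *	if (rw_tryenter(&lk, RW_WRITER)) {	non-blocking attempt
 *		...modify the protected data...
 *		rw_exit(&lk);
 *	}
 *
 *	rw_destroy(&lk);
 */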