/*	$NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel reader/writer lock implementation, modeled after those
 * found in Solaris, a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $");

#define	__RWLOCK_PRIVATE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <dev/lockstat.h>

/*
 * LOCKDEBUG
 */

#if defined(LOCKDEBUG)

#define	RW_WANTLOCK(rw, op)						\
	LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_LOCKED(rw, op)						\
	LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_UNLOCKED(rw, op)						\
	LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),			\
	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
#define	RW_DASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	RW_WANTLOCK(rw, op)	/* nothing */
#define	RW_LOCKED(rw, op)	/* nothing */
#define	RW_UNLOCKED(rw, op)	/* nothing */
#define	RW_DASSERT(rw, cond)	/* nothing */

#endif	/* LOCKDEBUG */

/*
 * DIAGNOSTIC
 */

#if defined(DIAGNOSTIC)

#define	RW_ASSERT(rw, cond)						\
do {									\
	if (!(cond))							\
		rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
} while (/* CONSTCOND */ 0)

#else

#define	RW_ASSERT(rw, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

#define	RW_SETDEBUG(rw, on)		((rw)->rw_owner |= (on) ? 0 : RW_NODEBUG)
#define	RW_DEBUG_P(rw)			(((rw)->rw_owner & RW_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	RW_INHERITDEBUG(n, o)		(n) |= (o) & RW_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	RW_INHERITDEBUG(n, o)		/* nothing */
#endif /* defined(LOCKDEBUG) */

static void	rw_abort(const char *, size_t, krwlock_t *, const char *);
static void	rw_dump(volatile void *);
static lwp_t	*rw_owner(wchan_t);

static inline uintptr_t
rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
	    (void *)o, (void *)n);
}

static inline void
rw_swap(krwlock_t *rw, uintptr_t o, uintptr_t n)
{

	RW_INHERITDEBUG(n, o);
	n = (uintptr_t)atomic_swap_ptr((volatile void *)&rw->rw_owner,
	    (void *)n);
	RW_DASSERT(rw, n == o);
}
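
/*
 * Illustrative sketch, not part of the original file: every state
 * change below follows the same compare-and-swap retry pattern built
 * on rw_cas().  The caller computes a new owner word from the old one
 * and loops until the transition is applied atomically; compute_new()
 * is a hypothetical helper standing in for whatever transformation
 * the caller wants:
 *
 *	uintptr_t owner, next;
 *
 *	for (owner = rw->rw_owner;; owner = next) {
 *		next = rw_cas(rw, owner, compute_new(owner));
 *		if (next == owner)
 *			break;		(CAS succeeded)
 *	}
 *
 * On failure rw_cas() returns the owner word actually found, so each
 * retry decides based on fresh state without an extra re-read.
 */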

/*
 * For platforms that do not provide stubs, or for the LOCKDEBUG case.
 */
#ifdef LOCKDEBUG
#undef	__HAVE_RW_STUBS
#endif

#ifndef __HAVE_RW_STUBS
__strong_alias(rw_enter,rw_vector_enter);
__strong_alias(rw_exit,rw_vector_exit);
__strong_alias(rw_tryenter,rw_vector_tryenter);
#endif

lockops_t rwlock_lockops = {
	"Reader / writer lock",
	LOCKOPS_SLEEP,
	rw_dump
};

syncobj_t rw_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	rw_owner,
};

/*
 * rw_dump:
 *
 *	Dump the contents of a rwlock structure.
 */
static void
rw_dump(volatile void *cookie)
{
	volatile krwlock_t *rw = cookie;

	printf_nolog("owner/count  : %#018lx flags    : %#018x\n",
	    (long)RW_OWNER(rw), (int)RW_FLAGS(rw));
}

/*
 * rw_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
static void __noinline
rw_abort(const char *func, size_t line, krwlock_t *rw, const char *msg)
{

	if (panicstr != NULL)
		return;

	LOCKDEBUG_ABORT(func, line, rw, &rwlock_lockops, msg);
}

/*
 * rw_init:
 *
 *	Initialize a rwlock for use.
 */
void
rw_init(krwlock_t *rw)
{
	bool dodebug;

	memset(rw, 0, sizeof(*rw));

	dodebug = LOCKDEBUG_ALLOC(rw, &rwlock_lockops,
	    (uintptr_t)__builtin_return_address(0));
	RW_SETDEBUG(rw, dodebug);
}

/*
 * rw_destroy:
 *
 *	Tear down a rwlock.
 */
void
rw_destroy(krwlock_t *rw)
{

	RW_ASSERT(rw, (rw->rw_owner & ~RW_NODEBUG) == 0);
	LOCKDEBUG_FREE(RW_DEBUG_P(rw), rw);
}
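
/*
 * Illustrative sketch, not part of the original file: the expected
 * life cycle of a lock as seen by rwlock(9) consumers ("foo_lock" is
 * hypothetical):
 *
 *	static krwlock_t foo_lock;
 *
 *	rw_init(&foo_lock);
 *
 *	rw_enter(&foo_lock, RW_READER);		(shared hold)
 *	... inspect shared state ...
 *	rw_exit(&foo_lock);
 *
 *	rw_enter(&foo_lock, RW_WRITER);		(exclusive hold)
 *	... modify shared state ...
 *	rw_exit(&foo_lock);
 *
 *	rw_destroy(&foo_lock);			(must be unheld here)
 */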

/*
 * rw_oncpu:
 *
 *	Return true if an rwlock owner is running on a CPU in the system.
 *	If the target is waiting on the kernel big lock, then we must
 *	release it.  This is necessary to avoid deadlock.
 */
static bool
rw_oncpu(uintptr_t owner)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if ((owner & (RW_WRITE_LOCKED|RW_HAS_WAITERS)) != RW_WRITE_LOCKED) {
		return false;
	}
	/*
	 * See lwp_dtor() for why dereferencing the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)(owner & RW_THREAD);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}
#endif
	/* Not running.  It may be safe to block now. */
	return false;
}

/*
 * rw_vector_enter:
 *
 *	Acquire a rwlock.
 */
void
rw_vector_enter(krwlock_t *rw, const krw_t op)
{
	uintptr_t owner, incr, need_wait, set_wait, curthread, next;
	turnstile_t *ts;
	int queue;
	lwp_t *l;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_FLAG(lsflag);

	l = curlwp;
	curthread = (uintptr_t)l;

	RW_ASSERT(rw, !cpu_intr_p());
	RW_ASSERT(rw, curthread != 0);
	RW_WANTLOCK(rw, op);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	/*
	 * We play a slight trick here.  If we're a reader, we want to
	 * increment the read count.  If we're a writer, we want to
	 * set the owner field and the WRITE_LOCKED bit.
	 *
	 * In the latter case, we expect those bits to be zero,
	 * therefore we can use an add operation to set them, which
	 * means an add operation for both cases.
	 */
	if (__predict_true(op == RW_READER)) {
		incr = RW_READ_INCR;
		set_wait = RW_HAS_WAITERS;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
		queue = TS_READER_Q;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		set_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
		queue = TS_WRITER_Q;
	}
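
	/*
	 * Worked example, not in the original file: with both cases
	 * reduced to "owner + incr", the same add covers
	 *
	 *	reader:	owner' = owner + RW_READ_INCR
	 *		(read count += 1, flag bits untouched)
	 *	writer:	owner' = owner + (curthread | RW_WRITE_LOCKED)
	 *		(valid because need_wait ensured the owner
	 *		field and WRITE_LOCKED were both zero)
	 */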

	LOCKSTAT_ENTER(lsflag);

	KPREEMPT_DISABLE(curlwp);
	for (owner = rw->rw_owner;;) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & need_wait) == 0) {
			next = rw_cas(rw, owner, (owner + incr) &
			    ~RW_WRITE_WANTED);
			if (__predict_true(next == owner)) {
				/* Got it! */
				membar_enter();
				break;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			owner = next;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(RW_OWNER(rw) == curthread)) {
			rw_abort(__func__, __LINE__, rw,
			    "locking against myself");
		}
		/*
		 * If the lock owner is running on another CPU, and
		 * there are no existing waiters, then spin.
		 */
		if (rw_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			u_int count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = rw->rw_owner;
			} while (rw_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if ((owner & need_wait) == 0)
				continue;
		}

		/*
		 * Grab the turnstile chain lock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		ts = turnstile_lookup(rw);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 * Reload rw_owner because turnstile_lookup() may have
		 * spun on the turnstile chain lock.
		 */
		owner = rw->rw_owner;
		if ((owner & need_wait) == 0 || rw_oncpu(owner)) {
			turnstile_exit(rw);
			continue;
		}
		next = rw_cas(rw, owner, owner | set_wait);
		if (__predict_false(next != owner)) {
			turnstile_exit(rw);
			owner = next;
			continue;
		}

		LOCKSTAT_START_TIMER(lsflag, slptime);
		turnstile_block(ts, queue, rw, &rw_syncobj);
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		/*
		 * No need for a memory barrier because of context switch.
		 * If not handed the lock, then spin again.
		 */
		if (op == RW_READER || (rw->rw_owner & RW_THREAD) == curthread)
			break;

		owner = rw->rw_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK |
	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));
	RW_LOCKED(rw, op);
}

/*
 * rw_vector_exit:
 *
 *	Release a rwlock.
 */
void
rw_vector_exit(krwlock_t *rw)
{
	uintptr_t curthread, owner, decr, newown, next;
	turnstile_t *ts;
	int rcnt, wcnt;
	lwp_t *l;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);

	if (__predict_false(panicstr != NULL))
		return;

	/*
	 * Again, we use a trick.  Since we used an add operation to
	 * set the required lock bits, we can use a subtract to clear
	 * them, which makes the read-release and write-release path
	 * the same.
	 */
	owner = rw->rw_owner;
	if (__predict_false((owner & RW_WRITE_LOCKED) != 0)) {
		RW_UNLOCKED(rw, RW_WRITER);
		RW_ASSERT(rw, RW_OWNER(rw) == curthread);
		decr = curthread | RW_WRITE_LOCKED;
	} else {
		RW_UNLOCKED(rw, RW_READER);
		RW_ASSERT(rw, RW_COUNT(rw) != 0);
		decr = RW_READ_INCR;
	}
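
	/*
	 * Worked example, not in the original file: release is the
	 * mirror image of the add in rw_vector_enter(),
	 *
	 *	reader:	owner' = owner - RW_READ_INCR
	 *	writer:	owner' = owner - (curthread | RW_WRITE_LOCKED)
	 *
	 * so once "decr" is chosen the CAS loop below is identical
	 * for both kinds of hold.
	 */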

	/*
	 * Compute what we expect the new value of the lock to be. Only
	 * proceed to do direct handoff if there are waiters, and if the
	 * lock would become unowned.
	 */
	membar_exit();
	for (;;) {
		newown = (owner - decr);
		if ((newown & (RW_THREAD | RW_HAS_WAITERS)) == RW_HAS_WAITERS)
			break;
		next = rw_cas(rw, owner, newown);
		if (__predict_true(next == owner))
			return;
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	ts = turnstile_lookup(rw);
	owner = rw->rw_owner;
	RW_DASSERT(rw, ts != NULL);
	RW_DASSERT(rw, (owner & RW_HAS_WAITERS) != 0);

	wcnt = TS_WAITERS(ts, TS_WRITER_Q);
	rcnt = TS_WAITERS(ts, TS_READER_Q);

	/*
	 * Give the lock away.
	 *
	 * If we are releasing a write lock, then prefer to wake all
	 * outstanding readers.  Otherwise, wake one writer if there
	 * are outstanding readers, or all writers if there are no
	 * pending readers.  If waking one specific writer, the writer
	 * is handed the lock here.  If waking multiple writers, we
	 * set WRITE_WANTED to block out new readers, and let them
	 * do the work of acquiring the lock in rw_vector_enter().
	 */
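	/*
	 * Summary of the policy above, added for illustration; "hand
	 * off" means ownership is transferred before the wakeup:
	 *
	 *	releasing	rcnt	wcnt	action
	 *	write lock	> 0	any	wake all readers
	 *	write lock	== 0	> 0	wake all writers
	 *	read lock	> 0	> 0	hand off to one writer
	 *	read lock	== 0	> 0	wake all writers
	 */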
	if (rcnt == 0 || decr == RW_READ_INCR) {
		RW_DASSERT(rw, wcnt != 0);
		RW_DASSERT(rw, (owner & RW_WRITE_WANTED) != 0);

		if (rcnt != 0) {
			/* Give the lock to the longest waiting writer. */
			l = TS_FIRST(ts, TS_WRITER_Q);
			newown = (uintptr_t)l | RW_WRITE_LOCKED | RW_HAS_WAITERS;
			if (wcnt > 1)
				newown |= RW_WRITE_WANTED;
			rw_swap(rw, owner, newown);
			turnstile_wakeup(ts, TS_WRITER_Q, 1, l);
		} else {
			/* Wake all writers and let them fight it out. */
			rw_swap(rw, owner, RW_WRITE_WANTED);
			turnstile_wakeup(ts, TS_WRITER_Q, wcnt, NULL);
		}
	} else {
		RW_DASSERT(rw, rcnt != 0);

		/*
		 * Give the lock to all blocked readers.  If there
		 * is a writer waiting, new readers that arrive
		 * after the release will be blocked out.
		 */
		newown = rcnt << RW_READ_COUNT_SHIFT;
		if (wcnt != 0)
			newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;

		/* Wake up all sleeping readers. */
		rw_swap(rw, owner, newown);
		turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
	}
}

/*
 * rw_vector_tryenter:
 *
 *	Try to acquire a rwlock.
 */
int
rw_vector_tryenter(krwlock_t *rw, const krw_t op)
{
	uintptr_t curthread, owner, incr, need_wait, next;

	curthread = (uintptr_t)curlwp;

	RW_ASSERT(rw, curthread != 0);

	if (op == RW_READER) {
		incr = RW_READ_INCR;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
	}

	for (owner = rw->rw_owner;; owner = next) {
		if (__predict_false((owner & need_wait) != 0))
			return 0;
		next = rw_cas(rw, owner, owner + incr);
		if (__predict_true(next == owner)) {
			/* Got it! */
			membar_enter();
			break;
		}
	}

	RW_WANTLOCK(rw, op);
	RW_LOCKED(rw, op);
	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));

	return 1;
}
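
/*
 * Illustrative sketch, not part of the original file: rw_tryenter()
 * is the tool of choice when sleeping is not permitted, e.g. to avoid
 * a lock-order reversal while another lock is held ("foo_mtx" and
 * "foo_lock" are hypothetical):
 *
 *	mutex_enter(&foo_mtx);
 *	if (!rw_tryenter(&foo_lock, RW_READER)) {
 *		mutex_exit(&foo_mtx);
 *		rw_enter(&foo_lock, RW_READER);	(may now sleep safely)
 *		mutex_enter(&foo_mtx);
 *		... re-validate any state read under foo_mtx ...
 *	}
 */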

/*
 * rw_downgrade:
 *
 *	Downgrade a write lock to a read lock.
 */
void
rw_downgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, newown, next;
	turnstile_t *ts;
	int rcnt, wcnt;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) != 0);
	RW_ASSERT(rw, RW_OWNER(rw) == curthread);
	RW_UNLOCKED(rw, RW_WRITER);
#if !defined(DIAGNOSTIC)
	__USE(curthread);
#endif

	membar_producer();
	owner = rw->rw_owner;
	if ((owner & RW_HAS_WAITERS) == 0) {
		/*
		 * There are no waiters, so we can do this the easy way.
		 * Try swapping us down to one read hold.  If it fails, the
		 * lock condition has changed and we most likely now have
		 * waiters.
		 */
		next = rw_cas(rw, owner, RW_READ_INCR);
		if (__predict_true(next == owner)) {
			RW_LOCKED(rw, RW_READER);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
			RW_DASSERT(rw, RW_COUNT(rw) != 0);
			return;
		}
		owner = next;
	}

	/*
	 * Grab the turnstile chain lock.  This gets the interlock
	 * on the sleep queue.  Once we have that, we can adjust the
	 * waiter bits.
	 */
	for (;; owner = next) {
		ts = turnstile_lookup(rw);
		RW_DASSERT(rw, ts != NULL);

		rcnt = TS_WAITERS(ts, TS_READER_Q);
		wcnt = TS_WAITERS(ts, TS_WRITER_Q);

		/*
		 * If there are no readers, just preserve the waiters
		 * bits, swap us down to one read hold and return.
		 */
		if (rcnt == 0) {
			RW_DASSERT(rw, wcnt != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_WANTED) != 0);
			RW_DASSERT(rw, (rw->rw_owner & RW_HAS_WAITERS) != 0);

			newown = RW_READ_INCR | RW_HAS_WAITERS | RW_WRITE_WANTED;
			next = rw_cas(rw, owner, newown);
			turnstile_exit(rw);
			if (__predict_true(next == owner))
				break;
		} else {
			/*
			 * Give the lock to all blocked readers, while
			 * retaining one read hold for ourselves.  If
			 * there is a writer waiting, new readers will
			 * be blocked out.
			 */
			newown = (rcnt << RW_READ_COUNT_SHIFT) + RW_READ_INCR;
			if (wcnt != 0)
				newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;

			next = rw_cas(rw, owner, newown);
			if (__predict_true(next == owner)) {
				/* Wake up all sleeping readers. */
				turnstile_wakeup(ts, TS_READER_Q, rcnt, NULL);
				break;
			}
			turnstile_exit(rw);
		}
	}

	RW_WANTLOCK(rw, RW_READER);
	RW_LOCKED(rw, RW_READER);
	RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
	RW_DASSERT(rw, RW_COUNT(rw) != 0);
}

/*
 * rw_tryupgrade:
 *
 *	Try to upgrade a read lock to a write lock.  We must be the
 *	only reader.
 */
int
rw_tryupgrade(krwlock_t *rw)
{
	uintptr_t owner, curthread, newown, next;

	curthread = (uintptr_t)curlwp;
	RW_ASSERT(rw, curthread != 0);
	RW_ASSERT(rw, rw_read_held(rw));

	for (owner = rw->rw_owner;; owner = next) {
		RW_ASSERT(rw, (owner & RW_WRITE_LOCKED) == 0);
		if (__predict_false((owner & RW_THREAD) != RW_READ_INCR)) {
			RW_ASSERT(rw, (owner & RW_THREAD) != 0);
			return 0;
		}
		newown = curthread | RW_WRITE_LOCKED | (owner & ~RW_THREAD);
		next = rw_cas(rw, owner, newown);
		if (__predict_true(next == owner)) {
			membar_producer();
			break;
		}
	}

	RW_UNLOCKED(rw, RW_READER);
	RW_WANTLOCK(rw, RW_WRITER);
	RW_LOCKED(rw, RW_WRITER);
	RW_DASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
	RW_DASSERT(rw, RW_OWNER(rw) == curthread);

	return 1;
}
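
/*
 * Illustrative sketch, not part of the original file: a read-mostly
 * path that occasionally needs write access can combine the two
 * operations above ("foo_lock", foo_needs_fixup() and foo_fixup()
 * are hypothetical):
 *
 *	rw_enter(&foo_lock, RW_READER);
 *	if (foo_needs_fixup()) {
 *		if (!rw_tryupgrade(&foo_lock)) {
 *			(other readers hold the lock: restart as a
 *			writer and re-check, the state may have changed)
 *			rw_exit(&foo_lock);
 *			rw_enter(&foo_lock, RW_WRITER);
 *		}
 *		if (foo_needs_fixup())
 *			foo_fixup();
 *		rw_downgrade(&foo_lock);	(back to a read hold)
 *	}
 *	...
 *	rw_exit(&foo_lock);
 */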

/*
 * rw_read_held:
 *
 *	Returns true if the rwlock is held for reading.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_read_held(krwlock_t *rw)
{
	uintptr_t owner;

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	owner = rw->rw_owner;
	return (owner & RW_WRITE_LOCKED) == 0 && (owner & RW_THREAD) != 0;
}

/*
 * rw_write_held:
 *
 *	Returns true if the rwlock is held for writing.  Must only be
 *	used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_write_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & (RW_WRITE_LOCKED | RW_THREAD)) ==
	    (RW_WRITE_LOCKED | (uintptr_t)curlwp);
}

/*
 * rw_lock_held:
 *
 *	Returns true if the rwlock is held for reading or writing.  Must
 *	only be used for diagnostic assertions, and never be used to make
 *	decisions about how to use a rwlock.
 */
int
rw_lock_held(krwlock_t *rw)
{

	if (panicstr != NULL)
		return 1;
	if (rw == NULL)
		return 0;
	return (rw->rw_owner & RW_THREAD) != 0;
}
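
/*
 * Illustrative sketch, not part of the original file: the three
 * predicates above exist purely to back assertions such as
 *
 *	KASSERT(rw_lock_held(&foo_lock));
 *	KASSERT(rw_write_held(&foo_lock));
 *
 * ("foo_lock" is hypothetical).  As the comments above say, they must
 * not steer control flow: the answer may be stale by the time the
 * caller acts on it.
 */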

/*
 * rw_owner:
 *
 *	Return the current owner of an RW lock, but only if it is write
 *	held.  Used for priority inheritance.
 */
static lwp_t *
rw_owner(wchan_t obj)
{
	krwlock_t *rw = (void *)(uintptr_t)obj; /* discard qualifiers */
	uintptr_t owner = rw->rw_owner;

	if ((owner & RW_WRITE_LOCKED) == 0)
		return NULL;

	return (void *)(owner & RW_THREAD);
}
773