/*	$OpenBSD: kern_rwlock.c,v 1.53 2025/01/04 02:34:11 dlg Exp $	*/

/*
 * Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
 * Copyright (c) 2011 Thordur Bjornsson <thib@secnorth.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/limits.h>
#include <sys/atomic.h>
#include <sys/witness.h>

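/*
 * With the RWDIAG kernel option, sleeps on a contended lock time out
 * after ten seconds so a stuck lock can be reported and inspected from
 * ddb; by default a lock sleep has no timeout.
 */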
#ifdef RWDIAG
#include <sys/kernel.h> /* for hz */
#define RW_SLEEP_TMO	(10 * hz)
#else
#define RW_SLEEP_TMO	0
#endif

/*
 * Other OSes implement more sophisticated mechanisms to determine how long
 * a process attempting to acquire the lock should keep spinning. We start
 * with the simplest approach: we make at most RW_SPINS attempts before
 * giving up and putting the process on the sleep queue.
 */
#define RW_SPINS	1000

#ifdef MULTIPROCESSOR
#define rw_cas(p, e, n)	atomic_cas_ulong(p, e, n)
#define rw_inc(p)	atomic_inc_int(p)
#define rw_dec(p)	atomic_dec_int(p)
#else
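/*
 * Uniprocessor fallbacks: with no other CPUs to race against, plain
 * loads and stores are sufficient and no atomic instructions are
 * needed.
 */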
static inline unsigned long
rw_cas(volatile unsigned long *p, unsigned long e, unsigned long n)
{
	unsigned long o = *p;

	if (o == e)
		*p = n;

	return (o);
}

static inline void
rw_inc(volatile unsigned int *p)
{
	++(*p);
}

static inline void
rw_dec(volatile unsigned int *p)
{
	(*p)--;
}
#endif

static int	rw_do_enter_read(struct rwlock *, int);
static void	rw_do_exit_read(struct rwlock *, unsigned long);
static int	rw_do_enter_write(struct rwlock *, int);
static int	rw_downgrade(struct rwlock *, int);

static void	rw_exited(struct rwlock *);

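/*
 * The rwl_owner word encodes the whole lock state: 0 means unlocked;
 * if RWLOCK_WRLOCK is set, the rest of the word holds the owning
 * proc's curproc address, so write ownership can be tested by
 * comparing against rw_self(); otherwise the word is a count of read
 * holds in units of RWLOCK_READ_INCR.
 */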
static unsigned long
rw_self(void)
{
	unsigned long self = (unsigned long)curproc;

	CLR(self, RWLOCK_MASK);
	SET(self, RWLOCK_WRLOCK);

	return (self);
}

void
rw_enter_read(struct rwlock *rwl)
{
	rw_do_enter_read(rwl, 0);
}

void
rw_enter_write(struct rwlock *rwl)
{
	rw_do_enter_write(rwl, 0);
}

void
rw_exit_read(struct rwlock *rwl)
{
	/* maybe we're the last one? */
	rw_do_exit_read(rwl, RWLOCK_READ_INCR);
}

static void
rw_do_exit_read(struct rwlock *rwl, unsigned long owner)
{
	unsigned long decr;
	unsigned long nowner;

	WITNESS_UNLOCK(&rwl->rwl_lock_obj, 0);

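	/*
	 * Drop one read hold with a CAS loop: other readers entering
	 * or leaving change rwl_owner under us, so retry with the
	 * value the failed CAS observed until we win.
	 */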
	for (;;) {
		decr = owner - RWLOCK_READ_INCR;
		nowner = rw_cas(&rwl->rwl_owner, owner, decr);
		if (owner == nowner)
			break;

		if (__predict_false(ISSET(nowner, RWLOCK_WRLOCK))) {
			panic("%s rwlock %p: exit read on write locked lock"
			    " (owner 0x%lx)", rwl->rwl_name, rwl, nowner);
		}
		if (__predict_false(nowner == 0)) {
			panic("%s rwlock %p: exit read on unlocked lock",
			    rwl->rwl_name, rwl);
		}

		owner = nowner;
	}

	/* read lock didn't change anything, so no barrier needed? */

	if (decr == 0) {
		/* last one out */
		rw_exited(rwl);
	}
}

void
rw_exit_write(struct rwlock *rwl)
{
	unsigned long self = rw_self();
	unsigned long owner;

	WITNESS_UNLOCK(&rwl->rwl_lock_obj, LOP_EXCLUSIVE);

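	/* release: publish the critical section before dropping the lock */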
	membar_exit_before_atomic();
	owner = rw_cas(&rwl->rwl_owner, self, 0);
	if (__predict_false(owner != self)) {
		panic("%s rwlock %p: exit write when lock not held "
		    "(owner 0x%lx, self 0x%lx)", rwl->rwl_name, rwl,
		    owner, self);
	}

	rw_exited(rwl);
}

static void
_rw_init_flags_witness(struct rwlock *rwl, const char *name, int lo_flags,
    const struct lock_type *type)
{
	rwl->rwl_owner = 0;
	rwl->rwl_waiters = 0;
	rwl->rwl_readers = 0;
	rwl->rwl_name = name;

#ifdef WITNESS
	rwl->rwl_lock_obj.lo_flags = lo_flags;
	rwl->rwl_lock_obj.lo_name = name;
	rwl->rwl_lock_obj.lo_type = type;
	WITNESS_INIT(&rwl->rwl_lock_obj, type);
#else
	(void)type;
	(void)lo_flags;
#endif
}

void
_rw_init_flags(struct rwlock *rwl, const char *name, int flags,
    const struct lock_type *type)
{
	_rw_init_flags_witness(rwl, name, RWLOCK_LO_FLAGS(flags), type);
}

int
rw_enter(struct rwlock *rwl, int flags)
{
	int op = flags & RW_OPMASK;
	int error;

	switch (op) {
	case RW_WRITE:
		error = rw_do_enter_write(rwl, flags);
		break;
	case RW_READ:
		error = rw_do_enter_read(rwl, flags);
		break;
	case RW_DOWNGRADE:
		error = rw_downgrade(rwl, flags);
		break;
	default:
		panic("%s rwlock %p: %s unexpected op 0x%x",
		    rwl->rwl_name, rwl, __func__, op);
		/* NOTREACHED */
	}

	return (error);
}

static int
rw_do_enter_write(struct rwlock *rwl, int flags)
{
	unsigned long self = rw_self();
	unsigned long owner;
	int prio;
	int error;

#ifdef WITNESS
	int lop_flags = LOP_NEWORDER | LOP_EXCLUSIVE;
	if (ISSET(flags, RW_DUPOK))
		lop_flags |= LOP_DUPOK;

	if (!ISSET(flags, RW_NOSLEEP))
		WITNESS_CHECKORDER(&rwl->rwl_lock_obj, lop_flags, NULL);
#endif

	owner = rw_cas(&rwl->rwl_owner, 0, self);
	if (owner == 0) {
		/* wow, we won. so easy */
		goto locked;
	}
	if (__predict_false(owner == self)) {
		panic("%s rwlock %p: enter write deadlock",
		    rwl->rwl_name, rwl);
	}

#ifdef MULTIPROCESSOR
	/*
	 * If this process holds the kernel lock, give up the CPU as
	 * soon as possible so other processes waiting for the kernel
	 * lock can make progress. Hence, no spinning while holding
	 * the kernel lock.
	 */
	if (!_kernel_lock_held()) {
		int spins;

		/*
		 * It makes sense to spin for a while in case the
		 * current owner releases the lock shortly.
		 */

		for (spins = 0; spins < RW_SPINS; spins++) {
			CPU_BUSY_CYCLE();
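			/*
			 * Peek with a plain load first so we only CAS
			 * when the lock looks free, rather than
			 * hammering the cache line from every CPU.
			 */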
			owner = atomic_load_long(&rwl->rwl_owner);
			if (owner != 0)
				continue;

			owner = rw_cas(&rwl->rwl_owner, 0, self);
			if (owner == 0) {
				/* ok, we won now. */
				goto locked;
			}
		}
	}
#endif

	if (ISSET(flags, RW_NOSLEEP))
		return (EBUSY);

	prio = PLOCK - 4;
	if (ISSET(flags, RW_INTR))
		prio |= PCATCH;

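	/*
	 * Advertise ourselves as a waiter. The producer barrier pairs
	 * with the consumer barrier in rw_exited() so an unlocker
	 * cannot miss the increment and skip the wakeup.
	 */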
	rw_inc(&rwl->rwl_waiters);
	membar_producer();
	do {
		sleep_setup(&rwl->rwl_waiters, prio, rwl->rwl_name);
		membar_consumer();
		owner = atomic_load_long(&rwl->rwl_owner);
		error = sleep_finish(RW_SLEEP_TMO, owner != 0);
#ifdef RWDIAG
		if (error == EWOULDBLOCK) {
			printf("%s rwlock %p: %s timeout owner 0x%lx "
			    "(self 0x%lx)\n", rwl->rwl_name, rwl, __func__,
			    owner, self);
			db_enter();
		}
#endif
		if (ISSET(flags, RW_INTR) && (error != 0)) {
			rw_dec(&rwl->rwl_waiters);
			return (error);
		}
		if (ISSET(flags, RW_SLEEPFAIL)) {
			rw_dec(&rwl->rwl_waiters);
			rw_exited(rwl);
			return (EAGAIN);
		}

		owner = rw_cas(&rwl->rwl_owner, 0, self);
	} while (owner != 0);
	rw_dec(&rwl->rwl_waiters);

locked:
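	/* acquire: the critical section must not start before the CAS */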
	membar_enter_after_atomic();
	WITNESS_LOCK(&rwl->rwl_lock_obj, lop_flags);

	return (0);
}

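/*
 * Try to add one read hold to the owner word. Returns 1 on success,
 * or 0 if the lock is (or becomes) write locked.
 */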
static int
rw_read_incr(struct rwlock *rwl, unsigned long owner)
{
	unsigned long incr;
	unsigned long nowner;

	do {
		incr = owner + RWLOCK_READ_INCR;
		nowner = rw_cas(&rwl->rwl_owner, owner, incr);
		if (nowner == owner)
			return (1);

		owner = nowner;
	} while (!ISSET(owner, RWLOCK_WRLOCK));

	return (0);
}

static int
rw_do_enter_read(struct rwlock *rwl, int flags)
{
	unsigned long owner;
	int error;
	int prio;

#ifdef WITNESS
	int lop_flags = LOP_NEWORDER;
	if (ISSET(flags, RW_DUPOK))
		lop_flags |= LOP_DUPOK;
	if (!ISSET(flags, RW_NOSLEEP))
		WITNESS_CHECKORDER(&rwl->rwl_lock_obj, lop_flags, NULL);
#endif

	owner = rw_cas(&rwl->rwl_owner, 0, RWLOCK_READ_INCR);
	if (owner == 0) {
		/* ermagerd, we won! */
		goto locked;
	}

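	/*
	 * A write owner means a potential deadlock against ourselves;
	 * otherwise only take the lock directly while no writer is
	 * waiting, so readers cannot barge in and starve writers.
	 */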
	if (ISSET(owner, RWLOCK_WRLOCK)) {
		if (__predict_false(owner == rw_self())) {
			panic("%s rwlock %p: enter read deadlock",
			    rwl->rwl_name, rwl);
		}
	} else if (atomic_load_int(&rwl->rwl_waiters) == 0) {
		if (rw_read_incr(rwl, owner)) {
			/* nailed it */
			goto locked;
		}
	}

	if (ISSET(flags, RW_NOSLEEP))
		return (EBUSY);

	prio = PLOCK;
	if (ISSET(flags, RW_INTR))
		prio |= PCATCH;

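	/* as in the write path, the barrier pairs with rw_exited() */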
	rw_inc(&rwl->rwl_readers);
	membar_producer();
	do {
		sleep_setup(&rwl->rwl_readers, prio, rwl->rwl_name);
		membar_consumer();
		error = sleep_finish(RW_SLEEP_TMO,
		    atomic_load_int(&rwl->rwl_waiters) > 0 ||
		    ISSET(atomic_load_long(&rwl->rwl_owner), RWLOCK_WRLOCK));
#ifdef RWDIAG
		if (error == EWOULDBLOCK) {
			printf("%s rwlock %p: %s timeout owner 0x%lx\n",
			    rwl->rwl_name, rwl, __func__, owner);
			db_enter();
		}
#endif
		if (ISSET(flags, RW_INTR) && (error != 0))
			goto fail;
		if (ISSET(flags, RW_SLEEPFAIL)) {
			error = EAGAIN;
			goto fail;
		}
	} while (!rw_read_incr(rwl, 0));
	rw_dec(&rwl->rwl_readers);

locked:
	membar_enter_after_atomic();
	WITNESS_LOCK(&rwl->rwl_lock_obj, lop_flags);

	return (0);
fail:
	rw_dec(&rwl->rwl_readers);
	return (error);
}

static int
rw_downgrade(struct rwlock *rwl, int flags)
{
	unsigned long self = rw_self();
	unsigned long owner;

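	/*
	 * Atomically convert our write hold into a single read hold.
	 * The release barrier publishes the write side's stores before
	 * other readers can enter.
	 */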
	membar_exit_before_atomic();
	owner = atomic_cas_ulong(&rwl->rwl_owner, self, RWLOCK_READ_INCR);
	if (__predict_false(owner != self)) {
		panic("%s rwlock %p: downgrade when lock not held "
		    "(owner 0x%lx, self 0x%lx)", rwl->rwl_name, rwl,
		    owner, self);
	}

#ifdef WITNESS
	{
		int lop_flags = LOP_NEWORDER;
		if (ISSET(flags, RW_DUPOK))
			lop_flags |= LOP_DUPOK;
		WITNESS_DOWNGRADE(&rwl->rwl_lock_obj, lop_flags);
	}
#endif

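	/* if readers are waiting (and no writer is), let them in now */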
	membar_consumer();
	if (atomic_load_int(&rwl->rwl_waiters) == 0 &&
	    atomic_load_int(&rwl->rwl_readers) > 0)
		wakeup(&rwl->rwl_readers);

	return (0);
}

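/*
 * Release a lock without knowing whether it is held for read or
 * write: the owner word tells us which exit path applies.
 */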
void
rw_exit(struct rwlock *rwl)
{
	unsigned long owner;

	owner = atomic_load_long(&rwl->rwl_owner);
	if (__predict_false(owner == 0)) {
		panic("%s rwlock %p: exit on unlocked lock",
		    rwl->rwl_name, rwl);
	}

	if (ISSET(owner, RWLOCK_WRLOCK))
		rw_exit_write(rwl);
	else
		rw_do_exit_read(rwl, owner);
}

static void
rw_exited(struct rwlock *rwl)
{
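	/*
	 * Wake a single waiting writer in preference to readers; when
	 * only readers wait, wake them all so they can share the lock.
	 */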
	membar_consumer();
	if (atomic_load_int(&rwl->rwl_waiters) > 0)
		wakeup_one(&rwl->rwl_waiters);
	else if (atomic_load_int(&rwl->rwl_readers) > 0)
		wakeup(&rwl->rwl_readers);
}

int
rw_status(struct rwlock *rwl)
{
	unsigned long owner;

	owner = atomic_load_long(&rwl->rwl_owner);
	if (ISSET(owner, RWLOCK_WRLOCK)) {
		if (rw_self() == owner)
			return RW_WRITE;
		else
			return RW_WRITE_OTHER;
	}
	if (owner)
		return RW_READ;
	return (0);
}

#ifdef DIAGNOSTIC
void
rw_assert_wrlock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_XLOCKED);
#else
	if (atomic_load_long(&rwl->rwl_owner) != rw_self()) {
		panic("%s rwlock %p: lock not held by this process",
		    rwl->rwl_name, rwl);
	}
#endif
}

void
rw_assert_rdlock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_SLOCKED);
#else
	if (rw_status(rwl) != RW_READ)
		panic("%s rwlock %p: lock not shared", rwl->rwl_name, rwl);
#endif
}

void
rw_assert_anylock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_LOCKED);
#else
	switch (rw_status(rwl)) {
	case RW_WRITE_OTHER:
		panic("%s rwlock %p: lock held by different process "
		    "(self %lx, owner %lx)", rwl->rwl_name, rwl,
		    rw_self(), rwl->rwl_owner);
	case 0:
		panic("%s rwlock %p: lock not held", rwl->rwl_name, rwl);
	}
#endif
}

void
rw_assert_unlocked(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_UNLOCKED);
#else
	if (atomic_load_long(&rwl->rwl_owner) == rw_self())
		panic("%s rwlock %p: lock held", rwl->rwl_name, rwl);
#endif
}
#endif

/* recursive rwlocks */
void
_rrw_init_flags(struct rrwlock *rrwl, const char *name, int flags,
    const struct lock_type *type)
{
	memset(rrwl, 0, sizeof(struct rrwlock));
	_rw_init_flags_witness(&rrwl->rrwl_lock, name, RRWLOCK_LO_FLAGS(flags),
	    type);
}

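/*
 * Enter a recursive rwlock: a writer that already owns the lock may
 * re-enter, bumping the recursion count rrwl_wcnt, unless
 * RW_RECURSEFAIL asks for failure instead.
 */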
int
rrw_enter(struct rrwlock *rrwl, int flags)
{
	int	rv;

	if (atomic_load_long(&rrwl->rrwl_lock.rwl_owner) == rw_self()) {
		if (flags & RW_RECURSEFAIL)
			return (EDEADLK);
		else {
			rrwl->rrwl_wcnt++;
			WITNESS_LOCK(&rrwl->rrwl_lock.rwl_lock_obj,
			    LOP_EXCLUSIVE);
			return (0);
		}
	}

	rv = rw_enter(&rrwl->rrwl_lock, flags);
	if (rv == 0)
		rrwl->rrwl_wcnt = 1;

	return (rv);
}

void
rrw_exit(struct rrwlock *rrwl)
{
	if (atomic_load_long(&rrwl->rrwl_lock.rwl_owner) == rw_self()) {
		KASSERT(rrwl->rrwl_wcnt > 0);
		rrwl->rrwl_wcnt--;
		if (rrwl->rrwl_wcnt != 0) {
			WITNESS_UNLOCK(&rrwl->rrwl_lock.rwl_lock_obj,
			    LOP_EXCLUSIVE);
			return;
		}
	}

	rw_exit(&rrwl->rrwl_lock);
}

int
rrw_status(struct rrwlock *rrwl)
{
	return (rw_status(&rrwl->rrwl_lock));
}

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define	RWLOCK_OBJ_MAGIC	0x5aa3c85d
struct rwlock_obj {
	struct rwlock	ro_lock;
	u_int		ro_magic;
	u_int		ro_refcnt;
};

struct pool rwlock_obj_pool;

/*
 * rw_obj_init:
 *
 *	Initialize the rwlock object store.
 */
void
rw_obj_init(void)
{
	pool_init(&rwlock_obj_pool, sizeof(struct rwlock_obj), 0, IPL_MPFLOOR,
	    PR_WAITOK, "rwobjpl", NULL);
}

/*
 * rw_obj_alloc:
 *
 *	Allocate a single lock object.
 */
void
_rw_obj_alloc_flags(struct rwlock **lock, const char *name, int flags,
    struct lock_type *type)
{
	struct rwlock_obj *mo;

	mo = pool_get(&rwlock_obj_pool, PR_WAITOK);
	mo->ro_magic = RWLOCK_OBJ_MAGIC;
	_rw_init_flags(&mo->ro_lock, name, flags, type);
	mo->ro_refcnt = 1;

	*lock = &mo->ro_lock;
}

/*
 * rw_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
rw_obj_hold(struct rwlock *lock)
{
	struct rwlock_obj *mo = (struct rwlock_obj *)lock;

	KASSERTMSG(mo->ro_magic == RWLOCK_OBJ_MAGIC,
	    "%s: lock %p: mo->ro_magic (%#x) != RWLOCK_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->ro_magic, RWLOCK_OBJ_MAGIC);
	KASSERTMSG(mo->ro_refcnt > 0,
	    "%s: lock %p: mo->ro_refcnt (%#x) == 0",
	     __func__, mo, mo->ro_refcnt);

	atomic_inc_int(&mo->ro_refcnt);
}

/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
int
rw_obj_free(struct rwlock *lock)
{
	struct rwlock_obj *mo = (struct rwlock_obj *)lock;

	KASSERTMSG(mo->ro_magic == RWLOCK_OBJ_MAGIC,
	    "%s: lock %p: mo->ro_magic (%#x) != RWLOCK_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->ro_magic, RWLOCK_OBJ_MAGIC);
	KASSERTMSG(mo->ro_refcnt > 0,
	    "%s: lock %p: mo->ro_refcnt (%#x) == 0",
	     __func__, mo, mo->ro_refcnt);

	if (atomic_dec_int_nv(&mo->ro_refcnt) > 0) {
		return false;
	}
#if notyet
	WITNESS_DESTROY(&mo->ro_lock);
#endif
	pool_put(&rwlock_obj_pool, mo);
	return true;
}
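
/*
 * Usage sketch (names hypothetical): a subsystem that shares one lock
 * between several structures can allocate it once and then take a
 * reference per additional user, e.g.:
 *
 *	struct rwlock *lk;
 *
 *	_rw_obj_alloc_flags(&lk, "somelk", 0, type);
 *	rw_obj_hold(lk);		second user shares the lock
 *	...
 *	rw_obj_free(lk);		drops one reference
 *	rw_obj_free(lk);		last reference frees the object
 */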