/*	$OpenBSD: kern_rwlock.c,v 1.55 2025/01/29 15:10:09 mpi Exp $	*/

/*
 * Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
 * Copyright (c) 2011 Thordur Bjornsson <thib@secnorth.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/limits.h>
#include <sys/atomic.h>
#include <sys/witness.h>

#ifdef RWDIAG
#include <sys/kernel.h> /* for hz */
#define RW_SLEEP_TMO	10 * hz
#else
#define RW_SLEEP_TMO	0
#endif

/*
 * Other OSes implement more sophisticated mechanisms to determine how long
 * a process attempting to acquire the lock should spin. We start with the
 * simplest approach: make at most RW_SPINS attempts before giving up and
 * putting the process on the sleep queue.
 */
#define RW_SPINS	1000

#ifdef MULTIPROCESSOR
#define rw_cas(p, e, n)	atomic_cas_ulong(p, e, n)
#define rw_inc(p)	atomic_inc_int(p)
#define rw_dec(p)	atomic_dec_int(p)
#else
static inline unsigned long
rw_cas(volatile unsigned long *p, unsigned long e, unsigned long n)
{
	unsigned long o = *p;

	if (o == e)
		*p = n;

	return (o);
}

static inline void
rw_inc(volatile unsigned int *p)
{
	++(*p);
}

static inline void
rw_dec(volatile unsigned int *p)
{
	(*p)--;
}
#endif

static int	rw_do_enter_read(struct rwlock *, int);
static void	rw_do_exit_read(struct rwlock *, unsigned long);
static int	rw_do_enter_write(struct rwlock *, int);
static int	rw_downgrade(struct rwlock *, int);
static int	rw_upgrade(struct rwlock *, int);

static void	rw_exited(struct rwlock *);

static unsigned long
rw_self(void)
{
	unsigned long self = (unsigned long)curproc;

	CLR(self, RWLOCK_MASK);
	SET(self, RWLOCK_WRLOCK);

	return (self);
}

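/*
 * The rwl_owner word encodes the lock state: 0 means unlocked; a value
 * with RWLOCK_WRLOCK set is the curproc pointer of the exclusive owner
 * (see rw_self() above); otherwise it counts the shared holders in
 * RWLOCK_READ_INCR units.
 */
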
void
rw_enter_read(struct rwlock *rwl)
{
	rw_do_enter_read(rwl, 0);
}

void
rw_enter_write(struct rwlock *rwl)
{
	rw_do_enter_write(rwl, 0);
}

void
rw_exit_read(struct rwlock *rwl)
{
	/* maybe we're the last one? */
	rw_do_exit_read(rwl, RWLOCK_READ_INCR);
}

static void
rw_do_exit_read(struct rwlock *rwl, unsigned long owner)
{
	unsigned long decr;
	unsigned long nowner;

	WITNESS_UNLOCK(&rwl->rwl_lock_obj, 0);

	for (;;) {
		decr = owner - RWLOCK_READ_INCR;
		nowner = rw_cas(&rwl->rwl_owner, owner, decr);
		if (owner == nowner)
			break;

		if (__predict_false(ISSET(nowner, RWLOCK_WRLOCK))) {
			panic("%s rwlock %p: exit read on write locked lock"
			    " (owner 0x%lx)", rwl->rwl_name, rwl, nowner);
		}
		if (__predict_false(nowner == 0)) {
			panic("%s rwlock %p: exit read on unlocked lock",
			    rwl->rwl_name, rwl);
		}

		owner = nowner;
	}

	/* read lock didn't change anything, so no barrier needed? */

	if (decr == 0) {
		/* last one out */
		rw_exited(rwl);
	}
}

void
rw_exit_write(struct rwlock *rwl)
{
	unsigned long self = rw_self();
	unsigned long owner;

	WITNESS_UNLOCK(&rwl->rwl_lock_obj, LOP_EXCLUSIVE);

	membar_exit_before_atomic();
	owner = rw_cas(&rwl->rwl_owner, self, 0);
	if (__predict_false(owner != self)) {
		panic("%s rwlock %p: exit write when lock not held "
		    "(owner 0x%lx, self 0x%lx)", rwl->rwl_name, rwl,
		    owner, self);
	}

	rw_exited(rwl);
}

static void
_rw_init_flags_witness(struct rwlock *rwl, const char *name, int lo_flags,
    const struct lock_type *type)
{
	rwl->rwl_owner = 0;
	rwl->rwl_waiters = 0;
	rwl->rwl_readers = 0;
	rwl->rwl_name = name;

#ifdef WITNESS
	rwl->rwl_lock_obj.lo_flags = lo_flags;
	rwl->rwl_lock_obj.lo_name = name;
	rwl->rwl_lock_obj.lo_type = type;
	WITNESS_INIT(&rwl->rwl_lock_obj, type);
#else
	(void)type;
	(void)lo_flags;
#endif
}

void
_rw_init_flags(struct rwlock *rwl, const char *name, int flags,
    const struct lock_type *type)
{
	_rw_init_flags_witness(rwl, name, RWLOCK_LO_FLAGS(flags), type);
}

int
rw_enter(struct rwlock *rwl, int flags)
{
	int op = flags & RW_OPMASK;
	int error;

	switch (op) {
	case RW_WRITE:
		error = rw_do_enter_write(rwl, flags);
		break;
	case RW_READ:
		error = rw_do_enter_read(rwl, flags);
		break;
	case RW_DOWNGRADE:
		error = rw_downgrade(rwl, flags);
		break;
	case RW_UPGRADE:
		error = rw_upgrade(rwl, flags);
		break;
	default:
		panic("%s rwlock %p: %s unexpected op 0x%x",
		    rwl->rwl_name, rwl, __func__, op);
		/* NOTREACHED */
	}

	return (error);
}

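/*
 * Illustrative sketch of how callers typically drive rw_enter()/rw_exit();
 * the surrounding data structure and error handling are hypothetical:
 *
 *	struct rwlock lock;
 *
 *	rw_init(&lock, "examplelk");
 *
 *	rw_enter_read(&lock);		// shared; always succeeds, may sleep
 *	...
 *	rw_exit_read(&lock);
 *
 *	if (rw_enter(&lock, RW_WRITE | RW_NOSLEEP) == 0) {
 *		...			// EBUSY means someone else holds it
 *		rw_exit(&lock);
 *	}
 */
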
static int
rw_do_enter_write(struct rwlock *rwl, int flags)
{
	unsigned long self = rw_self();
	unsigned long owner;
	int prio;
	int error;

#ifdef WITNESS
	int lop_flags = LOP_NEWORDER | LOP_EXCLUSIVE;
	if (ISSET(flags, RW_DUPOK))
		lop_flags |= LOP_DUPOK;

	if (!ISSET(flags, RW_NOSLEEP))
		WITNESS_CHECKORDER(&rwl->rwl_lock_obj, lop_flags, NULL);
#endif

	owner = rw_cas(&rwl->rwl_owner, 0, self);
	if (owner == 0) {
		/* wow, we won. so easy */
		goto locked;
	}
	if (__predict_false(owner == self)) {
		panic("%s rwlock %p: enter write deadlock",
		    rwl->rwl_name, rwl);
	}

#ifdef MULTIPROCESSOR
	/*
	 * If the process holds the kernel lock, then we want to give up the
	 * CPU as soon as possible so other processes waiting for the kernel
	 * lock can make progress. Hence no spinning if we hold the kernel
	 * lock.
	 */
	if (!_kernel_lock_held()) {
		struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
		int spins;

		/*
		 * It makes sense to try to spin just in case the lock
		 * is held by a writer that may release it soon.
		 */

		spc->spc_spinning++;
		for (spins = 0; spins < RW_SPINS; spins++) {
			CPU_BUSY_CYCLE();
			owner = atomic_load_long(&rwl->rwl_owner);
			if (owner != 0)
				continue;

			owner = rw_cas(&rwl->rwl_owner, 0, self);
			if (owner == 0) {
				spc->spc_spinning--;
				/* ok, we won now. */
				goto locked;
			}
		}
		spc->spc_spinning--;
	}
#endif

	if (ISSET(flags, RW_NOSLEEP))
		return (EBUSY);

	prio = PLOCK - 4;
	if (ISSET(flags, RW_INTR))
		prio |= PCATCH;

	rw_inc(&rwl->rwl_waiters);
	membar_producer();
	do {
		sleep_setup(&rwl->rwl_waiters, prio, rwl->rwl_name);
		membar_consumer();
		owner = atomic_load_long(&rwl->rwl_owner);
		error = sleep_finish(RW_SLEEP_TMO, owner != 0);
#ifdef RWDIAG
		if (error == EWOULDBLOCK) {
			printf("%s rwlock %p: %s timeout owner 0x%lx "
			    "(self 0x%lx)\n", rwl->rwl_name, rwl, __func__,
			    owner, self);
			db_enter();
		}
#endif
		if (ISSET(flags, RW_INTR) && (error != 0)) {
			rw_dec(&rwl->rwl_waiters);
			return (error);
		}
		if (ISSET(flags, RW_SLEEPFAIL)) {
			rw_dec(&rwl->rwl_waiters);
			rw_exited(rwl);
			return (EAGAIN);
		}

		owner = rw_cas(&rwl->rwl_owner, 0, self);
	} while (owner != 0);
	rw_dec(&rwl->rwl_waiters);

locked:
	membar_enter_after_atomic();
	WITNESS_LOCK(&rwl->rwl_lock_obj, lop_flags);

	return (0);
}

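/*
 * Note on the sleep protocol above: rwl_waiters is incremented and
 * published with membar_producer() before the owner word is re-checked;
 * this is presumably paired with the membar_consumer() in rw_exited() so
 * that an unlocking CPU observes the waiter before deciding whom to wake.
 */
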
static int
rw_read_incr(struct rwlock *rwl, unsigned long owner)
{
	unsigned long incr;
	unsigned long nowner;

	do {
		incr = owner + RWLOCK_READ_INCR;
		nowner = rw_cas(&rwl->rwl_owner, owner, incr);
		if (nowner == owner)
			return (1);

		owner = nowner;
	} while (!ISSET(owner, RWLOCK_WRLOCK));

	return (0);
}

static int
rw_do_enter_read(struct rwlock *rwl, int flags)
{
	unsigned long owner;
	int error;
	int prio;

#ifdef WITNESS
	int lop_flags = LOP_NEWORDER;
	if (ISSET(flags, RW_DUPOK))
		lop_flags |= LOP_DUPOK;
	if (!ISSET(flags, RW_NOSLEEP))
		WITNESS_CHECKORDER(&rwl->rwl_lock_obj, lop_flags, NULL);
#endif

	owner = rw_cas(&rwl->rwl_owner, 0, RWLOCK_READ_INCR);
	if (owner == 0) {
		/* ermagerd, we won! */
		goto locked;
	}

	if (ISSET(owner, RWLOCK_WRLOCK)) {
		if (__predict_false(owner == rw_self())) {
			panic("%s rwlock %p: enter read deadlock",
			    rwl->rwl_name, rwl);
		}
	} else if (atomic_load_int(&rwl->rwl_waiters) == 0) {
		if (rw_read_incr(rwl, owner)) {
			/* nailed it */
			goto locked;
		}
	}

	if (ISSET(flags, RW_NOSLEEP))
		return (EBUSY);

	prio = PLOCK;
	if (ISSET(flags, RW_INTR))
		prio |= PCATCH;

	rw_inc(&rwl->rwl_readers);
	membar_producer();
	do {
		sleep_setup(&rwl->rwl_readers, prio, rwl->rwl_name);
		membar_consumer();
		error = sleep_finish(RW_SLEEP_TMO,
		    atomic_load_int(&rwl->rwl_waiters) > 0 ||
		    ISSET(atomic_load_long(&rwl->rwl_owner), RWLOCK_WRLOCK));
#ifdef RWDIAG
		if (error == EWOULDBLOCK) {
			printf("%s rwlock %p: %s timeout owner 0x%lx\n",
			    rwl->rwl_name, rwl, __func__, owner);
			db_enter();
		}
#endif
		if (ISSET(flags, RW_INTR) && (error != 0))
			goto fail;
		if (ISSET(flags, RW_SLEEPFAIL)) {
			error = EAGAIN;
			goto fail;
		}
	} while (!rw_read_incr(rwl, 0));
	rw_dec(&rwl->rwl_readers);

locked:
	membar_enter_after_atomic();
	WITNESS_LOCK(&rwl->rwl_lock_obj, lop_flags);

	return (0);
fail:
	rw_dec(&rwl->rwl_readers);
	return (error);
}

static int
rw_downgrade(struct rwlock *rwl, int flags)
{
	unsigned long self = rw_self();
	unsigned long owner;

	membar_exit_before_atomic();
	owner = atomic_cas_ulong(&rwl->rwl_owner, self, RWLOCK_READ_INCR);
	if (__predict_false(owner != self)) {
		panic("%s rwlock %p: downgrade when lock not held "
		    "(owner 0x%lx, self 0x%lx)", rwl->rwl_name, rwl,
		    owner, self);
	}

#ifdef WITNESS
	{
		int lop_flags = LOP_NEWORDER;
		if (ISSET(flags, RW_DUPOK))
			lop_flags |= LOP_DUPOK;
		WITNESS_DOWNGRADE(&rwl->rwl_lock_obj, lop_flags);
	}
#endif

	membar_consumer();
	if (atomic_load_int(&rwl->rwl_waiters) == 0 &&
	    atomic_load_int(&rwl->rwl_readers) > 0)
		wakeup(&rwl->rwl_readers);

	return (0);
}

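/*
 * Illustrative use of RW_DOWNGRADE (hypothetical caller): take the lock
 * exclusively to update a structure, then keep a shared hold while the
 * result is read back, without ever fully releasing the lock:
 *
 *	rw_enter_write(&lock);
 *	... modify ...
 *	rw_enter(&lock, RW_DOWNGRADE);
 *	... read ...
 *	rw_exit_read(&lock);
 */
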
static int
rw_upgrade(struct rwlock *rwl, int flags)
{
	unsigned long self = rw_self();
	unsigned long owner;

	KASSERTMSG(ISSET(flags, RW_NOSLEEP), "RW_UPGRADE without RW_NOSLEEP");

	owner = atomic_cas_ulong(&rwl->rwl_owner, RWLOCK_READ_INCR, self);
	if (owner != RWLOCK_READ_INCR) {
		if (__predict_false(owner == 0)) {
			panic("%s rwlock %p: upgrade on unowned lock",
			    rwl->rwl_name, rwl);
		}
		if (__predict_false(ISSET(owner, RWLOCK_WRLOCK))) {
			panic("%s rwlock %p: upgrade on write locked lock"
			    " (owner 0x%lx, self 0x%lx)", rwl->rwl_name, rwl,
			    owner, self);
		}

		return (EBUSY);
	}

#ifdef WITNESS
	{
		int lop_flags = LOP_NEWORDER;
		if (ISSET(flags, RW_DUPOK))
			lop_flags |= LOP_DUPOK;
		WITNESS_UPGRADE(&rwl->rwl_lock_obj, lop_flags);
	}
#endif

	return (0);
}

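/*
 * Illustrative use of RW_UPGRADE (hypothetical caller): it only succeeds
 * when we are the sole reader, so be prepared to fall back on EBUSY:
 *
 *	rw_enter_read(&lock);
 *	if (rw_enter(&lock, RW_UPGRADE | RW_NOSLEEP) != 0) {
 *		rw_exit_read(&lock);
 *		rw_enter_write(&lock);
 *		... re-validate state after the unlocked gap ...
 *	}
 *	... now write locked ...
 *	rw_exit_write(&lock);
 */
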
void
rw_exit(struct rwlock *rwl)
{
	unsigned long owner;

	owner = atomic_load_long(&rwl->rwl_owner);
	if (__predict_false(owner == 0)) {
		panic("%s rwlock %p: exit on unlocked lock",
		    rwl->rwl_name, rwl);
	}

	if (ISSET(owner, RWLOCK_WRLOCK))
		rw_exit_write(rwl);
	else
		rw_do_exit_read(rwl, owner);
}

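/*
 * rw_exited:
 *
 *	Called after the owner word has been released.  Prefer waking a
 *	single waiting writer; only when no writer is waiting, wake all
 *	waiting readers.
 */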
static void
rw_exited(struct rwlock *rwl)
{
	membar_consumer();
	if (atomic_load_int(&rwl->rwl_waiters) > 0)
		wakeup_one(&rwl->rwl_waiters);
	else if (atomic_load_int(&rwl->rwl_readers) > 0)
		wakeup(&rwl->rwl_readers);
}

int
rw_status(struct rwlock *rwl)
{
	unsigned long owner;

	owner = atomic_load_long(&rwl->rwl_owner);
	if (ISSET(owner, RWLOCK_WRLOCK)) {
		if (rw_self() == owner)
			return RW_WRITE;
		else
			return RW_WRITE_OTHER;
	}
	if (owner)
		return RW_READ;
	return (0);
}

#ifdef DIAGNOSTIC
void
rw_assert_wrlock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_XLOCKED);
#else
	if (atomic_load_long(&rwl->rwl_owner) != rw_self()) {
		panic("%s rwlock %p: lock not held by this process",
		    rwl->rwl_name, rwl);
	}
#endif
}

void
rw_assert_rdlock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_SLOCKED);
#else
	if (rw_status(rwl) != RW_READ)
		panic("%s rwlock %p: lock not shared", rwl->rwl_name, rwl);
#endif
}

void
rw_assert_anylock(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_LOCKED);
#else
	switch (rw_status(rwl)) {
	case RW_WRITE_OTHER:
		panic("%s rwlock %p: lock held by different process "
		    "(self %lx, owner %lx)", rwl->rwl_name, rwl,
		    rw_self(), rwl->rwl_owner);
	case 0:
		panic("%s rwlock %p: lock not held", rwl->rwl_name, rwl);
	}
#endif
}

void
rw_assert_unlocked(struct rwlock *rwl)
{
	if (panicstr || db_active)
		return;

#ifdef WITNESS
	witness_assert(&rwl->rwl_lock_obj, LA_UNLOCKED);
#else
	if (atomic_load_long(&rwl->rwl_owner) == rw_self())
		panic("%s rwlock %p: lock held", rwl->rwl_name, rwl);
#endif
}
#endif

/* Recursive rwlocks. */
void
_rrw_init_flags(struct rrwlock *rrwl, const char *name, int flags,
    const struct lock_type *type)
{
	memset(rrwl, 0, sizeof(struct rrwlock));
	_rw_init_flags_witness(&rrwl->rrwl_lock, name, RRWLOCK_LO_FLAGS(flags),
	    type);
}

int
rrw_enter(struct rrwlock *rrwl, int flags)
{
	int	rv;

	if (atomic_load_long(&rrwl->rrwl_lock.rwl_owner) == rw_self()) {
		if (flags & RW_RECURSEFAIL)
			return (EDEADLK);
		else {
			rrwl->rrwl_wcnt++;
			WITNESS_LOCK(&rrwl->rrwl_lock.rwl_lock_obj,
			    LOP_EXCLUSIVE);
			return (0);
		}
	}

	rv = rw_enter(&rrwl->rrwl_lock, flags);
	if (rv == 0)
		rrwl->rrwl_wcnt = 1;

	return (rv);
}

void
rrw_exit(struct rrwlock *rrwl)
{

	if (atomic_load_long(&rrwl->rrwl_lock.rwl_owner) == rw_self()) {
		KASSERT(rrwl->rrwl_wcnt > 0);
		rrwl->rrwl_wcnt--;
		if (rrwl->rrwl_wcnt != 0) {
			WITNESS_UNLOCK(&rrwl->rrwl_lock.rwl_lock_obj,
			    LOP_EXCLUSIVE);
			return;
		}
	}

	rw_exit(&rrwl->rrwl_lock);
}

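/*
 * Illustrative recursive acquisition (hypothetical caller): the same
 * process may re-enter the write lock and must balance every rrw_enter()
 * with an rrw_exit():
 *
 *	rrw_enter(&rrwl, RW_WRITE);
 *	rrw_enter(&rrwl, RW_WRITE);	// recursion, bumps rrwl_wcnt
 *	...
 *	rrw_exit(&rrwl);
 *	rrw_exit(&rrwl);		// last exit releases the rwlock
 */
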
int
rrw_status(struct rrwlock *rrwl)
{
	return (rw_status(&rrwl->rrwl_lock));
}

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define	RWLOCK_OBJ_MAGIC	0x5aa3c85d
struct rwlock_obj {
	struct rwlock	ro_lock;
	u_int		ro_magic;
	u_int		ro_refcnt;
};

struct pool rwlock_obj_pool;

/*
 * rw_obj_init:
 *
 *	Initialize the rwlock object store.
 */
void
rw_obj_init(void)
{
	pool_init(&rwlock_obj_pool, sizeof(struct rwlock_obj), 0, IPL_MPFLOOR,
	    PR_WAITOK, "rwobjpl", NULL);
}

/*
 * rw_obj_alloc:
 *
 *	Allocate a single lock object.
 */
void
_rw_obj_alloc_flags(struct rwlock **lock, const char *name, int flags,
    struct lock_type *type)
{
	struct rwlock_obj *mo;

	mo = pool_get(&rwlock_obj_pool, PR_WAITOK);
	mo->ro_magic = RWLOCK_OBJ_MAGIC;
	_rw_init_flags(&mo->ro_lock, name, flags, type);
	mo->ro_refcnt = 1;

	*lock = &mo->ro_lock;
}

/*
 * rw_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
rw_obj_hold(struct rwlock *lock)
{
	struct rwlock_obj *mo = (struct rwlock_obj *)lock;

	KASSERTMSG(mo->ro_magic == RWLOCK_OBJ_MAGIC,
	    "%s: lock %p: mo->ro_magic (%#x) != RWLOCK_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->ro_magic, RWLOCK_OBJ_MAGIC);
	KASSERTMSG(mo->ro_refcnt > 0,
	    "%s: lock %p: mo->ro_refcnt (%#x) == 0",
	     __func__, mo, mo->ro_refcnt);

	atomic_inc_int(&mo->ro_refcnt);
}

/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
int
rw_obj_free(struct rwlock *lock)
{
	struct rwlock_obj *mo = (struct rwlock_obj *)lock;

	KASSERTMSG(mo->ro_magic == RWLOCK_OBJ_MAGIC,
	    "%s: lock %p: mo->ro_magic (%#x) != RWLOCK_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->ro_magic, RWLOCK_OBJ_MAGIC);
	KASSERTMSG(mo->ro_refcnt > 0,
	    "%s: lock %p: mo->ro_refcnt (%#x) == 0",
	     __func__, mo, mo->ro_refcnt);

	if (atomic_dec_int_nv(&mo->ro_refcnt) > 0) {
		return false;
	}
#if notyet
	WITNESS_DESTROY(&mo->ro_lock);
#endif
	pool_put(&rwlock_obj_pool, mo);
	return true;
}
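
/*
 * Illustrative lifecycle of a reference-counted lock object (hypothetical
 * caller; rw_obj_alloc() is assumed to be the header wrapper around
 * _rw_obj_alloc_flags()):
 *
 *	struct rwlock *lock;
 *
 *	rw_obj_alloc(&lock, "objlk");	// refcnt == 1
 *	rw_obj_hold(lock);		// share with another structure
 *	...
 *	if (rw_obj_free(lock))		// drops one reference, returns
 *		...			// true once the object is freed
 */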