xref: /netbsd-src/sys/kern/kern_rwlock_obj.c (revision 2246c1eb4d5bd980bce9feef7bfdd436b03afc2d)
1 /*	$NetBSD: kern_rwlock_obj.c,v 1.13 2023/10/02 21:03:55 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.13 2023/10/02 21:03:55 ad Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/atomic.h>
37 #include <sys/kmem.h>
38 #include <sys/rwlock.h>
39 
40 /* Mutex cache */
41 #define	RW_OBJ_MAGIC	0x85d3c85d
42 struct krwobj {
43 	krwlock_t	ro_lock;
44 	u_int		ro_magic;
45 	u_int		ro_refcnt;
46 	uint8_t		mo_pad[COHERENCY_UNIT - sizeof(krwlock_t) -
47 	    sizeof(u_int) * 2];
48 };
49 
50 /*
51  * rw_obj_alloc:
52  *
53  *	Allocate a single lock object, waiting for memory if needed.
54  */
55 krwlock_t *
rw_obj_alloc(void)56 rw_obj_alloc(void)
57 {
58 	struct krwobj *ro;
59 
60 	ro = kmem_intr_alloc(sizeof(*ro), KM_SLEEP);
61 	KASSERT(ALIGNED_POINTER(ro, coherency_unit));
62 	_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
63 	ro->ro_magic = RW_OBJ_MAGIC;
64 	ro->ro_refcnt = 1;
65 
66 	return (krwlock_t *)ro;
67 }
68 
69 /*
70  * rw_obj_tryalloc:
71  *
72  *	Allocate a single lock object, but fail if no memory is available.
73  */
74 krwlock_t *
rw_obj_tryalloc(void)75 rw_obj_tryalloc(void)
76 {
77 	struct krwobj *ro;
78 
79 	ro = kmem_intr_alloc(sizeof(*ro), KM_NOSLEEP);
80 	KASSERT(ALIGNED_POINTER(ro, coherency_unit));
81 	if (__predict_true(ro != NULL)) {
82 		_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
83 		ro->ro_magic = RW_OBJ_MAGIC;
84 		ro->ro_refcnt = 1;
85 	}
86 
87 	return (krwlock_t *)ro;
88 }
89 
90 /*
91  * rw_obj_hold:
92  *
93  *	Add a single reference to a lock object.  A reference to the object
94  *	must already be held, and must be held across this call.
95  */
96 void
rw_obj_hold(krwlock_t * lock)97 rw_obj_hold(krwlock_t *lock)
98 {
99 	struct krwobj *ro = (struct krwobj *)lock;
100 
101 	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
102 	KASSERT(ro->ro_refcnt > 0);
103 
104 	atomic_inc_uint(&ro->ro_refcnt);
105 }
106 
/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
rw_obj_free(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	/*
	 * Release ordering: publish this thread's prior stores to the
	 * object before the count can drop, so whichever thread takes
	 * the count to zero observes a fully consistent object.
	 */
	membar_release();
	if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) {
		return false;
	}
	/*
	 * We took the count to zero: acquire pairs with the release
	 * above so all other droppers' accesses are visible before we
	 * destroy the lock and return the memory.
	 */
	membar_acquire();
	rw_destroy(&ro->ro_lock);
	kmem_intr_free(ro, sizeof(*ro));
	return true;
}
130 
131 /*
132  * rw_obj_refcnt:
133  *
134  *	Return the reference count for a lock object.
135  */
136 u_int
rw_obj_refcnt(krwlock_t * lock)137 rw_obj_refcnt(krwlock_t *lock)
138 {
139 	struct krwobj *ro = (struct krwobj *)lock;
140 
141 	return ro->ro_refcnt;
142 }
143