/*	$NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/pool.h>
#include <sys/rwlock.h>

/* Rwlock cache */
#define	RW_OBJ_MAGIC	0x85d3c85d
struct krwobj {
	krwlock_t	ro_lock;
	u_int		ro_magic;
	u_int		ro_refcnt;
};

static int	rw_obj_ctor(void *, void *, int);

static pool_cache_t	rw_obj_cache	__read_mostly;

/*
 * rw_obj_init:
 *
 *	Initialize the rw object store.
 */
void
rw_obj_init(void)
{

	rw_obj_cache = pool_cache_init(sizeof(struct krwobj),
	    coherency_unit, 0, 0, "rwlock", NULL, IPL_NONE, rw_obj_ctor,
	    NULL, NULL);
}

/*
 * rw_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
rw_obj_ctor(void *arg, void *obj, int flags)
{
	struct krwobj *ro = obj;

	ro->ro_magic = RW_OBJ_MAGIC;

	return 0;
}

/*
 * rw_obj_alloc:
 *
 *	Allocate a single lock object, waiting for memory if needed.
 */
krwlock_t *
rw_obj_alloc(void)
{
	struct krwobj *ro;

	ro = pool_cache_get(rw_obj_cache, PR_WAITOK);
	_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
	ro->ro_refcnt = 1;

	return (krwlock_t *)ro;
}

/*
 * rw_obj_tryalloc:
 *
 *	Allocate a single lock object, but fail if no memory is available.
 */
krwlock_t *
rw_obj_tryalloc(void)
{
	struct krwobj *ro;

	ro = pool_cache_get(rw_obj_cache, PR_NOWAIT);
	if (__predict_true(ro != NULL)) {
		_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
		ro->ro_refcnt = 1;
	}

	return (krwlock_t *)ro;
}
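
/*
 * Example (illustrative sketch only): a caller in a context that must
 * not sleep for memory would prefer rw_obj_tryalloc() and handle the
 * allocation failure itself.  The surrounding function and its error
 * path are hypothetical:
 *
 *	krwlock_t *lock;
 *
 *	lock = rw_obj_tryalloc();
 *	if (lock == NULL)
 *		return ENOMEM;
 */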

/*
 * rw_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
rw_obj_hold(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	atomic_inc_uint(&ro->ro_refcnt);
}

/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
rw_obj_free(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	/* Release: order this thread's prior writes before the drop. */
	membar_release();
	if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) {
		return false;
	}
	/* Acquire: order the destruction after all other threads' drops. */
	membar_acquire();
	rw_destroy(&ro->ro_lock);
	pool_cache_put(rw_obj_cache, ro);
	return true;
}

/*
 * rw_obj_refcnt:
 *
 *	Return the reference count for a lock object.
 */
u_int
rw_obj_refcnt(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	return ro->ro_refcnt;
}
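
/*
 * Example (illustrative sketch only): the reference count lets several
 * structures share one lock.  Each additional holder takes a reference
 * with rw_obj_hold(), every holder drops its own with rw_obj_free(),
 * and the lock is destroyed only when the last reference goes away.
 * The structures "a" and "b" below are hypothetical:
 *
 *	a->lock = rw_obj_alloc();	refcnt is 1
 *	rw_obj_hold(a->lock);		refcnt is 2
 *	b->lock = a->lock;
 *	...
 *	rw_obj_free(b->lock);		refcnt drops to 1, returns false
 *	rw_obj_free(a->lock);		refcnt drops to 0, lock is freed,
 *					returns true
 */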