/*	$NetBSD: kern_rwlock_obj.c,v 1.4 2018/02/05 04:25:04 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.4 2018/02/05 04:25:04 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/pool.h>
#include <sys/rwlock.h>

/* RW lock cache */
#define	RW_OBJ_MAGIC	0x85d3c85d
struct krwobj {
	krwlock_t	ro_lock;
	u_int		ro_magic;
	u_int		ro_refcnt;
};

static int	rw_obj_ctor(void *, void *, int);

static pool_cache_t	rw_obj_cache	__read_mostly;

/*
 * rw_obj_init:
 *
 *	Initialize the rw object store.
 */
void
rw_obj_init(void)
{

	rw_obj_cache = pool_cache_init(sizeof(struct krwobj),
	    coherency_unit, 0, 0, "rwlock", NULL, IPL_NONE, rw_obj_ctor,
	    NULL, NULL);
}

/*
 * rw_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
rw_obj_ctor(void *arg, void *obj, int flags)
{
	struct krwobj *ro = obj;

	ro->ro_magic = RW_OBJ_MAGIC;

	return 0;
}

/*
 * rw_obj_alloc:
 *
 *	Allocate a single lock object.
 */
krwlock_t *
rw_obj_alloc(void)
{
	struct krwobj *ro;
	extern void _rw_init(krwlock_t *, uintptr_t);

	ro = pool_cache_get(rw_obj_cache, PR_WAITOK);
	_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
	ro->ro_refcnt = 1;

	return (krwlock_t *)ro;
}

/*
 * rw_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
rw_obj_hold(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	atomic_inc_uint(&ro->ro_refcnt);
}

/*
 * rw_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
rw_obj_free(krwlock_t *lock)
{
	struct krwobj *ro = (struct krwobj *)lock;

	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
	KASSERT(ro->ro_refcnt > 0);

	if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) {
		return false;
	}
	rw_destroy(&ro->ro_lock);
	pool_cache_put(rw_obj_cache, ro);
	return true;
}
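
/*
 * Usage sketch (hypothetical caller; the variable names below are
 * illustrative only): a consumer allocates one reference-counted lock,
 * shares it by taking extra references with rw_obj_hold(), and every
 * holder drops its own reference with rw_obj_free().  The underlying
 * rwlock is destroyed and returned to the pool cache only on the final
 * drop.
 *
 *	krwlock_t *lock;
 *
 *	lock = rw_obj_alloc();		-- refcnt 1, lock initialized
 *	rw_obj_hold(lock);		-- second holder, refcnt 2
 *	...
 *	(void)rw_obj_free(lock);	-- refcnt 1, returns false, lock valid
 *	if (rw_obj_free(lock))		-- refcnt 0, returns true, lock freed
 *		lock = NULL;
 */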