/*	$NetBSD: kern_mutex_obj.c,v 1.15 2023/10/02 21:03:55 ad Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.15 2023/10/02 21:03:55 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/kmem.h>

/* Mutex cache */
#define	MUTEX_OBJ_MAGIC	0x5aa3c85d
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_magic;
	u_int		mo_refcnt;
	/*
	 * Pad to COHERENCY_UNIT bytes so that each lock object occupies
	 * its own cache line.
	 */
	uint8_t		mo_pad[COHERENCY_UNIT - sizeof(kmutex_t) -
	    sizeof(u_int) * 2];
};

/*
 * mutex_obj_alloc:
 *
 *	Allocate a single lock object, waiting for memory if needed.
 */
kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = kmem_intr_alloc(sizeof(*mo), KM_SLEEP);
	KASSERT(ALIGNED_POINTER(mo, coherency_unit));
	_mutex_init(&mo->mo_lock, type, ipl,
	    (uintptr_t)__builtin_return_address(0));
	mo->mo_magic = MUTEX_OBJ_MAGIC;
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}
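
/*
 * Example (hypothetical caller, not part of this file): a typical user
 * allocates one lock object and stores the returned kmutex_t pointer in
 * the structure it protects.  The "foo" structure and its f_lock member
 * are illustrative only.
 *
 *	foo->f_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *
 *	mutex_enter(foo->f_lock);
 *	...
 *	mutex_exit(foo->f_lock);
 */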

/*
 * mutex_obj_tryalloc:
 *
 *	Allocate a single lock object, failing if no memory is available.
 */
kmutex_t *
mutex_obj_tryalloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = kmem_intr_alloc(sizeof(*mo), KM_NOSLEEP);
	KASSERT(ALIGNED_POINTER(mo, coherency_unit));
	if (__predict_true(mo != NULL)) {
		_mutex_init(&mo->mo_lock, type, ipl,
		    (uintptr_t)__builtin_return_address(0));
		mo->mo_magic = MUTEX_OBJ_MAGIC;
		mo->mo_refcnt = 1;
	}

	return (kmutex_t *)mo;
}
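
/*
 * Example (hypothetical, not part of this file): mutex_obj_tryalloc() is
 * the variant for callers that must not sleep, so they have to be
 * prepared for a NULL return.
 *
 *	kmutex_t *lock;
 *
 *	lock = mutex_obj_tryalloc(MUTEX_DEFAULT, IPL_NONE);
 *	if (lock == NULL)
 *		return ENOMEM;
 */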

/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	    __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	    __func__, mo, mo->mo_refcnt);

	atomic_inc_uint(&mo->mo_refcnt);
}

/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	    __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	    __func__, mo, mo->mo_refcnt);

	/*
	 * Release: make this thread's prior accesses to the protected
	 * object visible before the reference count is dropped.
	 */
	membar_release();
	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	/* Acquire: pair with the release above before tearing down. */
	membar_acquire();
	mutex_destroy(&mo->mo_lock);
	kmem_intr_free(mo, sizeof(*mo));
	return true;
}
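
/*
 * Example (hypothetical, not part of this file): two structures sharing
 * one lock object.  The object is destroyed only when the final
 * reference is dropped.
 *
 *	a->a_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	mutex_obj_hold(a->a_lock);	<- reference count is now 2
 *	b->b_lock = a->a_lock;
 *	...
 *	mutex_obj_free(b->b_lock);	<- returns false; count drops to 1
 *	mutex_obj_free(a->a_lock);	<- returns true; lock destroyed, freed
 */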

/*
 * mutex_obj_refcnt:
 *
 *	Return the reference count on a lock object.  The value is an
 *	unsynchronized snapshot and may change as soon as it is read.
 */
u_int
mutex_obj_refcnt(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	return mo->mo_refcnt;
}