/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

#include <linux/mutex.h>

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly,
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

struct ww_class {
	volatile u_long stamp;
	const char *name;
};

struct ww_acquire_ctx {
	u_long stamp;
	struct ww_class *ww_class;
};

struct ww_mutex {
	struct spinlock lock;
	volatile int acquired;
	volatile struct ww_acquire_ctx *ctx;
	volatile struct thread *owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
	spin_init(&lock->lock, ww_class->name);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;
	spin_lock(&lock->lock);
	if (lock->acquired > 0) res = true;
	spin_unlock(&lock->lock);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 1;
	KKASSERT(curthread);

	spin_lock(&lock->lock);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KKASSERT(lock->ctx == NULL);
		lock->acquired += 1;
		lock->owner = curthread;
	}
	/*
	 * In case we already hold the ww_mutex, increase the count.
	 */
	else if (lock->owner == curthread) {
		lock->acquired += 1;
	}
	else {
		res = 0;
	}
	spin_unlock(&lock->lock);
	return res;
}

/*
 * When `slow` is `true`, the lock operation will always block if the
 * ww_mutex is contended.  It is assumed that the caller does not hold any
 * (ww_mutex) resources when calling the slow path, as this could lead to
 * deadlocks.
 *
 * When `intr` is `true`, the ssleep will be interruptible.
 * `ctx` can be NULL in order to acquire only a single lock.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	KKASSERT(curthread);

	spin_lock(&lock->lock);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KKASSERT(lock->ctx == NULL);
			lock->acquired += 1;
			lock->ctx = ctx;
			lock->owner = curthread;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, simply increase
		 * the count and return -EALREADY.
		 */
		else if (lock->owner == curthread) {
			lock->acquired += 1;
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case, where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Three cases:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified.  We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all of its
			 *   resources.
			 */
			if (slow || ctx == NULL ||
			    (lock->ctx != NULL && ctx->stamp < lock->ctx->stamp)) {
				int s = ssleep(lock, &lock->lock,
				    intr ? PCATCH : 0,
				    ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
				if (intr && (s == EINTR || s == ERESTART)) {
					// XXX: Should we handle ERESTART?
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK because there is a
			 * potential risk of a deadlock.  The `younger`
			 * process should then give up all of its resources
			 * and try again to acquire the lock in question,
			 * this time in a blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}

	} /* for */
	spin_unlock(&lock->lock);
	return err;
}

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, true, true);
}

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	spin_lock(&lock->lock);
	KKASSERT(lock->owner == curthread);
	KKASSERT(lock->acquired > 0);

	--lock->acquired;
	if (lock->acquired > 0) {
		spin_unlock(&lock->lock);
		return;
	}

	KKASSERT(lock->acquired == 0);
	lock->ctx = NULL;
	lock->owner = NULL;
	spin_unlock(&lock->lock);
	wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KKASSERT(lock->acquired == 0);
	KKASSERT(lock->ctx == NULL);
	KKASSERT(lock->owner == NULL);
	spin_uninit(&lock->lock);
}

#endif /* _LINUX_WW_MUTEX_H_ */
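/*
 * A minimal usage sketch, not part of the original header: it illustrates
 * the acquire/backoff pattern described in the comments above, assuming two
 * hypothetical ww_mutexes `a` and `b` belonging to the same ww_class and a
 * caller whose `ctx` was set up with ww_acquire_init().  On -EDEADLK the
 * caller drops everything it holds, blocks on the contended lock via the
 * slow path and retries; a production caller would loop, since the retry
 * can be wounded again.  Kept under #if 0 so the sketch is never compiled.
 */
#if 0
static int
lock_pair(struct ww_mutex *a, struct ww_mutex *b, struct ww_acquire_ctx *ctx)
{
	int err;

	err = ww_mutex_lock(a, ctx);		/* 0, -EDEADLK or -EALREADY */
	if (err != 0)
		return err;

	err = ww_mutex_lock(b, ctx);
	if (err == -EDEADLK) {
		/*
		 * Wounded: an older context owns `b`.  Give up `a`, wait
		 * for `b` while holding nothing, then take `a` again.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, ctx);
		err = ww_mutex_lock(a, ctx);	/* may be wounded again */
		if (err != 0) {
			ww_mutex_unlock(b);
			return err;
		}
	} else if (err != 0) {
		ww_mutex_unlock(a);
		return err;
	}

	ww_acquire_done(ctx);			/* all locks are now held */
	return 0;
}
#endif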