/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

#include <linux/mutex.h>

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly,
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */
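/*
 * A hedged usage sketch (the names `my_class`, `m1` and `m2` below are
 * hypothetical, not part of this header): all mutexes that may be taken
 * together are locked under one ww_acquire_ctx, and the acquire context's
 * stamp establishes the older/younger ordering used to resolve contention.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&m1, &ctx);	\* 0, -EALREADY or -EDEADLK *\
 *	ret = ww_mutex_lock(&m2, &ctx);
 *	ww_acquire_done(&ctx);
 *	\* ... use the resources protected by m1 and m2 ... *\
 *	ww_mutex_unlock(&m2);
 *	ww_mutex_unlock(&m1);
 *	ww_acquire_fini(&ctx);
 *
 * The -EDEADLK back-off protocol is sketched below, after
 * __ww_mutex_lock().
 */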

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

struct ww_class {
	volatile u_long	stamp;
	const char	*name;
};

struct ww_acquire_ctx {
	u_long		stamp;
	struct ww_class	*ww_class;
};

struct ww_mutex {
	struct spinlock	lock;
	volatile int	acquired;
	volatile struct ww_acquire_ctx	*ctx;
	volatile struct thread	*owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}
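/*
 * Setup sketch with hypothetical names: DEFINE_WW_CLASS() provides the
 * shared stamp counter for one group of locks, and every ww_mutex is
 * initialized against that class (see ww_mutex_init() below):
 *
 *	DEFINE_WW_CLASS(my_class);
 *	struct ww_mutex m1, m2;
 *
 *	ww_mutex_init(&m1, &my_class);
 *	ww_mutex_init(&m2, &my_class);
 */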

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
	spin_init(&lock->lock, ww_class->name);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;
	spin_lock(&lock->lock);
	if (lock->acquired > 0)
		res = true;
	spin_unlock(&lock->lock);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 1;
	KKASSERT(curthread);

	spin_lock(&lock->lock);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KKASSERT(lock->ctx == NULL);
		lock->acquired += 1;
		lock->owner = curthread;
	}
	/*
	 * In case we already hold the ww_mutex, increase the count.
	 */
	else if (lock->owner == curthread) {
		lock->acquired += 1;
	}
	else {
		res = 0;
	}
	spin_unlock(&lock->lock);
	return res;
}

/*
 * When `slow` is `true`, it will always block if the ww_mutex is contended.
 * It is assumed that the caller will not hold any (ww_mutex) resources when
 * calling the slow path, as this could lead to deadlocks.
 *
 * When `intr` is `true`, the ssleep will be interruptible.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	KKASSERT(curthread);

	spin_lock(&lock->lock);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KKASSERT(lock->ctx == NULL);
			lock->acquired += 1;
			lock->ctx = ctx;
			lock->owner = curthread;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, simply increase
		 * the count and return -EALREADY.
		 */
		else if (lock->owner == curthread) {
			lock->acquired += 1;
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case, where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Three cases:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified. We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all of its
			 *   resources.
			 */
			if (slow || ctx == NULL || ctx->stamp < lock->ctx->stamp) {
				int s = ssleep(lock, &lock->lock,
				    intr ? PCATCH : 0,
				    ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
				if (intr && (s == EINTR || s == ERESTART)) {
					/* XXX: Should we handle ERESTART? */
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK, because there is a
			 * potential risk of a deadlock. The `younger` process
			 * should then give up all of its resources and try
			 * again to acquire the lock in question, this time in
			 * a blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}
	} /* for */
	spin_unlock(&lock->lock);
	return err;
}
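/*
 * An illustrative back-off for the -EDEADLK (`wounded`) case described
 * above; `m1`, `m2` and `ctx` are hypothetical caller-side names. A real
 * caller would loop, since re-acquiring the remaining locks can be
 * wounded again:
 *
 *	ww_mutex_lock(&m1, &ctx);
 *	if (ww_mutex_lock(&m2, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&m1);		\* give up everything we hold *\
 *		ww_mutex_lock_slow(&m2, &ctx);	\* sleep until m2 is ours *\
 *		ww_mutex_lock(&m1, &ctx);	\* then re-take the rest *\
 *	}
 */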

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, true, true);
}
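/*
 * Note that the lock is recursive for its owner: every successful pass
 * through __ww_mutex_lock() or ww_mutex_trylock(), including the
 * -EALREADY case, bumps `acquired`, and each acquisition must be paired
 * with its own ww_mutex_unlock(). Waiters are only woken once the count
 * drops back to zero.
 */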

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	spin_lock(&lock->lock);
	KKASSERT(lock->owner == curthread);
	KKASSERT(lock->acquired > 0);

	--lock->acquired;
	if (lock->acquired > 0) {
		spin_unlock(&lock->lock);
		return;
	}

	KKASSERT(lock->acquired == 0);
	lock->ctx = NULL;
	lock->owner = NULL;
	spin_unlock(&lock->lock);
	wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KKASSERT(lock->acquired == 0);
	KKASSERT(lock->ctx == NULL);
	KKASSERT(lock->owner == NULL);
	spin_uninit(&lock->lock);
}

#endif /* _LINUX_WW_MUTEX_H_ */