/*	$NetBSD: mutex.h,v 1.11 2007/10/19 12:16:48 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

/*
 * There are 2 types of mutexes:
 *
 *	* Adaptive -- If the lock is already held, the thread attempting
 *	  to acquire the lock determines if the thread that holds it is
 *	  currently running.  If so, it spins, else it sleeps.
 *
 *	* Spin -- If the lock is already held, the thread attempting to
 *	  acquire the lock spins.  The IPL will be raised on entry.
 *
 * Machine dependent code must provide the following:
 *
 *	struct mutex
 *		The actual mutex structure.  This structure is mostly
 *		opaque to machine-independent code; most accesses are done
 *		through macros.  However, machine-independent code must
 *		be able to access the following members:
 *
 *		uintptr_t		mtx_owner
 *		ipl_cookie_t		mtx_ipl
 *		__cpu_simple_lock_t	mtx_lock
 *
 * If an architecture can be considered 'simple' (no interlock required in
 * the MP case, or no MP) it need only define __HAVE_SIMPLE_MUTEXES and
 * provide the following:
 *
 *	struct mutex
 *
 *		[additionally:]
 *		volatile integer	mtx_id
 *
 *	MUTEX_RECEIVE(mtx)
 *		Post a load fence after acquiring the mutex, if necessary.
 *
 *	MUTEX_GIVE(mtx)
 *		Post a load/store fence after releasing the mutex, if
 *		necessary.
 *
 *	MUTEX_CAS(ptr, old, new)
 *		Perform an atomic "compare and swap" operation and
 *		evaluate to true or false according to the success of
 *		the operation.
 *
 * Otherwise, the following must be defined:
 *
 *	MUTEX_INITIALIZE_SPIN(mtx, id, minipl)
 *		Initialize a spin mutex.
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)
 *		Initialize an adaptive mutex.
 *
 *	MUTEX_DESTROY(mtx)
 *		Tear down a mutex.
 *
 *	MUTEX_ADAPTIVE_P(mtx)
 *		Evaluates to true if the mutex is an adaptive mutex.
 *
 *	MUTEX_SPIN_P(mtx)
 *		Evaluates to true if the mutex is a spin mutex.
 *
 *	MUTEX_OWNER(owner)
 *		Returns the owner of the adaptive mutex (LWP address).
 *
 *	MUTEX_OWNED(owner)
 *		Returns non-zero if an adaptive mutex is currently
 *		held by an LWP.
 *
 *	MUTEX_HAS_WAITERS(mtx)
 *		Returns true if the mutex has waiters.
 *
 *	MUTEX_SET_WAITERS(mtx)
 *		Mark the mutex as having waiters.
 *
 *	MUTEX_ACQUIRE(mtx, owner)
 *		Try to acquire an adaptive mutex such that:
 *			if (lock held OR waiters)
 *				return 0;
 *			else
 *				return 1;
 *		Must be MP/interrupt atomic.
 *
 *	MUTEX_RELEASE(mtx)
 *		Release the lock and clear the "has waiters" indication.
 *		Must be interrupt atomic, need not be MP safe.
 *
 *	MUTEX_GETID(mtx)
 *		Get the debugging ID for the mutex, an integer.  Only
 *		used in the LOCKDEBUG case.
 *
 * Machine dependent code may optionally provide stubs for the following
 * functions to implement the easy (unlocked / no waiters) cases.  If
 * these stubs are provided, __HAVE_MUTEX_STUBS should be defined.
 *
 *	mutex_enter()
 *	mutex_exit()
 *
 * Two additional stubs may be implemented that handle only the spinlock
 * case, primarily for the scheduler.  These should not be documented for
 * or used by device drivers.  __HAVE_SPIN_MUTEX_STUBS should be defined
 * if these are provided:
 *
 *	mutex_spin_enter()
 *	mutex_spin_exit()
 */

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

/* Userland consumers (e.g. crash dump tools) need the base types. */
#if !defined(_KERNEL)
#include <sys/types.h>
#include <sys/inttypes.h>
#endif

/*
 * MUTEX_NODEBUG disables most LOCKDEBUG checks for the lock.  It should
 * not be used.
 */
typedef enum kmutex_type_t {
	MUTEX_SPIN = 0,		/* raises IPL; waiters spin */
	MUTEX_ADAPTIVE = 1,	/* waiters spin or sleep, see above */
	MUTEX_DEFAULT = 2,
	MUTEX_DRIVER = 3,
	MUTEX_NODEBUG = 4	/* suppresses LOCKDEBUG checks; avoid */
} kmutex_type_t;

/* Opaque to consumers; layout is machine dependent (see above). */
typedef struct kmutex kmutex_t;

#if defined(__MUTEX_PRIVATE)

/*
 * NOTE(review): -16L presumably masks the low flag bits out of mtx_owner
 * to recover the owning LWP address -- confirm against the MD definition
 * of MUTEX_OWNER().
 */
#define	MUTEX_THREAD			((uintptr_t)-16L)

/* Flag bits stored alongside the owner in mtx_owner. */
#define	MUTEX_BIT_SPIN			0x01
#define	MUTEX_BIT_WAITERS		0x02

#define	MUTEX_SPIN_IPL(mtx)		((mtx)->mtx_ipl)
#define	MUTEX_SPIN_OLDSPL(ci)		((ci)->ci_mtx_oldspl)

/* Slow paths taken when the MD stubs cannot complete the operation. */
void	mutex_vector_enter(kmutex_t *);
void	mutex_vector_exit(kmutex_t *);
void	mutex_spin_retry(kmutex_t *);
void	mutex_wakeup(kmutex_t *);

#endif	/* __MUTEX_PRIVATE */

#ifdef _KERNEL
#include <sys/intr.h>
#endif

#include <machine/mutex.h>

/*
 * Return true if no spin mutexes are held by the current CPU.
 * MD code may override this with its own definition.
 */
#ifndef MUTEX_NO_SPIN_ACTIVE_P
#define	MUTEX_NO_SPIN_ACTIVE_P(ci)	((ci)->ci_mtx_count == 0)
#endif

#ifdef _KERNEL

void	mutex_init(kmutex_t *, kmutex_type_t, int);
void	mutex_destroy(kmutex_t *);

void	mutex_enter(kmutex_t *);
void	mutex_exit(kmutex_t *);

void	mutex_spin_enter(kmutex_t *);
void	mutex_spin_exit(kmutex_t *);

int	mutex_tryenter(kmutex_t *);

int	mutex_owned(kmutex_t *);

#endif	/* _KERNEL */

#endif /* _SYS_MUTEX_H_ */