/*	$NetBSD: mutex.h,v 1.13 2007/12/05 07:06:55 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

/*
 * There are 2 types of mutexes:
 *
 *	* Adaptive -- If the lock is already held, the thread attempting
 *	  to acquire the lock determines if the thread that holds it is
 *	  currently running.  If so, it spins; if not, it sleeps.
 *
 *	* Spin -- If the lock is already held, the thread attempting to
 *	  acquire the lock spins.  The IPL will be raised on entry.
 *
 * Machine dependent code must provide the following:
 *
 *	struct mutex
 *		The actual mutex structure.  This structure is mostly
 *		opaque to machine-independent code; most accesses are done
 *		through macros.  However, machine-independent code must
 *		be able to access the following members:
 *
 *		uintptr_t		mtx_owner
 *		ipl_cookie_t		mtx_ipl
 *		__cpu_simple_lock_t	mtx_lock
 *
 * If an architecture can be considered 'simple' (no interlock required in
 * the MP case, or no MP) it need only define __HAVE_SIMPLE_MUTEXES and
 * provide the following:
 *
 *	struct mutex
 *
 *		[additionally:]
 *		volatile integer	mtx_id
 *
 *	MUTEX_RECEIVE(mtx)
 *		Post a load fence after acquiring the mutex, if necessary.
 *
 *	MUTEX_GIVE(mtx)
 *		Post a load/store fence before releasing the mutex, if
 *		necessary.
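 *
 *		(Illustrative sketch only, not drawn from any particular
 *		port: on a strongly ordered machine both macros might
 *		expand to nothing, while a weakly ordered port might use
 *		memory barrier primitives, for example:
 *
 *			#define MUTEX_RECEIVE(mtx)	membar_enter()
 *			#define MUTEX_GIVE(mtx)		membar_exit()
 *
 *		The exact primitives are port specific; see the port's
 *		<machine/mutex.h>.)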
 *
 *	MUTEX_CAS(ptr, old, new)
 *		Perform an atomic "compare and swap" operation and
 *		evaluate to true or false according to whether it
 *		succeeded.
 *
 * Otherwise, the following must be defined:
 *
 *	MUTEX_INITIALIZE_SPIN(mtx, dodebug, minipl)
 *		Initialize a spin mutex.
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
 *		Initialize an adaptive mutex.
 *
 *	MUTEX_DESTROY(mtx)
 *		Tear down a mutex.
 *
 *	MUTEX_ADAPTIVE_P(mtx)
 *		Evaluates to true if the mutex is an adaptive mutex.
 *
 *	MUTEX_SPIN_P(mtx)
 *		Evaluates to true if the mutex is a spin mutex.
 *
 *	MUTEX_OWNER(owner)
 *		Returns the owner of the adaptive mutex (LWP address).
 *
 *	MUTEX_OWNED(owner)
 *		Returns non-zero if an adaptive mutex is currently
 *		held by an LWP.
 *
 *	MUTEX_HAS_WAITERS(mtx)
 *		Returns true if the mutex has waiters.
 *
 *	MUTEX_SET_WAITERS(mtx)
 *		Mark the mutex as having waiters.
 *
 *	MUTEX_ACQUIRE(mtx, owner)
 *		Try to acquire an adaptive mutex such that:
 *			if (lock held OR waiters)
 *				return 0;
 *			else
 *				return 1;
 *		Must be MP/interrupt atomic.
 *
 *	MUTEX_RELEASE(mtx)
 *		Release the lock and clear the "has waiters" indication.
 *		Must be interrupt atomic, need not be MP safe.
 *
 *	MUTEX_DEBUG_P(mtx)
 *		Evaluates to true if the mutex is initialized with
 *		dodebug==true.  Only used in the LOCKDEBUG case.
 *
 * Machine dependent code may optionally provide stubs for the following
 * functions to implement the easy (unlocked / no waiters) cases.  If
 * these stubs are provided, __HAVE_MUTEX_STUBS should be defined.
 *
 *	mutex_enter()
 *	mutex_exit()
 *
 * Two additional stubs may be implemented that handle only the spinlock
 * case, primarily for the scheduler.  These should not be documented for
 * or used by device drivers.  __HAVE_SPIN_MUTEX_STUBS should be defined
 * if these are provided:
 *
 *	mutex_spin_enter()
 *	mutex_spin_exit()
 */

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#if !defined(_KERNEL)
#include <sys/types.h>
#include <sys/inttypes.h>
#endif

typedef enum kmutex_type_t {
	MUTEX_SPIN = 0,		/* To get a spin mutex at IPL_NONE */
	MUTEX_ADAPTIVE = 1,	/* For porting code written for Solaris */
	MUTEX_DEFAULT = 2,	/* The only native, endorsed type */
	MUTEX_DRIVER = 3,	/* For porting code written for Solaris */
	MUTEX_NODEBUG = 4	/* Disables LOCKDEBUG; use with care */
} kmutex_type_t;

typedef struct kmutex kmutex_t;

#if defined(__MUTEX_PRIVATE)

#define	MUTEX_THREAD		((uintptr_t)-16L)

#define	MUTEX_BIT_SPIN		0x01
#define	MUTEX_BIT_WAITERS	0x02
#define	MUTEX_BIT_DEBUG		0x04

#define	MUTEX_SPIN_IPL(mtx)	((mtx)->mtx_ipl)
#define	MUTEX_SPIN_OLDSPL(ci)	((ci)->ci_mtx_oldspl)

void	mutex_vector_enter(kmutex_t *);
void	mutex_vector_exit(kmutex_t *);
void	mutex_spin_retry(kmutex_t *);
void	mutex_wakeup(kmutex_t *);

#endif	/* __MUTEX_PRIVATE */

#ifdef _KERNEL
#include <sys/intr.h>
#endif

#include <machine/mutex.h>

/*
 * Return true if no spin mutexes are held by the current CPU.
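 * The machine-independent default below simply tests the per-CPU spin
 * mutex counter (ci_mtx_count), which is zero when no spin mutexes are
 * held; a port may supply its own definition of MUTEX_NO_SPIN_ACTIVE_P
 * from <machine/mutex.h>.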
 */
#ifndef MUTEX_NO_SPIN_ACTIVE_P
#define	MUTEX_NO_SPIN_ACTIVE_P(ci)	((ci)->ci_mtx_count == 0)
#endif

#ifdef _KERNEL

void	mutex_init(kmutex_t *, kmutex_type_t, int);
void	mutex_destroy(kmutex_t *);

void	mutex_enter(kmutex_t *);
void	mutex_exit(kmutex_t *);

void	mutex_spin_enter(kmutex_t *);
void	mutex_spin_exit(kmutex_t *);

int	mutex_tryenter(kmutex_t *);

int	mutex_owned(kmutex_t *);

#endif /* _KERNEL */

#endif /* _SYS_MUTEX_H_ */
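
/*
 * Example usage (a sketch only; "foo_softc", "sc_lock" and "sc_count"
 * are hypothetical names).  An adaptive mutex created with MUTEX_DEFAULT
 * and IPL_NONE protects ordinary, non-interrupt driver state:
 *
 *	struct foo_softc {
 *		kmutex_t	sc_lock;
 *		int		sc_count;
 *	};
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_count++;
 *	mutex_exit(&sc->sc_lock);
 *
 *	mutex_destroy(&sc->sc_lock);
 *
 * Passing an interrupt-level IPL instead of IPL_NONE to mutex_init()
 * selects a lock that can also be taken from interrupt handlers running
 * at or below that IPL.
 */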