/*	$NetBSD: mutex.h,v 1.11 2010/11/16 09:35:14 uebayasi Exp $	*/

/*-
 * Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _HPPA_MUTEX_H_
#define	_HPPA_MUTEX_H_

/*
 * The HPPA mutex implementation is troublesome, because HPPA lacks
 * a compare-and-set operation, yet there are many SMP HPPA machines
 * in circulation.  SMP for spin mutexes is easy - we don't need to
 * know who owns the lock.  For adaptive mutexes, we need an owner
 * field and an additional interlock.
 */

#ifndef __ASSEMBLER__

#include <machine/lock.h>

struct kmutex {
	union {
		/*
		 * Only the 16-byte-aligned word of __cpu_simple_lock_t
		 * will be used.  It is 16 bytes to simplify the allocation.
		 * See hppa/lock.h
		 */
#ifdef __MUTEX_PRIVATE
		struct {
			__cpu_simple_lock_t	mtxu_lock;	/* 0-15 */
			volatile uint32_t	mtxs_owner;	/* 16-19 */
			ipl_cookie_t		mtxs_ipl;	/* 20-23 */
			volatile uint8_t	mtxs_waiters;	/* 24 */

			/* For LOCKDEBUG */
			uint8_t			mtxs_dodebug;	/* 25 */
		} s;
#endif
		uint8_t		mtxu_pad[32];	/* 0-31 */
	} u;
} __aligned(16);
#endif	/* __ASSEMBLER__ */

#ifdef __MUTEX_PRIVATE

#define	__HAVE_MUTEX_STUBS	1

#define	mtx_lock	u.s.mtxu_lock
#define	mtx_owner	u.s.mtxs_owner
#define	mtx_ipl		u.s.mtxs_ipl
#define	mtx_waiters	u.s.mtxs_waiters
#define	mtx_dodebug	u.s.mtxs_dodebug

/* Magic constants for mtx_owner */
#define	MUTEX_ADAPTIVE_UNOWNED		0xffffff00
#define	MUTEX_SPIN_FLAG			0xffffff10
#define	MUTEX_UNOWNED_OR_SPIN(x)	(((x) & 0xffffffef) == 0xffffff00)

#ifndef __ASSEMBLER__

static inline uintptr_t
MUTEX_OWNER(uintptr_t owner)
{
	return owner;
}

static inline int
MUTEX_OWNED(uintptr_t owner)
{
	return owner != MUTEX_ADAPTIVE_UNOWNED;
}

/* Mark the mutex as having waiters; report whether it is still owned. */
static inline int
MUTEX_SET_WAITERS(struct kmutex *mtx, uintptr_t owner)
{
	mb_write();
	mtx->mtx_waiters = 1;
	mb_memory();
	return mtx->mtx_owner != MUTEX_ADAPTIVE_UNOWNED;
}

static inline int
MUTEX_HAS_WAITERS(volatile struct kmutex *mtx)
{
	return mtx->mtx_waiters != 0;
}

static inline void
MUTEX_INITIALIZE_SPIN(struct kmutex *mtx, bool dodebug, int ipl)
{
	mtx->mtx_ipl = makeiplcookie(ipl);
	mtx->mtx_dodebug = dodebug;
	mtx->mtx_owner = MUTEX_SPIN_FLAG;
	__cpu_simple_lock_init(&mtx->mtx_lock);
}

static inline void
MUTEX_INITIALIZE_ADAPTIVE(struct kmutex *mtx, bool dodebug)
{
	mtx->mtx_dodebug = dodebug;
	mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
	__cpu_simple_lock_init(&mtx->mtx_lock);
}

static inline void
MUTEX_DESTROY(struct kmutex *mtx)
{
	mtx->mtx_owner = 0xffffffff;
}

static inline bool
MUTEX_DEBUG_P(struct kmutex *mtx)
{
	return mtx->mtx_dodebug != 0;
}

static inline int
MUTEX_SPIN_P(volatile struct kmutex *mtx)
{
	return mtx->mtx_owner == MUTEX_SPIN_FLAG;
}

static inline int
MUTEX_ADAPTIVE_P(volatile struct kmutex *mtx)
{
	return mtx->mtx_owner != MUTEX_SPIN_FLAG;
}

/* Acquire an adaptive mutex */
static inline int
MUTEX_ACQUIRE(struct kmutex *mtx, uintptr_t curthread)
{
	if (!__cpu_simple_lock_try(&mtx->mtx_lock))
		return 0;
	mtx->mtx_owner = curthread;
	return 1;
}

/* Release an adaptive mutex */
static inline void
MUTEX_RELEASE(struct kmutex *mtx)
{
	mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
	__cpu_simple_unlock(&mtx->mtx_lock);
	mtx->mtx_waiters = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(struct kmutex *mtx)
{
	mtx->mtx_waiters = 0;
}

#endif	/* __ASSEMBLER__ */

#endif	/* __MUTEX_PRIVATE */

#endif	/* _HPPA_MUTEX_H_ */
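
/*
 * Illustrative sketch, not part of the header proper: how the owner
 * field and the __cpu_simple_lock interlock above are expected to
 * combine for an adaptive mutex.  The simple lock (ldcw-based, hence
 * the 16-byte alignment) stands in for the missing compare-and-set:
 * whoever wins the interlock may write mtx_owner.  The "curthread"
 * value is assumed here to be the caller's LWP pointer; the slow-path
 * bookkeeping is only hinted at.
 *
 *	struct kmutex m;
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(&m, false);
 *	if (MUTEX_ACQUIRE(&m, (uintptr_t)curlwp)) {
 *		// ... critical section ...
 *		MUTEX_RELEASE(&m);
 *	} else {
 *		// Slow path: record a waiter with MUTEX_SET_WAITERS()
 *		// and sleep until the holder calls MUTEX_RELEASE().
 *	}
 */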