1 /*- 2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org> 3 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com> 4 * 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 */ 29 30 #include <assert.h> 31 #include <errno.h> 32 #include <unistd.h> 33 #include <sys/time.h> 34 35 #include "thr_private.h" 36 37 #define cpu_ccfence() __asm __volatile("" : : : "memory") 38 39 /* 40 * This function is used to acquire a contested lock. 41 * 42 * A *mtx value of 1 indicates locked normally. 43 * A *mtx value of 2 indicates locked and contested. 
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int timo)
{
	int v, errval, ret = 0;

	/*
	 * Contested-acquire loop.  Mutex states: 0 = unlocked,
	 * 1 = locked (uncontested), 2 = locked and contested.
	 *
	 * timo is in microseconds; timo == 0 means wait forever.
	 * Returns 0 on success or ETIMEDOUT; never returns EINTR
	 * (interrupted sleeps simply re-loop).
	 */
	for (;;) {
		v = *mtx;
		cpu_ccfence();
		/* Fast path: grab an unlocked mutex (0 -> 1). */
		if (v == 0 && atomic_cmpset_acq_int(mtx, 0, 1))
			break;
		/*
		 * Mark the mutex contested (1 -> 2) before sleeping so
		 * the holder knows to wake us on unlock.  If it is
		 * already 2, or our CAS succeeds, sleep on it.  Either
		 * way we only sleep while the observed value is 2.
		 */
		if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) {
			if (timo == 0) {
				/* No timeout: sleep until woken, then retry. */
				_umtx_sleep_err(mtx, 2, timo);
			} else if ((errval = _umtx_sleep_err(mtx, 2, timo)) > 0) {
				/*
				 * EAGAIN from the kernel indicates the
				 * timeout expired.  Make one final attempt
				 * to take the lock (0 -> 1) before giving
				 * up with ETIMEDOUT.
				 */
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, 1))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
	}

	return (ret);
}

/*
 * Release a mutex.  A contested mutex has a value of 2, an
 * uncontested mutex has a value of 1.
 *
 * CAS the currently-observed value to 0; if the prior value was
 * anything other than 1 (i.e. contested), wake up one waiter.
 *
 * NOTE(review): the acquire-flavored cmpset is used here as well;
 * release semantics would normally be expected on the unlock path —
 * confirm against this platform's atomic(9) guarantees.
 */
void
__thr_umtx_unlock(volatile umtx_t *mtx)
{
	int v;

	for (;;) {
		v = *mtx;
		cpu_ccfence();
		if (atomic_cmpset_acq_int(mtx, v, 0)) {
			/*
			 * v == 2 means there may be sleepers to wake.
			 * (v == 0, unlocking an unlocked mutex, also
			 * takes this path and issues a spurious wakeup.)
			 */
			if (v != 1)
				_umtx_wakeup_err(mtx, 1);
			break;
		}
	}
}

/*
 * Low level timed umtx lock.  This function must never return
 * EINTR.
 *
 * timeout is a RELATIVE timespec.  A zero or negative timeout
 * returns ETIMEDOUT immediately.  Returns 0 on success or
 * ETIMEDOUT; EINTR from the underlying lock attempt is absorbed
 * by re-looping against the absolute deadline.
 */
int
__thr_umtx_timedlock(volatile umtx_t *mtx, const struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	int timo, ret;

	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	/* XXX should use a monotonic clock here, not CLOCK_REALTIME. */
	clock_gettime(CLOCK_REALTIME, &ts);
	/* ts = absolute deadline; ts2 = remaining time, updated each pass. */
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	for (;;) {
		/*
		 * Convert the remaining time to a microsecond sleep
		 * quantum.  Only the nanosecond field is consulted:
		 * when it is non-zero, sleep that fraction (minimum
		 * 1us); otherwise sleep in 1-second (1000000us)
		 * chunks.  The absolute-deadline check below keeps
		 * the total wait bounded either way.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}
		ret = __thr_umtx_lock(mtx, timo);
		/* Acquired (0) or hard failure: done.  (EINTR check is defensive.) */
		if (ret != EINTR && ret != ETIMEDOUT)
			break;
		/* Recompute remaining time against the absolute deadline. */
		clock_gettime(CLOCK_REALTIME, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 ||
		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

/*
 * Wait on *mtx while its value equals exp (condition-variable style
 * wait).  Returns 0 if the value no longer matches (or on kernel
 * EBUSY/timeout paths), EINTR if interrupted, ETIMEDOUT if the
 * relative timeout (read against clockid) expires.
 *
 * Unlike __thr_umtx_timedlock(), this function MAY return EINTR.
 */
int
_thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
    int clockid)
{
	struct timespec ts, ts2, ts3;
	int timo, errval, ret = 0;

	cpu_ccfence();
	/* Value already changed: nothing to wait for. */
	if (*mtx != exp)
		return (0);

	if (timeout == NULL) {
		/*
		 * Untimed wait: sleep in 10-second (10000000us) chunks
		 * until the kernel reports EBUSY (value changed before
		 * sleeping) or we are interrupted.
		 */
		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
			if (errval == EBUSY)
				break;
			if (errval == EINTR) {
				ret = EINTR;
				break;
			}
#if 0
			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
				if (*mtx != exp) {
					fprintf(stderr,
					    "thr_umtx_wait: FAULT VALUE CHANGE "
					    "%d -> %d oncond %p\n",
					    exp, *mtx, mtx);
				}
			}
#endif
			/* Re-check the value after each chunk expires. */
			if (*mtx != exp)
				return(0);
		}
		return (ret);
	}

	/* Zero or negative relative timeout: fail immediately. */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	/* ts = absolute deadline on clockid; ts2 = remaining time. */
	clock_gettime(clockid, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	for (;;) {
		/*
		 * Same microsecond-quantum derivation as
		 * __thr_umtx_timedlock() above: nanosecond remainder
		 * when present, otherwise 1-second chunks.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}

		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
			if (errval == EBUSY) {
				/* Value changed before sleeping: success. */
				ret = 0;
				break;
			}
			if (errval == EINTR) {
				ret = EINTR;
				break;
			}
		}

		/* Recompute remaining time against the absolute deadline. */
		clock_gettime(clockid, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

/*
 * Wake up to count threads sleeping on mtx.  Thin wrapper around
 * the kernel umtx wakeup primitive.
 */
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
	_umtx_wakeup_err(mtx, count);
}