/*	$OpenBSD: sys_futex.c,v 1.4 2017/08/13 20:26:33 guenther Exp $ */

/*
 * Copyright (c) 2016-2017 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/rwlock.h>
#include <sys/futex.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

/*
 * Atomicity is only needed on MULTIPROCESSOR kernels.  Fall back on
 * copyin(9) until non-MULTIPROCESSOR architectures have a copyin32(9)
 * implementation.
 */
#ifndef MULTIPROCESSOR
#define copyin32(uaddr, kaddr)	copyin((uaddr), (kaddr), sizeof(uint32_t))
#endif

/*
 * Kernel representation of a futex.
 */
struct futex {
	LIST_ENTRY(futex)	 ft_list;	/* list of all futexes */
	TAILQ_HEAD(, proc)	 ft_threads;	/* sleeping queue */
	uint32_t		*ft_uaddr;	/* userspace address */
	pid_t			 ft_pid;	/* process identifier */
	unsigned int		 ft_refcnt;	/* # of references */
};

/* Syscall helpers. */
int	futex_wait(uint32_t *, uint32_t, const struct timespec *);
int	futex_wake(uint32_t *, uint32_t);
int	futex_requeue(uint32_t *, uint32_t, uint32_t *, uint32_t);

/* Flags for futex_get(). */
#define FT_CREATE	0x1	/* Create a futex if it doesn't exist. */

struct futex	*futex_get(uint32_t *, int);
void		 futex_put(struct futex *);

/*
 * The global futex lock serializes futex(2) calls so that no wakeup
 * event is lost, and protects the global list of all futexes and
 * their states.
 */
struct rwlock ftlock = RWLOCK_INITIALIZER("futex");
static LIST_HEAD(, futex) ftlist;
struct pool ftpool;

void
futex_init(void)
{
	pool_init(&ftpool, sizeof(struct futex), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "futexpl", NULL);
}

int
sys_futex(struct proc *p, void *v, register_t *retval)
{
	struct sys_futex_args /* {
		syscallarg(uint32_t *) f;
		syscallarg(int) op;
		syscallarg(uint32_t) val;
		syscallarg(const struct timespec *) timeout;
		syscallarg(uint32_t *) g;
	} */ *uap = v;
	uint32_t *uaddr = SCARG(uap, f);
	int op = SCARG(uap, op);
	uint32_t val = SCARG(uap, val);
	const struct timespec *timeout = SCARG(uap, timeout);
	void *g = SCARG(uap, g);

	switch (op) {
	case FUTEX_WAIT:
		KERNEL_LOCK();
		rw_enter_write(&ftlock);
		*retval = futex_wait(uaddr, val, timeout);
		rw_exit_write(&ftlock);
		KERNEL_UNLOCK();
		break;
	case FUTEX_WAKE:
		rw_enter_write(&ftlock);
		*retval = futex_wake(uaddr, val);
		rw_exit_write(&ftlock);
		break;
	case FUTEX_REQUEUE:
		/*
		 * For FUTEX_REQUEUE the ``timeout'' argument is reused
		 * to pass the maximum number of threads to requeue on
		 * the futex at address ``g''.
		 */
		rw_enter_write(&ftlock);
		*retval = futex_requeue(uaddr, val, g, (unsigned long)timeout);
		rw_exit_write(&ftlock);
		break;
	default:
		*retval = ENOSYS;
		break;
	}

	return 0;
}

/*
 * Return an existing futex matching userspace address ``uaddr''.
 *
 * If no such futex exists and FT_CREATE is given, create it.
 */
struct futex *
futex_get(uint32_t *uaddr, int flag)
{
	struct futex *f;

	rw_assert_wrlock(&ftlock);

	LIST_FOREACH(f, &ftlist, ft_list) {
		if (f->ft_uaddr == uaddr && f->ft_pid == curproc->p_p->ps_pid) {
			f->ft_refcnt++;
			break;
		}
	}

	if ((f == NULL) && (flag & FT_CREATE)) {
		/*
		 * We rely on the rwlock to ensure that no other thread
		 * creates the same futex.
		 */
		f = pool_get(&ftpool, PR_WAITOK);
		TAILQ_INIT(&f->ft_threads);
		f->ft_uaddr = uaddr;
		f->ft_pid = curproc->p_p->ps_pid;
		f->ft_refcnt = 1;
		LIST_INSERT_HEAD(&ftlist, f, ft_list);
	}

	return f;
}

/*
 * Release a given futex.
 */
void
futex_put(struct futex *f)
{
	rw_assert_wrlock(&ftlock);

	KASSERT(f->ft_refcnt > 0);

	--f->ft_refcnt;
	if (f->ft_refcnt == 0) {
		KASSERT(TAILQ_EMPTY(&f->ft_threads));
		LIST_REMOVE(f, ft_list);
		pool_put(&ftpool, f);
	}
}

/*
 * Put the current thread on the sleep queue of the futex at address
 * ``uaddr''.  Let it sleep for the specified ``timeout'' time, or
 * indefinitely if the argument is NULL.
 */
int
futex_wait(uint32_t *uaddr, uint32_t val, const struct timespec *timeout)
{
	struct proc *p = curproc;
	struct futex *f;
	uint64_t to_ticks = 0;
	uint32_t cval;
	int error;

	/*
	 * After reading the value a race is still possible but
	 * we deal with it by serializing all futex syscalls.
	 */
	rw_assert_wrlock(&ftlock);

	/*
	 * Read the userspace futex value.
	 */
	if ((error = copyin32(uaddr, &cval)))
		return error;

	/* If the value changed, stop here. */
	if (cval != val)
		return EAGAIN;

	if (timeout != NULL) {
		struct timespec ts;

		if ((error = copyin(timeout, &ts, sizeof(ts))))
			return error;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, timeout);
#endif
		/*
		 * Convert the relative timeout into ticks, rounding up
		 * so we never sleep for less than requested.
		 */
		to_ticks = (uint64_t)hz * ts.tv_sec +
		    (ts.tv_nsec + tick * 1000 - 1) / (tick * 1000) + 1;
		if (to_ticks > INT_MAX)
			to_ticks = INT_MAX;
	}

	f = futex_get(uaddr, FT_CREATE);
	TAILQ_INSERT_TAIL(&f->ft_threads, p, p_fut_link);
	p->p_futex = f;

	error = rwsleep(p, &ftlock, PUSER|PCATCH, "fsleep", (int)to_ticks);
	if (error == ERESTART)
		error = EINTR;
	else if (error == EWOULDBLOCK) {
		/* A race occurred between a wakeup and a timeout. */
		if (p->p_futex == NULL)
			error = 0;
		else
			error = ETIMEDOUT;
	}

	/* Remove ourselves if we haven't been awakened. */
	if ((f = p->p_futex) != NULL) {
		p->p_futex = NULL;
		TAILQ_REMOVE(&f->ft_threads, p, p_fut_link);
		futex_put(f);
	}

	return error;
}

/*
 * Wake up at most ``n'' sibling threads sleeping on a futex at address
 * ``uaddr'' and requeue at most ``m'' sibling threads on a futex at
 * address ``uaddr2''.
 */
int
futex_requeue(uint32_t *uaddr, uint32_t n, uint32_t *uaddr2, uint32_t m)
{
	struct futex *f, *g;
	struct proc *p;
	uint32_t count = 0;

	rw_assert_wrlock(&ftlock);

	f = futex_get(uaddr, 0);
	if (f == NULL)
		return 0;

	while ((p = TAILQ_FIRST(&f->ft_threads)) != NULL && (count < (n + m))) {
		p->p_futex = NULL;
		TAILQ_REMOVE(&f->ft_threads, p, p_fut_link);
		/* Release the reference the sleeping thread took in futex_wait(). */
		futex_put(f);

		if (count < n) {
			wakeup_one(p);
		} else if (uaddr2 != NULL) {
			g = futex_get(uaddr2, FT_CREATE);
			TAILQ_INSERT_TAIL(&g->ft_threads, p, p_fut_link);
			p->p_futex = g;
		}
		count++;
	}

	futex_put(f);

	return count;
}

/*
 * Wake up at most ``n'' sibling threads sleeping on a futex at address
 * ``uaddr''.
 */
int
futex_wake(uint32_t *uaddr, uint32_t n)
{
	return futex_requeue(uaddr, n, NULL, 0);
}
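
/*
 * Example usage (an illustrative sketch, not part of this file): how a
 * userspace thread can block on a 32-bit word and be woken through the
 * interface implemented above.  This assumes the futex(2) wrapper and
 * the FUTEX_WAIT/FUTEX_WAKE constants from <sys/futex.h>; the helper
 * names wait_for_flag() and set_flag() are hypothetical.  The loop
 * around futex() re-checks the word, which handles both an EAGAIN
 * result (the value changed before sleeping) and spurious wakeups.
 *
 *	#include <sys/futex.h>
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t flag;
 *
 *	void
 *	wait_for_flag(void)
 *	{
 *		// Sleep only while the word still reads 0; futex_wait()
 *		// re-reads it under the futex lock before sleeping.
 *		while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
 *			futex(&flag, FUTEX_WAIT, 0, NULL, NULL);
 *	}
 *
 *	void
 *	set_flag(void)
 *	{
 *		__atomic_store_n(&flag, 1, __ATOMIC_RELEASE);
 *		// Wake at most one thread sleeping on the word.
 *		futex(&flag, FUTEX_WAKE, 1, NULL, NULL);
 *	}
 */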