/* $OpenBSD: wait.h,v 1.9 2023/01/01 01:34:58 jsg Exp $ */
/*
 * Copyright (c) 2013, 2014, 2015 Mark Kettenis
 * Copyright (c) 2017 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Emulation of the Linux wait-queue API (<linux/wait.h>) on top of the
 * native OpenBSD sleep primitives (msleep(9)/wakeup(9)) and mutexes.
 */

#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <linux/list.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

/*
 * One waiter on a wait queue.
 */
struct wait_queue_entry {
	unsigned int flags;	/* also doubles as "holds sch_mtx" marker in
				 * prepare_to_wait(), see below */
	void *private;		/* the waiting process (set to curproc) */
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
				/* wake callback, invoked from wake_up() */
	struct list_head entry;	/* linkage on wait_queue_head::head */
};

typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Global sleep-channel state shared by all emulated wait queues;
 * defined elsewhere in the drm compat code (presumably drm_linux.c —
 * not visible from this header).
 */
extern struct mutex sch_mtx;
extern volatile struct proc *sch_proc;
extern volatile void *sch_ident;
extern int sch_priority;

/*
 * A wait queue: a mutex-protected list of wait_queue_entry waiters.
 */
struct wait_queue_head {
	struct mutex lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;

/*
 * Initialize a wait queue head at IPL_TTY.
 */
static inline void
init_waitqueue_head(wait_queue_head_t *wqh)
{
	mtx_init(&wqh->lock, IPL_TTY);
	INIT_LIST_HEAD(&wqh->head);
}

/* The lockdep name/key arguments have no equivalent here; ignore them. */
#define __init_waitqueue_head(wqh, name, key) init_waitqueue_head(wqh)

int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);

/*
 * Initialize a wait queue entry for the current process with the
 * default (auto-removing) wake callback.
 */
static inline void
init_wait_entry(wait_queue_entry_t *wqe, int flags)
{
	wqe->flags = flags;
	wqe->private = curproc;
	wqe->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wqe->entry);
}

/* Insert at the head of the queue; caller holds wqh->lock. */
static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add(&wqe->entry, &wqh->head);
}

/* Insert at the tail of the queue; caller holds wqh->lock. */
static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add_tail(&wqe->entry, &wqh->head);
}

/* Locked variant of __add_wait_queue(). */
static inline void
add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
{
	mtx_enter(&head->lock);
	__add_wait_queue(head, new);
	mtx_leave(&head->lock);
}

/* Unlink an entry from its queue; caller holds wqh->lock. */
static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_del(&wqe->entry);
}

/* Locked variant of __remove_wait_queue(). */
static inline void
remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
{
	mtx_enter(&head->lock);
	__remove_wait_queue(head, old);
	mtx_leave(&head->lock);
}

/*
 * Core sleep loop backing the wait_event*() macros below.  Sleeps on
 * &wqh (the head itself is the wakeup(9) channel) under sch_mtx until
 * `condition' becomes true, re-evaluating it after every wakeup.
 *
 * Evaluates to:
 *   -ERESTARTSYS	if msleep() was interrupted by a signal
 *			(only possible when `prio' includes PCATCH),
 *   1 or 0		condition's value at expiry, when `timo' > 0
 *			and the deadline passed,
 *   remaining jiffies	otherwise.
 *
 * A `timo' of 0 means no timeout (msleep(9) sleeps until wakeup);
 * the (timo) > 0 guard keeps the deadline check from ever firing in
 * that case.  Must not be used before the scheduler runs (KASSERT !cold).
 * NOTE(review): `condition' is (re)evaluated without sch_mtx held, so it
 * must be safe to check unlocked — as in Linux's wait_event contract.
 */
#define __wait_event_intr_timeout(wqh, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int __error;						\
		unsigned long deadline;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		deadline = jiffies + ret;				\
		__error = msleep(&wqh, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - jiffies;				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})

/*
 * Sleep until `condition' gets true.
 */
#define wait_event(wqh, condition)					\
do {									\
	if (!(condition))						\
		__wait_event_intr_timeout(wqh, condition, 0, 0);	\
} while (0)

/*
 * Like wait_event(), but interruptible by a signal.
 * Returns 0 on success, -ERESTARTSYS if interrupted.
 */
#define wait_event_killable(wqh, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;								\
})

/*
 * Same as wait_event_killable() in this emulation: PCATCH makes the
 * sleep interruptible by any signal.
 */
#define wait_event_interruptible(wqh, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;								\
})

/*
 * NOTE(review): the Linux "_locked" variant expects the caller to hold
 * wqh->lock; this emulation maps it to the plain interruptible wait and
 * ignores that distinction — confirm callers tolerate this.
 */
#define wait_event_interruptible_locked(wqh, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;								\
})

/*
 * Sleep until `condition' gets true or `timo' expires.
 *
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_timeout(wqh, condition, timo)			\
({									\
	long __ret = timo;						\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, 0); \
	__ret;								\
})

/*
 * Sleep until `condition' gets true, `timo' expires or the process
 * receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_interruptible_timeout(wqh, condition, timo)		\
({									\
	long __ret = timo;						\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, PCATCH);\
	__ret;								\
})

/*
 * Uninterruptible, untimed sleep loop for wait_event_lock_irq().
 * `mtx' is dropped around the sleep and re-taken before `condition'
 * is re-checked, so the condition is always evaluated under `mtx'.
 */
#define __wait_event_lock_irq(wqh, condition, mtx)			\
({									\
	do {								\
		KASSERT(!cold);						\
									\
		mtx_leave(&(mtx));					\
		mtx_enter(&sch_mtx);					\
		msleep(&wqh, &sch_mtx, 0, "drmweli", 0);		\
		mtx_leave(&sch_mtx);					\
		mtx_enter(&(mtx));					\
	} while (!(condition));						\
})

/*
 * Sleep until `condition' gets true.
 * called locked, condition checked under lock
 */
#define wait_event_lock_irq(wqh, condition, mtx)			\
do {									\
	if (!(condition))						\
		__wait_event_lock_irq(wqh, condition, mtx);		\
} while (0)

/*
 * Wake all waiters on `wqh': run each queued entry's wake callback
 * (safe against callbacks that unlink themselves, hence the _safe
 * iterator), then broadcast on the head itself for the sleepers in
 * __wait_event_intr_timeout(), which sleep on &wqh directly.
 */
static inline void
wake_up(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;
	mtx_enter(&wqh->lock);

	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		/* func should always be set; the if is belt-and-braces
		 * for non-DIAGNOSTIC kernels where KASSERT is a no-op. */
		KASSERT(wqe->func != NULL);
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	wakeup(wqh);
	mtx_leave(&wqh->lock);
}

#define wake_up_all(wqh)	wake_up(wqh)

/*
 * Same as wake_up() but without taking wqh->lock; the caller is
 * expected to already hold it.
 */
static inline void
wake_up_all_locked(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;

	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		KASSERT(wqe->func != NULL);
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	wakeup(wqh);
}

#define wake_up_interruptible(wqh)		wake_up(wqh)
#define wake_up_interruptible_poll(wqh, flags)	wake_up(wqh)

/*
 * Declare a wait queue entry for the current process on the stack,
 * with flags == 0 (prepare_to_wait() keys off that zero, see below).
 */
#define DEFINE_WAIT(name)					\
	struct wait_queue_entry name = {			\
		.private = curproc,				\
		.func = autoremove_wake_function,		\
		.entry = LIST_HEAD_INIT((name).entry),		\
	}

/*
 * Enqueue `wqe' on `wqh' and record the current process as the pending
 * sleeper in the global sch_* state.  On the first call for an entry
 * (flags == 0, as left by DEFINE_WAIT) this acquires sch_mtx and sets
 * flags to 1 to remember that; sch_mtx then stays held across the
 * prepare_to_wait()/finish_wait() pair and is only released by
 * finish_wait().  `state' (the Linux task state) is stashed in
 * sch_priority — presumably consumed by the schedule() emulation
 * elsewhere; not visible from this header.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}

/*
 * Tear down after a prepare_to_wait()/sleep sequence: clear the pending
 * sleep channel, unlink the entry if a wake callback has not already
 * done so, and release sch_mtx (taken in prepare_to_wait()).
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}

#endif