/* $OpenBSD: wait.h,v 1.11 2023/07/28 09:46:13 claudio Exp $ */
/*
 * Copyright (c) 2013, 2014, 2015 Mark Kettenis
 * Copyright (c) 2017 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <linux/list.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct wait_queue_entry {
	unsigned int flags;
	void *private;
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct list_head entry;
};

typedef struct wait_queue_entry wait_queue_entry_t;

struct wait_queue_head {
	struct mutex lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;

void prepare_to_wait(wait_queue_head_t *, wait_queue_entry_t *, int);
void finish_wait(wait_queue_head_t *, wait_queue_entry_t *);

static inline void
init_waitqueue_head(wait_queue_head_t *wqh)
{
	mtx_init(&wqh->lock, IPL_TTY);
	INIT_LIST_HEAD(&wqh->head);
}

#define __init_waitqueue_head(wqh, name, key)	init_waitqueue_head(wqh)

int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);

static inline void
init_wait_entry(wait_queue_entry_t *wqe, int flags)
{
	wqe->flags = flags;
	wqe->private = curproc;
	wqe->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wqe->entry);
}

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add(&wqe->entry, &wqh->head);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add_tail(&wqe->entry, &wqh->head);
}

static inline void
add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
{
	mtx_enter(&head->lock);
	__add_wait_queue(head, new);
	mtx_leave(&head->lock);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_del(&wqe->entry);
}

static inline void
remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
{
	mtx_enter(&head->lock);
	__remove_wait_queue(head, old);
	mtx_leave(&head->lock);
}
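
/*
 * Example usage of the helpers above (illustrative sketch only;
 * `my_wq' and `my_entry' are hypothetical names, not part of this
 * header):
 *
 *	struct wait_queue_head my_wq;
 *	struct wait_queue_entry my_entry;
 *
 *	init_waitqueue_head(&my_wq);
 *	init_wait_entry(&my_entry, 0);
 *	add_wait_queue(&my_wq, &my_entry);
 *	...
 *	remove_wait_queue(&my_wq, &my_entry);
 *
 * Most callers rely on the wait_event_*() macros below instead, which
 * set up and tear down the queue entry themselves.
 */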

#define __wait_event_intr_timeout(wqh, condition, timo, prio)		\
({									\
	long __ret = timo;						\
	struct wait_queue_entry __wq_entry;				\
									\
	init_wait_entry(&__wq_entry, 0);				\
	do {								\
		int __error, __wait;					\
		unsigned long deadline;					\
									\
		KASSERT(!cold);						\
									\
		prepare_to_wait(&wqh, &__wq_entry, prio);		\
		deadline = jiffies + __ret;				\
									\
		__wait = !(condition);					\
									\
		__error = sleep_finish(__ret, __wait);			\
		if ((timo) > 0)						\
			__ret = deadline - jiffies;			\
									\
		if (__error == ERESTART || __error == EINTR) {		\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if ((timo) > 0 && (__ret <= 0 || __error == EWOULDBLOCK)) { \
			__ret = ((condition)) ? 1 : 0;			\
			break;						\
		}							\
	} while (__ret > 0 && !(condition));				\
	finish_wait(&wqh, &__wq_entry);					\
	__ret;								\
})

/*
 * Sleep until `condition' gets true.
 */
#define wait_event(wqh, condition)					\
do {									\
	if (!(condition))						\
		__wait_event_intr_timeout(wqh, condition, 0, 0);	\
} while (0)

#define wait_event_killable(wqh, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;								\
})

#define wait_event_interruptible(wqh, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;								\
})

#define __wait_event_intr_locked(wqh, condition)			\
({									\
	struct wait_queue_entry __wq_entry;				\
	int __error;							\
									\
	init_wait_entry(&__wq_entry, 0);				\
	do {								\
		KASSERT(!cold);						\
									\
		if (list_empty(&__wq_entry.entry))			\
			__add_wait_queue_entry_tail(&wqh, &__wq_entry);	\
		set_current_state(TASK_INTERRUPTIBLE);			\
									\
		mtx_leave(&(wqh).lock);					\
		__error = sleep_finish(0, 1);				\
		mtx_enter(&(wqh).lock);					\
		if (__error == ERESTART || __error == EINTR) {		\
			__error = -ERESTARTSYS;				\
			break;						\
		}							\
	} while (!(condition));						\
	__remove_wait_queue(&(wqh), &__wq_entry);			\
	__set_current_state(TASK_RUNNING);				\
	__error;							\
})

#define wait_event_interruptible_locked(wqh, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_intr_locked(wqh, condition);	\
	__ret;								\
})

/*
 * Sleep until `condition' gets true or `timo' expires.
 *
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_timeout(wqh, condition, timo)			\
({									\
	long __ret = timo;						\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, 0); \
	__ret;								\
})

/*
 * Sleep until `condition' gets true, `timo' expires or the process
 * receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_interruptible_timeout(wqh, condition, timo)		\
({									\
	long __ret = timo;						\
	if (!(condition))						\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, PCATCH);\
	__ret;								\
})
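
/*
 * Example usage (illustrative sketch only; `drv_wq', `drv_done' and
 * `timo' are hypothetical names, with `timo' a timeout in jiffies):
 *
 *	wait_queue_head_t drv_wq;
 *	int drv_done;
 *	long ret;
 *
 *	init_waitqueue_head(&drv_wq);
 *
 * The waiting context sleeps until `drv_done' becomes non-zero:
 *
 *	ret = wait_event_interruptible_timeout(drv_wq, drv_done, timo);
 *
 * ret is -ERESTARTSYS if a signal arrived, 0 if `timo' expired with
 * `drv_done' still false, and otherwise the number of jiffies left.
 *
 * The waking context sets the condition and then wakes the queue:
 *
 *	drv_done = 1;
 *	wake_up(&drv_wq);
 */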

#define __wait_event_lock_irq(wqh, condition, mtx)			\
({									\
	struct wait_queue_entry __wq_entry;				\
									\
	init_wait_entry(&__wq_entry, 0);				\
	do {								\
		int __wait;						\
									\
		KASSERT(!cold);						\
									\
		prepare_to_wait(&wqh, &__wq_entry, 0);			\
									\
		__wait = !(condition);					\
									\
		mtx_leave(&(mtx));					\
		sleep_finish(0, __wait);				\
		mtx_enter(&(mtx));					\
	} while (!(condition));						\
	finish_wait(&wqh, &__wq_entry);					\
})

/*
 * Sleep until `condition' gets true.
 * called locked, condition checked under lock
 */
#define wait_event_lock_irq(wqh, condition, mtx)			\
do {									\
	if (!(condition))						\
		__wait_event_lock_irq(wqh, condition, mtx);		\
} while (0)

static inline void
wake_up(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;
	mtx_enter(&wqh->lock);

	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		KASSERT(wqe->func != NULL);
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	mtx_leave(&wqh->lock);
}

#define wake_up_all(wqh)			wake_up(wqh)

static inline void
wake_up_all_locked(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;

	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		KASSERT(wqe->func != NULL);
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
}

#define wake_up_interruptible(wqh)		wake_up(wqh)
#define wake_up_interruptible_poll(wqh, flags)	wake_up(wqh)

#define DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = curproc,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}

#endif