xref: /openbsd-src/sys/dev/pci/drm/include/linux/wait.h (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*	$OpenBSD: wait.h,v 1.8 2021/07/07 02:38:36 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
4  * Copyright (c) 2017 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _LINUX_WAIT_H
20 #define _LINUX_WAIT_H
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/mutex.h>
25 
26 #include <linux/list.h>
27 #include <linux/errno.h>
28 #include <linux/spinlock.h>
29 
/*
 * Compat shim for Linux's wait_queue_entry: one waiter on a wait queue.
 * `private' holds the sleeping proc (set to curproc by init_wait_entry()
 * and DEFINE_WAIT()); `func' is the wake callback invoked by wake_up()
 * and wake_up_all_locked(); `entry' links the waiter into a
 * wait_queue_head's list.
 */
struct wait_queue_entry {
	unsigned int flags;
	void *private;
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct list_head entry;
};

typedef struct wait_queue_entry wait_queue_entry_t;
/*
 * Global scheduling state shared with the drm sleep/wake code (defined
 * elsewhere): sch_mtx serializes prepare_to_wait()/finish_wait() and the
 * msleep() inside __wait_event_intr_timeout(); sch_proc/sch_ident/
 * sch_priority record the current single sleeper set by prepare_to_wait().
 */
extern struct mutex sch_mtx;
extern volatile struct proc *sch_proc;
extern volatile void *sch_ident;
extern int sch_priority;
43 
/*
 * Linux wait_queue_head_t: a mutex-protected list of wait_queue_entry
 * waiters.
 */
struct wait_queue_head {
	struct mutex lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
49 
50 static inline void
51 init_waitqueue_head(wait_queue_head_t *wqh)
52 {
53 	mtx_init(&wqh->lock, IPL_TTY);
54 	INIT_LIST_HEAD(&wqh->head);
55 }
56 
/* Linux's lock-class `name'/`key' arguments are meaningless here. */
#define __init_waitqueue_head(wqh, name, key)	init_waitqueue_head(wqh)

/* Default wake callback installed by init_wait_entry()/DEFINE_WAIT(). */
int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
60 
61 static inline void
62 init_wait_entry(wait_queue_entry_t *wqe, int flags)
63 {
64 	wqe->flags = flags;
65 	wqe->private = curproc;
66 	wqe->func = autoremove_wake_function;
67 	INIT_LIST_HEAD(&wqe->entry);
68 }
69 
/*
 * Insert wqe at the head of wqh's list.  Unlocked helper: callers hold
 * wqh->lock (add_wait_queue()) or sch_mtx (prepare_to_wait()).
 */
static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add(&wqe->entry, &wqh->head);
}
75 
/* Insert wqe at the tail of wqh's list; no locking is done here. */
static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add_tail(&wqe->entry, &wqh->head);
}
81 
82 static inline void
83 add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
84 {
85 	mtx_enter(&head->lock);
86 	__add_wait_queue(head, new);
87 	mtx_leave(&head->lock);
88 }
89 
/* Unlink wqe from its queue; caller handles any locking. */
static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_del(&wqe->entry);
}
95 
96 static inline void
97 remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
98 {
99 	mtx_enter(&head->lock);
100 	__remove_wait_queue(head, old);
101 	mtx_leave(&head->lock);
102 }
103 
/*
 * Core sleep loop shared by the wait_event*() macros: msleep(9) on &wqh
 * under sch_mtx until `condition' becomes true, `timo' jiffies elapse
 * (only checked when timo > 0), or — with PCATCH in `prio' — a signal
 * arrives (ERESTART/EINTR map to -ERESTARTSYS).
 *
 * Evaluates to: -ERESTARTSYS on signal; on timeout, 1 if a final check
 * of `condition' is true and 0 otherwise; else the remaining jiffies
 * (> 0) once `condition' holds.
 */
#define __wait_event_intr_timeout(wqh, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int __error;						\
		unsigned long deadline;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		deadline = jiffies + ret;				\
		__error = msleep(&wqh, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - jiffies;				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})
131 
/*
 * Sleep until `condition' gets true.  Uninterruptible (no PCATCH) and
 * without timeout; checks `condition' once before sleeping.
 */
#define wait_event(wqh, condition) 		\
do {						\
	if (!(condition))			\
		__wait_event_intr_timeout(wqh, condition, 0, 0); \
} while (0)
140 
/*
 * Sleep until `condition' gets true or a signal arrives.  There is no
 * distinct "killable-only" sleep state here; this is identical to
 * wait_event_interruptible() (PCATCH).  Returns 0 on success or
 * -ERESTARTSYS if interrupted.
 */
#define wait_event_killable(wqh, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;					\
})
148 
/*
 * Sleep until `condition' gets true or the process receives a signal.
 * Returns 0 on success or -ERESTARTSYS if interrupted.
 */
#define wait_event_interruptible(wqh, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;					\
})
156 
/*
 * NOTE(review): on Linux the *_locked variant expects wqh->lock to be
 * held by the caller; here it is byte-identical to
 * wait_event_interruptible() and takes sch_mtx itself — confirm callers
 * do not rely on the Linux locking contract.
 */
#define wait_event_interruptible_locked(wqh, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
	__ret;					\
})
164 
/*
 * Sleep until `condition' gets true or `timo' expires.
 *
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.  Uninterruptible (no PCATCH).
 */
#define wait_event_timeout(wqh, condition, timo)	\
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, 0); \
	__ret;					\
})
178 
/*
 * Sleep until `condition' gets true, `timo' expires or the process
 * receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_interruptible_timeout(wqh, condition, timo) \
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wqh, condition, timo, PCATCH);\
	__ret;					\
})
194 
195 static inline void
196 wake_up(wait_queue_head_t *wqh)
197 {
198 	wait_queue_entry_t *wqe;
199 	wait_queue_entry_t *tmp;
200 	mtx_enter(&wqh->lock);
201 
202 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
203 		KASSERT(wqe->func != NULL);
204 		if (wqe->func != NULL)
205 			wqe->func(wqe, 0, wqe->flags, NULL);
206 	}
207 	wakeup(wqh);
208 	mtx_leave(&wqh->lock);
209 }
210 
211 #define wake_up_all(wqh)			wake_up(wqh)
212 
/*
 * As wake_up(), but performs no locking itself — the caller is expected
 * to already hold wqh->lock.
 */
static inline void
wake_up_all_locked(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;

	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		KASSERT(wqe->func != NULL);
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	wakeup(wqh);
}

/* No interruptible-specific wakeup here; both map to wake_up(). */
#define wake_up_interruptible(wqh)		wake_up(wqh)
#define wake_up_interruptible_poll(wqh, flags)	wake_up(wqh)
229 
/*
 * Declare and initialize an on-stack wait queue entry for curproc.
 * `flags' is left zeroed, which prepare_to_wait() uses as "sch_mtx not
 * yet taken for this entry".
 */
#define	DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = curproc,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
236 
/*
 * Begin a prepare_to_wait()/finish_wait() cycle.  The first call for a
 * fresh entry (flags == 0) enters sch_mtx and marks the entry
 * (flags = 1); sch_mtx then remains held until finish_wait() releases
 * it.  Queues the entry on wqh if not already queued and publishes
 * curproc/entry/state into the global sch_* scheduling variables.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	/* flags == 0: first prepare for this entry; take sch_mtx once. */
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}
251 
/*
 * End a prepare_to_wait() cycle: clear the published sleep identifier,
 * unlink the entry if it is still queued, and release the sch_mtx that
 * prepare_to_wait() acquired.
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}
261 
262 #endif
263