xref: /openbsd-src/sys/dev/pci/drm/include/linux/wait.h (revision c020cf82e0cc147236f01a8dca7052034cf9d30d)
1 /*	$OpenBSD: wait.h,v 1.5 2020/06/08 04:48:15 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
4  * Copyright (c) 2017 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _LINUX_WAIT_H
20 #define _LINUX_WAIT_H
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/mutex.h>
25 
26 #include <linux/list.h>
27 #include <linux/errno.h>
28 #include <linux/spinlock.h>
29 
/*
 * Emulation of Linux's struct wait_queue_entry: one waiter that can be
 * linked onto a wait_queue_head.
 */
struct wait_queue_entry {
	unsigned int flags;	/* 0 in a fresh DEFINE_WAIT(); set to 1 by
				 * prepare_to_wait() to mark sch_mtx held */
	void *private;		/* opaque per-waiter data for `func' */
	/* wake callback; see default/autoremove_wake_function below */
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct proc *proc;	/* waiting thread, set by add_wait_queue() */
	struct list_head entry;	/* linkage on wait_queue_head.head */
};

typedef struct wait_queue_entry wait_queue_entry_t;
39 
/*
 * Global scheduler-emulation state used by prepare_to_wait()/
 * finish_wait() and __wait_event_intr_timeout(); definitions live
 * elsewhere in the drm glue (not in this header).  sch_proc/sch_ident/
 * sch_priority are presumably consumed by a schedule() emulation that
 * is not visible here -- confirm against the defining file.
 */
extern struct mutex sch_mtx;
extern volatile struct proc *sch_proc;
extern volatile void *sch_ident;
extern int sch_priority;
44 
/*
 * Emulation of Linux's wait_queue_head: a mutex-protected list of
 * waiters plus a count of threads sleeping in
 * __wait_event_intr_timeout() (queried by waitqueue_active()).
 */
struct wait_queue_head {
	struct mutex lock;	/* protects `head' */
	unsigned int count;	/* sleepers in __wait_event_intr_timeout() */
	struct list_head head;	/* list of struct wait_queue_entry */
};
typedef struct wait_queue_head wait_queue_head_t;
51 
52 static inline void
53 init_waitqueue_head(wait_queue_head_t *wqh)
54 {
55 	mtx_init(&wqh->lock, IPL_TTY);
56 	wqh->count = 0;
57 	INIT_LIST_HEAD(&wqh->head);
58 }
59 
/* The lock-class `name' and `key' arguments are ignored here. */
#define __init_waitqueue_head(wq, name, key)	init_waitqueue_head(wq)
61 
/*
 * Wake callbacks for wait_queue_entry.func.  Implementations are not
 * in this header; by analogy with Linux, autoremove_wake_function
 * presumably also unlinks the entry on wakeup -- confirm against the
 * defining file.
 */
int default_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
64 
65 static inline void
66 init_wait_entry(wait_queue_entry_t *wqe, int flags)
67 {
68 	wqe->flags = flags;
69 	wqe->private = NULL;
70 	wqe->func = autoremove_wake_function;
71 	wqe->proc = NULL;
72 	INIT_LIST_HEAD(&wqe->entry);
73 }
74 
75 static inline void
76 __add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
77 {
78 	list_add(&wqe->entry, &wqh->head);
79 }
80 
81 static inline void
82 __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
83 {
84 	list_add_tail(&wqe->entry, &wqh->head);
85 }
86 
/*
 * Link `new' onto `head' (at the front) under the queue lock,
 * recording the current thread as the waiter.
 */
static inline void
add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
{
	mtx_enter(&head->lock);
	new->proc = curproc;
	__add_wait_queue(head, new);
	mtx_leave(&head->lock);
}
95 
96 static inline void
97 __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
98 {
99 	list_del(&wqe->entry);
100 }
101 
/*
 * Unlink `old' from `head' under the queue lock and clear its
 * recorded waiter.
 */
static inline void
remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
{
	mtx_enter(&head->lock);
	__remove_wait_queue(head, old);
	old->proc = NULL;
	mtx_leave(&head->lock);
}
110 
/*
 * Core sleep loop shared by the wait_event*() macros below.
 *
 * Sleeps on `wq' via msleep(9), holding the global sch_mtx around the
 * count update and the sleep itself, until `condition' becomes true,
 * `timo' ticks elapse, or -- when `prio' includes PCATCH -- a signal
 * arrives.  A `timo' of 0 sleeps with no timeout.
 *
 * Evaluates to:
 *   -ERESTARTSYS        if the sleep was interrupted by a signal;
 *   0 or 1              on timeout (1 iff `condition' turned true);
 *   remaining ticks     otherwise (loop exits once `condition' holds).
 *
 * NOTE(review): with timo == 0, `ret' ends up as 0 minus the elapsed
 * ticks after wakeup, so a successful untimed sleep can evaluate to a
 * value <= 0 -- confirm callers of the untimed variants only test for
 * -ERESTARTSYS.
 */
#define __wait_event_intr_timeout(wq, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int deadline, __error;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		atomic_inc_int(&(wq).count);				\
		deadline = ticks + ret;					\
		__error = msleep(&wq, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - ticks;					\
		atomic_dec_int(&(wq).count);				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})
139 
/*
 * Sleep -- uninterruptibly and without timeout -- until `condition'
 * gets true.  No-op if `condition' already holds.
 */
#define wait_event(wq, condition) 		\
do {						\
	if (!(condition))			\
		__wait_event_intr_timeout(wq, condition, 0, 0); \
} while (0)
148 
/*
 * Sleep until `condition' gets true or a signal arrives (PCATCH).
 * Evaluates to 0 if `condition' already held, otherwise to the result
 * of __wait_event_intr_timeout() (-ERESTARTSYS on signal).
 * NOTE(review): identical to wait_event_interruptible() here; Linux's
 * "fatal signals only" distinction is not emulated.
 */
#define wait_event_killable(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
156 
/*
 * Sleep until `condition' gets true or a signal arrives (PCATCH).
 * Evaluates to 0 if `condition' already held, otherwise to the result
 * of __wait_event_intr_timeout() (-ERESTARTSYS on signal).
 */
#define wait_event_interruptible(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
164 
/*
 * Same as wait_event_interruptible(); the Linux "_locked" contract
 * (caller holds wq->lock across the wait) is not emulated -- the body
 * is identical to the unlocked variant.
 */
#define wait_event_interruptible_locked(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
172 
/*
 * Sleep until `condition' gets true or `timo' ticks expire.
 *
 * Returns 0 if `condition' is still false when `timo' expires,
 * 1 if it turned true right at the deadline, or the remaining
 * (>=1) ticks otherwise.  If `condition' already holds, returns
 * `timo' without sleeping.
 */
#define wait_event_timeout(wq, condition, timo)	\
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, 0); \
	__ret;					\
})
186 
/*
 * Sleep until `condition' gets true, `timo' ticks expire, or the
 * process receives a signal (PCATCH).
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires,
 * 1 if it turned true right at the deadline, or the remaining
 * (>=1) ticks otherwise.  If `condition' already holds, returns
 * `timo' without sleeping.
 */
#define wait_event_interruptible_timeout(wq, condition, timo) \
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, PCATCH);\
	__ret;					\
})
202 
/*
 * Wake everything sleeping on `wqh': run each queued entry's wake
 * callback under the queue lock, then wakeup(9) the head itself for
 * threads parked in __wait_event_intr_timeout().
 */
static inline void
wake_up(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;
	mtx_enter(&wqh->lock);

	/* _safe iteration: a callback may unlink its own entry */
	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		if (wqe->func != NULL)
			/*
			 * NOTE(review): Linux's callback signature is
			 * (entry, mode, wake_flags, key); here mode is 0
			 * and the entry's own flags are passed as the
			 * wake flags -- confirm callbacks expect this.
			 */
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	wakeup(wqh);
	mtx_leave(&wqh->lock);
}
217 
/* wake_up() already wakes every waiter, so wake_up_all() is the same. */
#define wake_up_all(wq)			wake_up(wq)
219 
/*
 * Like wake_up() but without taking wqh->lock -- presumably for
 * callers that already hold it (the "_locked" suffix); confirm all
 * call sites do.
 */
static inline void
wake_up_all_locked(wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wqe;
	wait_queue_entry_t *tmp;

	/* _safe iteration: a callback may unlink its own entry */
	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
		if (wqe->func != NULL)
			wqe->func(wqe, 0, wqe->flags, NULL);
	}
	wakeup(wqh);
}
232 
#define wake_up_interruptible(wq)	wake_up(wq)
/* NOTE: reads `count' without taking the queue lock */
#define waitqueue_active(wq)		((wq)->count > 0)
235 
/*
 * Declare an on-stack wait queue entry ready for prepare_to_wait():
 * autoremove wake callback, self-linked (empty) entry, and --
 * implicitly -- zero flags, which tells prepare_to_wait() it still
 * has to take sch_mtx.
 */
#define	DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
/* Like DEFINE_WAIT() but with a caller-supplied wake callback. */
#define	DEFINE_WAIT_FUNC(name, cb)			\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = cb,				\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
248 
/*
 * Emulated Linux prepare_to_wait(): queue `wqe' on `wqh' and record
 * the wait channel and sleep priority in the sch_* globals,
 * presumably consumed by a schedule() emulation outside this header
 * -- confirm against the drm glue.
 *
 * On the first call for a DEFINE_WAIT() entry (flags == 0) the global
 * sch_mtx is entered and flags set to 1 to mark it held; the mutex
 * stays locked until finish_wait() releases it.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;	/* remember that we hold sch_mtx */
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}
263 
/*
 * Emulated Linux finish_wait(): undo prepare_to_wait() -- clear the
 * recorded wait channel, unlink `wqe' if a wake callback has not
 * already done so, and drop the sch_mtx taken by prepare_to_wait().
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}
273 
274 #endif
275