xref: /openbsd-src/sys/dev/pci/drm/include/linux/wait.h (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: wait.h,v 1.4 2019/05/08 23:35:23 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
4  * Copyright (c) 2017 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _LINUX_WAIT_H
20 #define _LINUX_WAIT_H
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/mutex.h>
25 
26 #include <linux/list.h>
27 #include <linux/errno.h>
28 #include <linux/spinlock.h>
29 
/*
 * Linux-compatible wait queue entry: one waiter on a wait_queue_head.
 * `func' is the wake callback invoked by wake_up()/wake_up_all_locked();
 * `proc' records the sleeping process (OpenBSD-specific field).
 */
struct wait_queue_entry {
	unsigned int flags;
	void *private;
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct proc *proc;
	struct list_head entry;
};

typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Shared scheduler glue state (defined elsewhere).  sch_mtx serializes
 * prepare_to_wait()/finish_wait() and the __wait_event_intr_timeout()
 * sleep loop below.
 */
extern struct mutex sch_mtx;
extern volatile struct proc *sch_proc;
extern volatile void *sch_ident;
extern int sch_priority;
44 
/*
 * A wait queue head: a list of wait_queue_entry waiters protected by
 * `lock', plus a count of processes currently sleeping in
 * __wait_event_intr_timeout() (consumed by waitqueue_active()).
 */
struct wait_queue_head {
	struct mutex lock;
	unsigned int count;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
51 
52 static inline void
53 init_waitqueue_head(wait_queue_head_t *wqh)
54 {
55 	mtx_init(&wqh->lock, IPL_TTY);
56 	wqh->count = 0;
57 	INIT_LIST_HEAD(&wqh->head);
58 }
59 
60 #define __init_waitqueue_head(wq, name, key)	init_waitqueue_head(wq)
61 
62 int default_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
63 int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
64 
65 static inline void
66 init_wait_entry(wait_queue_entry_t *wqe, int flags)
67 {
68 	wqe->flags = flags;
69 	wqe->private = NULL;
70 	wqe->func = autoremove_wake_function;
71 	wqe->proc = NULL;
72 	INIT_LIST_HEAD(&wqe->entry);
73 }
74 
/*
 * List-only helpers: insert `wqe' at the head or the tail of `wqh'.
 * No locking here; visible callers serialize via wqh->lock
 * (add_wait_queue()) or sch_mtx (prepare_to_wait()).
 */
static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add(&wqe->entry, &wqh->head);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	list_add_tail(&wqe->entry, &wqh->head);
}
86 
87 static inline void
88 add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
89 {
90 	mtx_enter(&head->lock);
91 	new->proc = curproc;
92 	__add_wait_queue(head, new);
93 	mtx_leave(&head->lock);
94 }
95 
96 static inline void
97 __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
98 {
99 	list_del(&wqe->entry);
100 }
101 
102 static inline void
103 remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
104 {
105 	mtx_enter(&head->lock);
106 	__remove_wait_queue(head, old);
107 	old->proc = NULL;
108 	mtx_leave(&head->lock);
109 }
110 
/*
 * Core sleep loop shared by the wait_event*() macros below: msleep()
 * on &wq under sch_mtx until woken, until `timo' ticks have elapsed
 * (when timo > 0), or until a signal arrives (when `prio' includes
 * PCATCH).  wq.count stays incremented across the sleep so
 * waitqueue_active() can observe the sleeper.
 *
 * Evaluates to -ERESTARTSYS when interrupted, to the condition's
 * truth value (1/0) when a timeout expires, and otherwise to the
 * remaining tick count.
 *
 * NOTE(review): with timo == 0 the deadline arithmetic still runs, so
 * `ret' becomes ticks-elapsed negated after the first wakeup and the
 * loop exits on `ret > 0' even if `condition' is still false; callers
 * appear to rely on wakeup implying the condition — confirm.
 */
#define __wait_event_intr_timeout(wq, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int deadline, __error;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		atomic_inc_int(&(wq).count);				\
		deadline = ticks + ret;					\
		__error = msleep(&wq, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - ticks;					\
		atomic_dec_int(&(wq).count);				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})
139 
/*
 * Sleep, uninterruptibly and without a timeout, until `condition'
 * gets true.  Returns nothing.
 */
#define wait_event(wq, condition) 		\
do {						\
	if (!(condition))			\
		__wait_event_intr_timeout(wq, condition, 0, 0); \
} while (0)
148 
/*
 * Sleep until `condition' gets true or the process receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal, 0 if `condition'
 * was already true on entry.  Mirrors wait_event_interruptible()
 * below so callers can check the result; the previous
 * do { } while (0) form silently discarded it.
 * NOTE(review): PCATCH catches any signal, not only fatal ones as
 * Linux's "killable" variant does — confirm callers don't care.
 */
#define wait_event_killable(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
154 
/*
 * Sleep until `condition' gets true or the process receives a signal.
 * Evaluates to -ERESTARTSYS if interrupted by a signal; otherwise to
 * the residue of the sleep loop (0 when the condition was already
 * true on entry — see the NOTE on __wait_event_intr_timeout()).
 */
#define wait_event_interruptible(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
162 
/*
 * Same body as wait_event_interruptible() above.  NOTE(review): the
 * Linux "_locked" variant expects wq.lock to be held by the caller;
 * this implementation never touches that lock, so the distinction is
 * a no-op here.
 */
#define wait_event_interruptible_locked(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
170 
/*
 * Sleep until `condition' gets true or `timo' ticks expire.
 *
 * Returns 0 if `condition' is still false when `timo' expires,
 * 1 if it became true exactly at expiry, or the remaining (>=1)
 * ticks otherwise.  Not interruptible by signals (prio 0).
 */
#define wait_event_timeout(wq, condition, timo)	\
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, 0); \
	__ret;					\
})
184 
/*
 * Sleep until `condition' gets true, `timo' ticks expire or the
 * process receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) ticks otherwise.
 */
#define wait_event_interruptible_timeout(wq, condition, timo) \
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, PCATCH);\
	__ret;					\
})
200 
201 static inline void
202 wake_up(wait_queue_head_t *wqh)
203 {
204 	wait_queue_entry_t *wqe;
205 	wait_queue_entry_t *tmp;
206 	mtx_enter(&wqh->lock);
207 
208 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
209 		if (wqe->func != NULL)
210 			wqe->func(wqe, 0, wqe->flags, NULL);
211 	}
212 	wakeup(wqh);
213 	mtx_leave(&wqh->lock);
214 }
215 
216 #define wake_up_all(wq)			wake_up(wq)
217 
218 static inline void
219 wake_up_all_locked(wait_queue_head_t *wqh)
220 {
221 	wait_queue_entry_t *wqe;
222 	wait_queue_entry_t *tmp;
223 
224 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
225 		if (wqe->func != NULL)
226 			wqe->func(wqe, 0, wqe->flags, NULL);
227 	}
228 	wakeup(wqh);
229 }
230 
/* Interruptible wakes are not distinguished from regular ones. */
#define wake_up_interruptible(wq)	wake_up(wq)
/* True while some process sleeps in __wait_event_intr_timeout(). */
#define waitqueue_active(wq)		((wq)->count > 0)

/*
 * Declare an on-stack wait queue entry, self-linked (i.e. on no
 * queue) and using the autoremove wake callback; flags implicitly 0,
 * which prepare_to_wait() relies on (see below).
 */
#define	DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
/* Same as DEFINE_WAIT() but with a caller-supplied wake callback. */
#define	DEFINE_WAIT_FUNC(name, cb)			\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = cb,				\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
246 
/*
 * First half of the prepare_to_wait()/finish_wait() sleep protocol:
 * queue `wqe' on `wqh' and publish the sleep parameters in the
 * sch_* globals for the scheduler glue.
 *
 * wqe->flags doubles as a "sch_mtx acquired" marker: on the first
 * call for a freshly DEFINE_WAIT'ed entry (flags == 0) we take
 * sch_mtx and set flags to 1; subsequent calls find the mutex
 * already held.  finish_wait() drops it.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}
261 
/*
 * Second half of the sleep protocol: clear the published sleep
 * identity, unlink `wqe' if a wake callback has not already removed
 * it, and release sch_mtx taken by prepare_to_wait().
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}
271 
272 #endif
273