xref: /openbsd-src/sys/dev/pci/drm/include/linux/wait.h (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
/*	$OpenBSD: wait.h,v 1.3 2019/05/01 07:22:24 jsg Exp $	*/
/*
 * Copyright (c) 2013, 2014, 2015 Mark Kettenis
 * Copyright (c) 2017 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
18 
19 #ifndef _LINUX_WAIT_H
20 #define _LINUX_WAIT_H
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/mutex.h>
25 
26 #include <linux/list.h>
27 #include <linux/errno.h>
28 #include <linux/spinlock.h>
29 
/*
 * One waiter on a wait queue (Linux compatibility).  The `func'
 * callback is invoked by wake_up()/wake_up_all_locked() for every
 * queued entry.
 */
struct wait_queue_entry {
	unsigned int flags;	/* also used by prepare_to_wait() as a
				 * "sch_mtx taken" marker (set to 1) */
	void *private;
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct proc *proc;	/* set to curproc by add_wait_queue() */
	struct list_head entry;	/* linkage on wait_queue_head.head */
};

typedef struct wait_queue_entry wait_queue_entry_t;
39 
/* Global scheduler-emulation state, shared by prepare_to_wait()/finish_wait()
 * and __wait_event_intr_timeout(); defined elsewhere in the drm glue. */
extern struct mutex sch_mtx;		/* protects the sch_* variables below */
extern volatile struct proc *sch_proc;	/* process recorded by prepare_to_wait() */
extern volatile void *sch_ident;	/* wait channel recorded by prepare_to_wait() */
extern int sch_priority;		/* sleep priority recorded by prepare_to_wait() */
44 
/*
 * Wait queue head (Linux compatibility): a mutex-protected list of
 * wait_queue_entry plus a count of threads currently sleeping in
 * __wait_event_intr_timeout().
 */
struct wait_queue_head {
	struct mutex lock;	/* protects `head'; initialized at IPL_TTY */
	unsigned int count;	/* sleepers; tested by waitqueue_active() */
	struct list_head head;	/* list of queued wait_queue_entry */
};
typedef struct wait_queue_head wait_queue_head_t;
51 
52 static inline void
53 init_waitqueue_head(wait_queue_head_t *wqh)
54 {
55 	mtx_init(&wqh->lock, IPL_TTY);
56 	wqh->count = 0;
57 	INIT_LIST_HEAD(&wqh->head);
58 }
59 
60 #define __init_waitqueue_head(wq, name, key)	init_waitqueue_head(wq)
61 
62 int default_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
63 int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
64 
65 static inline void
66 init_wait_entry(wait_queue_entry_t *wqe, int flags)
67 {
68 	wqe->flags = flags;
69 	wqe->private = NULL;
70 	wqe->func = autoremove_wake_function;
71 	wqe->proc = NULL;
72 	INIT_LIST_HEAD(&wqe->entry);
73 }
74 
75 static inline void
76 __add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
77 {
78 	list_add(&wqe->entry, &wqh->head);
79 }
80 
81 static inline void
82 __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
83 {
84 	list_add_tail(&wqe->entry, &wqh->head);
85 }
86 
87 static inline void
88 add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
89 {
90 	mtx_enter(&head->lock);
91 	new->proc = curproc;
92 	__add_wait_queue(head, new);
93 	mtx_leave(&head->lock);
94 }
95 
96 static inline void
97 __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
98 {
99 	list_del(&wqe->entry);
100 }
101 
102 static inline void
103 remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
104 {
105 	mtx_enter(&head->lock);
106 	__remove_wait_queue(head, old);
107 	old->proc = NULL;
108 	mtx_leave(&head->lock);
109 }
110 
/*
 * Core sleep loop backing all of the wait_event*() macros below.
 *
 * Sleeps on `wq' under the global sch_mtx until `condition' becomes
 * true, `timo' ticks elapse (only checked when timo > 0) or, when
 * `prio' includes PCATCH, a signal arrives.  Evaluates to:
 *   -ERESTARTSYS  interrupted by a signal (msleep returned
 *                 ERESTART/EINTR);
 *   0 or 1        deadline reached: 1 if `condition' turned out true,
 *                 0 otherwise;
 *   > 0           remaining ticks when `condition' became true before
 *                 the deadline.
 * NOTE(review): with timo == 0 (wait_event()/wait_event_interruptible())
 * the result for a normal wakeup is `deadline - ticks', which may be
 * <= 0 — callers of the no-timeout variants appear to ignore the value;
 * confirm before relying on it.
 *
 * `wq'.count is raised around msleep() so waitqueue_active() sees the
 * sleeper.  Must not be called during autoconf (KASSERT(!cold)).
 */
#define __wait_event_intr_timeout(wq, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int deadline, __error;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		atomic_inc_int(&(wq).count);				\
		deadline = ticks + ret;					\
		__error = msleep(&wq, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - ticks;					\
		atomic_dec_int(&(wq).count);				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
 		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})
139 
/*
 * Sleep until `condition' gets true.  Uninterruptible, no timeout;
 * the result of the underlying sleep loop is discarded.
 */
#define wait_event(wq, condition) 		\
do {						\
	if (!(condition))			\
		__wait_event_intr_timeout(wq, condition, 0, 0); \
} while (0)
148 
/*
 * Sleep until `condition' gets true or a signal is delivered (PCATCH).
 * Evaluates to 0 when `condition' was already true, -ERESTARTSYS when
 * interrupted; otherwise passes through the sleep loop's result
 * (see the NOTE on __wait_event_intr_timeout() about its value for
 * the no-timeout case).
 */
#define wait_event_interruptible(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
156 
/*
 * Linux's "locked" variant (caller holds wq.lock there).  NOTE(review):
 * in this emulation it is byte-for-byte identical to
 * wait_event_interruptible() and takes no account of wq.lock — confirm
 * callers do not actually hold it across the sleep.
 */
#define wait_event_interruptible_locked(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
164 
/*
 * Sleep until `condition' gets true or `timo' expires.  Uninterruptible.
 *
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) ticks otherwise.  When `condition' is already
 * true the macro evaluates to `timo' unchanged.
 */
#define wait_event_timeout(wq, condition, timo)	\
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, 0); \
	__ret;					\
})
178 
/*
 * Sleep until `condition' gets true, `timo' expires or the process
 * receives a signal (PCATCH).
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) ticks otherwise.  When `condition' is already
 * true the macro evaluates to `timo' unchanged.
 */
#define wait_event_interruptible_timeout(wq, condition, timo) \
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, PCATCH);\
	__ret;					\
})
194 
195 static inline void
196 wake_up(wait_queue_head_t *wqh)
197 {
198 	wait_queue_entry_t *wqe;
199 	wait_queue_entry_t *tmp;
200 	mtx_enter(&wqh->lock);
201 
202 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
203 		if (wqe->func != NULL)
204 			wqe->func(wqe, 0, wqe->flags, NULL);
205 	}
206 	wakeup(wqh);
207 	mtx_leave(&wqh->lock);
208 }
209 
210 #define wake_up_all(wq)			wake_up(wq)
211 
212 static inline void
213 wake_up_all_locked(wait_queue_head_t *wqh)
214 {
215 	wait_queue_entry_t *wqe;
216 	wait_queue_entry_t *tmp;
217 
218 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
219 		if (wqe->func != NULL)
220 			wqe->func(wqe, 0, wqe->flags, NULL);
221 	}
222 	wakeup(wqh);
223 }
224 
225 #define wake_up_interruptible(wq)	wake_up(wq)
226 #define waitqueue_active(wq)		((wq)->count > 0)
227 
/*
 * Declare an on-stack wait entry for use with prepare_to_wait()/
 * finish_wait().  `flags' and `proc' are zero-initialized; the entry
 * list is self-linked (empty) so prepare_to_wait() will queue it.
 */
#define	DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
/* As DEFINE_WAIT() but with a caller-supplied wake callback `cb'. */
#define	DEFINE_WAIT_FUNC(name, cb)			\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = cb,				\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
240 
/*
 * Emulation of Linux's prepare_to_wait(): queue `wqe' on `wqh' (if not
 * already queued) and publish the global scheduler state (sch_proc,
 * sch_ident, sch_priority) consumed by the drm sleep path.
 *
 * On the first call for an entry (flags == 0) this takes the global
 * sch_mtx and sets flags = 1 so that the matching finish_wait()
 * releases the mutex exactly once across repeated prepare_to_wait()
 * calls.  NOTE(review): `flags' doubles as the "sch_mtx held" marker;
 * an entry starting with nonzero flags would skip the mtx_enter() and
 * trip the assertion — confirm callers always begin with flags == 0.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}
255 
/*
 * Emulation of Linux's finish_wait(): clear the global wait channel,
 * unlink `wqe' if it is still queued, and release the sch_mtx taken
 * by prepare_to_wait().  NOTE(review): wqe->flags is left at 1, so a
 * subsequent prepare_to_wait() on the same entry would skip re-taking
 * sch_mtx — confirm entries are not reused after finish_wait().
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}
265 
266 #endif
267