xref: /openbsd-src/sys/dev/pci/drm/include/linux/wait.h (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
1 /*	$OpenBSD: wait.h,v 1.7 2020/12/13 03:15:52 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013, 2014, 2015 Mark Kettenis
4  * Copyright (c) 2017 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _LINUX_WAIT_H
20 #define _LINUX_WAIT_H
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/mutex.h>
25 
26 #include <linux/list.h>
27 #include <linux/errno.h>
28 #include <linux/spinlock.h>
29 
/*
 * Compat version of Linux's struct wait_queue_entry.
 *
 * flags   - reused by prepare_to_wait() as a "sch_mtx is held" marker
 *           (0 = not held, 1 = held); also passed as the third argument
 *           to `func' by wake_up().
 * private - opaque per-waiter data; nothing in this header reads it.
 * func    - wakeup callback invoked by wake_up()/wake_up_all_locked();
 *           a NULL func is skipped.
 * proc    - sleeping process, recorded by add_wait_queue() and cleared
 *           by remove_wait_queue().
 * entry   - linkage on a wait_queue_head's list.
 */
struct wait_queue_entry {
	unsigned int flags;
	void *private;
	int (*func)(struct wait_queue_entry *, unsigned, int, void *);
	struct proc *proc;
	struct list_head entry;
};

typedef struct wait_queue_entry wait_queue_entry_t;
39 
/*
 * Global sleep handoff state shared with the drm scheduling glue:
 * prepare_to_wait() records the sleeper here under sch_mtx, which it
 * leaves locked until finish_wait().
 * NOTE(review): the definitions (and the code that consumes sch_proc/
 * sch_priority) live outside this header — confirm in drm_linux.c.
 */
extern struct mutex sch_mtx;
extern volatile struct proc *sch_proc;
extern volatile void *sch_ident;
extern int sch_priority;
44 
/*
 * Wait queue head: a list of waiters protected by `lock' (IPL_TTY,
 * see init_waitqueue_head()).  The head's address doubles as a
 * sleep/wakeup channel (msleep in __wait_event_intr_timeout, wakeup
 * in wake_up).
 */
struct wait_queue_head {
	struct mutex lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
50 
51 static inline void
52 init_waitqueue_head(wait_queue_head_t *wqh)
53 {
54 	mtx_init(&wqh->lock, IPL_TTY);
55 	INIT_LIST_HEAD(&wqh->head);
56 }
57 
/* Lock-class `name'/`key' bookkeeping has no equivalent here; drop them. */
#define __init_waitqueue_head(wq, name, key)	init_waitqueue_head(wq)

/*
 * Default wake callback that removes the entry from its queue.
 * NOTE(review): implementation lives outside this header — confirm.
 */
int autoremove_wake_function(struct wait_queue_entry *, unsigned int, int, void *);
61 
62 static inline void
63 init_wait_entry(wait_queue_entry_t *wqe, int flags)
64 {
65 	wqe->flags = flags;
66 	wqe->private = NULL;
67 	wqe->func = autoremove_wake_function;
68 	wqe->proc = NULL;
69 	INIT_LIST_HEAD(&wqe->entry);
70 }
71 
72 static inline void
73 __add_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
74 {
75 	list_add(&wqe->entry, &wqh->head);
76 }
77 
78 static inline void
79 __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
80 {
81 	list_add_tail(&wqe->entry, &wqh->head);
82 }
83 
84 static inline void
85 add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
86 {
87 	mtx_enter(&head->lock);
88 	new->proc = curproc;
89 	__add_wait_queue(head, new);
90 	mtx_leave(&head->lock);
91 }
92 
93 static inline void
94 __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
95 {
96 	list_del(&wqe->entry);
97 }
98 
99 static inline void
100 remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
101 {
102 	mtx_enter(&head->lock);
103 	__remove_wait_queue(head, old);
104 	old->proc = NULL;
105 	mtx_leave(&head->lock);
106 }
107 
/*
 * Core sleep loop shared by the wait_event_*() macros below.
 *
 * Sleeps on channel `&wq' under sch_mtx, re-checking `condition' after
 * every wakeup.  `timo' is the timeout in jiffies (0 = no timeout for
 * msleep(9)); `prio' is the msleep priority, optionally with PCATCH to
 * let signals interrupt the sleep.
 *
 * Evaluates to:
 *   -ERESTARTSYS  if the sleep was interrupted by a signal,
 *   1 or 0        condition state when a timeout (timo > 0) ran out,
 *   ret           remaining jiffies otherwise.
 * NOTE(review): with timo == 0 the post-sleep `ret' can be <= 0, which
 * also terminates the loop — callers of wait_event() ignore the value.
 */
#define __wait_event_intr_timeout(wq, condition, timo, prio)		\
({									\
	long ret = timo;						\
	do {								\
		int __error;						\
		unsigned long deadline;					\
									\
		KASSERT(!cold);						\
									\
		mtx_enter(&sch_mtx);					\
		deadline = jiffies + ret;				\
		__error = msleep(&wq, &sch_mtx, prio, "drmweti", ret);	\
		ret = deadline - jiffies;				\
		if (__error == ERESTART || __error == EINTR) {		\
			ret = -ERESTARTSYS;				\
			mtx_leave(&sch_mtx);				\
			break;						\
		}							\
		if ((timo) > 0 && (ret <= 0 || __error == EWOULDBLOCK)) { \
			mtx_leave(&sch_mtx);				\
			ret = ((condition)) ? 1 : 0;			\
			break;						\
 		}							\
		mtx_leave(&sch_mtx);					\
	} while (ret > 0 && !(condition));				\
	ret;								\
})
135 
/*
 * Sleep until `condition' gets true.  Not interruptible by signals
 * (prio 0, no PCATCH); the sleep loop's result is discarded.
 */
#define wait_event(wq, condition) 		\
do {						\
	if (!(condition))			\
		__wait_event_intr_timeout(wq, condition, 0, 0); \
} while (0)
144 
/*
 * Linux's "killable" sleep (fatal signals only).  There is no such
 * distinction here: this expands identically to
 * wait_event_interruptible(), so any signal (PCATCH) interrupts it.
 * Returns -ERESTARTSYS if interrupted by a signal, <= 0 otherwise.
 */
#define wait_event_killable(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
152 
/*
 * Sleep until `condition' gets true or the process receives a signal.
 * Returns -ERESTARTSYS if interrupted by a signal, <= 0 otherwise
 * (0 when `condition' was already true on entry).
 */
#define wait_event_interruptible(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
160 
/*
 * Linux expects the caller to hold wq.lock around this; here the
 * expansion is identical to wait_event_interruptible() and the queue
 * lock is not touched.
 */
#define wait_event_interruptible_locked(wq, condition) 		\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
	__ret;					\
})
168 
/*
 * Sleep until `condition' gets true or `timo' (jiffies) expires.
 * Not interruptible by signals.
 *
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_timeout(wq, condition, timo)	\
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, 0); \
	__ret;					\
})
182 
/*
 * Sleep until `condition' gets true, `timo' (jiffies) expires or the
 * process receives a signal.
 *
 * Returns -ERESTARTSYS if interrupted by a signal.
 * Returns 0 if `condition' is still false when `timo' expires or
 * the remaining (>=1) jiffies otherwise.
 */
#define wait_event_interruptible_timeout(wq, condition, timo) \
({						\
	long __ret = timo;			\
	if (!(condition))			\
		__ret = __wait_event_intr_timeout(wq, condition, timo, PCATCH);\
	__ret;					\
})
198 
199 static inline void
200 wake_up(wait_queue_head_t *wqh)
201 {
202 	wait_queue_entry_t *wqe;
203 	wait_queue_entry_t *tmp;
204 	mtx_enter(&wqh->lock);
205 
206 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
207 		if (wqe->func != NULL)
208 			wqe->func(wqe, 0, wqe->flags, NULL);
209 	}
210 	wakeup(wqh);
211 	mtx_leave(&wqh->lock);
212 }
213 
/* wake_up() already wakes every entry, so waking "all" is the same call. */
#define wake_up_all(wq)			wake_up(wq)
215 
216 static inline void
217 wake_up_all_locked(wait_queue_head_t *wqh)
218 {
219 	wait_queue_entry_t *wqe;
220 	wait_queue_entry_t *tmp;
221 
222 	list_for_each_entry_safe(wqe, tmp, &wqh->head, entry) {
223 		if (wqe->func != NULL)
224 			wqe->func(wqe, 0, wqe->flags, NULL);
225 	}
226 	wakeup(wqh);
227 }
228 
/* No interruptible/uninterruptible distinction among waiters here. */
#define wake_up_interruptible(wq)	wake_up(wq)
230 
/*
 * Declare an on-stack wait queue entry for use with prepare_to_wait()
 * and finish_wait().  Unnamed fields (.flags, .proc) are implicitly
 * zeroed by the designated initializer.
 */
#define	DEFINE_WAIT(name)				\
	struct wait_queue_entry name = {		\
		.private = NULL,			\
		.func = autoremove_wake_function,	\
		.entry = LIST_HEAD_INIT((name).entry),	\
	}
237 
/*
 * First half of the prepare_to_wait()/finish_wait() protocol.
 *
 * On the first call for this entry (flags == 0), acquires sch_mtx and
 * sets flags to 1 as an "sch_mtx is held" marker; the mutex stays
 * locked across the caller's sleep and is released by finish_wait().
 * Queues the entry if it is not already queued and publishes the
 * sleeper in the global sch_* handoff variables.
 * NOTE(review): sch_proc/sch_priority are consumed elsewhere in the
 * drm compat code — confirm against the sch_* definitions.
 */
static inline void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	/* wqe->flags doubles as a "we hold sch_mtx" marker. */
	if (wqe->flags == 0) {
		mtx_enter(&sch_mtx);
		wqe->flags = 1;
	}
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	sch_proc = curproc;
	sch_ident = wqe;
	sch_priority = state;
}
252 
/*
 * Second half of the prepare_to_wait() protocol: clear the published
 * sleep identity, dequeue the entry if still queued, and release
 * sch_mtx, which prepare_to_wait() left locked.
 */
static inline void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = NULL;
	/* list_del_init() leaves the entry reusable by prepare_to_wait(). */
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&sch_mtx);
}
262 
263 #endif
264