xref: /openbsd-src/sys/dev/pci/drm/include/linux/workqueue.h (revision ff0e7be1ebbcc809ea8ad2b6dafe215824da9e46)
1 /*	$OpenBSD: workqueue.h,v 1.10 2023/03/21 09:44:35 jsg Exp $	*/
2 /*
3  * Copyright (c) 2015 Mark Kettenis
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifndef _LINUX_WORKQUEUE_H
19 #define _LINUX_WORKQUEUE_H
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/task.h>
24 #include <sys/timeout.h>
25 #include <linux/bitops.h>
26 #include <linux/atomic.h>
27 #include <linux/rcupdate.h>
28 #include <linux/lockdep.h>
29 #include <linux/timer.h>
30 
31 struct workqueue_struct;
32 
33 extern struct workqueue_struct *system_wq;
34 extern struct workqueue_struct *system_highpri_wq;
35 extern struct workqueue_struct *system_unbound_wq;
36 extern struct workqueue_struct *system_long_wq;
37 
38 #define WQ_HIGHPRI	1
39 #define WQ_FREEZABLE	2
40 #define WQ_UNBOUND	4
41 
42 #define WQ_UNBOUND_MAX_ACTIVE	4	/* matches nthreads in drm_linux.c */
43 
44 static inline struct workqueue_struct *
45 alloc_workqueue(const char *name, int flags, int max_active)
46 {
47 	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
48 	return (struct workqueue_struct *)tq;
49 }
50 
51 static inline struct workqueue_struct *
52 alloc_ordered_workqueue(const char *name, int flags)
53 {
54 	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
55 	return (struct workqueue_struct *)tq;
56 }
57 
58 static inline struct workqueue_struct *
59 create_singlethread_workqueue(const char *name)
60 {
61 	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
62 	return (struct workqueue_struct *)tq;
63 }
64 
static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	/* Tear down the taskq backing this workqueue. */
	struct taskq *tq = (struct taskq *)wq;

	taskq_destroy(tq);
}
70 
struct work_struct {
	struct task task;	/* backing taskq task */
	struct taskq *tq;	/* taskq last queued on, NULL if never queued */
};
75 
76 typedef void (*work_func_t)(struct work_struct *);
77 
static inline void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	/* NULL means "never queued"; the cancel paths test for it. */
	work->tq = NULL;
	/*
	 * The task argument is the work itself, so casting func from
	 * work_func_t to the taskq callback type hands the function
	 * the pointer it expects.
	 */
	task_set(&work->task, (void (*)(void *))func, work);
}
84 
85 #define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))
86 
87 static inline bool
88 queue_work(struct workqueue_struct *wq, struct work_struct *work)
89 {
90 	work->tq = (struct taskq *)wq;
91 	return task_add(work->tq, &work->task);
92 }
93 
94 static inline void
95 cancel_work(struct work_struct *work)
96 {
97 	if (work->tq != NULL)
98 		task_del(work->tq, &work->task);
99 }
100 
static inline void
cancel_work_sync(struct work_struct *work)
{
	/*
	 * NOTE(review): identical to cancel_work() — only a still-
	 * pending task is removed; unlike Linux, a work function that
	 * is already executing is not waited for.  Confirm callers do
	 * not rely on the _sync guarantee.
	 */
	if (work->tq != NULL)
		task_del(work->tq, &work->task);
}
107 
108 #define work_pending(work)	task_pending(&(work)->task)
109 
struct delayed_work {
	struct work_struct work;	/* must stay first: the timeout arg set
					 * by INIT_DELAYED_WORK() is &work and
					 * __delayed_work_tick() casts it to
					 * struct delayed_work * */
	struct timeout to;		/* timer deferring the queueing */
	struct taskq *tq;		/* taskq to run on when timer fires */
};
115 
116 #define system_power_efficient_wq ((struct workqueue_struct *)systq)
117 
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	/* Recover the enclosing delayed_work from its embedded work. */
	return container_of(work, struct delayed_work, work);
}
123 
static void
__delayed_work_tick(void *arg)
{
	/*
	 * Timeout handler.  arg is the delayed_work — either directly
	 * (__DELAYED_WORK_INITIALIZER passes &dw) or via the address
	 * of its first member (INIT_DELAYED_WORK passes &dwork->work);
	 * both pointer values coincide because work is first.  Queue
	 * the work on the taskq recorded when it was scheduled.
	 */
	struct delayed_work *dwork = arg;

	task_add(dwork->tq, &dwork->work.task);
}
131 
132 static inline void
133 INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
134 {
135 	INIT_WORK(&dwork->work, func);
136 	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
137 }
138 
139 static inline void
140 INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
141 {
142 	INIT_WORK(&dwork->work, func);
143 	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
144 }
145 
/*
 * Static initializer equivalent of INIT_DELAYED_WORK(); the timeout
 * argument is the delayed_work itself, which __delayed_work_tick()
 * expects.  The flags argument is accepted for Linux compatibility
 * but unused.
 */
#define __DELAYED_WORK_INITIALIZER(dw, fn, flags) {			\
	.to = TIMEOUT_INITIALIZER(__delayed_work_tick, &(dw)),		\
	.tq = NULL,							\
	.work.tq = NULL,						\
	.work.task = TASK_INITIALIZER((void (*)(void *))(fn), &(dw).work)	\
}
152 
153 static inline bool
154 schedule_work(struct work_struct *work)
155 {
156 	work->tq = (struct taskq *)system_wq;
157 	return task_add(work->tq, &work->task);
158 }
159 
160 static inline bool
161 schedule_delayed_work(struct delayed_work *dwork, int jiffies)
162 {
163 	dwork->tq = (struct taskq *)system_wq;
164 	return timeout_add(&dwork->to, jiffies);
165 }
166 
167 static inline bool
168 queue_delayed_work(struct workqueue_struct *wq,
169     struct delayed_work *dwork, int jiffies)
170 {
171 	dwork->tq = (struct taskq *)wq;
172 	return timeout_add(&dwork->to, jiffies);
173 }
174 
static inline bool
mod_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	/*
	 * The test is deliberately inverted relative to the other
	 * queueing helpers: Linux mod_delayed_work() returns true
	 * when a pending timer was modified.  NOTE(review): if
	 * timeout_add() does not reschedule an already-pending
	 * timeout, the new delay is not actually applied here —
	 * confirm against timeout_add(9) semantics.
	 */
	return (timeout_add(&dwork->to, jiffies) == 0);
}
182 
183 static inline bool
184 cancel_delayed_work(struct delayed_work *dwork)
185 {
186 	if (dwork->tq == NULL)
187 		return false;
188 	if (timeout_del(&dwork->to))
189 		return true;
190 	return task_del(dwork->tq, &dwork->work.task);
191 }
192 
static inline bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	/*
	 * NOTE(review): identical to cancel_delayed_work() — the
	 * timer and pending task are removed, but a work function
	 * that is already running is not waited for, unlike the
	 * Linux _sync variant.  Confirm callers tolerate this.
	 */
	if (dwork->tq == NULL)
		return false;
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}
202 
203 static inline bool
204 delayed_work_pending(struct delayed_work *dwork)
205 {
206 	if (timeout_pending(&dwork->to))
207 		return true;
208 	return task_pending(&dwork->work.task);
209 }
210 
211 void flush_workqueue(struct workqueue_struct *);
212 bool flush_work(struct work_struct *);
213 bool flush_delayed_work(struct delayed_work *);
214 
static inline void
flush_scheduled_work(void)
{
	/* Wait for everything queued on the system workqueue. */
	flush_workqueue(system_wq);
}
220 
static inline void
drain_workqueue(struct workqueue_struct *wq)
{
	/*
	 * Approximated by a plain flush.  NOTE(review): works that
	 * requeue themselves may not be fully drained the way the
	 * Linux primitive guarantees — confirm no caller depends on
	 * that.
	 */
	flush_workqueue(wq);
}
226 
227 static inline void
228 destroy_work_on_stack(struct work_struct *work)
229 {
230 	if (work->tq)
231 		task_del(work->tq, &work->task);
232 }
233 
static inline void
destroy_delayed_work_on_stack(struct delayed_work *dwork)
{
	/*
	 * Deliberately empty.  NOTE(review): a still-armed timeout is
	 * not deleted here; callers are presumably expected to cancel
	 * the delayed work first — confirm.
	 */
}
238 
struct rcu_work {
	struct work_struct work;	/* underlying work item */
	struct rcu_head rcu;		/* not touched by the helpers in
					 * this file */
};
243 
static inline void
INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
{
	/* Only the embedded work is set up; the rcu head is untouched. */
	INIT_WORK(&work->work, func);
}
249 
static inline bool
queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
{
	/*
	 * NOTE(review): the work is queued immediately; no RCU grace
	 * period is waited for, unlike the Linux primitive.  Confirm
	 * callers do not rely on the grace-period delay.
	 */
	return queue_work(wq, &work->work);
}
255 
256 #endif
257