/*	$OpenBSD: workqueue.h,v 1.1 2019/04/14 10:14:53 jsg Exp $	*/
/*
 * Copyright (c) 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/timer.h>

struct workqueue_struct;

extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_long_wq;

#define WQ_HIGHPRI	1
#define WQ_FREEZABLE	2
#define WQ_UNBOUND	4

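/*
 * All of the workqueue constructors below map onto a single-threaded
 * taskq(9) created at IPL_TTY; the Linux flags and max_active arguments
 * are accepted for source compatibility but otherwise ignored.
 */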
static inline struct workqueue_struct *
alloc_workqueue(const char *name, int flags, int max_active)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
create_singlethread_workqueue(const char *name)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskq_destroy((struct taskq *)wq);
}

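/*
 * Illustrative use of the constructors above; "my_wq" is a placeholder
 * name, not part of this header:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_ordered_workqueue("mydrv", 0);
 *	...
 *	destroy_workqueue(my_wq);
 */
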
struct work_struct {
	struct task task;
	struct taskq *tq;
};

typedef void (*work_func_t)(struct work_struct *);

static inline void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	work->tq = (struct taskq *)system_wq;
	task_set(&work->task, (void (*)(void *))func, work);
}

#define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))

static inline bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->tq = (struct taskq *)wq;
	return task_add(work->tq, &work->task);
}

/*
 * Unlike Linux, this only removes a pending task; task_del(9) does not
 * wait for a handler that has already started running.
 */
static inline void
cancel_work_sync(struct work_struct *work)
{
	task_del(work->tq, &work->task);
}

#define work_pending(work)	task_pending(&(work)->task)

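/*
 * Illustrative usage of the plain work_struct API above; "my_handler"
 * and "my_work" are placeholder names, not part of this header:
 *
 *	void my_handler(struct work_struct *work);
 *	struct work_struct my_work;
 *
 *	INIT_WORK(&my_work, my_handler);
 *	queue_work(system_wq, &my_work);
 *	...
 *	cancel_work_sync(&my_work);
 */
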
struct delayed_work {
	struct work_struct work;
	struct timeout to;
	struct taskq *tq;
};

struct irq_work {
	struct task task;
	struct taskq *tq;
};

typedef void (*irq_work_func_t)(struct irq_work *);

static inline void
init_irq_work(struct irq_work *work, irq_work_func_t func)
{
	work->tq = (struct taskq *)system_wq;
	task_set(&work->task, (void (*)(void *))func, work);
}

static inline bool
irq_work_queue(struct irq_work *work)
{
	return task_add(work->tq, &work->task);
}

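/*
 * irq_work is emulated with the same task/taskq mechanism as work_struct,
 * so the handler runs later in a taskq thread rather than from a self-IPI
 * as it would on Linux.
 */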
#define system_power_efficient_wq ((struct workqueue_struct *)systq)

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

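/*
 * Timeout callback for delayed work.  INIT_DELAYED_WORK() hands
 * timeout_set() a pointer to dwork->work; because "work" is the first
 * member of struct delayed_work, that pointer also addresses the
 * containing delayed_work, which is what the cast below relies on.
 */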
static void
__delayed_work_tick(void *arg)
{
	struct delayed_work *dwork = arg;

	task_add(dwork->tq, &dwork->work.task);
}

static inline void
INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline void
INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline bool
schedule_work(struct work_struct *work)
{
	return task_add(work->tq, &work->task);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)system_wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
queue_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return timeout_add(&dwork->to, jiffies);
}

/*
 * timeout_add(9) returns 0 when the timeout was already pending, which
 * matches the Linux convention of returning true when pending work was
 * modified.
 */
static inline bool
mod_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return (timeout_add(&dwork->to, jiffies) == 0);
}

static inline bool
cancel_delayed_work(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

/*
 * As with cancel_work_sync(), this only removes the pending timeout or
 * task; it does not wait for a handler that is already running.
 */
static inline bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
delayed_work_pending(struct delayed_work *dwork)
{
	if (timeout_pending(&dwork->to))
		return true;
	return task_pending(&dwork->work.task);
}

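/*
 * Illustrative delayed-work usage; "my_dwork" and "my_handler" are
 * placeholder names, not part of this header.  The delay argument is
 * passed straight to timeout_add(9), so it is measured in ticks and hz
 * schedules the handler roughly one second later:
 *
 *	struct delayed_work my_dwork;
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_handler);
 *	queue_delayed_work(system_wq, &my_dwork, hz);
 *	...
 *	cancel_delayed_work_sync(&my_dwork);
 */
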
void flush_workqueue(struct workqueue_struct *);
bool flush_work(struct work_struct *);
bool flush_delayed_work(struct delayed_work *);
#define flush_scheduled_work()	flush_workqueue(system_wq)
#define drain_workqueue(x)	flush_workqueue(x)

#define destroy_work_on_stack(x)
#define destroy_delayed_work_on_stack(x)

#endif