/*	$OpenBSD: workqueue.h,v 1.3 2020/06/08 04:48:15 jsg Exp $	*/
/*
 * Copyright (c) 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

/*
 * Emulation of the Linux workqueue API on top of taskq(9) and timeout(9).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/timer.h>

/* A workqueue_struct is simply a taskq in disguise. */
struct workqueue_struct;

extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_long_wq;

#define WQ_HIGHPRI	1
#define WQ_FREEZABLE	2
#define WQ_UNBOUND	4

#define WQ_UNBOUND_MAX_ACTIVE	4	/* matches nthreads in drm_linux.c */

/*
 * The flags and max_active arguments are ignored: every workqueue is a
 * single-threaded taskq running at IPL_TTY.
 */
static inline struct workqueue_struct *
alloc_workqueue(const char *name, int flags, int max_active)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
create_singlethread_workqueue(const char *name)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskq_destroy((struct taskq *)wq);
}

/* A work item is a task plus the taskq it was last queued on. */
struct work_struct {
	struct task task;
	struct taskq *tq;
};

typedef void (*work_func_t)(struct work_struct *);

static inline void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	work->tq = (struct taskq *)system_wq;
	task_set(&work->task, (void (*)(void *))func, work);
}

#define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))

static inline bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->tq = (struct taskq *)wq;
	return task_add(work->tq, &work->task);
}

/* Removes the task if it is still pending; does not wait for a running handler. */
static inline void
cancel_work_sync(struct work_struct *work)
{
	task_del(work->tq, &work->task);
}

#define work_pending(work)	task_pending(&(work)->task)

struct delayed_work {
	struct work_struct work;	/* must remain the first member */
	struct timeout to;
	struct taskq *tq;
};

#define system_power_efficient_wq ((struct workqueue_struct *)systq)

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

/* Timeout handler: hand the work off to its taskq once the delay expires. */
static void
__delayed_work_tick(void *arg)
{
	struct delayed_work *dwork = arg;

	task_add(dwork->tq, &dwork->work.task);
}

static inline void
INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline void
INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline bool
schedule_work(struct work_struct *work)
{
	return task_add(work->tq, &work->task);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)system_wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
queue_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return timeout_add(&dwork->to, jiffies);
}

/*
 * timeout_add(9) returns 0 if the timeout was already scheduled, so this
 * reports whether the work was already pending.
 */
static inline bool
mod_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return (timeout_add(&dwork->to, jiffies) == 0);
}

/* Cancel the timeout first; if it has already fired, remove the queued task. */
static inline bool
cancel_delayed_work(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
delayed_work_pending(struct delayed_work *dwork)
{
	if (timeout_pending(&dwork->to))
		return true;
	return task_pending(&dwork->work.task);
}

void flush_workqueue(struct workqueue_struct *);
bool flush_work(struct work_struct *);
bool flush_delayed_work(struct delayed_work *);
#define flush_scheduled_work()	flush_workqueue(system_wq)
#define drain_workqueue(x)	flush_workqueue(x)

#define destroy_work_on_stack(x)
#define destroy_delayed_work_on_stack(x)

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;
};

static inline void
INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
{
	INIT_WORK(&work->work, func);
}

/* The rcu_head is unused: the work is queued immediately, not after a grace period. */
static inline bool
queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
{
	return queue_work(wq, &work->work);
}

#endif
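
/*
 * Usage sketch: a minimal, hypothetical consumer of this shim.  The
 * example_* names below are made up for illustration only and are not
 * part of this header.  The delay argument is in ticks, since it is
 * passed straight to timeout_add(9).
 *
 *	struct example_softc {
 *		struct workqueue_struct *wq;
 *		struct work_struct reset_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	void
 *	example_reset_task(struct work_struct *work)
 *	{
 *		struct example_softc *sc =
 *		    container_of(work, struct example_softc, reset_work);
 *		... perform the reset ...
 *	}
 *
 *	void
 *	example_attach(struct example_softc *sc)
 *	{
 *		sc->wq = alloc_workqueue("examplewq", 0, 1);
 *		INIT_WORK(&sc->reset_work, example_reset_task);
 *		INIT_DELAYED_WORK(&sc->poll_work, example_poll_task);
 *	}
 *
 *	Queueing work (example_poll_task is defined like example_reset_task):
 *
 *		queue_work(sc->wq, &sc->reset_work);
 *		queue_delayed_work(sc->wq, &sc->poll_work, hz);
 *
 *	void
 *	example_detach(struct example_softc *sc)
 *	{
 *		cancel_delayed_work_sync(&sc->poll_work);
 *		cancel_work_sync(&sc->reset_work);
 *		destroy_workqueue(sc->wq);
 *	}
 */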