/*	$OpenBSD: workqueue.h,v 1.9 2023/01/01 01:34:58 jsg Exp $	*/
/*
 * Copyright (c) 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/timer.h>
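
/*
 * Each Linux workqueue_struct is backed by a single-threaded taskq(9)
 * running at IPL_TTY; workqueue flags and max_active are ignored.
 * Delayed work arms a timeout(9) that adds the underlying task to the
 * chosen taskq once the delay expires.
 */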

struct workqueue_struct;

extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_long_wq;

#define WQ_HIGHPRI	1
#define WQ_FREEZABLE	2
#define WQ_UNBOUND	4

#define WQ_UNBOUND_MAX_ACTIVE	4	/* matches nthreads in drm_linux.c */

static inline struct workqueue_struct *
alloc_workqueue(const char *name, int flags, int max_active)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
create_singlethread_workqueue(const char *name)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskq_destroy((struct taskq *)wq);
}

struct work_struct {
	struct task task;
	struct taskq *tq;
};

typedef void (*work_func_t)(struct work_struct *);

static inline void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	work->tq = NULL;
	task_set(&work->task, (void (*)(void *))func, work);
}

#define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))

static inline bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->tq = (struct taskq *)wq;
	return task_add(work->tq, &work->task);
}

static inline void
cancel_work(struct work_struct *work)
{
	if (work->tq != NULL)
		task_del(work->tq, &work->task);
}

static inline void
cancel_work_sync(struct work_struct *work)
{
	if (work->tq != NULL)
		task_del(work->tq, &work->task);
}

#define work_pending(work)	task_pending(&(work)->task)

struct delayed_work {
	struct work_struct work;
	struct timeout to;
	struct taskq *tq;
};

#define system_power_efficient_wq ((struct workqueue_struct *)systq)

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static void
__delayed_work_tick(void *arg)
{
	struct delayed_work *dwork = arg;

	task_add(dwork->tq, &dwork->work.task);
}

static inline void
INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline void
INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

#define __DELAYED_WORK_INITIALIZER(dw, fn, flags) {			\
	.to = TIMEOUT_INITIALIZER(__delayed_work_tick, &(dw)),		\
	.tq = NULL,							\
	.work.tq = NULL,						\
	.work.task = TASK_INITIALIZER((void (*)(void *))(fn), &(dw).work) \
}

static inline bool
schedule_work(struct work_struct *work)
{
	work->tq = (struct taskq *)system_wq;
	return task_add(work->tq, &work->task);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)system_wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
queue_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
mod_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return (timeout_add(&dwork->to, jiffies) == 0);
}

static inline bool
cancel_delayed_work(struct delayed_work *dwork)
{
	if (dwork->tq == NULL)
		return false;
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	if (dwork->tq == NULL)
		return false;
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
delayed_work_pending(struct delayed_work *dwork)
{
	if (timeout_pending(&dwork->to))
		return true;
	return task_pending(&dwork->work.task);
}

void flush_workqueue(struct workqueue_struct *);
bool flush_work(struct work_struct *);
bool flush_delayed_work(struct delayed_work *);

static inline void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

static inline void
drain_workqueue(struct workqueue_struct *wq)
{
	flush_workqueue(wq);
}

static inline void
destroy_work_on_stack(struct work_struct *work)
{
	if (work->tq)
		task_del(work->tq, &work->task);
}

static inline void
destroy_delayed_work_on_stack(struct delayed_work *dwork)
{
}

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;
};

static inline void
INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
{
	INIT_WORK(&work->work, func);
}

static inline bool
queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
{
	return queue_work(wq, &work->work);
}

#endif