/*	$OpenBSD: workqueue.h,v 1.2 2019/05/11 14:39:13 jsg Exp $	*/
/*
 * Copyright (c) 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/timer.h>

/*
 * Linux workqueues are mapped onto OpenBSD taskqs: a workqueue_struct
 * is really a struct taskq, a work_struct wraps a struct task, and a
 * delayed_work adds a timeout that queues the task when it fires.
 */

struct workqueue_struct;

extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_long_wq;

#define WQ_HIGHPRI	1
#define WQ_FREEZABLE	2
#define WQ_UNBOUND	4

static inline struct workqueue_struct *
alloc_workqueue(const char *name, int flags, int max_active)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline struct workqueue_struct *
create_singlethread_workqueue(const char *name)
{
	struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
	return (struct workqueue_struct *)tq;
}

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskq_destroy((struct taskq *)wq);
}

struct work_struct {
	struct task task;
	struct taskq *tq;
};

typedef void (*work_func_t)(struct work_struct *);

static inline void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	work->tq = (struct taskq *)system_wq;
	task_set(&work->task, (void (*)(void *))func, work);
}

#define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))

static inline bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->tq = (struct taskq *)wq;
	return task_add(work->tq, &work->task);
}

static inline void
cancel_work_sync(struct work_struct *work)
{
	task_del(work->tq, &work->task);
}

#define work_pending(work)	task_pending(&(work)->task)

struct delayed_work {
	struct work_struct work;
	struct timeout to;
	struct taskq *tq;
};

#define system_power_efficient_wq ((struct workqueue_struct *)systq)

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

/* Timeout handler: queue the delayed work's task on its taskq. */
static void
__delayed_work_tick(void *arg)
{
	struct delayed_work *dwork = to_delayed_work(arg);

	task_add(dwork->tq, &dwork->work.task);
}

static inline void
INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline void
INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
{
	INIT_WORK(&dwork->work, func);
	timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
}

static inline bool
schedule_work(struct work_struct *work)
{
	return task_add(work->tq, &work->task);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)system_wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
queue_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return timeout_add(&dwork->to, jiffies);
}

static inline bool
mod_delayed_work(struct workqueue_struct *wq,
    struct delayed_work *dwork, int jiffies)
{
	dwork->tq = (struct taskq *)wq;
	return (timeout_add(&dwork->to, jiffies) == 0);
}

static inline bool
cancel_delayed_work(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	if (timeout_del(&dwork->to))
		return true;
	return task_del(dwork->tq, &dwork->work.task);
}

static inline bool
delayed_work_pending(struct delayed_work *dwork)
{
	if (timeout_pending(&dwork->to))
		return true;
	return task_pending(&dwork->work.task);
}

void flush_workqueue(struct workqueue_struct *);
bool flush_work(struct work_struct *);
bool flush_delayed_work(struct delayed_work *);
#define flush_scheduled_work()	flush_workqueue(system_wq)
#define drain_workqueue(x)	flush_workqueue(x)

#define destroy_work_on_stack(x)
#define destroy_delayed_work_on_stack(x)

#endif
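
/*
 * Usage sketch: a hypothetical driver queueing immediate and delayed
 * work through this shim.  The example_* structure and functions and
 * the example_hw_* helpers are made up for illustration; hz is the
 * OpenBSD ticks-per-second variable, so passing hz as the "jiffies"
 * argument yields roughly a one second delay.
 *
 *	struct example_softc {
 *		struct work_struct	sc_reset_work;
 *		struct delayed_work	sc_poll_work;
 *	};
 *
 *	static void
 *	example_reset(struct work_struct *work)
 *	{
 *		struct example_softc *sc =
 *		    container_of(work, struct example_softc, sc_reset_work);
 *
 *		example_hw_reset(sc);
 *	}
 *
 *	static void
 *	example_poll(struct work_struct *work)
 *	{
 *		struct example_softc *sc = container_of(to_delayed_work(work),
 *		    struct example_softc, sc_poll_work);
 *
 *		example_hw_poll(sc);
 *		schedule_delayed_work(&sc->sc_poll_work, hz);
 *	}
 *
 *	static void
 *	example_attach(struct example_softc *sc)
 *	{
 *		INIT_WORK(&sc->sc_reset_work, example_reset);
 *		INIT_DELAYED_WORK(&sc->sc_poll_work, example_poll);
 *		queue_work(system_wq, &sc->sc_reset_work);
 *		schedule_delayed_work(&sc->sc_poll_work, hz);
 *	}
 *
 *	static void
 *	example_detach(struct example_softc *sc)
 *	{
 *		cancel_work_sync(&sc->sc_reset_work);
 *		cancel_delayed_work_sync(&sc->sc_poll_work);
 *	}
 */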