1ca2ad6bdSHans Petter Selasky /*-
216732c19SHans Petter Selasky * Copyright (c) 2017-2019 Hans Petter Selasky
3ca2ad6bdSHans Petter Selasky * All rights reserved.
4ca2ad6bdSHans Petter Selasky *
5ca2ad6bdSHans Petter Selasky * Redistribution and use in source and binary forms, with or without
6ca2ad6bdSHans Petter Selasky * modification, are permitted provided that the following conditions
7ca2ad6bdSHans Petter Selasky * are met:
8ca2ad6bdSHans Petter Selasky * 1. Redistributions of source code must retain the above copyright
9ca2ad6bdSHans Petter Selasky * notice unmodified, this list of conditions, and the following
10ca2ad6bdSHans Petter Selasky * disclaimer.
11ca2ad6bdSHans Petter Selasky * 2. Redistributions in binary form must reproduce the above copyright
12ca2ad6bdSHans Petter Selasky * notice, this list of conditions and the following disclaimer in the
13ca2ad6bdSHans Petter Selasky * documentation and/or other materials provided with the distribution.
14ca2ad6bdSHans Petter Selasky *
15ca2ad6bdSHans Petter Selasky * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16ca2ad6bdSHans Petter Selasky * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17ca2ad6bdSHans Petter Selasky * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18ca2ad6bdSHans Petter Selasky * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19ca2ad6bdSHans Petter Selasky * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20ca2ad6bdSHans Petter Selasky * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21ca2ad6bdSHans Petter Selasky * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22ca2ad6bdSHans Petter Selasky * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23ca2ad6bdSHans Petter Selasky * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24ca2ad6bdSHans Petter Selasky * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25ca2ad6bdSHans Petter Selasky */
26ca2ad6bdSHans Petter Selasky
27ca2ad6bdSHans Petter Selasky #include <sys/cdefs.h>
28ca2ad6bdSHans Petter Selasky #include <linux/workqueue.h>
29ca2ad6bdSHans Petter Selasky #include <linux/wait.h>
30ca2ad6bdSHans Petter Selasky #include <linux/compat.h>
31ca2ad6bdSHans Petter Selasky #include <linux/spinlock.h>
322491b25cSEmmanuel Vadot #include <linux/rcupdate.h>
33ec25b6faSVladimir Kondratyev #include <linux/irq_work.h>
34ca2ad6bdSHans Petter Selasky
35ca2ad6bdSHans Petter Selasky #include <sys/kernel.h>
36ca2ad6bdSHans Petter Selasky
/*
 * Define all work struct states
 *
 * These are the values held in work_struct::state and driven through
 * the per-function transition tables by linux_update_state().
 */
enum {
	WORK_ST_IDLE,			/* idle - not started */
	WORK_ST_TIMER,			/* timer is being started */
	WORK_ST_TASK,			/* taskqueue is being queued */
	WORK_ST_EXEC,			/* callback is being called */
	WORK_ST_CANCEL,			/* cancel is being requested */
	WORK_ST_MAX,			/* number of states / table size */
};
48ca2ad6bdSHans Petter Selasky
/*
 * Define global workqueues
 */
static struct workqueue_struct *linux_system_short_wq;
static struct workqueue_struct *linux_system_long_wq;

/* Linux-visible system workqueue pointers, aliased to the above. */
struct workqueue_struct *system_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_power_efficient_wq;

/* Taskqueue backing the LinuxKPI irq_work framework. */
struct taskqueue *linux_irq_work_tq;

/* Default thread count used when sizing workqueues. */
static int linux_default_wq_cpus = 4;

static void linux_delayed_work_timer_fn(void *);
66ca2ad6bdSHans Petter Selasky
67ca2ad6bdSHans Petter Selasky /*
68ca2ad6bdSHans Petter Selasky * This function atomically updates the work state and returns the
69ca2ad6bdSHans Petter Selasky * previous state at the time of update.
70ca2ad6bdSHans Petter Selasky */
7143ee32f7SHans Petter Selasky static uint8_t
linux_update_state(atomic_t * v,const uint8_t * pstate)72ca2ad6bdSHans Petter Selasky linux_update_state(atomic_t *v, const uint8_t *pstate)
73ca2ad6bdSHans Petter Selasky {
74ca2ad6bdSHans Petter Selasky int c, old;
75ca2ad6bdSHans Petter Selasky
76ca2ad6bdSHans Petter Selasky c = v->counter;
77ca2ad6bdSHans Petter Selasky
78ca2ad6bdSHans Petter Selasky while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
79ca2ad6bdSHans Petter Selasky c = old;
80ca2ad6bdSHans Petter Selasky
81ca2ad6bdSHans Petter Selasky return (c);
82ca2ad6bdSHans Petter Selasky }
83ca2ad6bdSHans Petter Selasky
84ca2ad6bdSHans Petter Selasky /*
85ca2ad6bdSHans Petter Selasky * A LinuxKPI task is allowed to free itself inside the callback function
86ca2ad6bdSHans Petter Selasky * and cannot safely be referred after the callback function has
87ca2ad6bdSHans Petter Selasky * completed. This function gives the linux_work_fn() function a hint,
88ca2ad6bdSHans Petter Selasky * that the task is not going away and can have its state checked
89ca2ad6bdSHans Petter Selasky * again. Without this extra hint LinuxKPI tasks cannot be serialized
908cf0d094SGordon Bergling * across multiple worker threads.
91ca2ad6bdSHans Petter Selasky */
9243ee32f7SHans Petter Selasky static bool
linux_work_exec_unblock(struct work_struct * work)93ca2ad6bdSHans Petter Selasky linux_work_exec_unblock(struct work_struct *work)
94ca2ad6bdSHans Petter Selasky {
95ca2ad6bdSHans Petter Selasky struct workqueue_struct *wq;
96ca2ad6bdSHans Petter Selasky struct work_exec *exec;
974c8ba7d9SHans Petter Selasky bool retval = false;
98ca2ad6bdSHans Petter Selasky
99ca2ad6bdSHans Petter Selasky wq = work->work_queue;
100ca2ad6bdSHans Petter Selasky if (unlikely(wq == NULL))
101ca2ad6bdSHans Petter Selasky goto done;
102ca2ad6bdSHans Petter Selasky
103ca2ad6bdSHans Petter Selasky WQ_EXEC_LOCK(wq);
104ca2ad6bdSHans Petter Selasky TAILQ_FOREACH(exec, &wq->exec_head, entry) {
105ca2ad6bdSHans Petter Selasky if (exec->target == work) {
106ca2ad6bdSHans Petter Selasky exec->target = NULL;
1074c8ba7d9SHans Petter Selasky retval = true;
108ca2ad6bdSHans Petter Selasky break;
109ca2ad6bdSHans Petter Selasky }
110ca2ad6bdSHans Petter Selasky }
111ca2ad6bdSHans Petter Selasky WQ_EXEC_UNLOCK(wq);
112ca2ad6bdSHans Petter Selasky done:
113ca2ad6bdSHans Petter Selasky return (retval);
114ca2ad6bdSHans Petter Selasky }
115ca2ad6bdSHans Petter Selasky
116ca2ad6bdSHans Petter Selasky static void
linux_delayed_work_enqueue(struct delayed_work * dwork)117ca2ad6bdSHans Petter Selasky linux_delayed_work_enqueue(struct delayed_work *dwork)
118ca2ad6bdSHans Petter Selasky {
119ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
120ca2ad6bdSHans Petter Selasky
121ca2ad6bdSHans Petter Selasky tq = dwork->work.work_queue->taskqueue;
122ca2ad6bdSHans Petter Selasky taskqueue_enqueue(tq, &dwork->work.work_task);
123ca2ad6bdSHans Petter Selasky }
124ca2ad6bdSHans Petter Selasky
/*
 * This function queues the given work structure on the given
 * workqueue. It returns non-zero if the work was successfully
 * [re-]queued. Else the work is already pending for completion.
 */
bool
linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
    struct work_struct *work)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TASK,		/* start queuing task */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TASK,		/* queue task another time */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* start queuing task again */
	};

	/*
	 * A draining workqueue accepts no new work; report success
	 * only when the work is not already pending.
	 */
	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(work));

	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		/*
		 * If an executor is currently running this work,
		 * unblocking it is enough to get the callback invoked
		 * one more time - no re-enqueue needed.
		 */
		if (linux_work_exec_unblock(work) != 0)
			return (true);
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		work->work_queue = wq;
		taskqueue_enqueue(wq->taskqueue, &work->work_task);
		return (true);
	default:
		return (false);		/* already on a queue */
	}
}
159ca2ad6bdSHans Petter Selasky
160ca2ad6bdSHans Petter Selasky /*
1612491b25cSEmmanuel Vadot * Callback func for linux_queue_rcu_work
1622491b25cSEmmanuel Vadot */
1632491b25cSEmmanuel Vadot static void
rcu_work_func(struct rcu_head * rcu)1642491b25cSEmmanuel Vadot rcu_work_func(struct rcu_head *rcu)
1652491b25cSEmmanuel Vadot {
1662491b25cSEmmanuel Vadot struct rcu_work *rwork;
1672491b25cSEmmanuel Vadot
1682491b25cSEmmanuel Vadot rwork = container_of(rcu, struct rcu_work, rcu);
1692491b25cSEmmanuel Vadot linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1702491b25cSEmmanuel Vadot }
1712491b25cSEmmanuel Vadot
1722491b25cSEmmanuel Vadot /*
1732491b25cSEmmanuel Vadot * This function queue a work after a grace period
1742491b25cSEmmanuel Vadot * If the work was already pending it returns false,
1752491b25cSEmmanuel Vadot * if not it calls call_rcu and returns true.
1762491b25cSEmmanuel Vadot */
1772491b25cSEmmanuel Vadot bool
linux_queue_rcu_work(struct workqueue_struct * wq,struct rcu_work * rwork)1782491b25cSEmmanuel Vadot linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1792491b25cSEmmanuel Vadot {
1802491b25cSEmmanuel Vadot
1812491b25cSEmmanuel Vadot if (!linux_work_pending(&rwork->work)) {
1822491b25cSEmmanuel Vadot rwork->wq = wq;
1832491b25cSEmmanuel Vadot linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
1842491b25cSEmmanuel Vadot return (true);
1852491b25cSEmmanuel Vadot }
1862491b25cSEmmanuel Vadot return (false);
1872491b25cSEmmanuel Vadot }
1882491b25cSEmmanuel Vadot
1892491b25cSEmmanuel Vadot /*
1902491b25cSEmmanuel Vadot * This function waits for the last execution of a work and then
1912491b25cSEmmanuel Vadot * flush the work.
1922491b25cSEmmanuel Vadot * It returns true if the work was pending and we waited, it returns
1932491b25cSEmmanuel Vadot * false otherwise.
1942491b25cSEmmanuel Vadot */
1952491b25cSEmmanuel Vadot bool
linux_flush_rcu_work(struct rcu_work * rwork)1962491b25cSEmmanuel Vadot linux_flush_rcu_work(struct rcu_work *rwork)
1972491b25cSEmmanuel Vadot {
1982491b25cSEmmanuel Vadot
1992491b25cSEmmanuel Vadot if (linux_work_pending(&rwork->work)) {
2002491b25cSEmmanuel Vadot linux_rcu_barrier(RCU_TYPE_REGULAR);
2012491b25cSEmmanuel Vadot linux_flush_work(&rwork->work);
2022491b25cSEmmanuel Vadot return (true);
2032491b25cSEmmanuel Vadot }
2042491b25cSEmmanuel Vadot return (linux_flush_work(&rwork->work));
2052491b25cSEmmanuel Vadot }
2062491b25cSEmmanuel Vadot
/*
 * This function queues the given work structure on the given
 * workqueue after a given delay in ticks. It returns true if the
 * work was successfully [re-]queued. Else the work is already pending
 * for completion.
 */
bool
linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    struct delayed_work *dwork, unsigned delay)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_CANCEL] = WORK_ST_TIMER,	/* start timeout */
	};
	bool res;

	/* A draining workqueue accepts no new work. */
	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(&dwork->work));

	/*
	 * The timer mutex serializes this function against the timer
	 * callback and the cancel functions while the state and the
	 * callout are updated together.
	 */
	mtx_lock(&dwork->timer.mtx);
	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
			/* executor will re-run the work - no timer needed */
			dwork->timer.expires = jiffies;
			res = true;
			goto out;
		}
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		dwork->work.work_queue = wq;
		dwork->timer.expires = jiffies + delay;

		if (delay == 0) {
			/* no timeout - enqueue the task directly */
			linux_delayed_work_enqueue(dwork);
		} else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
			/* arm the timer on the requested CPU */
			callout_reset_on(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork, cpu);
		} else {
			callout_reset(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork);
		}
		res = true;
		break;
	default:
		res = false;	/* already pending */
		break;
	}
out:
	mtx_unlock(&dwork->timer.mtx);
	return (res);
}
262ca2ad6bdSHans Petter Selasky
/*
 * Taskqueue handler executing a work structure. The executor loops
 * for as long as the work keeps getting re-queued concurrently,
 * thereby serializing repeated executions of the same work.
 */
void
linux_work_fn(void *context, int pending)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_EXEC,		/* delayed work w/o timeout */
		[WORK_ST_TASK] = WORK_ST_EXEC,		/* call callback */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* complete callback */
		[WORK_ST_CANCEL] = WORK_ST_EXEC,	/* failed to cancel */
	};
	struct work_struct *work;
	struct workqueue_struct *wq;
	struct work_exec exec;
	struct task_struct *task;

	task = current;

	/* setup local variables */
	work = context;
	wq = work->work_queue;

	/* store target pointer */
	exec.target = work;

	/* insert executor into list */
	WQ_EXEC_LOCK(wq);
	TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
	while (1) {
		switch (linux_update_state(&work->state, states)) {
		case WORK_ST_TIMER:
		case WORK_ST_TASK:
		case WORK_ST_CANCEL:
			WQ_EXEC_UNLOCK(wq);

			/* set current work structure */
			task->work = work;

			/*
			 * Call work function. NOTE: the callback may
			 * free the work structure; "work" may only be
			 * referenced again if exec.target was cleared
			 * by linux_work_exec_unblock().
			 */
			work->func(work);

			/* clear current work structure */
			task->work = NULL;

			WQ_EXEC_LOCK(wq);
			/* check if unblocked */
			if (exec.target != work) {
				/* reapply block */
				exec.target = work;
				break;
			}
			/* FALLTHROUGH */
		default:
			goto done;
		}
	}
done:
	/* remove executor from list */
	TAILQ_REMOVE(&wq->exec_head, &exec, entry);
	WQ_EXEC_UNLOCK(wq);
}
323ca2ad6bdSHans Petter Selasky
32487a567f1SHans Petter Selasky void
linux_delayed_work_fn(void * context,int pending)32587a567f1SHans Petter Selasky linux_delayed_work_fn(void *context, int pending)
32687a567f1SHans Petter Selasky {
32787a567f1SHans Petter Selasky struct delayed_work *dwork = context;
32887a567f1SHans Petter Selasky
32987a567f1SHans Petter Selasky /*
33087a567f1SHans Petter Selasky * Make sure the timer belonging to the delayed work gets
33187a567f1SHans Petter Selasky * drained before invoking the work function. Else the timer
33287a567f1SHans Petter Selasky * mutex may still be in use which can lead to use-after-free
33387a567f1SHans Petter Selasky * situations, because the work function might free the work
33487a567f1SHans Petter Selasky * structure before returning.
33587a567f1SHans Petter Selasky */
33687a567f1SHans Petter Selasky callout_drain(&dwork->timer.callout);
33787a567f1SHans Petter Selasky
33887a567f1SHans Petter Selasky linux_work_fn(&dwork->work, pending);
33987a567f1SHans Petter Selasky }
34087a567f1SHans Petter Selasky
/*
 * Callout handler for delayed work: promotes the work from the timer
 * state to the task state and enqueues it on the taskqueue.
 */
static void
linux_delayed_work_timer_fn(void *arg)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TASK,		/* start queueing task */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* failed to cancel */
	};
	struct delayed_work *dwork = arg;

	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		/* the timeout fired (or beat a cancel) - queue the task */
		linux_delayed_work_enqueue(dwork);
		break;
	default:
		break;
	}
}
362ca2ad6bdSHans Petter Selasky
/*
 * This function cancels the given work structure in a
 * non-blocking fashion. It returns non-zero if the work was
 * successfully cancelled. Else the work may still be busy or already
 * cancelled.
 */
bool
linux_cancel_work(struct work_struct *work)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* can't happen */
	};
	struct taskqueue *tq;

	/* TIMER and CANCEL states only occur for delayed work */
	MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
	MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);

	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_TASK:
		tq = work->work_queue->taskqueue;
		/* cancellation succeeds only if the task has not run yet */
		if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
			return (true);
		/* FALLTHROUGH */
	default:
		return (false);
	}
}
394*1b2f43a7SVladimir Kondratyev
/*
 * This function cancels the given work structure in a synchronous
 * fashion. It returns non-zero if the work was successfully
 * cancelled. Else the work was already cancelled.
 */
bool
linux_cancel_work_sync(struct work_struct *work)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	bool retval = false;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_work_sync() might sleep");
retry:
	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_IDLE:
	case WORK_ST_TIMER:
		/* nothing left to cancel; report accumulated result */
		return (retval);
	case WORK_ST_EXEC:
		tq = work->work_queue->taskqueue;
		/* too late to cancel - wait for the callback to finish */
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		goto retry;	/* work may have restarted itself */
	default:
		tq = work->work_queue->taskqueue;
		/* cancel the pending task and drain any running one */
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		retval = true;
		goto retry;
	}
}
433ca2ad6bdSHans Petter Selasky
434ca2ad6bdSHans Petter Selasky /*
435ca2ad6bdSHans Petter Selasky * This function atomically stops the timer and callback. The timer
436ca2ad6bdSHans Petter Selasky * callback will not be called after this function returns. This
437ca2ad6bdSHans Petter Selasky * functions returns true when the timeout was cancelled. Else the
438ca2ad6bdSHans Petter Selasky * timeout was not started or has already been called.
439ca2ad6bdSHans Petter Selasky */
440ca2ad6bdSHans Petter Selasky static inline bool
linux_cancel_timer(struct delayed_work * dwork,bool drain)441ca2ad6bdSHans Petter Selasky linux_cancel_timer(struct delayed_work *dwork, bool drain)
442ca2ad6bdSHans Petter Selasky {
443ca2ad6bdSHans Petter Selasky bool cancelled;
444ca2ad6bdSHans Petter Selasky
445ca2ad6bdSHans Petter Selasky mtx_lock(&dwork->timer.mtx);
446ca2ad6bdSHans Petter Selasky cancelled = (callout_stop(&dwork->timer.callout) == 1);
447ca2ad6bdSHans Petter Selasky mtx_unlock(&dwork->timer.mtx);
448ca2ad6bdSHans Petter Selasky
449ca2ad6bdSHans Petter Selasky /* check if we should drain */
450ca2ad6bdSHans Petter Selasky if (drain)
451ca2ad6bdSHans Petter Selasky callout_drain(&dwork->timer.callout);
452ca2ad6bdSHans Petter Selasky return (cancelled);
453ca2ad6bdSHans Petter Selasky }
454ca2ad6bdSHans Petter Selasky
/*
 * This function cancels the given delayed work structure in a
 * non-blocking fashion. It returns non-zero if the work was
 * successfully cancelled. Else the work may still be busy or already
 * cancelled.
 */
bool
linux_cancel_delayed_work(struct delayed_work *dwork)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
	};
	struct taskqueue *tq;
	bool cancelled;

	/* serialize against the timer callback and re-queueing */
	mtx_lock(&dwork->timer.mtx);
	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		cancelled = (callout_stop(&dwork->timer.callout) == 1);
		if (cancelled) {
			/* timer stopped before firing - back to idle */
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	case WORK_ST_TASK:
		tq = dwork->work.work_queue->taskqueue;
		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
			/* task removed before running - back to idle */
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	default:
		mtx_unlock(&dwork->timer.mtx);
		return (false);
	}
}
500ca2ad6bdSHans Petter Selasky
/*
 * This function cancels the given work structure in a synchronous
 * fashion. It returns true if the work was successfully
 * cancelled. Else the work was already cancelled.
 */
static bool
linux_cancel_delayed_work_sync_int(struct delayed_work *dwork)
{
	/* Transition table indexed by the current work state. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	int ret, state;
	bool cancelled;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_delayed_work_sync() might sleep");
	/* serialize against the timer callback and re-queueing */
	mtx_lock(&dwork->timer.mtx);

	state = linux_update_state(&dwork->work.state, states);
	switch (state) {
	case WORK_ST_IDLE:
		mtx_unlock(&dwork->timer.mtx);
		return (false);
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		/* stop the timer first, then the task it would queue */
		cancelled = (callout_stop(&dwork->timer.callout) == 1);

		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);

		/* drain outside the mutex to avoid deadlock */
		callout_drain(&dwork->timer.callout);
		taskqueue_drain(tq, &dwork->work.work_task);
		return (cancelled || (ret != 0));
	default:
		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);
		if (ret != 0)
			taskqueue_drain(tq, &dwork->work.work_task);
		return (ret != 0);
	}
}
549ca2ad6bdSHans Petter Selasky
55005fe8245SKonstantin Belousov bool
linux_cancel_delayed_work_sync(struct delayed_work * dwork)55105fe8245SKonstantin Belousov linux_cancel_delayed_work_sync(struct delayed_work *dwork)
55205fe8245SKonstantin Belousov {
55305fe8245SKonstantin Belousov bool res;
55405fe8245SKonstantin Belousov
55505fe8245SKonstantin Belousov res = false;
55605fe8245SKonstantin Belousov while (linux_cancel_delayed_work_sync_int(dwork))
55705fe8245SKonstantin Belousov res = true;
55805fe8245SKonstantin Belousov return (res);
55905fe8245SKonstantin Belousov }
56005fe8245SKonstantin Belousov
561ca2ad6bdSHans Petter Selasky /*
562ca2ad6bdSHans Petter Selasky * This function waits until the given work structure is completed.
563ca2ad6bdSHans Petter Selasky * It returns non-zero if the work was successfully
564ca2ad6bdSHans Petter Selasky * waited for. Else the work was not waited for.
565ca2ad6bdSHans Petter Selasky */
566ca2ad6bdSHans Petter Selasky bool
linux_flush_work(struct work_struct * work)567ca2ad6bdSHans Petter Selasky linux_flush_work(struct work_struct *work)
568ca2ad6bdSHans Petter Selasky {
569ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
5704c8ba7d9SHans Petter Selasky bool retval;
571ca2ad6bdSHans Petter Selasky
572ca2ad6bdSHans Petter Selasky WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
573ca2ad6bdSHans Petter Selasky "linux_flush_work() might sleep");
574ca2ad6bdSHans Petter Selasky
575ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
576ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
5774c8ba7d9SHans Petter Selasky return (false);
578ca2ad6bdSHans Petter Selasky default:
579ca2ad6bdSHans Petter Selasky tq = work->work_queue->taskqueue;
580b44247b1SHans Petter Selasky retval = taskqueue_poll_is_busy(tq, &work->work_task);
581ca2ad6bdSHans Petter Selasky taskqueue_drain(tq, &work->work_task);
582b44247b1SHans Petter Selasky return (retval);
583ca2ad6bdSHans Petter Selasky }
584ca2ad6bdSHans Petter Selasky }
585ca2ad6bdSHans Petter Selasky
586ca2ad6bdSHans Petter Selasky /*
587ca2ad6bdSHans Petter Selasky * This function waits until the given delayed work structure is
588ca2ad6bdSHans Petter Selasky * completed. It returns non-zero if the work was successfully waited
589ca2ad6bdSHans Petter Selasky * for. Else the work was not waited for.
590ca2ad6bdSHans Petter Selasky */
591ca2ad6bdSHans Petter Selasky bool
linux_flush_delayed_work(struct delayed_work * dwork)592ca2ad6bdSHans Petter Selasky linux_flush_delayed_work(struct delayed_work *dwork)
593ca2ad6bdSHans Petter Selasky {
594ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
5954c8ba7d9SHans Petter Selasky bool retval;
596ca2ad6bdSHans Petter Selasky
597ca2ad6bdSHans Petter Selasky WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
598ca2ad6bdSHans Petter Selasky "linux_flush_delayed_work() might sleep");
599ca2ad6bdSHans Petter Selasky
600ca2ad6bdSHans Petter Selasky switch (atomic_read(&dwork->work.state)) {
601ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
6024c8ba7d9SHans Petter Selasky return (false);
603ca2ad6bdSHans Petter Selasky case WORK_ST_TIMER:
604ca2ad6bdSHans Petter Selasky if (linux_cancel_timer(dwork, 1))
605ca2ad6bdSHans Petter Selasky linux_delayed_work_enqueue(dwork);
606ca2ad6bdSHans Petter Selasky /* FALLTHROUGH */
607ca2ad6bdSHans Petter Selasky default:
608ca2ad6bdSHans Petter Selasky tq = dwork->work.work_queue->taskqueue;
609b44247b1SHans Petter Selasky retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
610ca2ad6bdSHans Petter Selasky taskqueue_drain(tq, &dwork->work.work_task);
611b44247b1SHans Petter Selasky return (retval);
612ca2ad6bdSHans Petter Selasky }
613ca2ad6bdSHans Petter Selasky }
614ca2ad6bdSHans Petter Selasky
615ca2ad6bdSHans Petter Selasky /*
616ca2ad6bdSHans Petter Selasky * This function returns true if the given work is pending, and not
617ca2ad6bdSHans Petter Selasky * yet executing:
618ca2ad6bdSHans Petter Selasky */
619ca2ad6bdSHans Petter Selasky bool
linux_work_pending(struct work_struct * work)620ca2ad6bdSHans Petter Selasky linux_work_pending(struct work_struct *work)
621ca2ad6bdSHans Petter Selasky {
622ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
623ca2ad6bdSHans Petter Selasky case WORK_ST_TIMER:
624ca2ad6bdSHans Petter Selasky case WORK_ST_TASK:
6253a150601SAlexander Motin case WORK_ST_CANCEL:
6264c8ba7d9SHans Petter Selasky return (true);
627ca2ad6bdSHans Petter Selasky default:
6284c8ba7d9SHans Petter Selasky return (false);
629ca2ad6bdSHans Petter Selasky }
630ca2ad6bdSHans Petter Selasky }
631ca2ad6bdSHans Petter Selasky
632ca2ad6bdSHans Petter Selasky /*
633ca2ad6bdSHans Petter Selasky * This function returns true if the given work is busy.
634ca2ad6bdSHans Petter Selasky */
635ca2ad6bdSHans Petter Selasky bool
linux_work_busy(struct work_struct * work)636ca2ad6bdSHans Petter Selasky linux_work_busy(struct work_struct *work)
637ca2ad6bdSHans Petter Selasky {
638ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
639ca2ad6bdSHans Petter Selasky
640ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
641ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
6424c8ba7d9SHans Petter Selasky return (false);
643ca2ad6bdSHans Petter Selasky case WORK_ST_EXEC:
644ca2ad6bdSHans Petter Selasky tq = work->work_queue->taskqueue;
645ca2ad6bdSHans Petter Selasky return (taskqueue_poll_is_busy(tq, &work->work_task));
646ca2ad6bdSHans Petter Selasky default:
6474c8ba7d9SHans Petter Selasky return (true);
648ca2ad6bdSHans Petter Selasky }
649ca2ad6bdSHans Petter Selasky }
650ca2ad6bdSHans Petter Selasky
651ca2ad6bdSHans Petter Selasky struct workqueue_struct *
linux_create_workqueue_common(const char * name,int cpus)652ca2ad6bdSHans Petter Selasky linux_create_workqueue_common(const char *name, int cpus)
653ca2ad6bdSHans Petter Selasky {
654ca2ad6bdSHans Petter Selasky struct workqueue_struct *wq;
655ca2ad6bdSHans Petter Selasky
6567a742c41SHans Petter Selasky /*
6577a742c41SHans Petter Selasky * If zero CPUs are specified use the default number of CPUs:
6587a742c41SHans Petter Selasky */
6597a742c41SHans Petter Selasky if (cpus == 0)
6607a742c41SHans Petter Selasky cpus = linux_default_wq_cpus;
6617a742c41SHans Petter Selasky
662ca2ad6bdSHans Petter Selasky wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
663ca2ad6bdSHans Petter Selasky wq->taskqueue = taskqueue_create(name, M_WAITOK,
664ca2ad6bdSHans Petter Selasky taskqueue_thread_enqueue, &wq->taskqueue);
665ca2ad6bdSHans Petter Selasky atomic_set(&wq->draining, 0);
666ca2ad6bdSHans Petter Selasky taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
667ca2ad6bdSHans Petter Selasky TAILQ_INIT(&wq->exec_head);
668ca2ad6bdSHans Petter Selasky mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);
669ca2ad6bdSHans Petter Selasky
670ca2ad6bdSHans Petter Selasky return (wq);
671ca2ad6bdSHans Petter Selasky }
672ca2ad6bdSHans Petter Selasky
void
linux_destroy_workqueue(struct workqueue_struct *wq)
{
	/*
	 * Flag the queue as draining; presumably the enqueue path
	 * checks this to refuse new work -- confirm against the
	 * submission code earlier in this file.
	 */
	atomic_inc(&wq->draining);
	/* Wait for all outstanding work items to finish. */
	drain_workqueue(wq);
	/* Teardown order matters: free the taskqueue before the lock. */
	taskqueue_free(wq->taskqueue);
	mtx_destroy(&wq->exec_mtx);
	kfree(wq);
}
682ca2ad6bdSHans Petter Selasky
void
linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
	/* Start from a clean slate; all state fields become zero (IDLE). */
	memset(dwork, 0, sizeof(*dwork));
	dwork->work.func = func;
	TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
	/*
	 * MTX_NOWITNESS: this per-work mutex is exempted from witness
	 * lock-order checking.  The callout is bound to the mutex, so
	 * the callout handler runs with timer.mtx held.
	 */
	mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
	    MTX_DEF | MTX_NOWITNESS);
	callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
}
693ca2ad6bdSHans Petter Selasky
struct work_struct *
linux_current_work(void)
{
	/* Work item tracked for the current thread; NULL when not set. */
	return (current->work);
}
699549dcdb3SHans Petter Selasky
700ca2ad6bdSHans Petter Selasky static void
linux_work_init(void * arg)701ca2ad6bdSHans Petter Selasky linux_work_init(void *arg)
702ca2ad6bdSHans Petter Selasky {
703ca2ad6bdSHans Petter Selasky int max_wq_cpus = mp_ncpus + 1;
704ca2ad6bdSHans Petter Selasky
705ca2ad6bdSHans Petter Selasky /* avoid deadlock when there are too few threads */
706ca2ad6bdSHans Petter Selasky if (max_wq_cpus < 4)
707ca2ad6bdSHans Petter Selasky max_wq_cpus = 4;
708ca2ad6bdSHans Petter Selasky
7097a742c41SHans Petter Selasky /* set default number of CPUs */
7107a742c41SHans Petter Selasky linux_default_wq_cpus = max_wq_cpus;
7117a742c41SHans Petter Selasky
712ca2ad6bdSHans Petter Selasky linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
713ca2ad6bdSHans Petter Selasky linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);
714ca2ad6bdSHans Petter Selasky
715ca2ad6bdSHans Petter Selasky /* populate the workqueue pointers */
716ca2ad6bdSHans Petter Selasky system_long_wq = linux_system_long_wq;
717ca2ad6bdSHans Petter Selasky system_wq = linux_system_short_wq;
718ca2ad6bdSHans Petter Selasky system_power_efficient_wq = linux_system_short_wq;
719ca2ad6bdSHans Petter Selasky system_unbound_wq = linux_system_short_wq;
7207a13eebaSHans Petter Selasky system_highpri_wq = linux_system_short_wq;
721ca2ad6bdSHans Petter Selasky }
7229657edd7SConrad Meyer SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);
723ca2ad6bdSHans Petter Selasky
724ca2ad6bdSHans Petter Selasky static void
linux_work_uninit(void * arg)725ca2ad6bdSHans Petter Selasky linux_work_uninit(void *arg)
726ca2ad6bdSHans Petter Selasky {
727ca2ad6bdSHans Petter Selasky destroy_workqueue(linux_system_short_wq);
728ca2ad6bdSHans Petter Selasky destroy_workqueue(linux_system_long_wq);
729ca2ad6bdSHans Petter Selasky
730ca2ad6bdSHans Petter Selasky /* clear workqueue pointers */
731ca2ad6bdSHans Petter Selasky system_long_wq = NULL;
732ca2ad6bdSHans Petter Selasky system_wq = NULL;
733ca2ad6bdSHans Petter Selasky system_power_efficient_wq = NULL;
734ca2ad6bdSHans Petter Selasky system_unbound_wq = NULL;
7357a13eebaSHans Petter Selasky system_highpri_wq = NULL;
736ca2ad6bdSHans Petter Selasky }
7379657edd7SConrad Meyer SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);
738ec25b6faSVladimir Kondratyev
739ec25b6faSVladimir Kondratyev void
linux_irq_work_fn(void * context,int pending)740ec25b6faSVladimir Kondratyev linux_irq_work_fn(void *context, int pending)
741ec25b6faSVladimir Kondratyev {
742ec25b6faSVladimir Kondratyev struct irq_work *irqw = context;
743ec25b6faSVladimir Kondratyev
744ec25b6faSVladimir Kondratyev irqw->func(irqw);
745ec25b6faSVladimir Kondratyev }
746ec25b6faSVladimir Kondratyev
747ec25b6faSVladimir Kondratyev static void
linux_irq_work_init_fn(void * context,int pending)748ec25b6faSVladimir Kondratyev linux_irq_work_init_fn(void *context, int pending)
749ec25b6faSVladimir Kondratyev {
750ec25b6faSVladimir Kondratyev /*
751ec25b6faSVladimir Kondratyev * LinuxKPI performs lazy allocation of memory structures required by
752ec25b6faSVladimir Kondratyev * current on the first access to it. As some irq_work clients read
753ec25b6faSVladimir Kondratyev * it with spinlock taken, we have to preallocate td_lkpi_task before
754ec25b6faSVladimir Kondratyev * first call to irq_work_queue(). As irq_work uses a single thread,
755ec25b6faSVladimir Kondratyev * it is enough to read current once at SYSINIT stage.
756ec25b6faSVladimir Kondratyev */
757ec25b6faSVladimir Kondratyev if (current == NULL)
758ec25b6faSVladimir Kondratyev panic("irq_work taskqueue is not initialized");
759ec25b6faSVladimir Kondratyev }
760ec25b6faSVladimir Kondratyev static struct task linux_irq_work_init_task =
761ec25b6faSVladimir Kondratyev TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);
762ec25b6faSVladimir Kondratyev
static void
linux_irq_work_init(void *arg)
{
	/* single-threaded fast taskqueue backing irq_work dispatch */
	linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
	    M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
	taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
	    "linuxkpi_irq_wq");
	/* pre-touch "current" on the worker; see linux_irq_work_init_fn() */
	taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
}
SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_init, NULL);
774ec25b6faSVladimir Kondratyev
static void
linux_irq_work_uninit(void *arg)
{
	/* wait for any queued irq_work items to finish before freeing */
	taskqueue_drain_all(linux_irq_work_tq);
	taskqueue_free(linux_irq_work_tq);
}
SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_uninit, NULL);
783