xref: /netbsd-src/sys/external/bsd/common/linux/linux_work.c (revision 04caf091723b866bc52f2b4f123a999c552bc7a2)
1*04caf091Sriastradh /*	$NetBSD: linux_work.c,v 1.61 2022/04/09 23:43:31 riastradh Exp $	*/
262976e36Sskrll 
362976e36Sskrll /*-
49ee469e1Sriastradh  * Copyright (c) 2018 The NetBSD Foundation, Inc.
562976e36Sskrll  * All rights reserved.
662976e36Sskrll  *
762976e36Sskrll  * This code is derived from software contributed to The NetBSD Foundation
862976e36Sskrll  * by Taylor R. Campbell.
962976e36Sskrll  *
1062976e36Sskrll  * Redistribution and use in source and binary forms, with or without
1162976e36Sskrll  * modification, are permitted provided that the following conditions
1262976e36Sskrll  * are met:
1362976e36Sskrll  * 1. Redistributions of source code must retain the above copyright
1462976e36Sskrll  *    notice, this list of conditions and the following disclaimer.
1562976e36Sskrll  * 2. Redistributions in binary form must reproduce the above copyright
1662976e36Sskrll  *    notice, this list of conditions and the following disclaimer in the
1762976e36Sskrll  *    documentation and/or other materials provided with the distribution.
1862976e36Sskrll  *
1962976e36Sskrll  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
2062976e36Sskrll  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
2162976e36Sskrll  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2262976e36Sskrll  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2362976e36Sskrll  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2462976e36Sskrll  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2562976e36Sskrll  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2662976e36Sskrll  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2762976e36Sskrll  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2862976e36Sskrll  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2962976e36Sskrll  * POSSIBILITY OF SUCH DAMAGE.
3062976e36Sskrll  */
3162976e36Sskrll 
3262976e36Sskrll #include <sys/cdefs.h>
33*04caf091Sriastradh __KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.61 2022/04/09 23:43:31 riastradh Exp $");
3462976e36Sskrll 
3562976e36Sskrll #include <sys/types.h>
3662976e36Sskrll #include <sys/atomic.h>
3762976e36Sskrll #include <sys/callout.h>
3862976e36Sskrll #include <sys/condvar.h>
3962976e36Sskrll #include <sys/errno.h>
4062976e36Sskrll #include <sys/kmem.h>
419ee469e1Sriastradh #include <sys/kthread.h>
429ee469e1Sriastradh #include <sys/lwp.h>
4362976e36Sskrll #include <sys/mutex.h>
44282b3410Sryo #ifndef _MODULE
45282b3410Sryo #include <sys/once.h>
46282b3410Sryo #endif
4762976e36Sskrll #include <sys/queue.h>
48ccfee5f7Sriastradh #include <sys/sdt.h>
4962976e36Sskrll 
5062976e36Sskrll #include <linux/workqueue.h>
5162976e36Sskrll 
52e27b3435Sriastradh TAILQ_HEAD(work_head, work_struct);
53e27b3435Sriastradh TAILQ_HEAD(dwork_head, delayed_work);
54e27b3435Sriastradh 
5562976e36Sskrll struct workqueue_struct {
5662976e36Sskrll 	kmutex_t		wq_lock;
5762976e36Sskrll 	kcondvar_t		wq_cv;
58e27b3435Sriastradh 	struct dwork_head	wq_delayed; /* delayed work scheduled */
59888678acSriastradh 	struct work_head	wq_rcu;	    /* RCU work scheduled */
60e27b3435Sriastradh 	struct work_head	wq_queue;   /* work to run */
61e27b3435Sriastradh 	struct work_head	wq_dqueue;  /* delayed work to run now */
6262976e36Sskrll 	struct work_struct	*wq_current_work;
639ee469e1Sriastradh 	int			wq_flags;
64168b5894Sriastradh 	bool			wq_dying;
659ee469e1Sriastradh 	uint64_t		wq_gen;
66168b5894Sriastradh 	struct lwp		*wq_lwp;
67504d2eb4Sriastradh 	const char		*wq_name;
6862976e36Sskrll };
6962976e36Sskrll 
709ee469e1Sriastradh static void __dead	linux_workqueue_thread(void *);
719ee469e1Sriastradh static void		linux_workqueue_timeout(void *);
72e27b3435Sriastradh static bool		work_claimed(struct work_struct *,
73e27b3435Sriastradh 			    struct workqueue_struct *);
747d2f7a07Sriastradh static struct workqueue_struct *
75e27b3435Sriastradh 			work_queue(struct work_struct *);
76e27b3435Sriastradh static bool		acquire_work(struct work_struct *,
777d2f7a07Sriastradh 			    struct workqueue_struct *);
787d2f7a07Sriastradh static void		release_work(struct work_struct *,
797d2f7a07Sriastradh 			    struct workqueue_struct *);
8050219d6aSriastradh static void		wait_for_current_work(struct work_struct *,
8150219d6aSriastradh 			    struct workqueue_struct *);
82450ff8bbSriastradh static void		dw_callout_init(struct workqueue_struct *,
83450ff8bbSriastradh 			    struct delayed_work *);
84d2984e1fSriastradh static void		dw_callout_destroy(struct workqueue_struct *,
85d2984e1fSriastradh 			    struct delayed_work *);
86535de6e4Sriastradh static void		cancel_delayed_work_done(struct workqueue_struct *,
87535de6e4Sriastradh 			    struct delayed_work *);
8862976e36Sskrll 
89ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
90ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
91ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, release,
92ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
93ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, queue,
94ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
95888678acSriastradh SDT_PROBE_DEFINE2(sdt, linux, work, rcu,
96888678acSriastradh     "struct rcu_work *"/*work*/, "struct workqueue_struct *"/*wq*/);
97ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
98ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
99ccfee5f7Sriastradh SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
100ccfee5f7Sriastradh     "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
101ccfee5f7Sriastradh     "unsigned long"/*ticks*/);
102ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, timer,
103ccfee5f7Sriastradh     "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
104ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, wait__start,
105ccfee5f7Sriastradh     "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
106ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, wait__done,
107ccfee5f7Sriastradh     "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
108ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, run,
109ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
110ccfee5f7Sriastradh SDT_PROBE_DEFINE2(sdt, linux, work, done,
111ccfee5f7Sriastradh     "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
112ccfee5f7Sriastradh SDT_PROBE_DEFINE1(sdt, linux, work, batch__start,
113ccfee5f7Sriastradh     "struct workqueue_struct *"/*wq*/);
114ccfee5f7Sriastradh SDT_PROBE_DEFINE1(sdt, linux, work, batch__done,
115ccfee5f7Sriastradh     "struct workqueue_struct *"/*wq*/);
11642de2e1bSriastradh SDT_PROBE_DEFINE1(sdt, linux, work, flush__self,
11742de2e1bSriastradh     "struct workqueue_struct *"/*wq*/);
118ccfee5f7Sriastradh SDT_PROBE_DEFINE1(sdt, linux, work, flush__start,
119ccfee5f7Sriastradh     "struct workqueue_struct *"/*wq*/);
120ccfee5f7Sriastradh SDT_PROBE_DEFINE1(sdt, linux, work, flush__done,
121ccfee5f7Sriastradh     "struct workqueue_struct *"/*wq*/);
122ccfee5f7Sriastradh 
1239ee469e1Sriastradh static specificdata_key_t workqueue_key __read_mostly;
12462976e36Sskrll 
125dfa0e026Sriastradh struct workqueue_struct	*system_highpri_wq __read_mostly;
1269ee469e1Sriastradh struct workqueue_struct	*system_long_wq __read_mostly;
1279ee469e1Sriastradh struct workqueue_struct	*system_power_efficient_wq __read_mostly;
1286fe9f2d2Sriastradh struct workqueue_struct	*system_unbound_wq __read_mostly;
129dfa0e026Sriastradh struct workqueue_struct	*system_wq __read_mostly;
13035ff64e8Sriastradh 
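/*
 * atomic_cas_uintptr(p, old, new)
 *
 *	Thin wrapper: NetBSD's <sys/atomic.h> supplies atomic_cas_ptr()
 *	on void * operands; this re-types it for the uintptr_t
 *	work_owner word manipulated by acquire_work/release_work below.
 */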
131e27b3435Sriastradh static inline uintptr_t
132e27b3435Sriastradh atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
133e27b3435Sriastradh {
134e27b3435Sriastradh 
135e27b3435Sriastradh 	return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
136e27b3435Sriastradh }
137e27b3435Sriastradh 
13831bca411Sriastradh /*
13931bca411Sriastradh  * linux_workqueue_init()
14031bca411Sriastradh  *
14131bca411Sriastradh  *	Initialize the Linux workqueue subsystem.  Return 0 on success,
14231bca411Sriastradh  *	NetBSD error on failure.
14331bca411Sriastradh  */
144282b3410Sryo static int
145282b3410Sryo linux_workqueue_init0(void)
14662976e36Sskrll {
1479ee469e1Sriastradh 	int error;
14835ff64e8Sriastradh 
1499ee469e1Sriastradh 	error = lwp_specific_key_create(&workqueue_key, NULL);
1509ee469e1Sriastradh 	if (error)
151dfa0e026Sriastradh 		goto out;
152ffba132aSriastradh 
153dfa0e026Sriastradh 	system_highpri_wq = alloc_ordered_workqueue("lnxhipwq", 0);
154dfa0e026Sriastradh 	if (system_highpri_wq == NULL) {
1559ee469e1Sriastradh 		error = ENOMEM;
156dfa0e026Sriastradh 		goto out;
1579ee469e1Sriastradh 	}
1589ee469e1Sriastradh 
1599ee469e1Sriastradh 	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
1609ee469e1Sriastradh 	if (system_long_wq == NULL) {
1619ee469e1Sriastradh 		error = ENOMEM;
162dfa0e026Sriastradh 		goto out;
1639ee469e1Sriastradh 	}
16462976e36Sskrll 
165876a52a2Sriastradh 	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
166f9418100Sriastradh 	if (system_power_efficient_wq == NULL) {
1679ee469e1Sriastradh 		error = ENOMEM;
168dfa0e026Sriastradh 		goto out;
1699ee469e1Sriastradh 	}
170876a52a2Sriastradh 
1716fe9f2d2Sriastradh 	system_unbound_wq = alloc_ordered_workqueue("lnxubdwq", 0);
1726fe9f2d2Sriastradh 	if (system_unbound_wq == NULL) {
1736fe9f2d2Sriastradh 		error = ENOMEM;
174dfa0e026Sriastradh 		goto out;
1756fe9f2d2Sriastradh 	}
1766fe9f2d2Sriastradh 
177dfa0e026Sriastradh 	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
178dfa0e026Sriastradh 	if (system_wq == NULL) {
179dfa0e026Sriastradh 		error = ENOMEM;
180dfa0e026Sriastradh 		goto out;
181dfa0e026Sriastradh 	}
182ffba132aSriastradh 
183dfa0e026Sriastradh 	/* Success!  */
184dfa0e026Sriastradh 	error = 0;
185dfa0e026Sriastradh 
186dfa0e026Sriastradh out:	if (error) {
187dfa0e026Sriastradh 		if (system_highpri_wq)
188dfa0e026Sriastradh 			destroy_workqueue(system_highpri_wq);
189dfa0e026Sriastradh 		if (system_long_wq)
190dfa0e026Sriastradh 			destroy_workqueue(system_long_wq);
191dfa0e026Sriastradh 		if (system_power_efficient_wq)
192dfa0e026Sriastradh 			destroy_workqueue(system_power_efficient_wq);
193dfa0e026Sriastradh 		if (system_unbound_wq)
1946fe9f2d2Sriastradh 			destroy_workqueue(system_unbound_wq);
195dfa0e026Sriastradh 		if (system_wq)
196dfa0e026Sriastradh 			destroy_workqueue(system_wq);
197dfa0e026Sriastradh 		if (workqueue_key)
198dfa0e026Sriastradh 			lwp_specific_key_delete(workqueue_key);
199dfa0e026Sriastradh 	}
200dfa0e026Sriastradh 
2019ee469e1Sriastradh 	return error;
20262976e36Sskrll }
20362976e36Sskrll 
20431bca411Sriastradh /*
20531bca411Sriastradh  * linux_workqueue_fini()
20631bca411Sriastradh  *
20731bca411Sriastradh  *	Destroy the Linux workqueue subsystem.  Never fails.
20831bca411Sriastradh  */
209282b3410Sryo static void
210282b3410Sryo linux_workqueue_fini0(void)
21162976e36Sskrll {
212ffba132aSriastradh 
2139ee469e1Sriastradh 	destroy_workqueue(system_power_efficient_wq);
214ffba132aSriastradh 	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_highpri_wq);
21562976e36Sskrll 	destroy_workqueue(system_wq);
2169ee469e1Sriastradh 	lwp_specific_key_delete(workqueue_key);
21762976e36Sskrll }
218282b3410Sryo 
219282b3410Sryo #ifndef _MODULE
220282b3410Sryo static ONCE_DECL(linux_workqueue_init_once);
221282b3410Sryo #endif
222282b3410Sryo 
223282b3410Sryo int
224282b3410Sryo linux_workqueue_init(void)
225282b3410Sryo {
226282b3410Sryo #ifdef _MODULE
227282b3410Sryo 	return linux_workqueue_init0();
228282b3410Sryo #else
229282b3410Sryo 	return INIT_ONCE(&linux_workqueue_init_once, &linux_workqueue_init0);
230282b3410Sryo #endif
231282b3410Sryo }
232282b3410Sryo 
233282b3410Sryo void
234282b3410Sryo linux_workqueue_fini(void)
235282b3410Sryo {
236282b3410Sryo #ifdef _MODULE
237282b3410Sryo 	return linux_workqueue_fini0();
238282b3410Sryo #else
239282b3410Sryo 	return FINI_ONCE(&linux_workqueue_init_once, &linux_workqueue_fini0);
240282b3410Sryo #endif
241282b3410Sryo }
24262976e36Sskrll 
24362976e36Sskrll /*
24462976e36Sskrll  * Workqueues
24562976e36Sskrll  */
24662976e36Sskrll 
24731bca411Sriastradh /*
248181b0344Sriastradh  * alloc_workqueue(name, flags, max_active)
24931bca411Sriastradh  *
250181b0344Sriastradh  *	Create a workqueue of the given name.  max_active is the
251181b0344Sriastradh  *	maximum number of work items in flight, or 0 for the default.
252181b0344Sriastradh  *	Return NULL on failure, pointer to struct workqueue_struct
253181b0344Sriastradh  *	object on success.
25431bca411Sriastradh  */
25562976e36Sskrll struct workqueue_struct *
256181b0344Sriastradh alloc_workqueue(const char *name, int flags, unsigned max_active)
25762976e36Sskrll {
25862976e36Sskrll 	struct workqueue_struct *wq;
25962976e36Sskrll 	int error;
26062976e36Sskrll 
261181b0344Sriastradh 	KASSERT(max_active == 0 || max_active == 1);
26262976e36Sskrll 
2637a09cf8aSriastradh 	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);
26462976e36Sskrll 
26595fc21bfSriastradh 	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
26662976e36Sskrll 	cv_init(&wq->wq_cv, name);
26762976e36Sskrll 	TAILQ_INIT(&wq->wq_delayed);
268888678acSriastradh 	TAILQ_INIT(&wq->wq_rcu);
2699ee469e1Sriastradh 	TAILQ_INIT(&wq->wq_queue);
270e27b3435Sriastradh 	TAILQ_INIT(&wq->wq_dqueue);
27162976e36Sskrll 	wq->wq_current_work = NULL;
2727a09cf8aSriastradh 	wq->wq_flags = 0;
2737a09cf8aSriastradh 	wq->wq_dying = false;
274168b5894Sriastradh 	wq->wq_gen = 0;
275168b5894Sriastradh 	wq->wq_lwp = NULL;
276504d2eb4Sriastradh 	wq->wq_name = name;
27762976e36Sskrll 
2789ee469e1Sriastradh 	error = kthread_create(PRI_NONE,
2799ee469e1Sriastradh 	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
2809ee469e1Sriastradh 	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
2819ee469e1Sriastradh 	if (error)
2829ee469e1Sriastradh 		goto fail0;
28335ff64e8Sriastradh 
28462976e36Sskrll 	return wq;
2859ee469e1Sriastradh 
286e27b3435Sriastradh fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
287e27b3435Sriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
288888678acSriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
2899ee469e1Sriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
2909ee469e1Sriastradh 	cv_destroy(&wq->wq_cv);
2919ee469e1Sriastradh 	mutex_destroy(&wq->wq_lock);
2929ee469e1Sriastradh 	kmem_free(wq, sizeof(*wq));
2939ee469e1Sriastradh 	return NULL;
29462976e36Sskrll }
29562976e36Sskrll 
29631bca411Sriastradh /*
297181b0344Sriastradh  * alloc_ordered_workqueue(name, flags)
298181b0344Sriastradh  *
299181b0344Sriastradh  *	Same as alloc_workqueue(name, flags, 1).
300181b0344Sriastradh  */
301181b0344Sriastradh struct workqueue_struct *
302181b0344Sriastradh alloc_ordered_workqueue(const char *name, int flags)
303181b0344Sriastradh {
304181b0344Sriastradh 
305181b0344Sriastradh 	return alloc_workqueue(name, flags, 1);
306181b0344Sriastradh }
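
/*
 * Example (illustrative sketch): the usual create/use/destroy
 * lifecycle.  The queue name "mydrvwq" and the softc sc are
 * hypothetical; the calls are the APIs defined in this file.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("mydrvwq", 0);
 *	if (wq == NULL)
 *		return ENOMEM;
 *	queue_work(wq, &sc->sc_work);
 *	...
 *	destroy_workqueue(wq);	-- cancels delayed work, drains queue
 */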
307181b0344Sriastradh 
308181b0344Sriastradh /*
30931bca411Sriastradh  * destroy_workqueue(wq)
31031bca411Sriastradh  *
31131bca411Sriastradh  *	Destroy the workqueue wq.  Cancel any pending
31231bca411Sriastradh  *	delayed work.  Wait for all queued work to complete.
31331bca411Sriastradh  *
31431bca411Sriastradh  *	May sleep.
31531bca411Sriastradh  */
31662976e36Sskrll void
31762976e36Sskrll destroy_workqueue(struct workqueue_struct *wq)
31862976e36Sskrll {
31962976e36Sskrll 
32062976e36Sskrll 	/*
3219ee469e1Sriastradh 	 * Cancel all delayed work.  We do this first because any
3229ee469e1Sriastradh 	 * delayed work that has already timed out, which we can't
3239ee469e1Sriastradh 	 * cancel, may have queued new work.
32462976e36Sskrll 	 */
32562976e36Sskrll 	mutex_enter(&wq->wq_lock);
3265f1b90f3Sriastradh 	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
3275f1b90f3Sriastradh 		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);
3285f1b90f3Sriastradh 
329e27b3435Sriastradh 		KASSERT(work_queue(&dw->work) == wq);
3305f1b90f3Sriastradh 		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
3315f1b90f3Sriastradh 			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
3325f1b90f3Sriastradh 			dw->dw_state == DELAYED_WORK_CANCELLED),
3335f1b90f3Sriastradh 		    "delayed work %p in bad state: %d",
3345f1b90f3Sriastradh 		    dw, dw->dw_state);
3355f1b90f3Sriastradh 
3365f1b90f3Sriastradh 		/*
3375f1b90f3Sriastradh 		 * Mark it cancelled and try to stop the callout before
3385f1b90f3Sriastradh 		 * it starts.
3395f1b90f3Sriastradh 		 *
3405f1b90f3Sriastradh 		 * If it's too late and the callout has already begun
3415f1b90f3Sriastradh 		 * to execute, then it will notice that we asked to
3425f1b90f3Sriastradh 		 * cancel it and remove itself from the queue before
3435f1b90f3Sriastradh 		 * returning.
3445f1b90f3Sriastradh 		 *
3455f1b90f3Sriastradh 		 * If we stopped the callout before it started,
3465f1b90f3Sriastradh 		 * however, then we can safely destroy the callout and
3475f1b90f3Sriastradh 		 * dissociate it from the workqueue ourselves.
3485f1b90f3Sriastradh 		 */
349ccfee5f7Sriastradh 		SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
3505f1b90f3Sriastradh 		dw->dw_state = DELAYED_WORK_CANCELLED;
3519ee469e1Sriastradh 		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
3525f1b90f3Sriastradh 			cancel_delayed_work_done(wq, dw);
35362976e36Sskrll 	}
35462976e36Sskrll 	mutex_exit(&wq->wq_lock);
35562976e36Sskrll 
356888678acSriastradh 	/* Wait for all scheduled RCU work to complete.  */
357888678acSriastradh 	mutex_enter(&wq->wq_lock);
358888678acSriastradh 	while (!TAILQ_EMPTY(&wq->wq_rcu))
359888678acSriastradh 		cv_wait(&wq->wq_cv, &wq->wq_lock);
360888678acSriastradh 	mutex_exit(&wq->wq_lock);
361888678acSriastradh 
3625f1b90f3Sriastradh 	/*
3635f1b90f3Sriastradh 	 * At this point, no new work can be put on the queue.
3645f1b90f3Sriastradh 	 */
36562976e36Sskrll 
3669ee469e1Sriastradh 	/* Tell the thread to exit.  */
3679ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
3689ee469e1Sriastradh 	wq->wq_dying = true;
3699ee469e1Sriastradh 	cv_broadcast(&wq->wq_cv);
3709ee469e1Sriastradh 	mutex_exit(&wq->wq_lock);
37162976e36Sskrll 
3729ee469e1Sriastradh 	/* Wait for it to exit.  */
3739ee469e1Sriastradh 	(void)kthread_join(wq->wq_lwp);
3749ee469e1Sriastradh 
3757a09cf8aSriastradh 	KASSERT(wq->wq_dying);
3767a09cf8aSriastradh 	KASSERT(wq->wq_flags == 0);
3779ee469e1Sriastradh 	KASSERT(wq->wq_current_work == NULL);
378e27b3435Sriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
3799ee469e1Sriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
380888678acSriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
3819ee469e1Sriastradh 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
38262976e36Sskrll 	cv_destroy(&wq->wq_cv);
38362976e36Sskrll 	mutex_destroy(&wq->wq_lock);
38462976e36Sskrll 
38562976e36Sskrll 	kmem_free(wq, sizeof(*wq));
38662976e36Sskrll }
38762976e36Sskrll 
38862976e36Sskrll /*
3899ee469e1Sriastradh  * Work thread and callout
39062976e36Sskrll  */
39162976e36Sskrll 
39231bca411Sriastradh /*
39331bca411Sriastradh  * linux_workqueue_thread(cookie)
39431bca411Sriastradh  *
39531bca411Sriastradh  *	Main function for a workqueue's worker thread.  Waits until
39631bca411Sriastradh  *	there is work queued, grabs a batch of work off the queue,
39731bca411Sriastradh  *	executes it all, bumps the generation number, and repeats,
39831bca411Sriastradh  *	until dying.
39931bca411Sriastradh  */
4009ee469e1Sriastradh static void __dead
4019ee469e1Sriastradh linux_workqueue_thread(void *cookie)
40262976e36Sskrll {
4039ee469e1Sriastradh 	struct workqueue_struct *const wq = cookie;
404285f2419Sriastradh 	struct work_head *const q[2] = { &wq->wq_queue, &wq->wq_dqueue };
405285f2419Sriastradh 	struct work_struct marker, *work;
406e27b3435Sriastradh 	unsigned i;
4079ee469e1Sriastradh 
4089ee469e1Sriastradh 	lwp_setspecific(workqueue_key, wq);
4099ee469e1Sriastradh 
4109ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
4119ee469e1Sriastradh 	for (;;) {
4125f1b90f3Sriastradh 		/*
4135f1b90f3Sriastradh 		 * Wait until there's activity.  If there's no work and
4145f1b90f3Sriastradh 		 * we're dying, stop here.
4155f1b90f3Sriastradh 		 */
41670435d93Sriastradh 		if (TAILQ_EMPTY(&wq->wq_queue) &&
41770435d93Sriastradh 		    TAILQ_EMPTY(&wq->wq_dqueue)) {
41870435d93Sriastradh 			if (wq->wq_dying)
4199ee469e1Sriastradh 				break;
42070435d93Sriastradh 			cv_wait(&wq->wq_cv, &wq->wq_lock);
42170435d93Sriastradh 			continue;
4225f1b90f3Sriastradh 		}
4239ee469e1Sriastradh 
424285f2419Sriastradh 		/*
425285f2419Sriastradh 		 * Start a batch of work.  Use a marker to delimit when
426285f2419Sriastradh 		 * the batch ends so we can advance the generation
427285f2419Sriastradh 		 * after the batch.
428285f2419Sriastradh 		 */
429ccfee5f7Sriastradh 		SDT_PROBE1(sdt, linux, work, batch__start,  wq);
430e27b3435Sriastradh 		for (i = 0; i < 2; i++) {
431285f2419Sriastradh 			if (TAILQ_EMPTY(q[i]))
432285f2419Sriastradh 				continue;
433285f2419Sriastradh 			TAILQ_INSERT_TAIL(q[i], &marker, work_entry);
434285f2419Sriastradh 			while ((work = TAILQ_FIRST(q[i])) != &marker) {
435e27b3435Sriastradh 				void (*func)(struct work_struct *);
4369ee469e1Sriastradh 
437e27b3435Sriastradh 				KASSERT(work_queue(work) == wq);
438e27b3435Sriastradh 				KASSERT(work_claimed(work, wq));
439285f2419Sriastradh 				KASSERTMSG((q[i] != &wq->wq_dqueue ||
440e27b3435Sriastradh 					container_of(work, struct delayed_work,
441e27b3435Sriastradh 					    work)->dw_state ==
442e27b3435Sriastradh 					DELAYED_WORK_IDLE),
443e27b3435Sriastradh 				    "delayed work %p queued and scheduled",
444e27b3435Sriastradh 				    work);
445e27b3435Sriastradh 
446e27b3435Sriastradh 				TAILQ_REMOVE(q[i], work, work_entry);
4479ee469e1Sriastradh 				KASSERT(wq->wq_current_work == NULL);
4489ee469e1Sriastradh 				wq->wq_current_work = work;
449e27b3435Sriastradh 				func = work->func;
450e27b3435Sriastradh 				release_work(work, wq);
451e27b3435Sriastradh 				/* Can't dereference work after this point.  */
4529ee469e1Sriastradh 
4539ee469e1Sriastradh 				mutex_exit(&wq->wq_lock);
454ccfee5f7Sriastradh 				SDT_PROBE2(sdt, linux, work, run,  work, wq);
455e27b3435Sriastradh 				(*func)(work);
456ccfee5f7Sriastradh 				SDT_PROBE2(sdt, linux, work, done,  work, wq);
4579ee469e1Sriastradh 				mutex_enter(&wq->wq_lock);
4589ee469e1Sriastradh 
4599ee469e1Sriastradh 				KASSERT(wq->wq_current_work == work);
4609ee469e1Sriastradh 				wq->wq_current_work = NULL;
4619ee469e1Sriastradh 				cv_broadcast(&wq->wq_cv);
46262976e36Sskrll 			}
463285f2419Sriastradh 			TAILQ_REMOVE(q[i], &marker, work_entry);
464e27b3435Sriastradh 		}
46562976e36Sskrll 
4667e01b53dSriastradh 		/* Notify cancel that we've completed a batch of work.  */
4679ee469e1Sriastradh 		wq->wq_gen++;
4689ee469e1Sriastradh 		cv_broadcast(&wq->wq_cv);
469ccfee5f7Sriastradh 		SDT_PROBE1(sdt, linux, work, batch__done,  wq);
47062976e36Sskrll 	}
4719ee469e1Sriastradh 	mutex_exit(&wq->wq_lock);
47262976e36Sskrll 
4739ee469e1Sriastradh 	kthread_exit(0);
47462976e36Sskrll }
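
/*
 * The marker above bounds each batch: with the queue [A, B] at batch
 * start, inserting the marker gives [A, B, M]; work C queued while A
 * runs lands after M ([B, M, C]), so this batch stops at M and C
 * waits for the next pass, after the generation number is bumped.
 */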
47562976e36Sskrll 
47631bca411Sriastradh /*
47731bca411Sriastradh  * linux_workqueue_timeout(cookie)
47831bca411Sriastradh  *
47931bca411Sriastradh  *	Delayed work timeout callback.
48031bca411Sriastradh  *
48131bca411Sriastradh  *	- If scheduled, queue it.
48231bca411Sriastradh  *	- If rescheduled, callout_schedule ourselves again.
48331bca411Sriastradh  *	- If cancelled, destroy the callout and release the work from
48431bca411Sriastradh  *        the workqueue.
48531bca411Sriastradh  */
48662976e36Sskrll static void
4879ee469e1Sriastradh linux_workqueue_timeout(void *cookie)
48862976e36Sskrll {
4899ee469e1Sriastradh 	struct delayed_work *const dw = cookie;
490e27b3435Sriastradh 	struct workqueue_struct *const wq = work_queue(&dw->work);
49162976e36Sskrll 
492e27b3435Sriastradh 	KASSERTMSG(wq != NULL,
493e27b3435Sriastradh 	    "delayed work %p state %d resched %d",
494e27b3435Sriastradh 	    dw, dw->dw_state, dw->dw_resched);
495ac596c7fSriastradh 
496ccfee5f7Sriastradh 	SDT_PROBE2(sdt, linux, work, timer,  dw, wq);
497ccfee5f7Sriastradh 
4989ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
499e27b3435Sriastradh 	KASSERT(work_queue(&dw->work) == wq);
5009ee469e1Sriastradh 	switch (dw->dw_state) {
5019ee469e1Sriastradh 	case DELAYED_WORK_IDLE:
5029ee469e1Sriastradh 		panic("delayed work callout uninitialized: %p", dw);
5039ee469e1Sriastradh 	case DELAYED_WORK_SCHEDULED:
504d2984e1fSriastradh 		dw_callout_destroy(wq, dw);
505e27b3435Sriastradh 		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
5069ee469e1Sriastradh 		cv_broadcast(&wq->wq_cv);
507ccfee5f7Sriastradh 		SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
5089ee469e1Sriastradh 		break;
5099ee469e1Sriastradh 	case DELAYED_WORK_RESCHEDULED:
5101fca7189Sriastradh 		KASSERT(dw->dw_resched >= 0);
5111fca7189Sriastradh 		callout_schedule(&dw->dw_callout, dw->dw_resched);
5129ee469e1Sriastradh 		dw->dw_state = DELAYED_WORK_SCHEDULED;
5131fca7189Sriastradh 		dw->dw_resched = -1;
5149ee469e1Sriastradh 		break;
5159ee469e1Sriastradh 	case DELAYED_WORK_CANCELLED:
516535de6e4Sriastradh 		cancel_delayed_work_done(wq, dw);
517e27b3435Sriastradh 		/* Can't dereference dw after this point.  */
5185891db5cSriastradh 		goto out;
5199ee469e1Sriastradh 	default:
5209ee469e1Sriastradh 		panic("delayed work callout in bad state: %p", dw);
52162976e36Sskrll 	}
522dd8f93d7Sriastradh 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
523dd8f93d7Sriastradh 	    dw->dw_state == DELAYED_WORK_SCHEDULED);
5245891db5cSriastradh out:	mutex_exit(&wq->wq_lock);
5259ee469e1Sriastradh }
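
/*
 * Delayed-work states, summarized from the transitions above and in
 * queue_delayed_work/mod_delayed_work below:
 *
 *	IDLE        --dw_callout_init-->   SCHEDULED
 *	SCHEDULED   --timeout fires-->     IDLE (now on wq_dqueue)
 *	SCHEDULED   --cancel/mod after
 *	              callout has begun--> CANCELLED/RESCHEDULED
 *	RESCHEDULED --timeout fires-->     SCHEDULED (with new delay)
 *	CANCELLED   --timeout fires-->     IDLE (released from wq)
 */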
5269ee469e1Sriastradh 
52731bca411Sriastradh /*
52831bca411Sriastradh  * current_work()
52931bca411Sriastradh  *
53031bca411Sriastradh  *	If in a workqueue worker thread, return the work it is
53131bca411Sriastradh  *	currently executing.  Otherwise return NULL.
53231bca411Sriastradh  */
5339ee469e1Sriastradh struct work_struct *
5349ee469e1Sriastradh current_work(void)
5359ee469e1Sriastradh {
5369ee469e1Sriastradh 	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);
5379ee469e1Sriastradh 
5389ee469e1Sriastradh 	/* If we're not a workqueue thread, then there's no work.  */
5399ee469e1Sriastradh 	if (wq == NULL)
5409ee469e1Sriastradh 		return NULL;
5419ee469e1Sriastradh 
54262976e36Sskrll 	/*
5439ee469e1Sriastradh 	 * Otherwise, this should be possible only while work is in
5449ee469e1Sriastradh 	 * progress.  Return the current work item.
54562976e36Sskrll 	 */
5469ee469e1Sriastradh 	KASSERT(wq->wq_current_work != NULL);
5479ee469e1Sriastradh 	return wq->wq_current_work;
54862976e36Sskrll }
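
/*
 * Example (illustrative sketch): a driver can use current_work() to
 * assert lock ordering against its own handler; sc and sc_work are
 * hypothetical.
 *
 *	KASSERT(current_work() != &sc->sc_work);
 *	mutex_enter(&sc->sc_lock);
 */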
54962976e36Sskrll 
55062976e36Sskrll /*
55162976e36Sskrll  * Work
55262976e36Sskrll  */
55362976e36Sskrll 
55431bca411Sriastradh /*
55531bca411Sriastradh  * INIT_WORK(work, fn)
55631bca411Sriastradh  *
55731bca411Sriastradh  *	Initialize work for use with a workqueue to call fn in a worker
55831bca411Sriastradh  *	thread.  There is no corresponding destruction operation.
55931bca411Sriastradh  */
56062976e36Sskrll void
56162976e36Sskrll INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
56262976e36Sskrll {
56362976e36Sskrll 
564e27b3435Sriastradh 	work->work_owner = 0;
5651db7f3ecSriastradh 	work->func = fn;
56662976e36Sskrll }
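
/*
 * Example (illustrative sketch): the usual embedding pattern.  The
 * struct mydev_softc and mydev_task names are hypothetical;
 * container_of recovers the enclosing softc from the work_struct
 * passed to the handler, as the worker thread does for delayed work
 * above.
 *
 *	struct mydev_softc {
 *		kmutex_t		sc_lock;
 *		struct work_struct	sc_work;
 *	};
 *
 *	static void
 *	mydev_task(struct work_struct *work)
 *	{
 *		struct mydev_softc *sc =
 *		    container_of(work, struct mydev_softc, sc_work);
 *		...
 *	}
 *
 *	INIT_WORK(&sc->sc_work, mydev_task);
 */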
56762976e36Sskrll 
56831bca411Sriastradh /*
569e27b3435Sriastradh  * work_claimed(work, wq)
570e27b3435Sriastradh  *
571e27b3435Sriastradh  *	True if work is currently claimed by a workqueue, meaning it is
572e27b3435Sriastradh  *	either on the queue or scheduled in a callout.  The workqueue
573e27b3435Sriastradh  *	must be wq, and caller must hold wq's lock.
574e27b3435Sriastradh  */
575e27b3435Sriastradh static bool
576e27b3435Sriastradh work_claimed(struct work_struct *work, struct workqueue_struct *wq)
577e27b3435Sriastradh {
578e27b3435Sriastradh 
579e27b3435Sriastradh 	KASSERT(work_queue(work) == wq);
580e27b3435Sriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
581e27b3435Sriastradh 
5824f49735fSriastradh 	return atomic_load_relaxed(&work->work_owner) & 1;
583e27b3435Sriastradh }
584e27b3435Sriastradh 
585e27b3435Sriastradh /*
586f7bdb0aaSriastradh  * work_pending(work)
587f7bdb0aaSriastradh  *
588f7bdb0aaSriastradh  *	True if work is currently claimed by any workqueue, scheduled
589f7bdb0aaSriastradh  *	to run on that workqueue.
590f7bdb0aaSriastradh  */
591f7bdb0aaSriastradh bool
59423727e60Sriastradh work_pending(const struct work_struct *work)
593f7bdb0aaSriastradh {
594f7bdb0aaSriastradh 
5954f49735fSriastradh 	return atomic_load_relaxed(&work->work_owner) & 1;
596f7bdb0aaSriastradh }
597f7bdb0aaSriastradh 
598f7bdb0aaSriastradh /*
599e27b3435Sriastradh  * work_queue(work)
600e27b3435Sriastradh  *
601e27b3435Sriastradh  *	Return the last queue that work was queued on, or NULL if it
602e27b3435Sriastradh  *	was never queued.
603e27b3435Sriastradh  */
604e27b3435Sriastradh static struct workqueue_struct *
605e27b3435Sriastradh work_queue(struct work_struct *work)
606e27b3435Sriastradh {
607e27b3435Sriastradh 
6084f49735fSriastradh 	return (struct workqueue_struct *)
6094f49735fSriastradh 	    (atomic_load_relaxed(&work->work_owner) & ~(uintptr_t)1);
610e27b3435Sriastradh }
611e27b3435Sriastradh 
612e27b3435Sriastradh /*
61331bca411Sriastradh  * acquire_work(work, wq)
61431bca411Sriastradh  *
615e27b3435Sriastradh  *	Try to claim work for wq.  If work is already claimed, it must
616e27b3435Sriastradh  *	be claimed by wq; return false.  If work is not already
617e27b3435Sriastradh  *	claimed, claim it, issue a memory barrier to match any prior
618e27b3435Sriastradh  *	release_work, and return true.
61931bca411Sriastradh  *
62031bca411Sriastradh  *	Caller must hold wq's lock.
62131bca411Sriastradh  */
622e27b3435Sriastradh static bool
6237d2f7a07Sriastradh acquire_work(struct work_struct *work, struct workqueue_struct *wq)
6247d2f7a07Sriastradh {
625e27b3435Sriastradh 	uintptr_t owner0, owner;
6267d2f7a07Sriastradh 
6277d2f7a07Sriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
628e27b3435Sriastradh 	KASSERT(((uintptr_t)wq & 1) == 0);
6297d2f7a07Sriastradh 
630e27b3435Sriastradh 	owner = (uintptr_t)wq | 1;
631e27b3435Sriastradh 	do {
6324f49735fSriastradh 		owner0 = atomic_load_relaxed(&work->work_owner);
633e27b3435Sriastradh 		if (owner0 & 1) {
634e27b3435Sriastradh 			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
635e27b3435Sriastradh 			return false;
6367d2f7a07Sriastradh 		}
637e27b3435Sriastradh 		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
638e27b3435Sriastradh 	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
639e27b3435Sriastradh 	    owner0);
6407d2f7a07Sriastradh 
641e27b3435Sriastradh 	KASSERT(work_queue(work) == wq);
642*04caf091Sriastradh 	membar_acquire();
643ccfee5f7Sriastradh 	SDT_PROBE2(sdt, linux, work, acquire,  work, wq);
644e27b3435Sriastradh 	return true;
6457d2f7a07Sriastradh }
6467d2f7a07Sriastradh 
64731bca411Sriastradh /*
64831bca411Sriastradh  * release_work(work, wq)
64931bca411Sriastradh  *
65031bca411Sriastradh  *	Issue a memory barrier to match any subsequent acquire_work and
65131bca411Sriastradh  *	dissociate work from wq.
65231bca411Sriastradh  *
65331bca411Sriastradh  *	Caller must hold wq's lock and work must be associated with wq.
65431bca411Sriastradh  */
6557d2f7a07Sriastradh static void
6567d2f7a07Sriastradh release_work(struct work_struct *work, struct workqueue_struct *wq)
6577d2f7a07Sriastradh {
6587d2f7a07Sriastradh 
659e27b3435Sriastradh 	KASSERT(work_queue(work) == wq);
6607d2f7a07Sriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
6617d2f7a07Sriastradh 
662ccfee5f7Sriastradh 	SDT_PROBE2(sdt, linux, work, release,  work, wq);
663*04caf091Sriastradh 	membar_release();
664e27b3435Sriastradh 
665e27b3435Sriastradh 	/*
666e27b3435Sriastradh 	 * Non-interlocked r/m/w is safe here because nobody else can
667e27b3435Sriastradh 	 * write to this while the claimed bit is set and the workqueue
668e27b3435Sriastradh 	 * lock is held.
669e27b3435Sriastradh 	 */
6704f49735fSriastradh 	atomic_store_relaxed(&work->work_owner,
6714f49735fSriastradh 	    atomic_load_relaxed(&work->work_owner) & ~(uintptr_t)1);
6727d2f7a07Sriastradh }
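
/*
 * The work_owner word packs two facts into one atomically updated
 * pointer-sized value: the upper bits hold the last workqueue the
 * work was queued on, and bit 0 is the claimed flag (workqueues come
 * from kmem_zalloc, so the low bit of the pointer is free):
 *
 *	0			never queued
 *	(uintptr_t)wq | 1	claimed: queued or scheduled on wq
 *	(uintptr_t)wq		released: last queued on wq
 */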
6737d2f7a07Sriastradh 
67431bca411Sriastradh /*
67531bca411Sriastradh  * schedule_work(work)
67631bca411Sriastradh  *
67731bca411Sriastradh  *	If work is not already queued on system_wq, queue it to be run
67831bca411Sriastradh  *	by system_wq's worker thread when it next can.  True if it was
67931bca411Sriastradh  *	newly queued, false if it was already queued.  If the work was
68031bca411Sriastradh  *	already running, queue it to run again.
68131bca411Sriastradh  *
68231bca411Sriastradh  *	Caller must ensure work is not queued to run on a different
68331bca411Sriastradh  *	workqueue.
68431bca411Sriastradh  */
68562976e36Sskrll bool
68662976e36Sskrll schedule_work(struct work_struct *work)
68762976e36Sskrll {
6889ee469e1Sriastradh 
68962976e36Sskrll 	return queue_work(system_wq, work);
69062976e36Sskrll }
69162976e36Sskrll 
69231bca411Sriastradh /*
69331bca411Sriastradh  * queue_work(wq, work)
69431bca411Sriastradh  *
69531bca411Sriastradh  *	If work is not already queued on wq, queue it to be run by wq's
69631bca411Sriastradh  *	worker thread when it next can.  True if it was newly queued,
69731bca411Sriastradh  *	false if it was already queued.  If the work was already
69831bca411Sriastradh  *	running, queue it to run again.
69931bca411Sriastradh  *
70031bca411Sriastradh  *	Caller must ensure work is not queued to run on a different
70131bca411Sriastradh  *	workqueue.
70231bca411Sriastradh  */
70362976e36Sskrll bool
70462976e36Sskrll queue_work(struct workqueue_struct *wq, struct work_struct *work)
70562976e36Sskrll {
70662976e36Sskrll 	bool newly_queued;
70762976e36Sskrll 
70862976e36Sskrll 	KASSERT(wq != NULL);
70962976e36Sskrll 
7109ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
711e27b3435Sriastradh 	if (__predict_true(acquire_work(work, wq))) {
712a1f195daSriastradh 		/*
713a1f195daSriastradh 		 * It wasn't on any workqueue at all.  Put it on this
714a1f195daSriastradh 		 * one, and signal the worker thread that there is work
715a1f195daSriastradh 		 * to do.
716a1f195daSriastradh 		 */
7179ee469e1Sriastradh 		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
718a1f195daSriastradh 		cv_broadcast(&wq->wq_cv);
719ccfee5f7Sriastradh 		SDT_PROBE2(sdt, linux, work, queue,  work, wq);
720a1f195daSriastradh 		newly_queued = true;
721a1f195daSriastradh 	} else {
722e27b3435Sriastradh 		/*
723e27b3435Sriastradh 		 * It was already on this workqueue.  Nothing to do
724e27b3435Sriastradh 		 * since it is already queued.
725e27b3435Sriastradh 		 */
72662976e36Sskrll 		newly_queued = false;
72762976e36Sskrll 	}
7289ee469e1Sriastradh 	mutex_exit(&wq->wq_lock);
72962976e36Sskrll 
73062976e36Sskrll 	return newly_queued;
73162976e36Sskrll }
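
/*
 * Example of the requeue semantics above (illustrative sketch): once
 * the worker thread has started the handler, the work has been
 * released, so a concurrent queue_work() claims it afresh and the
 * handler will run once more after the current invocation returns.
 *
 *	queue_work(wq, &sc->sc_work);	-- true: newly queued
 *	queue_work(wq, &sc->sc_work);	-- false: already queued
 *	(handler starts running; work released)
 *	queue_work(wq, &sc->sc_work);	-- true: queued to run again
 */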
73262976e36Sskrll 
73331bca411Sriastradh /*
73431bca411Sriastradh  * cancel_work(work)
73531bca411Sriastradh  *
73631bca411Sriastradh  *	If work was queued, remove it from the queue and return true.
737e27b3435Sriastradh  *	If work was not queued, return false.  Work may still be
738e27b3435Sriastradh  *	running when this returns.
73931bca411Sriastradh  */
74062976e36Sskrll bool
7419ee469e1Sriastradh cancel_work(struct work_struct *work)
74262976e36Sskrll {
7439ee469e1Sriastradh 	struct workqueue_struct *wq;
74462976e36Sskrll 	bool cancelled_p = false;
74562976e36Sskrll 
74643c369bcSriastradh 	/* If there's no workqueue, nothing to cancel.   */
747e27b3435Sriastradh 	if ((wq = work_queue(work)) == NULL)
74843c369bcSriastradh 		goto out;
74943c369bcSriastradh 
7509ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
751e27b3435Sriastradh 	if (__predict_false(work_queue(work) != wq)) {
752a1f195daSriastradh 		/*
753a1f195daSriastradh 		 * It has finished execution or been cancelled by
754a1f195daSriastradh 		 * another thread, and has been moved off the
755a1f195daSriastradh 		 * workqueue, so it's too late to cancel.
756a1f195daSriastradh 		 */
7579ee469e1Sriastradh 		cancelled_p = false;
7589ee469e1Sriastradh 	} else {
759e27b3435Sriastradh 		/* Check whether it's on the queue.  */
760e27b3435Sriastradh 		if (work_claimed(work, wq)) {
761a1f195daSriastradh 			/*
762e27b3435Sriastradh 			 * It is still on the queue.  Take it off the
763e27b3435Sriastradh 			 * queue and report successful cancellation.
764a1f195daSriastradh 			 */
7659ee469e1Sriastradh 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
766ccfee5f7Sriastradh 			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
767e27b3435Sriastradh 			release_work(work, wq);
768e27b3435Sriastradh 			/* Can't dereference work after this point.  */
76962976e36Sskrll 			cancelled_p = true;
770e27b3435Sriastradh 		} else {
771e27b3435Sriastradh 			/* Not on the queue.  Couldn't cancel it.  */
772e27b3435Sriastradh 			cancelled_p = false;
773e27b3435Sriastradh 		}
77462976e36Sskrll 	}
7759ee469e1Sriastradh 	mutex_exit(&wq->wq_lock);
77662976e36Sskrll 
77743c369bcSriastradh out:	return cancelled_p;
77862976e36Sskrll }
77962976e36Sskrll 
78031bca411Sriastradh /*
78131bca411Sriastradh  * cancel_work_sync(work)
78231bca411Sriastradh  *
78331bca411Sriastradh  *	If work was queued, remove it from the queue and return true.
784e27b3435Sriastradh  *	If work was not queued, return false.  Either way, if work is
78531bca411Sriastradh  *	currently running, wait for it to complete.
78631bca411Sriastradh  *
78731bca411Sriastradh  *	May sleep.
78831bca411Sriastradh  */
7899ee469e1Sriastradh bool
7909ee469e1Sriastradh cancel_work_sync(struct work_struct *work)
79162976e36Sskrll {
79262976e36Sskrll 	struct workqueue_struct *wq;
7939ee469e1Sriastradh 	bool cancelled_p = false;
79462976e36Sskrll 
79543c369bcSriastradh 	/* If there's no workqueue, nothing to cancel.   */
796e27b3435Sriastradh 	if ((wq = work_queue(work)) == NULL)
79743c369bcSriastradh 		goto out;
79843c369bcSriastradh 
7999ee469e1Sriastradh 	mutex_enter(&wq->wq_lock);
800e27b3435Sriastradh 	if (__predict_false(work_queue(work) != wq)) {
801a1f195daSriastradh 		/*
802a1f195daSriastradh 		 * It has finished execution or been cancelled by
803a1f195daSriastradh 		 * another thread, and has been moved off the
804e27b3435Sriastradh 		 * workqueue, so it's too late to cancel.
805a1f195daSriastradh 		 */
8069ee469e1Sriastradh 		cancelled_p = false;
8079ee469e1Sriastradh 	} else {
808e27b3435Sriastradh 		/* Check whether it's on the queue.  */
809e27b3435Sriastradh 		if (work_claimed(work, wq)) {
810a1f195daSriastradh 			/*
811e27b3435Sriastradh 			 * It is still on the queue.  Take it off the
812e27b3435Sriastradh 			 * queue and report successful cancellation.
813a1f195daSriastradh 			 */
8149ee469e1Sriastradh 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
815ccfee5f7Sriastradh 			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
816e27b3435Sriastradh 			release_work(work, wq);
817e27b3435Sriastradh 			/* Can't dereference work after this point.  */
8189ee469e1Sriastradh 			cancelled_p = true;
819e27b3435Sriastradh 		} else {
820e27b3435Sriastradh 			/* Not on the queue.  Couldn't cancel it.  */
821e27b3435Sriastradh 			cancelled_p = false;
822e27b3435Sriastradh 		}
823e27b3435Sriastradh 		/* If it's still running, wait for it to complete.  */
824e27b3435Sriastradh 		if (wq->wq_current_work == work)
825e27b3435Sriastradh 			wait_for_current_work(work, wq);
82662976e36Sskrll 	}
82762976e36Sskrll 	mutex_exit(&wq->wq_lock);
82862976e36Sskrll 
82943c369bcSriastradh out:	return cancelled_p;
83062976e36Sskrll }
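
/*
 * Example (illustrative sketch): detach-time teardown.  Cancel or
 * drain the work before freeing the memory that embeds it; sc is
 * hypothetical.
 *
 *	(void)cancel_work_sync(&sc->sc_work);
 *	-- sc_work is now neither queued nor running on its
 *	-- workqueue, so the softc may be freed
 *	kmem_free(sc, sizeof(*sc));
 */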
83150219d6aSriastradh 
83250219d6aSriastradh /*
83350219d6aSriastradh  * wait_for_current_work(work, wq)
83450219d6aSriastradh  *
83550219d6aSriastradh  *	wq must be currently executing work.  Wait for it to finish.
836e27b3435Sriastradh  *
837e27b3435Sriastradh  *	Does not dereference work.
83850219d6aSriastradh  */
83950219d6aSriastradh static void
84050219d6aSriastradh wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
84150219d6aSriastradh {
84250219d6aSriastradh 	uint64_t gen;
84350219d6aSriastradh 
84450219d6aSriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
84550219d6aSriastradh 	KASSERT(wq->wq_current_work == work);
84650219d6aSriastradh 
84750219d6aSriastradh 	/* Wait only one generation in case it gets requeued quickly.  */
848ccfee5f7Sriastradh 	SDT_PROBE2(sdt, linux, work, wait__start,  work, wq);
84950219d6aSriastradh 	gen = wq->wq_gen;
85050219d6aSriastradh 	do {
85150219d6aSriastradh 		cv_wait(&wq->wq_cv, &wq->wq_lock);
85250219d6aSriastradh 	} while (wq->wq_current_work == work && wq->wq_gen == gen);
853ccfee5f7Sriastradh 	SDT_PROBE2(sdt, linux, work, wait__done,  work, wq);
85450219d6aSriastradh }
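
/*
 * The generation number bounds the wait above: if the handler
 * requeues itself, wq_current_work may equal work again in the next
 * batch, but wq_gen will have advanced past the recorded value, so a
 * cancelling thread waits for at most the invocation in progress.
 */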
85562976e36Sskrll 
85662976e36Sskrll /*
85762976e36Sskrll  * Delayed work
85862976e36Sskrll  */
85962976e36Sskrll 
86031bca411Sriastradh /*
86131bca411Sriastradh  * INIT_DELAYED_WORK(dw, fn)
86231bca411Sriastradh  *
86331bca411Sriastradh  *	Initialize dw for use with a workqueue to call fn in a worker
86431bca411Sriastradh  *	thread after a delay.  There is no corresponding destruction
86531bca411Sriastradh  *	operation.
86631bca411Sriastradh  */
86762976e36Sskrll void
86862976e36Sskrll INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
86962976e36Sskrll {
8709ee469e1Sriastradh 
87162976e36Sskrll 	INIT_WORK(&dw->work, fn);
8729ee469e1Sriastradh 	dw->dw_state = DELAYED_WORK_IDLE;
8731fca7189Sriastradh 	dw->dw_resched = -1;
8749ee469e1Sriastradh 
8759ee469e1Sriastradh 	/*
8769ee469e1Sriastradh 	 * Defer callout_init until we are going to schedule the
8779ee469e1Sriastradh 	 * callout, which can then callout_destroy it, because
8789ee469e1Sriastradh 	 * otherwise since there's no DESTROY_DELAYED_WORK or anything
8799ee469e1Sriastradh 	 * we have no opportunity to call callout_destroy.
8809ee469e1Sriastradh 	 */
88162976e36Sskrll }
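
/*
 * Example (illustrative sketch): delayed-work handlers take the
 * embedded work_struct, so a handler that needs its delayed_work
 * recovers it with container_of, exactly as the worker thread does
 * above; mydev_tick and sc_tick are hypothetical.
 *
 *	static void
 *	mydev_tick(struct work_struct *work)
 *	{
 *		struct delayed_work *dw =
 *		    container_of(work, struct delayed_work, work);
 *		...
 *	}
 *
 *	INIT_DELAYED_WORK(&sc->sc_tick, mydev_tick);
 */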
88262976e36Sskrll 
88331bca411Sriastradh /*
88431bca411Sriastradh  * schedule_delayed_work(dw, ticks)
88531bca411Sriastradh  *
88631bca411Sriastradh  *	If it is not currently scheduled, schedule dw to run after
88731bca411Sriastradh  *	ticks on system_wq.  If currently executing and not already
88831bca411Sriastradh  *	rescheduled, reschedule it.  True if it was newly scheduled,
88931bca411Sriastradh  *	false if it was already scheduled.
89031bca411Sriastradh  *
89131bca411Sriastradh  *	If ticks == 0, queue it to run as soon as the worker can,
89231bca411Sriastradh  *	without waiting for the next callout tick to run.
89331bca411Sriastradh  */
89462976e36Sskrll bool
89562976e36Sskrll schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
89662976e36Sskrll {
8979ee469e1Sriastradh 
89862976e36Sskrll 	return queue_delayed_work(system_wq, dw, ticks);
89962976e36Sskrll }
90062976e36Sskrll 
901a1f195daSriastradh /*
902450ff8bbSriastradh  * dw_callout_init(wq, dw)
903450ff8bbSriastradh  *
904450ff8bbSriastradh  *	Initialize the callout of dw and transition to
905450ff8bbSriastradh  *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
906450ff8bbSriastradh  */
907450ff8bbSriastradh static void
908450ff8bbSriastradh dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
909450ff8bbSriastradh {
910450ff8bbSriastradh 
911450ff8bbSriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
912e27b3435Sriastradh 	KASSERT(work_queue(&dw->work) == wq);
913450ff8bbSriastradh 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
914450ff8bbSriastradh 
915450ff8bbSriastradh 	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
916450ff8bbSriastradh 	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
917450ff8bbSriastradh 	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
918450ff8bbSriastradh 	dw->dw_state = DELAYED_WORK_SCHEDULED;
919450ff8bbSriastradh }
920450ff8bbSriastradh 
921450ff8bbSriastradh /*
922d2984e1fSriastradh  * dw_callout_destroy(wq, dw)
923d2984e1fSriastradh  *
924d2984e1fSriastradh  *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
925d2984e1fSriastradh  */
926d2984e1fSriastradh static void
927d2984e1fSriastradh dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
928d2984e1fSriastradh {
929d2984e1fSriastradh 
930d2984e1fSriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
931e27b3435Sriastradh 	KASSERT(work_queue(&dw->work) == wq);
932d2984e1fSriastradh 	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
933d2984e1fSriastradh 	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
934d2984e1fSriastradh 	    dw->dw_state == DELAYED_WORK_CANCELLED);
935d2984e1fSriastradh 
936d2984e1fSriastradh 	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
937d2984e1fSriastradh 	callout_destroy(&dw->dw_callout);
9381fca7189Sriastradh 	dw->dw_resched = -1;
939d2984e1fSriastradh 	dw->dw_state = DELAYED_WORK_IDLE;
940d2984e1fSriastradh }
941d2984e1fSriastradh 
942d2984e1fSriastradh /*
943a1f195daSriastradh  * cancel_delayed_work_done(wq, dw)
944a1f195daSriastradh  *
945a1f195daSriastradh  *	Complete cancellation of a delayed work: transition from
946a1f195daSriastradh  *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
947e27b3435Sriastradh  *	workqueue.  Caller must not dereference dw after this returns.
948a1f195daSriastradh  */
949535de6e4Sriastradh static void
950535de6e4Sriastradh cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
951535de6e4Sriastradh {
952535de6e4Sriastradh 
953535de6e4Sriastradh 	KASSERT(mutex_owned(&wq->wq_lock));
954e27b3435Sriastradh 	KASSERT(work_queue(&dw->work) == wq);
955535de6e4Sriastradh 	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
956d2984e1fSriastradh 
957d2984e1fSriastradh 	dw_callout_destroy(wq, dw);
958535de6e4Sriastradh 	release_work(&dw->work, wq);
959e27b3435Sriastradh 	/* Can't dereference dw after this point.  */
960535de6e4Sriastradh }
961535de6e4Sriastradh 
962a1f195daSriastradh /*
963a1f195daSriastradh  * queue_delayed_work(wq, dw, ticks)
964a1f195daSriastradh  *
965a1f195daSriastradh  *	If it is not currently scheduled, schedule dw to run after
966e27b3435Sriastradh  *	ticks on wq.  If it is already queued or scheduled, leave it
967e27b3435Sriastradh  *	be; if it is in the middle of being cancelled, re-arm it.
96831bca411Sriastradh  *
96931bca411Sriastradh  *	If ticks == 0, queue it to run as soon as the worker can,
97031bca411Sriastradh  *	without waiting for the next callout tick to run.
971a1f195daSriastradh  */
97262976e36Sskrll bool
97362976e36Sskrll queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
97462976e36Sskrll     unsigned long ticks)
97562976e36Sskrll {
97662976e36Sskrll 	bool newly_queued;
97762976e36Sskrll 
97862976e36Sskrll 	mutex_enter(&wq->wq_lock);
979e27b3435Sriastradh 	if (__predict_true(acquire_work(&dw->work, wq))) {
980a1f195daSriastradh 		/*
981a1f195daSriastradh 		 * It wasn't on any workqueue at all.  Schedule it to
982a1f195daSriastradh 		 * run on this one.
983a1f195daSriastradh 		 */
9849ee469e1Sriastradh 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
985a1f195daSriastradh 		if (ticks == 0) {
986e27b3435Sriastradh 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
987a1f195daSriastradh 			    work_entry);
988a1f195daSriastradh 			cv_broadcast(&wq->wq_cv);
989ccfee5f7Sriastradh 			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
990a1f195daSriastradh 		} else {
991a1f195daSriastradh 			/*
992a1f195daSriastradh 			 * Initialize a callout and schedule to run
993a1f195daSriastradh 			 * after a delay.
994a1f195daSriastradh 			 */
995450ff8bbSriastradh 			dw_callout_init(wq, dw);
996a1f195daSriastradh 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
997ccfee5f7Sriastradh 			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
998a1f195daSriastradh 		}
99962976e36Sskrll 		newly_queued = true;
10009ee469e1Sriastradh 	} else {
1001e27b3435Sriastradh 		/* It was already on this workqueue.  */
1002a1f195daSriastradh 		switch (dw->dw_state) {
1003a1f195daSriastradh 		case DELAYED_WORK_IDLE:
1004a1f195daSriastradh 		case DELAYED_WORK_SCHEDULED:
1005a1f195daSriastradh 		case DELAYED_WORK_RESCHEDULED:
1006e27b3435Sriastradh 			/* On the queue or already scheduled.  Leave it.  */
1007e27b3435Sriastradh 			newly_queued = false;
1008a1f195daSriastradh 			break;
1009a1f195daSriastradh 		case DELAYED_WORK_CANCELLED:
1010a1f195daSriastradh 			/*
1011e27b3435Sriastradh 			 * Scheduled and the callout began, but it was
1012a1f195daSriastradh 			 * cancelled.  Reschedule it.
1013a1f195daSriastradh 			 */
10140466378aSriastradh 			if (ticks == 0) {
10150466378aSriastradh 				dw->dw_state = DELAYED_WORK_SCHEDULED;
1016ccfee5f7Sriastradh 				SDT_PROBE2(sdt, linux, work, queue,
1017ccfee5f7Sriastradh 				    &dw->work, wq);
10180466378aSriastradh 			} else {
1019a1f195daSriastradh 				dw->dw_state = DELAYED_WORK_RESCHEDULED;
10201fca7189Sriastradh 				dw->dw_resched = MIN(INT_MAX, ticks);
1021ccfee5f7Sriastradh 				SDT_PROBE3(sdt, linux, work, schedule,
1022ccfee5f7Sriastradh 				    dw, wq, ticks);
10230466378aSriastradh 			}
1024e27b3435Sriastradh 			newly_queued = true;
1025a1f195daSriastradh 			break;
1026a1f195daSriastradh 		default:
1027a1f195daSriastradh 			panic("invalid delayed work state: %d",
1028a1f195daSriastradh 			    dw->dw_state);
1029a1f195daSriastradh 		}
103062976e36Sskrll 	}
10319ee469e1Sriastradh 	mutex_exit(&wq->wq_lock);
103262976e36Sskrll 
103362976e36Sskrll 	return newly_queued;
103462976e36Sskrll }
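
/*
 * Example (illustrative sketch): ticks here are callout ticks, so a
 * caller wanting a delay in milliseconds would convert first, e.g.
 * with NetBSD's mstohz() (a Linux-style msecs_to_jiffies wrapper, if
 * provided by the shim headers, plays the same role):
 *
 *	queue_delayed_work(wq, &sc->sc_tick, mstohz(100));
 */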
103562976e36Sskrll 
1036a1f195daSriastradh /*
1037a1f195daSriastradh  * mod_delayed_work(wq, dw, ticks)
1038a1f195daSriastradh  *
1039e27b3435Sriastradh  *	Schedule dw to run after ticks.  If scheduled or queued,
1040e27b3435Sriastradh  *	reschedule.  If ticks == 0, run without delay.
104131bca411Sriastradh  *
104231bca411Sriastradh  *	True if it modified the timer of an already scheduled work,
104331bca411Sriastradh  *	false if it newly scheduled the work.
1044a1f195daSriastradh  */
104562976e36Sskrll bool
104662976e36Sskrll mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
104762976e36Sskrll     unsigned long ticks)
104862976e36Sskrll {
104962976e36Sskrll 	bool timer_modified;
105062976e36Sskrll 
105162976e36Sskrll 	mutex_enter(&wq->wq_lock);
1052e27b3435Sriastradh 	if (acquire_work(&dw->work, wq)) {
1053a1f195daSriastradh 		/*
1054a1f195daSriastradh 		 * It wasn't on any workqueue at all.  Schedule it to
1055a1f195daSriastradh 		 * run on this one.
1056a1f195daSriastradh 		 */
10579ee469e1Sriastradh 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
1058a1f195daSriastradh 		if (ticks == 0) {
1059a1f195daSriastradh 			/*
1060a1f195daSriastradh 			 * Run immediately: put it on the queue and
1061a1f195daSriastradh 			 * signal the worker thread.
1062a1f195daSriastradh 			 */
1063e27b3435Sriastradh 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1064a1f195daSriastradh 			    work_entry);
1065a1f195daSriastradh 			cv_broadcast(&wq->wq_cv);
1066ccfee5f7Sriastradh 			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
1067a1f195daSriastradh 		} else {
1068a1f195daSriastradh 			/*
1069a1f195daSriastradh 			 * Initialize a callout and schedule to run
1070a1f195daSriastradh 			 * after a delay.
1071a1f195daSriastradh 			 */
1072450ff8bbSriastradh 			dw_callout_init(wq, dw);
1073450ff8bbSriastradh 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
1074ccfee5f7Sriastradh 			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
1075a1f195daSriastradh 		}
107662976e36Sskrll 		timer_modified = false;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/* On the queue.  */
			if (ticks == 0) {
				/* Leave it be.  */
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Remove from the queue and schedule.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				dw_callout_init(wq, dw);
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * It is scheduled to run after a delay.  Try
			 * to stop it and reschedule it; if we can't,
			 * either reschedule it or cancel it to put it
			 * on the queue, and inform the callout.
			 */
			if (callout_stop(&dw->dw_callout)) {
				/* Can't stop, callout has begun.  */
				if (ticks == 0) {
					/*
					 * We don't actually need to do
					 * anything.  The callout will
					 * queue it as soon as it gets
					 * the lock.
					 */
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/* Ask the callout to reschedule.  */
					dw->dw_state = DELAYED_WORK_RESCHEDULED;
					dw->dw_resched = MIN(INT_MAX, ticks);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			} else {
				/* We stopped the callout before it began.  */
				if (ticks == 0) {
					/*
					 * Run immediately: destroy the
					 * callout, put it on the
					 * queue, and signal the worker
					 * thread.
					 */
					dw_callout_destroy(wq, dw);
					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
					    &dw->work, work_entry);
					cv_broadcast(&wq->wq_cv);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/*
					 * Reschedule the callout.  No
					 * state change.
					 */
					callout_schedule(&dw->dw_callout,
					    MIN(INT_MAX, ticks));
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * Someone rescheduled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				dw->dw_resched = -1;
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Change the rescheduled time.  */
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone cancelled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Ask it to reschedule.  */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = false;
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

/*
 * cancel_delayed_work(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then cancel_delayed_work
 *	will return false, and either way, cancel_delayed_work will NOT
 *	wait for the work to complete.
 */
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_stop(&dw->dw_callout))
				cancel_delayed_work_done(wq, dw);
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  There is nothing more for us
			 * to do.  Someone else claims credit for
			 * cancelling it.
			 */
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}
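
/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver whose polling timer is a delayed work item can cancel a
 * pending poll without waiting for a callback that has already
 * started.  The names example_softc and sc_poll_dw are invented for
 * this sketch.
 *
 *	struct example_softc {
 *		struct delayed_work	sc_poll_dw;
 *	};
 *
 *	static void
 *	example_stop_poll(struct example_softc *sc)
 *	{
 *
 *		if (!cancel_delayed_work(&sc->sc_poll_dw)) {
 *			// nothing was pending; the callback may still
 *			// be running, since this variant does not wait
 *		}
 *	}
 */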

/*
 * cancel_delayed_work_sync(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then
 *	cancel_delayed_work_sync will return false; either way, wait
 *	for it to complete.
 */
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			/* If it's still running, wait for it to complete.  */
			if (wq->wq_current_work == &dw->work)
				wait_for_current_work(&dw->work, wq);
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  But we got in soon enough to ask
			 * the callout not to run, so we successfully
			 * cancelled it in that case.
			 *
			 * If we stopped the callout before it started,
			 * then we must destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			cancelled_p = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel and
			 * wait for it to complete.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  We need only wait for it to
			 * complete.  Someone else, however, claims
			 * credit for cancelling it.
			 */
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}
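
/*
 * Illustrative usage sketch (hypothetical names): at detach time a
 * driver must guarantee the callback can no longer run before it
 * frees the softc, which is what the _sync variant is for.
 *
 *	static void
 *	example_detach(struct example_softc *sc)
 *	{
 *
 *		(void)cancel_delayed_work_sync(&sc->sc_poll_dw);
 *		// now neither queued, scheduled, nor running
 *		kmem_free(sc, sizeof(*sc));
 *	}
 */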

/*
 * Flush
 */

/*
 * flush_scheduled_work()
 *
 *	Wait for all work queued on system_wq to complete.  This does
 *	not include delayed work.
 */
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}
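
/*
 * Illustrative usage sketch (hypothetical work item): work queued on
 * the shared system_wq can be waited for in bulk:
 *
 *	queue_work(system_wq, &sc->sc_reset_work);
 *	...
 *	flush_scheduled_work();
 *	// sc_reset_work's callback has returned, unless someone
 *	// re-queued it in the interim
 */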

struct flush_work {
	kmutex_t		fw_lock;
	kcondvar_t		fw_cv;
	struct work_struct	fw_work;
	bool			fw_done;
};

static void
flush_work_cb(struct work_struct *work)
{
	struct flush_work *fw = container_of(work, struct flush_work, fw_work);

	mutex_enter(&fw->fw_lock);
	fw->fw_done = true;
	cv_broadcast(&fw->fw_cv);
	mutex_exit(&fw->fw_lock);
}

/*
 * flush_workqueue(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 */
void
flush_workqueue(struct workqueue_struct *wq)
{
	struct flush_work fw;

	if (lwp_getspecific(workqueue_key) == wq) {
		SDT_PROBE1(sdt, linux, work, flush__self,  wq);
		return;
	}

	mutex_init(&fw.fw_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&fw.fw_cv, "lxwqflsh");
	INIT_WORK(&fw.fw_work, &flush_work_cb);
	fw.fw_done = false;

	SDT_PROBE1(sdt, linux, work, flush__start,  wq);
	queue_work(wq, &fw.fw_work);

	mutex_enter(&fw.fw_lock);
	while (!fw.fw_done)
		cv_wait(&fw.fw_cv, &fw.fw_lock);
	mutex_exit(&fw.fw_lock);
	SDT_PROBE1(sdt, linux, work, flush__done,  wq);

	KASSERT(fw.fw_done);
	/* no DESTROY_WORK */
	cv_destroy(&fw.fw_cv);
	mutex_destroy(&fw.fw_lock);
}
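
/*
 * The flush works by queueing a sentinel (struct flush_work above) at
 * the tail of wq and sleeping until the worker runs it: the worker
 * consumes wq_queue in FIFO order, so by the time flush_work_cb sets
 * fw_done, everything queued before the sentinel has completed.
 * Illustrative usage sketch (hypothetical names):
 *
 *	queue_work(sc->sc_wq, &sc->sc_task);
 *	...
 *	flush_workqueue(sc->sc_wq);	// sc_task's callback has returned
 */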

/*
 * drain_workqueue(wq)
 *
 *	Repeatedly flush wq until there is no more work.
 */
void
drain_workqueue(struct workqueue_struct *wq)
{
	unsigned ntries = 0;
	bool done;

	do {
		if (ntries++ == 10 || (ntries % 100) == 0)
			printf("linux workqueue %s"
			    ": still clogged after %u flushes\n",
			    wq->wq_name, ntries);
		flush_workqueue(wq);
		mutex_enter(&wq->wq_lock);
		done = wq->wq_current_work == NULL;
		done &= TAILQ_EMPTY(&wq->wq_queue);
		done &= TAILQ_EMPTY(&wq->wq_dqueue);
		mutex_exit(&wq->wq_lock);
	} while (!done);
}
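
/*
 * Illustrative usage sketch (hypothetical names): unlike a single
 * flush, draining also catches work that re-queues itself or queues
 * further work, so it suits final teardown:
 *
 *	drain_workqueue(sc->sc_wq);	// nothing left queued or running
 *	destroy_workqueue(sc->sc_wq);
 */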

/*
 * flush_work(work)
 *
 *	If work is queued or currently executing, wait for it to
 *	complete.
 *
 *	Return true if we waited to flush it, false if it was already
 *	idle.
 */
bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(work)) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}
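
/*
 * Note that this implementation is conservative: it flushes the whole
 * workqueue the item was last queued on, which is correct but may
 * wait longer than a per-item flush strictly requires.  Illustrative
 * usage sketch (hypothetical names):
 *
 *	if (flush_work(&sc->sc_task))
 *		aprint_debug("waited for sc_task to complete\n");
 */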

/*
 * flush_delayed_work(dw)
 *
 *	If dw is scheduled to run after a delay, queue it immediately
 *	instead.  Then, if dw is queued or currently executing, wait
 *	for it to complete.
 */
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool waited = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		/*
		 * Moved off the queue already (and possibly to another
		 * queue, though that would be ill-advised), so it must
		 * have completed, and we have nothing more to do.
		 */
		waited = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It has a workqueue assigned and the callout
			 * is idle, so it must be in progress or on the
			 * queue.  In that case, we'll wait for it to
			 * complete.
			 */
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is scheduled, and may have even
			 * started.  Mark it as scheduled so that if
			 * the callout has fired it will queue the work
			 * itself.  Try to stop the callout -- if we
			 * can, queue the work now; if we can't, wait
			 * for the callout to complete, which entails
			 * queueing it.
			 */
			dw->dw_state = DELAYED_WORK_SCHEDULED;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
				/*
				 * We stopped it before it ran.  No
				 * state change in the interim is
				 * possible.  Destroy the callout and
				 * queue it ourselves.
				 */
				KASSERT(dw->dw_state ==
				    DELAYED_WORK_SCHEDULED);
				dw_callout_destroy(wq, dw);
				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
				    work_entry);
				cv_broadcast(&wq->wq_cv);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			}
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
		/*
		 * Waiting for the whole queue to flush is overkill,
		 * but doesn't hurt.
		 */
		mutex_exit(&wq->wq_lock);
		flush_workqueue(wq);
		mutex_enter(&wq->wq_lock);
		waited = true;
	}
	mutex_exit(&wq->wq_lock);

	return waited;
}
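
/*
 * Illustrative usage sketch (hypothetical names): before suspending,
 * a driver can force a pending delayed write-back to run immediately
 * instead of waiting out the remainder of its delay:
 *
 *	if (flush_delayed_work(&sc->sc_writeback_dw))
 *		aprint_debug("write-back completed before suspend\n");
 */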

/*
 * delayed_work_pending(dw)
 *
 *	True if dw is currently scheduled to execute, false if not.
 */
bool
delayed_work_pending(const struct delayed_work *dw)
{

	return work_pending(&dw->work);
}
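
/*
 * Illustrative usage sketch (hypothetical names).  As in Linux, the
 * answer is advisory only -- it can be stale by the time the caller
 * acts on it -- so it is fit for heuristics, not synchronization:
 *
 *	if (!delayed_work_pending(&sc->sc_poll_dw))
 *		queue_delayed_work(system_wq, &sc->sc_poll_dw, hz);
 */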

/*
 * INIT_RCU_WORK(rw, fn)
 *
 *	Initialize rw for use with a workqueue to call fn in a worker
 *	thread after an RCU grace period.  There is no corresponding
 *	destruction operation.
 */
void
INIT_RCU_WORK(struct rcu_work *rw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&rw->work, fn);
}

static void
queue_rcu_work_cb(struct rcu_head *r)
{
	struct rcu_work *rw = container_of(r, struct rcu_work, rw_rcu);
	struct workqueue_struct *wq = work_queue(&rw->work);

	mutex_enter(&wq->wq_lock);
	KASSERT(work_pending(&rw->work));
	KASSERT(work_queue(&rw->work) == wq);
	destroy_rcu_head(&rw->rw_rcu);
	TAILQ_REMOVE(&wq->wq_rcu, &rw->work, work_entry);
	TAILQ_INSERT_TAIL(&wq->wq_queue, &rw->work, work_entry);
	cv_broadcast(&wq->wq_cv);
	SDT_PROBE2(sdt, linux, work, queue,  &rw->work, wq);
	mutex_exit(&wq->wq_lock);
}

/*
 * queue_rcu_work(wq, rw)
 *
 *	Schedule rw to run on wq after an RCU grace period.
 */
void
queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rw)
{

	mutex_enter(&wq->wq_lock);
	if (acquire_work(&rw->work, wq)) {
		init_rcu_head(&rw->rw_rcu);
		SDT_PROBE2(sdt, linux, work, rcu,  rw, wq);
		TAILQ_INSERT_TAIL(&wq->wq_rcu, &rw->work, work_entry);
		call_rcu(&rw->rw_rcu, &queue_rcu_work_cb);
	}
	mutex_exit(&wq->wq_lock);
}
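
/*
 * Illustrative usage sketch (hypothetical names): freeing an object
 * from a worker thread only after all current RCU readers are done,
 * e.g. when the free path needs to sleep and so cannot run directly
 * in the RCU callback:
 *
 *	static void
 *	example_free_cb(struct work_struct *work)
 *	{
 *		struct example_obj *obj =
 *		    container_of(work, struct example_obj, obj_rw.work);
 *
 *		kmem_free(obj, sizeof(*obj));
 *	}
 *
 *	// at unpublish time:
 *	INIT_RCU_WORK(&obj->obj_rw, &example_free_cb);
 *	queue_rcu_work(system_wq, &obj->obj_rw);
 */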