/*	$NetBSD: linux_work.c,v 1.44 2019/03/19 08:17:46 ryo Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.44 2019/03/19 08:17:46 ryo Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#ifndef _MODULE
#include <sys/once.h>
#endif
#include <sys/queue.h>
#include <sys/sdt.h>

#include <linux/workqueue.h>

TAILQ_HEAD(work_head, work_struct);
TAILQ_HEAD(dwork_head, delayed_work);

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	struct dwork_head	wq_delayed; /* delayed work scheduled */
	struct work_head	wq_queue;   /* work to run */
	struct work_head	wq_dqueue;  /* delayed work to run now */
	struct work_struct	*wq_current_work;
	int			wq_flags;
	bool			wq_dying;
	uint64_t		wq_gen;
	struct lwp		*wq_lwp;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static bool		work_claimed(struct work_struct *,
			    struct workqueue_struct *);
static struct workqueue_struct *
			work_queue(struct work_struct *);
static bool		acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		wait_for_current_work(struct work_struct *,
			    struct workqueue_struct *);
static void		dw_callout_init(struct workqueue_struct *,
			    struct delayed_work *);
static void		dw_callout_destroy(struct workqueue_struct *,
			    struct delayed_work *);
static void		cancel_delayed_work_done(struct workqueue_struct *,
			    struct delayed_work *);

SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, release,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, queue,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
    "unsigned long"/*ticks*/);
SDT_PROBE_DEFINE2(sdt, linux, work, timer,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__start,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__done,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, run,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, done,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__done,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__done,
    "struct workqueue_struct *"/*wq*/);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;

static inline uintptr_t
atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
{

	return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
}

/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue subsystem.  Return 0 on success,
 *	NetBSD error on failure.
 */
static int
linux_workqueue_init0(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_workqueue_fini()
 *
 *	Destroy the Linux workqueue subsystem.  Never fails.
 */
static void
linux_workqueue_fini0(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

#ifndef _MODULE
static ONCE_DECL(linux_workqueue_init_once);
#endif

int
linux_workqueue_init(void)
{
#ifdef _MODULE
	return linux_workqueue_init0();
#else
	return INIT_ONCE(&linux_workqueue_init_once, &linux_workqueue_init0);
#endif
}

void
linux_workqueue_fini(void)
{
#ifdef _MODULE
	return linux_workqueue_fini0();
#else
	return FINI_ONCE(&linux_workqueue_init_once, &linux_workqueue_fini0);
#endif
}

/*
 * Workqueues
 */

/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue of the given name.  No flags are currently
 *	defined.  Return NULL on failure, pointer to struct
 *	workqueue_struct object on success.
 */
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	TAILQ_INIT(&wq->wq_dqueue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_dying = false;
	wq->wq_gen = 0;
	wq->wq_lwp = NULL;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

/*
 * destroy_workqueue(wq)
 *
 *	Destroy a workqueue created with alloc_ordered_workqueue().
 *	Cancel any pending delayed work.  Wait for all queued work to
 *	complete.
 *
 *	May sleep.
 */
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	mutex_enter(&wq->wq_lock);
	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);

		KASSERT(work_queue(&dw->work) == wq);
		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
			dw->dw_state == DELAYED_WORK_CANCELLED),
		    "delayed work %p in bad state: %d",
		    dw, dw->dw_state);

		/*
		 * Mark it cancelled and try to stop the callout before
		 * it starts.
		 *
		 * If it's too late and the callout has already begun
		 * to execute, then it will notice that we asked to
		 * cancel it and remove itself from the queue before
		 * returning.
		 *
		 * If we stopped the callout before it started,
		 * however, then we can safely destroy the callout and
		 * dissociate it from the workqueue ourselves.
		 */
		SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
		dw->dw_state = DELAYED_WORK_CANCELLED;
		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
			cancel_delayed_work_done(wq, dw);
	}
	mutex_exit(&wq->wq_lock);

	/*
	 * At this point, no new work can be put on the queue.
	 */

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_dying);
	KASSERT(wq->wq_flags == 0);
	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}
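
/*
 * Example usage (a sketch, not part of this file; the driver names
 * are hypothetical): a driver that wants a private ordered queue.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("mydrvwq", 0);
 *	if (wq == NULL)
 *		return ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 *
 * destroy_workqueue cancels any still-pending delayed work and waits
 * for queued work to finish before freeing, so the driver need not
 * flush the queue first.
 */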

/*
 * Work thread and callout
 */

/*
 * linux_workqueue_thread(cookie)
 *
 *	Main function for a workqueue's worker thread.  Waits until
 *	there is work queued, grabs a batch of work off the queue,
 *	executes it all, bumps the generation number, and repeats,
 *	until dying.
 */
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	struct work_head queue, dqueue;
	struct work_head *const q[2] = { &queue, &dqueue };
	unsigned i;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/*
		 * Wait until there's activity.  If there's no work and
		 * we're dying, stop here.
		 */
		if (TAILQ_EMPTY(&wq->wq_queue) &&
		    TAILQ_EMPTY(&wq->wq_dqueue)) {
			if (wq->wq_dying)
				break;
			cv_wait(&wq->wq_cv, &wq->wq_lock);
			continue;
		}

		/* Grab a batch of work off the queue.  */
		SDT_PROBE1(sdt, linux, work, batch__start,  wq);
		TAILQ_INIT(&queue);
		TAILQ_INIT(&dqueue);
		TAILQ_CONCAT(&queue, &wq->wq_queue, work_entry);
		TAILQ_CONCAT(&dqueue, &wq->wq_dqueue, work_entry);

		/* Process each work item in the batch.  */
		for (i = 0; i < 2; i++) {
			while (!TAILQ_EMPTY(q[i])) {
				struct work_struct *work = TAILQ_FIRST(q[i]);
				void (*func)(struct work_struct *);

				KASSERT(work_queue(work) == wq);
				KASSERT(work_claimed(work, wq));
				KASSERTMSG((q[i] != &dqueue ||
					container_of(work, struct delayed_work,
					    work)->dw_state ==
					DELAYED_WORK_IDLE),
				    "delayed work %p queued and scheduled",
				    work);

				TAILQ_REMOVE(q[i], work, work_entry);
				KASSERT(wq->wq_current_work == NULL);
				wq->wq_current_work = work;
				func = work->func;
				release_work(work, wq);
				/* Can't dereference work after this point.  */

				mutex_exit(&wq->wq_lock);
				SDT_PROBE2(sdt, linux, work, run,  work, wq);
				(*func)(work);
				SDT_PROBE2(sdt, linux, work, done,  work, wq);
				mutex_enter(&wq->wq_lock);

				KASSERT(wq->wq_current_work == work);
				wq->wq_current_work = NULL;
				cv_broadcast(&wq->wq_cv);
			}
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE1(sdt, linux, work, batch__done,  wq);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

/*
 * linux_workqueue_timeout(cookie)
 *
 *	Delayed work timeout callback.
 *
 *	- If scheduled, queue it.
 *	- If rescheduled, callout_schedule ourselves again.
 *	- If cancelled, destroy the callout and release the work from
 *        the workqueue.
 */
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = work_queue(&dw->work);

	KASSERTMSG(wq != NULL,
	    "delayed work %p state %d resched %d",
	    dw, dw->dw_state, dw->dw_resched);

	SDT_PROBE2(sdt, linux, work, timer,  dw, wq);

	mutex_enter(&wq->wq_lock);
	KASSERT(work_queue(&dw->work) == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw_callout_destroy(wq, dw);
		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		break;
	case DELAYED_WORK_RESCHEDULED:
		KASSERT(dw->dw_resched >= 0);
		callout_schedule(&dw->dw_callout, dw->dw_resched);
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		dw->dw_resched = -1;
		break;
	case DELAYED_WORK_CANCELLED:
		cancel_delayed_work_done(wq, dw);
		/* Can't dereference dw after this point.  */
		goto out;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
out:	mutex_exit(&wq->wq_lock);
}

/*
 * current_work()
 *
 *	If in a workqueue worker thread, return the work it is
 *	currently executing.  Otherwise return NULL.
 */
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}
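
/*
 * Example (a hypothetical sketch): a handler can use current_work()
 * to assert that it is running in its workqueue's worker thread.
 *
 *	static void
 *	mydrv_task(struct work_struct *work)
 *	{
 *		KASSERT(current_work() == work);
 *		...
 *	}
 */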

/*
 * Work
 */

/*
 * INIT_WORK(work, fn)
 *
 *	Initialize work for use with a workqueue to call fn in a worker
 *	thread.  There is no corresponding destruction operation.
 */
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_owner = 0;
	work->func = fn;
}
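
/*
 * Example (a sketch; the mydrv_* names are hypothetical): the usual
 * pattern is to embed the work_struct in a larger structure and
 * recover it in the handler with container_of.
 *
 *	struct mydrv_softc {
 *		struct work_struct	sc_work;
 *		...
 *	};
 *
 *	static void
 *	mydrv_task(struct work_struct *work)
 *	{
 *		struct mydrv_softc *sc = container_of(work,
 *		    struct mydrv_softc, sc_work);
 *		...
 *	}
 *
 *	INIT_WORK(&sc->sc_work, mydrv_task);
 *	schedule_work(&sc->sc_work);
 */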

/*
 * work_claimed(work, wq)
 *
 *	True if work is currently claimed by a workqueue, meaning it is
 *	either on the queue or scheduled in a callout.  The workqueue
 *	must be wq, and caller must hold wq's lock.
 */
static bool
work_claimed(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	return work->work_owner & 1;
}

/*
 * work_queue(work)
 *
 *	Return the last queue that work was queued on, or NULL if it
 *	was never queued.
 */
static struct workqueue_struct *
work_queue(struct work_struct *work)
{

	return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
}
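
/*
 * In other words, work_owner packs two fields into one word: the
 * workqueue pointer, whose low bit is always zero because of
 * alignment, and a `claimed' bit in bit 0.  For example, after
 * queue_work(wq, work) the owner word is (uintptr_t)wq | 1; after the
 * worker releases the work it is (uintptr_t)wq.
 */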

/*
 * acquire_work(work, wq)
 *
 *	Try to claim work for wq.  If work is already claimed, it must
 *	be claimed by wq; return false.  If work is not already
 *	claimed, claim it, issue a memory barrier to match any prior
 *	release_work, and return true.
 *
 *	Caller must hold wq's lock.
 */
static bool
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uintptr_t owner0, owner;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(((uintptr_t)wq & 1) == 0);

	owner = (uintptr_t)wq | 1;
	do {
		owner0 = work->work_owner;
		if (owner0 & 1) {
			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
			return false;
		}
		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
	    owner0);

	KASSERT(work_queue(work) == wq);
	membar_enter();
	SDT_PROBE2(sdt, linux, work, acquire,  work, wq);
	return true;
}

/*
 * release_work(work, wq)
 *
 *	Issue a memory barrier to match any subsequent acquire_work and
 *	dissociate work from wq.
 *
 *	Caller must hold wq's lock and work must be associated with wq.
 */
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	SDT_PROBE2(sdt, linux, work, release,  work, wq);
	membar_exit();

	/*
	 * Non-interlocked r/m/w is safe here because nobody else can
	 * write to this while the claimed bit is set and the workqueue
	 * lock is held.
	 */
	work->work_owner &= ~(uintptr_t)1;
}

/*
 * schedule_work(work)
 *
 *	If work is not already queued on system_wq, queue it to be run
 *	by system_wq's worker thread when it next can.  True if it was
 *	newly queued, false if it was already queued.  If the work was
 *	already running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

/*
 * queue_work(wq, work)
 *
 *	If work is not already queued on wq, queue it to be run by wq's
 *	worker thread when it next can.  True if it was newly queued,
 *	false if it was already queued.  If the work was already
 *	running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Put it on this
		 * one, and signal the worker thread that there is work
		 * to do.
		 */
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue,  work, wq);
		newly_queued = true;
	} else {
		/*
		 * It was already on this workqueue.  Nothing to do
		 * since it is already queued.
		 */
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}
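
/*
 * Example (hypothetical): queueing is idempotent while the work is
 * pending, so callers can use the return value to coalesce requests.
 *
 *	newly_queued = queue_work(sc->sc_wq, &sc->sc_work);
 *
 * If newly_queued is false, the work was already pending and the
 * upcoming handler run will cover this request as well.
 */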

/*
 * cancel_work(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Work may still be
 *	running when this returns.
 */
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * cancel_work_sync(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Either way, if work is
 *	currently running, wait for it to complete.
 *
 *	May sleep.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
		/* If it's still running, wait for it to complete.  */
		if (wq->wq_current_work == work)
			wait_for_current_work(work, wq);
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}
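
/*
 * Example (a sketch, hypothetical names): a typical detach path.
 * Plain cancel_work is not enough if the handler may still be
 * running, so teardown uses the _sync variant before freeing the
 * containing structure.  This assumes nothing else can requeue
 * sc_work concurrently.
 *
 *	(void)cancel_work_sync(&sc->sc_work);
 *	kmem_free(sc, sizeof(*sc));
 */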

/*
 * wait_for_current_work(work, wq)
 *
 *	wq must be currently executing work.  Wait for it to finish.
 *
 *	Does not dereference work.
 */
static void
wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(wq->wq_current_work == work);

	/* Wait only one generation in case it gets requeued quickly.  */
	SDT_PROBE2(sdt, linux, work, wait__start,  work, wq);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (wq->wq_current_work == work && wq->wq_gen == gen);
	SDT_PROBE2(sdt, linux, work, wait__done,  work, wq);
}

/*
 * Delayed work
 */

/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *	Initialize dw for use with a workqueue to call fn in a worker
 *	thread after a delay.  There is no corresponding destruction
 *	operation.
 */
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;
	dw->dw_resched = -1;

	/*
	 * Defer callout_init until we are going to schedule the
	 * callout, which can then callout_destroy it: there is no
	 * DESTROY_DELAYED_WORK or the like, so this is the only way
	 * to guarantee a matching callout_destroy.
	 */
}

/*
 * schedule_delayed_work(dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on system_wq.  If currently executing and not already
 *	rescheduled, reschedule it.  True if it was newly scheduled,
 *	false if it was already scheduled.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}
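
/*
 * Example (a sketch; the names and the one-second period are
 * hypothetical): a self-rearming tick built from delayed work, using
 * mstohz to convert milliseconds to callout ticks.  Rearming from the
 * handler works because the work is released before the handler runs,
 * so the new schedule_delayed_work claims it afresh.
 *
 *	static void
 *	mydrv_tick(struct work_struct *work)
 *	{
 *		struct mydrv_softc *sc = container_of(work,
 *		    struct mydrv_softc, sc_tick.work);
 *
 *		mydrv_poll_hardware(sc);
 *		schedule_delayed_work(&sc->sc_tick, mstohz(1000));
 *	}
 *
 *	INIT_DELAYED_WORK(&sc->sc_tick, mydrv_tick);
 *	schedule_delayed_work(&sc->sc_tick, mstohz(1000));
 */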

/*
 * dw_callout_init(wq, dw)
 *
 *	Initialize the callout of dw and transition to
 *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
 */
static void
dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);

	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
	dw->dw_state = DELAYED_WORK_SCHEDULED;
}

/*
 * dw_callout_destroy(wq, dw)
 *
 *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
 */
static void
dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
	    dw->dw_state == DELAYED_WORK_CANCELLED);

	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);
	dw->dw_resched = -1;
	dw->dw_state = DELAYED_WORK_IDLE;
}

/*
 * cancel_delayed_work_done(wq, dw)
 *
 *	Complete cancellation of a delayed work: transition from
 *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
 *	workqueue.  Caller must not dereference dw after this returns.
 */
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);

	dw_callout_destroy(wq, dw);
	release_work(&dw->work, wq);
	/* Can't dereference dw after this point.  */
}

/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on wq.  If currently queued, remove it from the queue
 *	first.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(&dw->work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
		}
		newly_queued = true;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
			/* On the queue or already scheduled.  Leave it.  */
			newly_queued = false;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Scheduled and the callout began, but it was
			 * cancelled.  Reschedule it.
			 */
			if (ticks == 0) {
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			newly_queued = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run after ticks.  If scheduled or queued,
 *	reschedule.  If ticks == 0, run without delay.
 *
 *	True if it modified the timer of an already scheduled work,
 *	false if it newly scheduled the work.
 */
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if (acquire_work(&dw->work, wq)) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			/*
			 * Run immediately: put it on the queue and
			 * signal the worker thread.
			 */
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
		}
		timer_modified = false;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/* On the queue.  */
			if (ticks == 0) {
				/* Leave it be.  */
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Remove from the queue and schedule.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				dw_callout_init(wq, dw);
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * It is scheduled to run after a delay.  Try
			 * to stop it and reschedule it; if we can't,
			 * either reschedule it or cancel it to put it
			 * on the queue, and inform the callout.
			 */
			if (callout_stop(&dw->dw_callout)) {
				/* Can't stop, callout has begun.  */
				if (ticks == 0) {
					/*
					 * We don't actually need to do
					 * anything.  The callout will
					 * queue it as soon as it gets
					 * the lock.
					 */
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/* Ask the callout to reschedule.  */
					dw->dw_state = DELAYED_WORK_RESCHEDULED;
					dw->dw_resched = MIN(INT_MAX, ticks);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			} else {
				/* We stopped the callout before it began.  */
				if (ticks == 0) {
					/*
					 * Run immediately: destroy the
					 * callout, put it on the
					 * queue, and signal the worker
					 * thread.
					 */
					dw_callout_destroy(wq, dw);
					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
					    &dw->work, work_entry);
					cv_broadcast(&wq->wq_cv);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/*
					 * Reschedule the callout.  No
					 * state change.
					 */
					callout_schedule(&dw->dw_callout,
					    MIN(INT_MAX, ticks));
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * Someone rescheduled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				dw->dw_resched = -1;
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Change the rescheduled time.  */
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone cancelled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Ask it to reschedule.  */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = false;
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}
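
/*
 * Example (hypothetical): debouncing.  Each call pushes the deadline
 * back, so the handler runs only after events have been quiet for
 * roughly 100 ms.
 *
 *	mod_delayed_work(system_wq, &sc->sc_debounce, mstohz(100));
 */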

/*
 * cancel_delayed_work(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then cancel_delayed_work
 *	will return false, and either way, cancel_delayed_work will NOT
 *	wait for the work to complete.
 */
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_stop(&dw->dw_callout))
				cancel_delayed_work_done(wq, dw);
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  There is nothing more for us
			 * to do.  Someone else claims credit for
			 * cancelling it.
			 */
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * cancel_delayed_work_sync(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then
 *	cancel_delayed_work_sync will return false; either way, wait
 *	for it to complete.
 */
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel. */
				cancelled_p = false;
			}
			/* If it's still running, wait for it to complete.  */
			if (wq->wq_current_work == &dw->work)
				wait_for_current_work(&dw->work, wq);
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  But we got in soon enough to ask
			 * the callout not to run, so we successfully
			 * cancelled it in that case.
			 *
			 * If we stopped the callout before it started,
			 * then we must destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			cancelled_p = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel and
			 * wait for it to complete.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  We need only wait for it to
			 * complete.  Someone else, however, claims
			 * credit for cancelling it.
			 */
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}
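
/*
 * Example (a sketch, hypothetical names): tearing down the
 * self-rearming tick sketched earlier.  The driver sets a stop flag
 * first, under whatever lock it already uses, and mydrv_tick checks
 * that flag before rescheduling, so a handler run racing with the
 * cancel does not rearm the work.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	mutex_exit(&sc->sc_lock);
 *	(void)cancel_delayed_work_sync(&sc->sc_tick);
 */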

/*
 * Flush
 */

/*
 * flush_scheduled_work()
 *
 *	Wait for all work queued on system_wq to complete.  This does
 *	not include delayed work.
 */
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

/*
 * flush_workqueue_locked(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 *
 *	Caller must hold wq's lock.
 */
static void
flush_workqueue_locked(struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));

	/* Get the current generation number.  */
	gen = wq->wq_gen;

	/*
	 * If there's a batch of work in progress, we must wait for the
	 * worker thread to finish that batch.
	 */
	if (wq->wq_current_work != NULL)
		gen++;

	/*
	 * If there's any work yet to be claimed from the queue by the
	 * worker thread, we must wait for it to finish one more batch
	 * too.
	 */
	if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue))
		gen++;

	/* Wait until the generation number has caught up.  */
	SDT_PROBE1(sdt, linux, work, flush__start,  wq);
	while (wq->wq_gen < gen)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	SDT_PROBE1(sdt, linux, work, flush__done,  wq);
}

/*
 * flush_workqueue(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 */
void
flush_workqueue(struct workqueue_struct *wq)
{

	mutex_enter(&wq->wq_lock);
	flush_workqueue_locked(wq);
	mutex_exit(&wq->wq_lock);
}
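
/*
 * Example (hypothetical): flush as a completion barrier.  After the
 * flush returns, every work item queued on sc->sc_wq before the call
 * has finished executing.
 *
 *	queue_work(sc->sc_wq, &sc->sc_work);
 *	flush_workqueue(sc->sc_wq);
 */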

/*
 * flush_work(work)
 *
 *	If work is queued or currently executing, wait for it to
 *	complete.
 */
void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(work)) == NULL)
		return;

	flush_workqueue(wq);
}

/*
 * flush_delayed_work(dw)
 *
 *	If dw is scheduled to run after a delay, queue it immediately
 *	instead.  Then, if dw is queued or currently executing, wait
 *	for it to complete.
 */
void
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		/*
		 * Moved off the queue already (and possibly to another
		 * queue, though that would be ill-advised), so it must
		 * have completed, and we have nothing more to do.
		 */
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It has a workqueue assigned and the callout
			 * is idle, so it must be in progress or on the
			 * queue.  In that case, we'll wait for it to
			 * complete.
			 */
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is scheduled, and may have even
			 * started.  Mark it as scheduled so that if
			 * the callout has fired it will queue the work
			 * itself.  Try to stop the callout -- if we
			 * can, queue the work now; if we can't, wait
			 * for the callout to complete, which entails
			 * queueing it.
			 */
			dw->dw_state = DELAYED_WORK_SCHEDULED;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
				/*
				 * We stopped it before it ran.  No
				 * state change in the interim is
				 * possible.  Destroy the callout and
				 * queue it ourselves.
				 */
				KASSERT(dw->dw_state ==
				    DELAYED_WORK_SCHEDULED);
				dw_callout_destroy(wq, dw);
				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
				    work_entry);
				cv_broadcast(&wq->wq_cv);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			}
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
		/*
		 * Waiting for the whole queue to flush is overkill,
		 * but doesn't hurt.
		 */
		flush_workqueue_locked(wq);
	}
	mutex_exit(&wq->wq_lock);
}
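
/*
 * Example (hypothetical): expediting a pending timer.  If sc_tick is
 * still waiting on its callout, this queues it immediately and then
 * waits for the handler to finish.
 *
 *	flush_delayed_work(&sc->sc_tick);
 */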