1 /*	$NetBSD: linux_work.c,v 1.39 2018/08/27 15:06:02 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.39 2018/08/27 15:06:02 riastradh Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/atomic.h>
37 #include <sys/callout.h>
38 #include <sys/condvar.h>
39 #include <sys/errno.h>
40 #include <sys/kmem.h>
41 #include <sys/kthread.h>
42 #include <sys/lwp.h>
43 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 
46 #include <linux/workqueue.h>
47 
48 TAILQ_HEAD(work_head, work_struct);
49 TAILQ_HEAD(dwork_head, delayed_work);
50 
51 struct workqueue_struct {
52 	kmutex_t		wq_lock;
53 	kcondvar_t		wq_cv;
54 	struct dwork_head	wq_delayed; /* delayed work scheduled */
55 	struct work_head	wq_queue;   /* work to run */
56 	struct work_head	wq_dqueue;  /* delayed work to run now */
57 	struct work_struct	*wq_current_work;
58 	int			wq_flags;
59 	bool			wq_dying;
60 	uint64_t		wq_gen;
61 	struct lwp		*wq_lwp;
62 };
63 
64 static void __dead	linux_workqueue_thread(void *);
65 static void		linux_workqueue_timeout(void *);
66 static bool		work_claimed(struct work_struct *,
67 			    struct workqueue_struct *);
68 static struct workqueue_struct *
69 			work_queue(struct work_struct *);
70 static bool		acquire_work(struct work_struct *,
71 			    struct workqueue_struct *);
72 static void		release_work(struct work_struct *,
73 			    struct workqueue_struct *);
74 static void		wait_for_current_work(struct work_struct *,
75 			    struct workqueue_struct *);
76 static void		dw_callout_init(struct workqueue_struct *,
77 			    struct delayed_work *);
78 static void		dw_callout_destroy(struct workqueue_struct *,
79 			    struct delayed_work *);
80 static void		cancel_delayed_work_done(struct workqueue_struct *,
81 			    struct delayed_work *);
82 
83 static specificdata_key_t workqueue_key __read_mostly;
84 
85 struct workqueue_struct	*system_wq __read_mostly;
86 struct workqueue_struct	*system_long_wq __read_mostly;
87 struct workqueue_struct	*system_power_efficient_wq __read_mostly;
88 
89 static inline uintptr_t
90 atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
91 {
92 
93 	return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
94 }
95 
96 /*
97  * linux_workqueue_init()
98  *
99  *	Initialize the Linux workqueue subsystem.  Return 0 on success,
100  *	NetBSD error on failure.
101  */
102 int
103 linux_workqueue_init(void)
104 {
105 	int error;
106 
107 	error = lwp_specific_key_create(&workqueue_key, NULL);
108 	if (error)
109 		goto fail0;
110 
111 	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
112 	if (system_wq == NULL) {
113 		error = ENOMEM;
114 		goto fail1;
115 	}
116 
117 	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
118 	if (system_long_wq == NULL) {
119 		error = ENOMEM;
120 		goto fail2;
121 	}
122 
123 	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
124 	if (system_power_efficient_wq == NULL) {
125 		error = ENOMEM;
126 		goto fail3;
127 	}
128 
129 	return 0;
130 
131 fail4: __unused
132 	destroy_workqueue(system_power_efficient_wq);
133 fail3:	destroy_workqueue(system_long_wq);
134 fail2:	destroy_workqueue(system_wq);
135 fail1:	lwp_specific_key_delete(workqueue_key);
136 fail0:	KASSERT(error);
137 	return error;
138 }
139 
140 /*
141  * linux_workqueue_fini()
142  *
143  *	Destroy the Linux workqueue subsystem.  Never fails.
144  */
145 void
146 linux_workqueue_fini(void)
147 {
148 
149 	destroy_workqueue(system_power_efficient_wq);
150 	destroy_workqueue(system_long_wq);
151 	destroy_workqueue(system_wq);
152 	lwp_specific_key_delete(workqueue_key);
153 }
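
/*
 * Usage sketch (illustrative only; the example_* names are
 * hypothetical and not part of this file): callers are expected to
 * bracket all workqueue use between linux_workqueue_init and
 * linux_workqueue_fini, typically from module init/fini glue, and to
 * destroy any private workqueues before calling linux_workqueue_fini.
 *
 *	static int
 *	example_modinit(void)
 *	{
 *		int error;
 *
 *		error = linux_workqueue_init();
 *		if (error)
 *			return error;
 *		return 0;
 *	}
 *
 *	static void
 *	example_modfini(void)
 *	{
 *		linux_workqueue_fini();
 *	}
 */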
154 
155 /*
156  * Workqueues
157  */
158 
159 /*
160  * alloc_ordered_workqueue(name, flags)
161  *
162  *	Create a workqueue of the given name.  No flags are currently
163  *	defined.  Return NULL on failure, pointer to struct
164  *	workqueue_struct object on success.
165  */
166 struct workqueue_struct *
167 alloc_ordered_workqueue(const char *name, int flags)
168 {
169 	struct workqueue_struct *wq;
170 	int error;
171 
172 	KASSERT(flags == 0);
173 
174 	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);
175 
176 	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
177 	cv_init(&wq->wq_cv, name);
178 	TAILQ_INIT(&wq->wq_delayed);
179 	TAILQ_INIT(&wq->wq_queue);
180 	TAILQ_INIT(&wq->wq_dqueue);
181 	wq->wq_current_work = NULL;
182 	wq->wq_flags = 0;
183 	wq->wq_dying = false;
184 	wq->wq_gen = 0;
185 	wq->wq_lwp = NULL;
186 
187 	error = kthread_create(PRI_NONE,
188 	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
189 	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
190 	if (error)
191 		goto fail0;
192 
193 	return wq;
194 
195 fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
196 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
197 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
198 	cv_destroy(&wq->wq_cv);
199 	mutex_destroy(&wq->wq_lock);
200 	kmem_free(wq, sizeof(*wq));
201 	return NULL;
202 }
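
/*
 * Example (an illustrative sketch; the exdrv_* names are hypothetical
 * and not part of this file): a driver that wants its own serialized
 * worker creates a private ordered workqueue at attach time and
 * destroys it at detach time, after cancelling its work.
 *
 *	struct exdrv_softc {
 *		struct workqueue_struct	*sc_wq;
 *		struct work_struct	sc_reset_work;
 *	};
 *
 *	sc->sc_wq = alloc_ordered_workqueue("exdrvwq", 0);
 *	if (sc->sc_wq == NULL)
 *		return ENOMEM;
 *
 * and later, on detach:
 *
 *	destroy_workqueue(sc->sc_wq);
 */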
203 
204 /*
205  * destroy_workqueue(wq)
206  *
207  *	Destroy a workqueue created with wq.  Cancel any pending
208  *	delayed work.  Wait for all queued work to complete.
209  *
210  *	May sleep.
211  */
212 void
213 destroy_workqueue(struct workqueue_struct *wq)
214 {
215 
216 	/*
217 	 * Cancel all delayed work.  We do this first because any
218 	 * delayed work that has already timed out, which we can't
219 	 * cancel, may have queued new work.
220 	 */
221 	mutex_enter(&wq->wq_lock);
222 	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
223 		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);
224 
225 		KASSERT(work_queue(&dw->work) == wq);
226 		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
227 			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
228 			dw->dw_state == DELAYED_WORK_CANCELLED),
229 		    "delayed work %p in bad state: %d",
230 		    dw, dw->dw_state);
231 
232 		/*
233 		 * Mark it cancelled and try to stop the callout before
234 		 * it starts.
235 		 *
236 		 * If it's too late and the callout has already begun
237 		 * to execute, then it will notice that we asked to
238 		 * cancel it and remove itself from the queue before
239 		 * returning.
240 		 *
241 		 * If we stopped the callout before it started,
242 		 * however, then we can safely destroy the callout and
243 		 * dissociate it from the workqueue ourselves.
244 		 */
245 		dw->dw_state = DELAYED_WORK_CANCELLED;
246 		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
247 			cancel_delayed_work_done(wq, dw);
248 	}
249 	mutex_exit(&wq->wq_lock);
250 
251 	/*
252 	 * At this point, no new work can be put on the queue.
253 	 */
254 
255 	/* Tell the thread to exit.  */
256 	mutex_enter(&wq->wq_lock);
257 	wq->wq_dying = true;
258 	cv_broadcast(&wq->wq_cv);
259 	mutex_exit(&wq->wq_lock);
260 
261 	/* Wait for it to exit.  */
262 	(void)kthread_join(wq->wq_lwp);
263 
264 	KASSERT(wq->wq_dying);
265 	KASSERT(wq->wq_flags == 0);
266 	KASSERT(wq->wq_current_work == NULL);
267 	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
268 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
269 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
270 	cv_destroy(&wq->wq_cv);
271 	mutex_destroy(&wq->wq_lock);
272 
273 	kmem_free(wq, sizeof(*wq));
274 }
275 
276 /*
277  * Work thread and callout
278  */
279 
280 /*
281  * linux_workqueue_thread(cookie)
282  *
283  *	Main function for a workqueue's worker thread.  Waits until
284  *	there is work queued, grabs a batch of work off the queue,
285  *	executes it all, bumps the generation number, and repeats,
286  *	until dying.
287  */
288 static void __dead
289 linux_workqueue_thread(void *cookie)
290 {
291 	struct workqueue_struct *const wq = cookie;
292 	struct work_head queue, dqueue;
293 	struct work_head *const q[2] = { &queue, &dqueue };
294 	unsigned i;
295 
296 	lwp_setspecific(workqueue_key, wq);
297 
298 	mutex_enter(&wq->wq_lock);
299 	for (;;) {
300 		/*
301 		 * Wait until there's activity.  If there's no work and
302 		 * we're dying, stop here.
303 		 */
304 		while (TAILQ_EMPTY(&wq->wq_queue) &&
305 		    TAILQ_EMPTY(&wq->wq_dqueue) &&
306 		    !wq->wq_dying)
307 			cv_wait(&wq->wq_cv, &wq->wq_lock);
308 		if (wq->wq_dying) {
309 			KASSERT(TAILQ_EMPTY(&wq->wq_queue));
310 			KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
311 			break;
312 		}
313 
314 		/* Grab a batch of work off the queue.  */
315 		TAILQ_INIT(&queue);
316 		TAILQ_INIT(&dqueue);
317 		TAILQ_CONCAT(&queue, &wq->wq_queue, work_entry);
318 		TAILQ_CONCAT(&dqueue, &wq->wq_dqueue, work_entry);
319 
320 		/* Process each work item in the batch.  */
321 		for (i = 0; i < 2; i++) {
322 			while (!TAILQ_EMPTY(q[i])) {
323 				struct work_struct *work = TAILQ_FIRST(q[i]);
324 				void (*func)(struct work_struct *);
325 
326 				KASSERT(work_queue(work) == wq);
327 				KASSERT(work_claimed(work, wq));
328 				KASSERTMSG((q[i] != &dqueue ||
329 					container_of(work, struct delayed_work,
330 					    work)->dw_state ==
331 					DELAYED_WORK_IDLE),
332 				    "delayed work %p queued and scheduled",
333 				    work);
334 
335 				TAILQ_REMOVE(q[i], work, work_entry);
336 				KASSERT(wq->wq_current_work == NULL);
337 				wq->wq_current_work = work;
338 				func = work->func;
339 				release_work(work, wq);
340 				/* Can't dereference work after this point.  */
341 
342 				mutex_exit(&wq->wq_lock);
343 				(*func)(work);
344 				mutex_enter(&wq->wq_lock);
345 
346 				KASSERT(wq->wq_current_work == work);
347 				wq->wq_current_work = NULL;
348 				cv_broadcast(&wq->wq_cv);
349 			}
350 		}
351 
352 		/* Notify flush that we've completed a batch of work.  */
353 		wq->wq_gen++;
354 		cv_broadcast(&wq->wq_cv);
355 	}
356 	mutex_exit(&wq->wq_lock);
357 
358 	kthread_exit(0);
359 }
360 
361 /*
362  * linux_workqueue_timeout(cookie)
363  *
364  *	Delayed work timeout callback.
365  *
366  *	- If scheduled, queue it.
367  *	- If rescheduled, callout_schedule ourselves again.
368  *	- If cancelled, destroy the callout and release the work from
369  *        the workqueue.
370  */
371 static void
372 linux_workqueue_timeout(void *cookie)
373 {
374 	struct delayed_work *const dw = cookie;
375 	struct workqueue_struct *const wq = work_queue(&dw->work);
376 
377 	KASSERTMSG(wq != NULL,
378 	    "delayed work %p state %d resched %d",
379 	    dw, dw->dw_state, dw->dw_resched);
380 
381 	mutex_enter(&wq->wq_lock);
382 	KASSERT(work_queue(&dw->work) == wq);
383 	switch (dw->dw_state) {
384 	case DELAYED_WORK_IDLE:
385 		panic("delayed work callout uninitialized: %p", dw);
386 	case DELAYED_WORK_SCHEDULED:
387 		dw_callout_destroy(wq, dw);
388 		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
389 		cv_broadcast(&wq->wq_cv);
390 		break;
391 	case DELAYED_WORK_RESCHEDULED:
392 		KASSERT(dw->dw_resched >= 0);
393 		callout_schedule(&dw->dw_callout, dw->dw_resched);
394 		dw->dw_state = DELAYED_WORK_SCHEDULED;
395 		dw->dw_resched = -1;
396 		break;
397 	case DELAYED_WORK_CANCELLED:
398 		cancel_delayed_work_done(wq, dw);
399 		/* Can't dereference dw after this point.  */
400 		goto out;
401 	default:
402 		panic("delayed work callout in bad state: %p", dw);
403 	}
404 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
405 	    dw->dw_state == DELAYED_WORK_SCHEDULED);
406 out:	mutex_exit(&wq->wq_lock);
407 }
408 
409 /*
410  * current_work()
411  *
412  *	If in a workqueue worker thread, return the work it is
413  *	currently executing.  Otherwise return NULL.
414  */
415 struct work_struct *
416 current_work(void)
417 {
418 	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);
419 
420 	/* If we're not a workqueue thread, then there's no work.  */
421 	if (wq == NULL)
422 		return NULL;
423 
424 	/*
425 	 * Otherwise, this should be possible only while work is in
426 	 * progress.  Return the current work item.
427 	 */
428 	KASSERT(wq->wq_current_work != NULL);
429 	return wq->wq_current_work;
430 }
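
/*
 * Example (illustrative; the exdrv_* names are hypothetical): a
 * routine that must only ever run from its own work handler can
 * assert that with current_work.
 *
 *	static void
 *	exdrv_reset_hw(struct exdrv_softc *sc)
 *	{
 *
 *		KASSERT(current_work() == &sc->sc_reset_work);
 *		exdrv_write_reset_register(sc);
 *	}
 */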
431 
432 /*
433  * Work
434  */
435 
436 /*
437  * INIT_WORK(work, fn)
438  *
439  *	Initialize work for use with a workqueue to call fn in a worker
440  *	thread.  There is no corresponding destruction operation.
441  */
442 void
443 INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
444 {
445 
446 	work->work_owner = 0;
447 	work->func = fn;
448 }
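
/*
 * Example (illustrative sketch; the exdrv_* names are hypothetical):
 * work is embedded in a larger structure, initialized once with
 * INIT_WORK, and the handler recovers the enclosing structure with
 * container_of.  The handler runs in a workqueue thread and may sleep.
 *
 *	static void
 *	exdrv_reset_work(struct work_struct *work)
 *	{
 *		struct exdrv_softc *sc =
 *		    container_of(work, struct exdrv_softc, sc_reset_work);
 *
 *		exdrv_reset_hw(sc);
 *	}
 *
 *	INIT_WORK(&sc->sc_reset_work, exdrv_reset_work);
 */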
449 
450 /*
451  * work_claimed(work, wq)
452  *
453  *	True if work is currently claimed by a workqueue, meaning it is
454  *	either on the queue or scheduled in a callout.  The workqueue
455  *	must be wq, and caller must hold wq's lock.
456  */
457 static bool
458 work_claimed(struct work_struct *work, struct workqueue_struct *wq)
459 {
460 
461 	KASSERT(work_queue(work) == wq);
462 	KASSERT(mutex_owned(&wq->wq_lock));
463 
464 	return work->work_owner & 1;
465 }
466 
467 /*
468  * work_queue(work)
469  *
470  *	Return the last queue that work was queued on, or NULL if it
471  *	was never queued.
472  */
473 static struct workqueue_struct *
474 work_queue(struct work_struct *work)
475 {
476 
477 	return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
478 }
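
/*
 * The work_owner word is thus a tagged pointer: bit 0 is the "claimed"
 * flag and the remaining bits are the owning workqueue, which relies
 * on struct workqueue_struct pointers always having bit 0 clear.  A
 * sketch of how that assumption could be stated at compile time (not
 * currently done here):
 *
 *	CTASSERT(__alignof__(struct workqueue_struct) > 1);
 */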
479 
480 /*
481  * acquire_work(work, wq)
482  *
483  *	Try to claim work for wq.  If work is already claimed, it must
484  *	be claimed by wq; return false.  If work is not already
485  *	claimed, claim it, issue a memory barrier to match any prior
486  *	release_work, and return true.
487  *
488  *	Caller must hold wq's lock.
489  */
490 static bool
491 acquire_work(struct work_struct *work, struct workqueue_struct *wq)
492 {
493 	uintptr_t owner0, owner;
494 
495 	KASSERT(mutex_owned(&wq->wq_lock));
496 	KASSERT(((uintptr_t)wq & 1) == 0);
497 
498 	owner = (uintptr_t)wq | 1;
499 	do {
500 		owner0 = work->work_owner;
501 		if (owner0 & 1) {
502 			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
503 			return false;
504 		}
505 		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
506 	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
507 	    owner0);
508 
509 	KASSERT(work_queue(work) == wq);
510 	membar_enter();
511 	return true;
512 }
513 
514 /*
515  * release_work(work, wq)
516  *
517  *	Issue a memory barrier to match any subsequent acquire_work and
518  *	dissociate work from wq.
519  *
520  *	Caller must hold wq's lock and work must be associated with wq.
521  */
522 static void
523 release_work(struct work_struct *work, struct workqueue_struct *wq)
524 {
525 
526 	KASSERT(work_queue(work) == wq);
527 	KASSERT(mutex_owned(&wq->wq_lock));
528 
529 	membar_exit();
530 
531 	/*
532 	 * Non-interlocked r/m/w is safe here because nobody else can
533 	 * write to this while the claimed bit is set and the workqueue
534 	 * lock is held.
535 	 */
536 	work->work_owner &= ~(uintptr_t)1;
537 }
538 
539 /*
540  * schedule_work(work)
541  *
542  *	If work is not already queued on system_wq, queue it to be run
543  *	by system_wq's worker thread when it next can.  True if it was
544  *	newly queued, false if it was already queued.  If the work was
545  *	already running, queue it to run again.
546  *
547  *	Caller must ensure work is not queued to run on a different
548  *	workqueue.
549  */
550 bool
551 schedule_work(struct work_struct *work)
552 {
553 
554 	return queue_work(system_wq, work);
555 }
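
/*
 * Example (illustrative; the exdrv_* names are hypothetical): an
 * interrupt handler cannot sleep, so it acknowledges the interrupt and
 * defers the sleepable recovery path to the system workqueue.
 * Scheduling work that is already queued is harmless.
 *
 *	static int
 *	exdrv_intr(void *arg)
 *	{
 *		struct exdrv_softc *sc = arg;
 *
 *		exdrv_ack_intr(sc);
 *		(void)schedule_work(&sc->sc_reset_work);
 *		return 1;
 *	}
 */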
556 
557 /*
558  * queue_work(wq, work)
559  *
560  *	If work is not already queued on wq, queue it to be run by wq's
561  *	worker thread when it next can.  True if it was newly queued,
562  *	false if it was already queued.  If the work was already
563  *	running, queue it to run again.
564  *
565  *	Caller must ensure work is not queued to run on a different
566  *	workqueue.
567  */
568 bool
569 queue_work(struct workqueue_struct *wq, struct work_struct *work)
570 {
571 	bool newly_queued;
572 
573 	KASSERT(wq != NULL);
574 
575 	mutex_enter(&wq->wq_lock);
576 	if (__predict_true(acquire_work(work, wq))) {
577 		/*
578 		 * It wasn't on any workqueue at all.  Put it on this
579 		 * one, and signal the worker thread that there is work
580 		 * to do.
581 		 */
582 		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
583 		cv_broadcast(&wq->wq_cv);
584 		newly_queued = true;
585 	} else {
586 		/*
587 		 * It was already on this workqueue.  Nothing to do
588 		 * since it is already queued.
589 		 */
590 		newly_queued = false;
591 	}
592 	mutex_exit(&wq->wq_lock);
593 
594 	return newly_queued;
595 }
596 
597 /*
598  * cancel_work(work)
599  *
600  *	If work was queued, remove it from the queue and return true.
601  *	If work was not queued, return false.  Work may still be
602  *	running when this returns.
603  */
604 bool
605 cancel_work(struct work_struct *work)
606 {
607 	struct workqueue_struct *wq;
608 	bool cancelled_p = false;
609 
610 	/* If there's no workqueue, nothing to cancel.   */
611 	if ((wq = work_queue(work)) == NULL)
612 		goto out;
613 
614 	mutex_enter(&wq->wq_lock);
615 	if (__predict_false(work_queue(work) != wq)) {
616 		/*
617 		 * It has finished execution or been cancelled by
618 		 * another thread, and has been moved off the
619 		 * workqueue, so it's too late to cancel.
620 		 */
621 		cancelled_p = false;
622 	} else {
623 		/* Check whether it's on the queue.  */
624 		if (work_claimed(work, wq)) {
625 			/*
626 			 * It is still on the queue.  Take it off the
627 			 * queue and report successful cancellation.
628 			 */
629 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
630 			release_work(work, wq);
631 			/* Can't dereference work after this point.  */
632 			cancelled_p = true;
633 		} else {
634 			/* Not on the queue.  Couldn't cancel it.  */
635 			cancelled_p = false;
636 		}
637 	}
638 	mutex_exit(&wq->wq_lock);
639 
640 out:	return cancelled_p;
641 }
642 
643 /*
644  * cancel_work_sync(work)
645  *
646  *	If work was queued, remove it from the queue and return true.
647  *	If work was not queued, return false.  Either way, if work is
648  *	currently running, wait for it to complete.
649  *
650  *	May sleep.
651  */
652 bool
653 cancel_work_sync(struct work_struct *work)
654 {
655 	struct workqueue_struct *wq;
656 	bool cancelled_p = false;
657 
658 	/* If there's no workqueue, nothing to cancel.   */
659 	if ((wq = work_queue(work)) == NULL)
660 		goto out;
661 
662 	mutex_enter(&wq->wq_lock);
663 	if (__predict_false(work_queue(work) != wq)) {
664 		/*
665 		 * It has finished execution or been cancelled by
666 		 * another thread, and has been moved off the
667 		 * workqueue, so it's too late to cancel.
668 		 */
669 		cancelled_p = false;
670 	} else {
671 		/* Check whether it's on the queue.  */
672 		if (work_claimed(work, wq)) {
673 			/*
674 			 * It is still on the queue.  Take it off the
675 			 * queue and report successful cancellation.
676 			 */
677 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
678 			release_work(work, wq);
679 			/* Can't dereference work after this point.  */
680 			cancelled_p = true;
681 		} else {
682 			/* Not on the queue.  Couldn't cancel it.  */
683 			cancelled_p = false;
684 		}
685 		/* If it's still running, wait for it to complete.  */
686 		if (wq->wq_current_work == work)
687 			wait_for_current_work(work, wq);
688 	}
689 	mutex_exit(&wq->wq_lock);
690 
691 out:	return cancelled_p;
692 }
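
/*
 * Example (illustrative; continues the hypothetical exdrv_* sketch):
 * a detach path uses cancel_work_sync so that the handler is neither
 * queued nor running by the time the resources it uses are freed.
 *
 *	(void)cancel_work_sync(&sc->sc_reset_work);
 *	destroy_workqueue(sc->sc_wq);
 */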
693 
694 /*
695  * wait_for_current_work(work, wq)
696  *
697  *	wq must be currently executing work.  Wait for it to finish.
698  *
699  *	Does not dereference work.
700  */
701 static void
702 wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
703 {
704 	uint64_t gen;
705 
706 	KASSERT(mutex_owned(&wq->wq_lock));
707 	KASSERT(wq->wq_current_work == work);
708 
709 	/* Wait only one generation in case it gets requeued quickly.  */
710 	gen = wq->wq_gen;
711 	do {
712 		cv_wait(&wq->wq_cv, &wq->wq_lock);
713 	} while (wq->wq_current_work == work && wq->wq_gen == gen);
714 }
715 
716 /*
717  * Delayed work
718  */
719 
720 /*
721  * INIT_DELAYED_WORK(dw, fn)
722  *
723  *	Initialize dw for use with a workqueue to call fn in a worker
724  *	thread after a delay.  There is no corresponding destruction
725  *	operation.
726  */
727 void
728 INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
729 {
730 
731 	INIT_WORK(&dw->work, fn);
732 	dw->dw_state = DELAYED_WORK_IDLE;
733 	dw->dw_resched = -1;
734 
735 	/*
736 	 * Defer callout_init until we are going to schedule the
737 	 * callout, which can then callout_destroy it: there is no
738 	 * DESTROY_DELAYED_WORK operation, so we would otherwise have no
739 	 * opportunity to call callout_destroy.
740 	 */
741 }
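
/*
 * Example (illustrative sketch; the exdrv_* names are hypothetical):
 * a delayed work is embedded in a softc as, say, a member
 * "struct delayed_work sc_tick", initialized once, and the handler
 * recovers the softc through the embedded work member.
 *
 *	static void
 *	exdrv_tick(struct work_struct *work)
 *	{
 *		struct exdrv_softc *sc =
 *		    container_of(work, struct exdrv_softc, sc_tick.work);
 *
 *		exdrv_poll_hw(sc);
 *	}
 *
 *	INIT_DELAYED_WORK(&sc->sc_tick, exdrv_tick);
 */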
742 
743 /*
744  * schedule_delayed_work(dw, ticks)
745  *
746  *	If it is not currently scheduled, schedule dw to run after
747  *	ticks on system_wq.  If currently executing and not already
748  *	rescheduled, reschedule it.  True if it was newly scheduled,
749  *	false if it was already scheduled.
750  *
751  *	If ticks == 0, queue it to run as soon as the worker can,
752  *	without waiting for the next callout tick to run.
753  */
754 bool
755 schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
756 {
757 
758 	return queue_delayed_work(system_wq, dw, ticks);
759 }
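
/*
 * Example (illustrative; continues the hypothetical exdrv_tick sketch
 * above): a handler that reschedules itself yields a periodic poll,
 * here roughly once a second (hz ticks):
 *
 *	exdrv_poll_hw(sc);
 *	(void)schedule_delayed_work(&sc->sc_tick, hz);
 *
 * The first tick is kicked off the same way, e.g. from attach.
 */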
760 
761 /*
762  * dw_callout_init(wq, dw)
763  *
764  *	Initialize the callout of dw and transition to
765  *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
766  */
767 static void
768 dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
769 {
770 
771 	KASSERT(mutex_owned(&wq->wq_lock));
772 	KASSERT(work_queue(&dw->work) == wq);
773 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
774 
775 	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
776 	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
777 	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
778 	dw->dw_state = DELAYED_WORK_SCHEDULED;
779 }
780 
781 /*
782  * dw_callout_destroy(wq, dw)
783  *
784  *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
785  */
786 static void
787 dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
788 {
789 
790 	KASSERT(mutex_owned(&wq->wq_lock));
791 	KASSERT(work_queue(&dw->work) == wq);
792 	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
793 	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
794 	    dw->dw_state == DELAYED_WORK_CANCELLED);
795 
796 	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
797 	callout_destroy(&dw->dw_callout);
798 	dw->dw_resched = -1;
799 	dw->dw_state = DELAYED_WORK_IDLE;
800 }
801 
802 /*
803  * cancel_delayed_work_done(wq, dw)
804  *
805  *	Complete cancellation of a delayed work: transition from
806  *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
807  *	workqueue.  Caller must not dereference dw after this returns.
808  */
809 static void
810 cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
811 {
812 
813 	KASSERT(mutex_owned(&wq->wq_lock));
814 	KASSERT(work_queue(&dw->work) == wq);
815 	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
816 
817 	dw_callout_destroy(wq, dw);
818 	release_work(&dw->work, wq);
819 	/* Can't dereference dw after this point.  */
820 }
821 
822 /*
823  * queue_delayed_work(wq, dw, ticks)
824  *
825  *	If it is not currently scheduled, schedule dw to run after
826  *	ticks on wq.  If currently queued, remove it from the queue
827  *	first.
828  *
829  *	If ticks == 0, queue it to run as soon as the worker can,
830  *	without waiting for the next callout tick to run.
831  */
832 bool
833 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
834     unsigned long ticks)
835 {
836 	bool newly_queued;
837 
838 	mutex_enter(&wq->wq_lock);
839 	if (__predict_true(acquire_work(&dw->work, wq))) {
840 		/*
841 		 * It wasn't on any workqueue at all.  Schedule it to
842 		 * run on this one.
843 		 */
844 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
845 		if (ticks == 0) {
846 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
847 			    work_entry);
848 			cv_broadcast(&wq->wq_cv);
849 		} else {
850 			/*
851 			 * Initialize a callout and schedule to run
852 			 * after a delay.
853 			 */
854 			dw_callout_init(wq, dw);
855 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
856 		}
857 		newly_queued = true;
858 	} else {
859 		/* It was already on this workqueue.  */
860 		switch (dw->dw_state) {
861 		case DELAYED_WORK_IDLE:
862 		case DELAYED_WORK_SCHEDULED:
863 		case DELAYED_WORK_RESCHEDULED:
864 			/* On the queue or already scheduled.  Leave it.  */
865 			newly_queued = false;
866 			break;
867 		case DELAYED_WORK_CANCELLED:
868 			/*
869 			 * Scheduled and the callout began, but it was
870 			 * cancelled.  Reschedule it.
871 			 */
872 			dw->dw_state = DELAYED_WORK_RESCHEDULED;
873 			dw->dw_resched = MIN(INT_MAX, ticks);
874 			newly_queued = true;
875 			break;
876 		default:
877 			panic("invalid delayed work state: %d",
878 			    dw->dw_state);
879 		}
880 	}
881 	mutex_exit(&wq->wq_lock);
882 
883 	return newly_queued;
884 }
885 
886 /*
887  * mod_delayed_work(wq, dw, ticks)
888  *
889  *	Schedule dw to run after ticks.  If scheduled or queued,
890  *	reschedule.  If ticks == 0, run without delay.
891  *
892  *	True if it modified the timer of an already scheduled work,
893  *	false if it newly scheduled the work.
894  */
895 bool
896 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
897     unsigned long ticks)
898 {
899 	bool timer_modified;
900 
901 	mutex_enter(&wq->wq_lock);
902 	if (acquire_work(&dw->work, wq)) {
903 		/*
904 		 * It wasn't on any workqueue at all.  Schedule it to
905 		 * run on this one.
906 		 */
907 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
908 		if (ticks == 0) {
909 			/*
910 			 * Run immediately: put it on the queue and
911 			 * signal the worker thread.
912 			 */
913 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
914 			    work_entry);
915 			cv_broadcast(&wq->wq_cv);
916 		} else {
917 			/*
918 			 * Initialize a callout and schedule to run
919 			 * after a delay.
920 			 */
921 			dw_callout_init(wq, dw);
922 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
923 		}
924 		timer_modified = false;
925 	} else {
926 		/* It was already on this workqueue.  */
927 		switch (dw->dw_state) {
928 		case DELAYED_WORK_IDLE:
929 			/* On the queue.  */
930 			if (ticks == 0) {
931 				/* Leave it be.  */
932 			} else {
933 				/* Remove from the queue and schedule.  */
934 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
935 				    work_entry);
936 				dw_callout_init(wq, dw);
937 				callout_schedule(&dw->dw_callout,
938 				    MIN(INT_MAX, ticks));
939 			}
940 			timer_modified = true;
941 			break;
942 		case DELAYED_WORK_SCHEDULED:
943 			/*
944 			 * It is scheduled to run after a delay.  Try
945 			 * to stop it and reschedule it; if we can't,
946 			 * either reschedule it or cancel it to put it
947 			 * on the queue, and inform the callout.
948 			 */
949 			if (callout_stop(&dw->dw_callout)) {
950 				/* Can't stop, callout has begun.  */
951 				if (ticks == 0) {
952 					/*
953 					 * We don't actually need to do
954 					 * anything.  The callout will
955 					 * queue it as soon as it gets
956 					 * the lock.
957 					 */
958 				} else {
959 					/* Ask the callout to reschedule.  */
960 					dw->dw_state = DELAYED_WORK_RESCHEDULED;
961 					dw->dw_resched = MIN(INT_MAX, ticks);
962 				}
963 			} else {
964 				/* We stopped the callout before it began.  */
965 				if (ticks == 0) {
966 					/*
967 					 * Run immediately: destroy the
968 					 * callout, put it on the
969 					 * queue, and signal the worker
970 					 * thread.
971 					 */
972 					dw_callout_destroy(wq, dw);
973 					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
974 					    &dw->work, work_entry);
975 					cv_broadcast(&wq->wq_cv);
976 				} else {
977 					/*
978 					 * Reschedule the callout.  No
979 					 * state change.
980 					 */
981 					callout_schedule(&dw->dw_callout,
982 					    MIN(INT_MAX, ticks));
983 				}
984 			}
985 			timer_modified = true;
986 			break;
987 		case DELAYED_WORK_RESCHEDULED:
988 			/*
989 			 * Someone rescheduled it after the callout
990 			 * started but before the poor thing even had a
991 			 * chance to acquire the lock.
992 			 */
993 			if (ticks == 0) {
994 				/*
995 				 * We can just switch back to
996 				 * DELAYED_WORK_SCHEDULED so that the
997 				 * callout will queue the work as soon
998 				 * as it gets the lock.
999 				 */
1000 				dw->dw_state = DELAYED_WORK_SCHEDULED;
1001 				dw->dw_resched = -1;
1002 			} else {
1003 				/* Change the rescheduled time.  */
1004 				dw->dw_resched = MIN(INT_MAX, ticks);
1005 			}
1006 			timer_modified = true;
1007 			break;
1008 		case DELAYED_WORK_CANCELLED:
1009 			/*
1010 			 * Someone cancelled it after the callout
1011 			 * started but before the poor thing even had a
1012 			 * chance to acquire the lock.
1013 			 */
1014 			if (ticks == 0) {
1015 				/*
1016 				 * We can just switch back to
1017 				 * DELAYED_WORK_SCHEDULED so that the
1018 				 * callout will queue the work as soon
1019 				 * as it gets the lock.
1020 				 */
1021 				dw->dw_state = DELAYED_WORK_SCHEDULED;
1022 			} else {
1023 				/* Ask it to reschedule.  */
1024 				dw->dw_state = DELAYED_WORK_RESCHEDULED;
1025 				dw->dw_resched = MIN(INT_MAX, ticks);
1026 			}
1027 			timer_modified = false;
1028 			break;
1029 		default:
1030 			panic("invalid delayed work state: %d", dw->dw_state);
1031 		}
1032 	}
1033 	mutex_exit(&wq->wq_lock);
1034 
1035 	return timer_modified;
1036 }
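
/*
 * Example (illustrative; the sc_watchdog member is hypothetical): a
 * command watchdog pushed back on every submission, so that it fires
 * only if the device has been quiet for five seconds.
 *
 *	(void)mod_delayed_work(sc->sc_wq, &sc->sc_watchdog, 5 * hz);
 */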
1037 
1038 /*
1039  * cancel_delayed_work(dw)
1040  *
1041  *	If work was scheduled or queued, remove it from the schedule or
1042  *	queue and return true.  If work was not scheduled or queued,
1043  *	return false.  Note that work may already be running; if it
1044  *	hasn't been rescheduled or requeued, then cancel_delayed_work
1045  *	will return false, and either way, cancel_delayed_work will NOT
1046  *	wait for the work to complete.
1047  */
1048 bool
1049 cancel_delayed_work(struct delayed_work *dw)
1050 {
1051 	struct workqueue_struct *wq;
1052 	bool cancelled_p;
1053 
1054 	/* If there's no workqueue, nothing to cancel.   */
1055 	if ((wq = work_queue(&dw->work)) == NULL)
1056 		return false;
1057 
1058 	mutex_enter(&wq->wq_lock);
1059 	if (__predict_false(work_queue(&dw->work) != wq)) {
1060 		cancelled_p = false;
1061 	} else {
1062 		switch (dw->dw_state) {
1063 		case DELAYED_WORK_IDLE:
1064 			/*
1065 			 * It is either on the queue or already running
1066 			 * or both.
1067 			 */
1068 			if (work_claimed(&dw->work, wq)) {
1069 				/* On the queue.  Remove and release.  */
1070 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1071 				    work_entry);
1072 				release_work(&dw->work, wq);
1073 				/* Can't dereference dw after this point.  */
1074 				cancelled_p = true;
1075 			} else {
1076 				/* Not on the queue, so didn't cancel.  */
1077 				cancelled_p = false;
1078 			}
1079 			break;
1080 		case DELAYED_WORK_SCHEDULED:
1081 			/*
1082 			 * If it is scheduled, mark it cancelled and
1083 			 * try to stop the callout before it starts.
1084 			 *
1085 			 * If it's too late and the callout has already
1086 			 * begun to execute, tough.
1087 			 *
1088 			 * If we stopped the callout before it started,
1089 			 * however, then destroy the callout and
1090 			 * dissociate it from the workqueue ourselves.
1091 			 */
1092 			dw->dw_state = DELAYED_WORK_CANCELLED;
1093 			cancelled_p = true;
1094 			if (!callout_stop(&dw->dw_callout))
1095 				cancel_delayed_work_done(wq, dw);
1096 			break;
1097 		case DELAYED_WORK_RESCHEDULED:
1098 			/*
1099 			 * If it is being rescheduled, the callout has
1100 			 * already fired.  We must ask it to cancel.
1101 			 */
1102 			dw->dw_state = DELAYED_WORK_CANCELLED;
1103 			dw->dw_resched = -1;
1104 			cancelled_p = true;
1105 			break;
1106 		case DELAYED_WORK_CANCELLED:
1107 			/*
1108 			 * If it is being cancelled, the callout has
1109 			 * already fired.  There is nothing more for us
1110 			 * to do.  Someone else claims credit for
1111 			 * cancelling it.
1112 			 */
1113 			cancelled_p = false;
1114 			break;
1115 		default:
1116 			panic("invalid delayed work state: %d",
1117 			    dw->dw_state);
1118 		}
1119 	}
1120 	mutex_exit(&wq->wq_lock);
1121 
1122 	return cancelled_p;
1123 }
1124 
1125 /*
1126  * cancel_delayed_work_sync(dw)
1127  *
1128  *	If work was scheduled or queued, remove it from the schedule or
1129  *	queue and return true.  If work was not scheduled or queued,
1130  *	return false.  Note that work may already be running; if it
1131  *	hasn't been rescheduled or requeued, then cancel_delayed_work
1132  *	will return false; either way, wait for it to complete.
1133  */
1134 bool
1135 cancel_delayed_work_sync(struct delayed_work *dw)
1136 {
1137 	struct workqueue_struct *wq;
1138 	bool cancelled_p;
1139 
1140 	/* If there's no workqueue, nothing to cancel.  */
1141 	if ((wq = work_queue(&dw->work)) == NULL)
1142 		return false;
1143 
1144 	mutex_enter(&wq->wq_lock);
1145 	if (__predict_false(work_queue(&dw->work) != wq)) {
1146 		cancelled_p = false;
1147 	} else {
1148 		switch (dw->dw_state) {
1149 		case DELAYED_WORK_IDLE:
1150 			/*
1151 			 * It is either on the queue or already running
1152 			 * or both.
1153 			 */
1154 			if (work_claimed(&dw->work, wq)) {
1155 				/* On the queue.  Remove and release.  */
1156 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1157 				    work_entry);
1158 				release_work(&dw->work, wq);
1159 				/* Can't dereference dw after this point.  */
1160 				cancelled_p = true;
1161 			} else {
1162 				/* Not on the queue, so didn't cancel. */
1163 				cancelled_p = false;
1164 			}
1165 			/* If it's still running, wait for it to complete.  */
1166 			if (wq->wq_current_work == &dw->work)
1167 				wait_for_current_work(&dw->work, wq);
1168 			break;
1169 		case DELAYED_WORK_SCHEDULED:
1170 			/*
1171 			 * If it is scheduled, mark it cancelled and
1172 			 * try to stop the callout before it starts.
1173 			 *
1174 			 * If it's too late and the callout has already
1175 			 * begun to execute, we must wait for it to
1176 			 * complete.  But we got in soon enough to ask
1177 			 * the callout not to run, so we successfully
1178 			 * cancelled it in that case.
1179 			 *
1180 			 * If we stopped the callout before it started,
1181 			 * then we must destroy the callout and
1182 			 * dissociate it from the workqueue ourselves.
1183 			 */
1184 			dw->dw_state = DELAYED_WORK_CANCELLED;
1185 			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
1186 				cancel_delayed_work_done(wq, dw);
1187 			cancelled_p = true;
1188 			break;
1189 		case DELAYED_WORK_RESCHEDULED:
1190 			/*
1191 			 * If it is being rescheduled, the callout has
1192 			 * already fired.  We must ask it to cancel and
1193 			 * wait for it to complete.
1194 			 */
1195 			dw->dw_state = DELAYED_WORK_CANCELLED;
1196 			dw->dw_resched = -1;
1197 			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1198 			cancelled_p = true;
1199 			break;
1200 		case DELAYED_WORK_CANCELLED:
1201 			/*
1202 			 * If it is being cancelled, the callout has
1203 			 * already fired.  We need only wait for it to
1204 			 * complete.  Someone else, however, claims
1205 			 * credit for cancelling it.
1206 			 */
1207 			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1208 			cancelled_p = false;
1209 			break;
1210 		default:
1211 			panic("invalid delayed work state: %d",
1212 			    dw->dw_state);
1213 		}
1214 	}
1215 	mutex_exit(&wq->wq_lock);
1216 
1217 	return cancelled_p;
1218 }
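
/*
 * Example (illustrative; continues the hypothetical exdrv_* sketch):
 * teardown cancels timers synchronously before the enclosing structure
 * goes away, so neither the callout nor the handler can run
 * afterwards; only then is it safe to destroy the workqueue and free
 * the softc.
 *
 *	(void)cancel_delayed_work_sync(&sc->sc_tick);
 *	(void)cancel_delayed_work_sync(&sc->sc_watchdog);
 *	destroy_workqueue(sc->sc_wq);
 */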
1219 
1220 /*
1221  * Flush
1222  */
1223 
1224 /*
1225  * flush_scheduled_work()
1226  *
1227  *	Wait for all work queued on system_wq to complete.  This does
1228  *	not include delayed work.
1229  */
1230 void
1231 flush_scheduled_work(void)
1232 {
1233 
1234 	flush_workqueue(system_wq);
1235 }
1236 
1237 /*
1238  * flush_workqueue_locked(wq)
1239  *
1240  *	Wait for all work queued on wq to complete.  This does not
1241  *	include delayed work.
1242  *
1243  *	Caller must hold wq's lock.
1244  */
1245 static void
1246 flush_workqueue_locked(struct workqueue_struct *wq)
1247 {
1248 	uint64_t gen;
1249 
1250 	KASSERT(mutex_owned(&wq->wq_lock));
1251 
1252 	/* Get the current generation number.  */
1253 	gen = wq->wq_gen;
1254 
1255 	/*
1256 	 * If there's a batch of work in progress, we must wait for the
1257 	 * worker thread to finish that batch.
1258 	 */
1259 	if (wq->wq_current_work != NULL)
1260 		gen++;
1261 
1262 	/*
1263 	 * If there's any work yet to be claimed from the queue by the
1264 	 * worker thread, we must wait for it to finish one more batch
1265 	 * too.
1266 	 */
1267 	if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue))
1268 		gen++;
1269 
1270 	/* Wait until the generation number has caught up.  */
1271 	while (wq->wq_gen < gen)
1272 		cv_wait(&wq->wq_cv, &wq->wq_lock);
1273 }
1274 
1275 /*
1276  * flush_workqueue(wq)
1277  *
1278  *	Wait for all work queued on wq to complete.  This does not
1279  *	include delayed work.
1280  */
1281 void
1282 flush_workqueue(struct workqueue_struct *wq)
1283 {
1284 
1285 	mutex_enter(&wq->wq_lock);
1286 	flush_workqueue_locked(wq);
1287 	mutex_exit(&wq->wq_lock);
1288 }
1289 
1290 /*
1291  * flush_work(work)
1292  *
1293  *	If work is queued or currently executing, wait for it to
1294  *	complete.
1295  */
1296 void
1297 flush_work(struct work_struct *work)
1298 {
1299 	struct workqueue_struct *wq;
1300 
1301 	/* If there's no workqueue, nothing to flush.  */
1302 	if ((wq = work_queue(work)) == NULL)
1303 		return;
1304 
1305 	flush_workqueue(wq);
1306 }
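
/*
 * Example (illustrative; the sc_reset_work member is hypothetical):
 * before reporting that a reset has completed, wait for any in-flight
 * reset handler without cancelling a queued one.
 *
 *	flush_work(&sc->sc_reset_work);
 */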
1307 
1308 /*
1309  * flush_delayed_work(dw)
1310  *
1311  *	If dw is scheduled to run after a delay, queue it immediately
1312  *	instead.  Then, if dw is queued or currently executing, wait
1313  *	for it to complete.
1314  */
1315 void
1316 flush_delayed_work(struct delayed_work *dw)
1317 {
1318 	struct workqueue_struct *wq;
1319 
1320 	/* If there's no workqueue, nothing to flush.  */
1321 	if ((wq = work_queue(&dw->work)) == NULL)
1322 		return;
1323 
1324 	mutex_enter(&wq->wq_lock);
1325 	if (__predict_false(work_queue(&dw->work) != wq)) {
1326 		/*
1327 		 * Moved off the queue already (and possibly to another
1328 		 * queue, though that would be ill-advised), so it must
1329 		 * have completed, and we have nothing more to do.
1330 		 */
1331 	} else {
1332 		switch (dw->dw_state) {
1333 		case DELAYED_WORK_IDLE:
1334 			/*
1335 			 * It has a workqueue assigned and the callout
1336 			 * is idle, so it must be in progress or on the
1337 			 * queue.  In that case, we'll wait for it to
1338 			 * complete.
1339 			 */
1340 			break;
1341 		case DELAYED_WORK_SCHEDULED:
1342 		case DELAYED_WORK_RESCHEDULED:
1343 		case DELAYED_WORK_CANCELLED:
1344 			/*
1345 			 * The callout is scheduled, and may have even
1346 			 * started.  Mark it as scheduled so that if
1347 			 * the callout has fired it will queue the work
1348 			 * itself.  Try to stop the callout -- if we
1349 			 * can, queue the work now; if we can't, wait
1350 			 * for the callout to complete, which entails
1351 			 * queueing it.
1352 			 */
1353 			dw->dw_state = DELAYED_WORK_SCHEDULED;
1354 			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
1355 				/*
1356 				 * We stopped it before it ran.  No
1357 				 * state change in the interim is
1358 				 * possible.  Destroy the callout and
1359 				 * queue it ourselves.
1360 				 */
1361 				KASSERT(dw->dw_state ==
1362 				    DELAYED_WORK_SCHEDULED);
1363 				dw_callout_destroy(wq, dw);
1364 				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1365 				    work_entry);
1366 				cv_broadcast(&wq->wq_cv);
1367 			}
1368 			break;
1369 		default:
1370 			panic("invalid delayed work state: %d", dw->dw_state);
1371 		}
1372 		/*
1373 		 * Waiting for the whole queue to flush is overkill,
1374 		 * but doesn't hurt.
1375 		 */
1376 		flush_workqueue_locked(wq);
1377 	}
1378 	mutex_exit(&wq->wq_lock);
1379 }
1380