1 /*	$NetBSD: linux_work.c,v 1.40 2018/08/27 15:06:20 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.40 2018/08/27 15:06:20 riastradh Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/atomic.h>
37 #include <sys/callout.h>
38 #include <sys/condvar.h>
39 #include <sys/errno.h>
40 #include <sys/kmem.h>
41 #include <sys/kthread.h>
42 #include <sys/lwp.h>
43 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 
46 #include <linux/workqueue.h>
47 
48 TAILQ_HEAD(work_head, work_struct);
49 TAILQ_HEAD(dwork_head, delayed_work);
50 
51 struct workqueue_struct {
52 	kmutex_t		wq_lock;
53 	kcondvar_t		wq_cv;
54 	struct dwork_head	wq_delayed; /* delayed work scheduled */
55 	struct work_head	wq_queue;   /* work to run */
56 	struct work_head	wq_dqueue;  /* delayed work to run now */
57 	struct work_struct	*wq_current_work;
58 	int			wq_flags;
59 	bool			wq_dying;
60 	uint64_t		wq_gen;
61 	struct lwp		*wq_lwp;
62 };
63 
64 static void __dead	linux_workqueue_thread(void *);
65 static void		linux_workqueue_timeout(void *);
66 static bool		work_claimed(struct work_struct *,
67 			    struct workqueue_struct *);
68 static struct workqueue_struct *
69 			work_queue(struct work_struct *);
70 static bool		acquire_work(struct work_struct *,
71 			    struct workqueue_struct *);
72 static void		release_work(struct work_struct *,
73 			    struct workqueue_struct *);
74 static void		wait_for_current_work(struct work_struct *,
75 			    struct workqueue_struct *);
76 static void		dw_callout_init(struct workqueue_struct *,
77 			    struct delayed_work *);
78 static void		dw_callout_destroy(struct workqueue_struct *,
79 			    struct delayed_work *);
80 static void		cancel_delayed_work_done(struct workqueue_struct *,
81 			    struct delayed_work *);
82 
83 static specificdata_key_t workqueue_key __read_mostly;
84 
85 struct workqueue_struct	*system_wq __read_mostly;
86 struct workqueue_struct	*system_long_wq __read_mostly;
87 struct workqueue_struct	*system_power_efficient_wq __read_mostly;
88 
89 static inline uintptr_t
90 atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
91 {
92 
93 	return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
94 }
95 
96 /*
97  * linux_workqueue_init()
98  *
99  *	Initialize the Linux workqueue subsystem.  Return 0 on success,
100  *	NetBSD error on failure.
101  */
102 int
103 linux_workqueue_init(void)
104 {
105 	int error;
106 
107 	error = lwp_specific_key_create(&workqueue_key, NULL);
108 	if (error)
109 		goto fail0;
110 
111 	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
112 	if (system_wq == NULL) {
113 		error = ENOMEM;
114 		goto fail1;
115 	}
116 
117 	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
118 	if (system_long_wq == NULL) {
119 		error = ENOMEM;
120 		goto fail2;
121 	}
122 
123 	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
124 	if (system_power_efficient_wq == NULL) {
125 		error = ENOMEM;
126 		goto fail3;
127 	}
128 
129 	return 0;
130 
131 fail4: __unused
132 	destroy_workqueue(system_power_efficient_wq);
133 fail3:	destroy_workqueue(system_long_wq);
134 fail2:	destroy_workqueue(system_wq);
135 fail1:	lwp_specific_key_delete(workqueue_key);
136 fail0:	KASSERT(error);
137 	return error;
138 }
139 
140 /*
141  * linux_workqueue_fini()
142  *
143  *	Destroy the Linux workqueue subsystem.  Never fails.
144  */
145 void
146 linux_workqueue_fini(void)
147 {
148 
149 	destroy_workqueue(system_power_efficient_wq);
150 	destroy_workqueue(system_long_wq);
151 	destroy_workqueue(system_wq);
152 	lwp_specific_key_delete(workqueue_key);
153 }
154 
155 /*
156  * Workqueues
157  */
158 
159 /*
160  * alloc_ordered_workqueue(name, flags)
161  *
162  *	Create a workqueue of the given name.  No flags are currently
163  *	defined.  Return NULL on failure, pointer to struct
164  *	workqueue_struct object on success.
165  */
166 struct workqueue_struct *
167 alloc_ordered_workqueue(const char *name, int flags)
168 {
169 	struct workqueue_struct *wq;
170 	int error;
171 
172 	KASSERT(flags == 0);
173 
174 	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);
175 
176 	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
177 	cv_init(&wq->wq_cv, name);
178 	TAILQ_INIT(&wq->wq_delayed);
179 	TAILQ_INIT(&wq->wq_queue);
180 	TAILQ_INIT(&wq->wq_dqueue);
181 	wq->wq_current_work = NULL;
182 	wq->wq_flags = 0;
183 	wq->wq_dying = false;
184 	wq->wq_gen = 0;
185 	wq->wq_lwp = NULL;
186 
187 	error = kthread_create(PRI_NONE,
188 	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
189 	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
190 	if (error)
191 		goto fail0;
192 
193 	return wq;
194 
195 fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
196 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
197 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
198 	cv_destroy(&wq->wq_cv);
199 	mutex_destroy(&wq->wq_lock);
200 	kmem_free(wq, sizeof(*wq));
201 	return NULL;
202 }
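
/*
 * Example usage (illustrative sketch only; the names examplewq and wq
 * here are hypothetical, not part of this file):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("examplewq", 0);
 *	if (wq == NULL)
 *		return ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */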
203 
204 /*
205  * destroy_workqueue(wq)
206  *
207  *	Destroy a workqueue created with alloc_ordered_workqueue().
208  *	Cancel any pending delayed work; wait for all queued work to complete.
209  *
210  *	May sleep.
211  */
212 void
213 destroy_workqueue(struct workqueue_struct *wq)
214 {
215 
216 	/*
217 	 * Cancel all delayed work.  We do this first because any
218 	 * delayed work that has already timed out, which we can't
219 	 * cancel, may have queued new work.
220 	 */
221 	mutex_enter(&wq->wq_lock);
222 	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
223 		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);
224 
225 		KASSERT(work_queue(&dw->work) == wq);
226 		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
227 			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
228 			dw->dw_state == DELAYED_WORK_CANCELLED),
229 		    "delayed work %p in bad state: %d",
230 		    dw, dw->dw_state);
231 
232 		/*
233 		 * Mark it cancelled and try to stop the callout before
234 		 * it starts.
235 		 *
236 		 * If it's too late and the callout has already begun
237 		 * to execute, then it will notice that we asked to
238 		 * cancel it and remove itself from the queue before
239 		 * returning.
240 		 *
241 		 * If we stopped the callout before it started,
242 		 * however, then we can safely destroy the callout and
243 		 * dissociate it from the workqueue ourselves.
244 		 */
245 		dw->dw_state = DELAYED_WORK_CANCELLED;
246 		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
247 			cancel_delayed_work_done(wq, dw);
248 	}
249 	mutex_exit(&wq->wq_lock);
250 
251 	/*
252 	 * At this point, no new work can be put on the queue.
253 	 */
254 
255 	/* Tell the thread to exit.  */
256 	mutex_enter(&wq->wq_lock);
257 	wq->wq_dying = true;
258 	cv_broadcast(&wq->wq_cv);
259 	mutex_exit(&wq->wq_lock);
260 
261 	/* Wait for it to exit.  */
262 	(void)kthread_join(wq->wq_lwp);
263 
264 	KASSERT(wq->wq_dying);
265 	KASSERT(wq->wq_flags == 0);
266 	KASSERT(wq->wq_current_work == NULL);
267 	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
268 	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
269 	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
270 	cv_destroy(&wq->wq_cv);
271 	mutex_destroy(&wq->wq_lock);
272 
273 	kmem_free(wq, sizeof(*wq));
274 }
275 
276 /*
277  * Work thread and callout
278  */
279 
280 /*
281  * linux_workqueue_thread(cookie)
282  *
283  *	Main function for a workqueue's worker thread.  Waits until
284  *	there is work queued, grabs a batch of work off the queue,
285  *	executes it all, bumps the generation number, and repeats,
286  *	until dying.
287  */
288 static void __dead
289 linux_workqueue_thread(void *cookie)
290 {
291 	struct workqueue_struct *const wq = cookie;
292 	struct work_head queue, dqueue;
293 	struct work_head *const q[2] = { &queue, &dqueue };
294 	unsigned i;
295 
296 	lwp_setspecific(workqueue_key, wq);
297 
298 	mutex_enter(&wq->wq_lock);
299 	for (;;) {
300 		/*
301 		 * Wait until there's activity.  If there's no work and
302 		 * we're dying, stop here.
303 		 */
304 		while (TAILQ_EMPTY(&wq->wq_queue) &&
305 		    TAILQ_EMPTY(&wq->wq_dqueue) &&
306 		    !wq->wq_dying)
307 			cv_wait(&wq->wq_cv, &wq->wq_lock);
308 		if (wq->wq_dying) {
309 			KASSERT(TAILQ_EMPTY(&wq->wq_queue));
310 			KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
311 			break;
312 		}
313 
314 		/* Grab a batch of work off the queue.  */
315 		TAILQ_INIT(&queue);
316 		TAILQ_INIT(&dqueue);
317 		TAILQ_CONCAT(&queue, &wq->wq_queue, work_entry);
318 		TAILQ_CONCAT(&dqueue, &wq->wq_dqueue, work_entry);
319 
320 		/* Process each work item in the batch.  */
321 		for (i = 0; i < 2; i++) {
322 			while (!TAILQ_EMPTY(q[i])) {
323 				struct work_struct *work = TAILQ_FIRST(q[i]);
324 				void (*func)(struct work_struct *);
325 
326 				KASSERT(work_queue(work) == wq);
327 				KASSERT(work_claimed(work, wq));
328 				KASSERTMSG((q[i] != &dqueue ||
329 					container_of(work, struct delayed_work,
330 					    work)->dw_state ==
331 					DELAYED_WORK_IDLE),
332 				    "delayed work %p queued and scheduled",
333 				    work);
334 
335 				TAILQ_REMOVE(q[i], work, work_entry);
336 				KASSERT(wq->wq_current_work == NULL);
337 				wq->wq_current_work = work;
338 				func = work->func;
339 				release_work(work, wq);
340 				/* Can't dereference work after this point.  */
341 
342 				mutex_exit(&wq->wq_lock);
343 				(*func)(work);
344 				mutex_enter(&wq->wq_lock);
345 
346 				KASSERT(wq->wq_current_work == work);
347 				wq->wq_current_work = NULL;
348 				cv_broadcast(&wq->wq_cv);
349 			}
350 		}
351 
352 		/* Notify flush that we've completed a batch of work.  */
353 		wq->wq_gen++;
354 		cv_broadcast(&wq->wq_cv);
355 	}
356 	mutex_exit(&wq->wq_lock);
357 
358 	kthread_exit(0);
359 }
360 
361 /*
362  * linux_workqueue_timeout(cookie)
363  *
364  *	Delayed work timeout callback.
365  *
366  *	- If scheduled, queue it.
367  *	- If rescheduled, callout_schedule ourselves again.
368  *	- If cancelled, destroy the callout and release the work from
369  *        the workqueue.
370  */
371 static void
372 linux_workqueue_timeout(void *cookie)
373 {
374 	struct delayed_work *const dw = cookie;
375 	struct workqueue_struct *const wq = work_queue(&dw->work);
376 
377 	KASSERTMSG(wq != NULL,
378 	    "delayed work %p state %d resched %d",
379 	    dw, dw->dw_state, dw->dw_resched);
380 
381 	mutex_enter(&wq->wq_lock);
382 	KASSERT(work_queue(&dw->work) == wq);
383 	switch (dw->dw_state) {
384 	case DELAYED_WORK_IDLE:
385 		panic("delayed work callout uninitialized: %p", dw);
386 	case DELAYED_WORK_SCHEDULED:
387 		dw_callout_destroy(wq, dw);
388 		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
389 		cv_broadcast(&wq->wq_cv);
390 		break;
391 	case DELAYED_WORK_RESCHEDULED:
392 		KASSERT(dw->dw_resched >= 0);
393 		callout_schedule(&dw->dw_callout, dw->dw_resched);
394 		dw->dw_state = DELAYED_WORK_SCHEDULED;
395 		dw->dw_resched = -1;
396 		break;
397 	case DELAYED_WORK_CANCELLED:
398 		cancel_delayed_work_done(wq, dw);
399 		/* Can't dereference dw after this point.  */
400 		goto out;
401 	default:
402 		panic("delayed work callout in bad state: %p", dw);
403 	}
404 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
405 	    dw->dw_state == DELAYED_WORK_SCHEDULED);
406 out:	mutex_exit(&wq->wq_lock);
407 }
408 
409 /*
410  * current_work()
411  *
412  *	If in a workqueue worker thread, return the work it is
413  *	currently executing.  Otherwise return NULL.
414  */
415 struct work_struct *
416 current_work(void)
417 {
418 	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);
419 
420 	/* If we're not a workqueue thread, then there's no work.  */
421 	if (wq == NULL)
422 		return NULL;
423 
424 	/*
425 	 * Otherwise, this should be possible only while work is in
426 	 * progress.  Return the current work item.
427 	 */
428 	KASSERT(wq->wq_current_work != NULL);
429 	return wq->wq_current_work;
430 }
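
/*
 * Example (illustrative; sc and sc_work are hypothetical driver state):
 * code that must not run from its own work handler -- for instance
 * because it is about to wait for that handler -- can assert this:
 *
 *	KASSERT(current_work() != &sc->sc_work);
 *	cancel_work_sync(&sc->sc_work);
 */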
431 
432 /*
433  * Work
434  */
435 
436 /*
437  * INIT_WORK(work, fn)
438  *
439  *	Initialize work for use with a workqueue to call fn in a worker
440  *	thread.  There is no corresponding destruction operation.
441  */
442 void
443 INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
444 {
445 
446 	work->work_owner = 0;
447 	work->func = fn;
448 }
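
/*
 * Example (illustrative sketch; struct example_softc and its members
 * are hypothetical):
 *
 *	struct example_softc {
 *		struct work_struct	sc_work;
 *		...
 *	};
 *
 *	static void
 *	example_work(struct work_struct *work)
 *	{
 *		struct example_softc *sc =
 *		    container_of(work, struct example_softc, sc_work);
 *		...
 *	}
 *
 *	INIT_WORK(&sc->sc_work, example_work);
 */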
449 
450 /*
451  * work_claimed(work, wq)
452  *
453  *	True if work is currently claimed by a workqueue, meaning it is
454  *	either on the queue or scheduled in a callout.  The workqueue
455  *	must be wq, and caller must hold wq's lock.
456  */
457 static bool
458 work_claimed(struct work_struct *work, struct workqueue_struct *wq)
459 {
460 
461 	KASSERT(work_queue(work) == wq);
462 	KASSERT(mutex_owned(&wq->wq_lock));
463 
464 	return work->work_owner & 1;
465 }
466 
467 /*
468  * work_queue(work)
469  *
470  *	Return the last queue that work was queued on, or NULL if it
471  *	was never queued.
472  */
473 static struct workqueue_struct *
474 work_queue(struct work_struct *work)
475 {
476 
477 	return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
478 }
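
/*
 * Note on the encoding: work_owner packs both facts into one
 * pointer-sized word.  The low bit is the "claimed" flag and the
 * remaining bits are the workqueue pointer; workqueue_struct objects
 * are pointer-aligned, so a valid pointer always has a zero low bit.
 * For example, ((uintptr_t)wq | 1) means "claimed by wq", while a
 * plain (uintptr_t)wq means "last associated with wq, not currently
 * claimed".
 */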
479 
480 /*
481  * acquire_work(work, wq)
482  *
483  *	Try to claim work for wq.  If work is already claimed, it must
484  *	be claimed by wq; return false.  If work is not already
485  *	claimed, claim it, issue a memory barrier to match any prior
486  *	release_work, and return true.
487  *
488  *	Caller must hold wq's lock.
489  */
490 static bool
491 acquire_work(struct work_struct *work, struct workqueue_struct *wq)
492 {
493 	uintptr_t owner0, owner;
494 
495 	KASSERT(mutex_owned(&wq->wq_lock));
496 	KASSERT(((uintptr_t)wq & 1) == 0);
497 
498 	owner = (uintptr_t)wq | 1;
499 	do {
500 		owner0 = work->work_owner;
501 		if (owner0 & 1) {
502 			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
503 			return false;
504 		}
505 		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
506 	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
507 	    owner0);
508 
509 	KASSERT(work_queue(work) == wq);
510 	membar_enter();
511 	return true;
512 }
513 
514 /*
515  * release_work(work, wq)
516  *
517  *	Issue a memory barrier to match any subsequent acquire_work and
518  *	dissociate work from wq.
519  *
520  *	Caller must hold wq's lock and work must be associated with wq.
521  */
522 static void
523 release_work(struct work_struct *work, struct workqueue_struct *wq)
524 {
525 
526 	KASSERT(work_queue(work) == wq);
527 	KASSERT(mutex_owned(&wq->wq_lock));
528 
529 	membar_exit();
530 
531 	/*
532 	 * Non-interlocked r/m/w is safe here because nobody else can
533 	 * write to this while the claimed bit is set and the workqueue
534 	 * lock is held.
535 	 */
536 	work->work_owner &= ~(uintptr_t)1;
537 }
538 
539 /*
540  * schedule_work(work)
541  *
542  *	If work is not already queued on system_wq, queue it to be run
543  *	by system_wq's worker thread when it next can.  True if it was
544  *	newly queued, false if it was already queued.  If the work was
545  *	already running, queue it to run again.
546  *
547  *	Caller must ensure work is not queued to run on a different
548  *	workqueue.
549  */
550 bool
551 schedule_work(struct work_struct *work)
552 {
553 
554 	return queue_work(system_wq, work);
555 }
556 
557 /*
558  * queue_work(wq, work)
559  *
560  *	If work is not already queued on wq, queue it to be run by wq's
561  *	worker thread when it next can.  True if it was newly queued,
562  *	false if it was already queued.  If the work was already
563  *	running, queue it to run again.
564  *
565  *	Caller must ensure work is not queued to run on a different
566  *	workqueue.
567  */
568 bool
569 queue_work(struct workqueue_struct *wq, struct work_struct *work)
570 {
571 	bool newly_queued;
572 
573 	KASSERT(wq != NULL);
574 
575 	mutex_enter(&wq->wq_lock);
576 	if (__predict_true(acquire_work(work, wq))) {
577 		/*
578 		 * It wasn't on any workqueue at all.  Put it on this
579 		 * one, and signal the worker thread that there is work
580 		 * to do.
581 		 */
582 		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
583 		cv_broadcast(&wq->wq_cv);
584 		newly_queued = true;
585 	} else {
586 		/*
587 		 * It was already on this workqueue.  Nothing to do
588 		 * since it is already queued.
589 		 */
590 		newly_queued = false;
591 	}
592 	mutex_exit(&wq->wq_lock);
593 
594 	return newly_queued;
595 }
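
/*
 * Example (illustrative; sc, sc_wq, and sc_work are hypothetical
 * driver state):
 *
 *	bool queued = queue_work(sc->sc_wq, &sc->sc_work);
 *
 * queued is true if the work was newly put on the queue, false if it
 * was already pending; the handler runs once either way.
 */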
596 
597 /*
598  * cancel_work(work)
599  *
600  *	If work was queued, remove it from the queue and return true.
601  *	If work was not queued, return false.  Work may still be
602  *	running when this returns.
603  */
604 bool
605 cancel_work(struct work_struct *work)
606 {
607 	struct workqueue_struct *wq;
608 	bool cancelled_p = false;
609 
610 	/* If there's no workqueue, nothing to cancel.   */
611 	if ((wq = work_queue(work)) == NULL)
612 		goto out;
613 
614 	mutex_enter(&wq->wq_lock);
615 	if (__predict_false(work_queue(work) != wq)) {
616 		/*
617 		 * It has finished execution or been cancelled by
618 		 * another thread, and has been moved off the
619 		 * workqueue, so it's too late to cancel.
620 		 */
621 		cancelled_p = false;
622 	} else {
623 		/* Check whether it's on the queue.  */
624 		if (work_claimed(work, wq)) {
625 			/*
626 			 * It is still on the queue.  Take it off the
627 			 * queue and report successful cancellation.
628 			 */
629 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
630 			release_work(work, wq);
631 			/* Can't dereference work after this point.  */
632 			cancelled_p = true;
633 		} else {
634 			/* Not on the queue.  Couldn't cancel it.  */
635 			cancelled_p = false;
636 		}
637 	}
638 	mutex_exit(&wq->wq_lock);
639 
640 out:	return cancelled_p;
641 }
642 
643 /*
644  * cancel_work_sync(work)
645  *
646  *	If work was queued, remove it from the queue and return true.
647  *	If work was not queued, return false.  Either way, if work is
648  *	currently running, wait for it to complete.
649  *
650  *	May sleep.
651  */
652 bool
653 cancel_work_sync(struct work_struct *work)
654 {
655 	struct workqueue_struct *wq;
656 	bool cancelled_p = false;
657 
658 	/* If there's no workqueue, nothing to cancel.   */
659 	if ((wq = work_queue(work)) == NULL)
660 		goto out;
661 
662 	mutex_enter(&wq->wq_lock);
663 	if (__predict_false(work_queue(work) != wq)) {
664 		/*
665 		 * It has finished execution or been cancelled by
666 		 * another thread, and has been moved off the
667 		 * workqueue, so it's too late to cancel.
668 		 */
669 		cancelled_p = false;
670 	} else {
671 		/* Check whether it's on the queue.  */
672 		if (work_claimed(work, wq)) {
673 			/*
674 			 * It is still on the queue.  Take it off the
675 			 * queue and report successful cancellation.
676 			 */
677 			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
678 			release_work(work, wq);
679 			/* Can't dereference work after this point.  */
680 			cancelled_p = true;
681 		} else {
682 			/* Not on the queue.  Couldn't cancel it.  */
683 			cancelled_p = false;
684 		}
685 		/* If it's still running, wait for it to complete.  */
686 		if (wq->wq_current_work == work)
687 			wait_for_current_work(work, wq);
688 	}
689 	mutex_exit(&wq->wq_lock);
690 
691 out:	return cancelled_p;
692 }
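
/*
 * Example detach-path sketch (illustrative; example_detach, sc_work,
 * and sc_wq are hypothetical):
 *
 *	static int
 *	example_detach(device_t self, int flags)
 *	{
 *		struct example_softc *sc = device_private(self);
 *
 *		cancel_work_sync(&sc->sc_work);
 *		destroy_workqueue(sc->sc_wq);
 *		return 0;
 *	}
 */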
693 
694 /*
695  * wait_for_current_work(work, wq)
696  *
697  *	wq must be currently executing work.  Wait for it to finish.
698  *
699  *	Does not dereference work.
700  */
701 static void
702 wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
703 {
704 	uint64_t gen;
705 
706 	KASSERT(mutex_owned(&wq->wq_lock));
707 	KASSERT(wq->wq_current_work == work);
708 
709 	/* Wait only one generation in case it gets requeued quickly.  */
710 	gen = wq->wq_gen;
711 	do {
712 		cv_wait(&wq->wq_cv, &wq->wq_lock);
713 	} while (wq->wq_current_work == work && wq->wq_gen == gen);
714 }
715 
716 /*
717  * Delayed work
718  */
719 
720 /*
721  * INIT_DELAYED_WORK(dw, fn)
722  *
723  *	Initialize dw for use with a workqueue to call fn in a worker
724  *	thread after a delay.  There is no corresponding destruction
725  *	operation.
726  */
727 void
728 INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
729 {
730 
731 	INIT_WORK(&dw->work, fn);
732 	dw->dw_state = DELAYED_WORK_IDLE;
733 	dw->dw_resched = -1;
734 
735 	/*
736 	 * Defer callout_init until we are going to schedule the
737 	 * callout, which can then callout_destroy it, because
738 	 * otherwise since there's no DESTROY_DELAYED_WORK or anything
739 	 * we have no opportunity to call callout_destroy.
740 	 */
741 }
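
/*
 * Example (illustrative; sc_ticker and example_tick are hypothetical):
 *
 *	INIT_DELAYED_WORK(&sc->sc_ticker, example_tick);
 *
 * example_tick has the usual void (*)(struct work_struct *) signature
 * and can recover the delayed_work with
 * container_of(work, struct delayed_work, work).
 */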
742 
743 /*
744  * schedule_delayed_work(dw, ticks)
745  *
746  *	If it is not currently scheduled, schedule dw to run after
747  *	ticks on system_wq.  If currently executing and not already
748  *	rescheduled, reschedule it.  True if it was newly scheduled,
749  *	false if it was already scheduled.
750  *
751  *	If ticks == 0, queue it to run as soon as the worker can,
752  *	without waiting for the next callout tick to run.
753  */
754 bool
755 schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
756 {
757 
758 	return queue_delayed_work(system_wq, dw, ticks);
759 }
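
/*
 * Example (illustrative; sc_ticker is hypothetical): schedule the work
 * to run on system_wq roughly one second from now:
 *
 *	schedule_delayed_work(&sc->sc_ticker, mstohz(1000));
 */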
760 
761 /*
762  * dw_callout_init(wq, dw)
763  *
764  *	Initialize the callout of dw and transition to
765  *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
766  */
767 static void
768 dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
769 {
770 
771 	KASSERT(mutex_owned(&wq->wq_lock));
772 	KASSERT(work_queue(&dw->work) == wq);
773 	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
774 
775 	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
776 	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
777 	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
778 	dw->dw_state = DELAYED_WORK_SCHEDULED;
779 }
780 
781 /*
782  * dw_callout_destroy(wq, dw)
783  *
784  *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
785  */
786 static void
787 dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
788 {
789 
790 	KASSERT(mutex_owned(&wq->wq_lock));
791 	KASSERT(work_queue(&dw->work) == wq);
792 	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
793 	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
794 	    dw->dw_state == DELAYED_WORK_CANCELLED);
795 
796 	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
797 	callout_destroy(&dw->dw_callout);
798 	dw->dw_resched = -1;
799 	dw->dw_state = DELAYED_WORK_IDLE;
800 }
801 
802 /*
803  * cancel_delayed_work_done(wq, dw)
804  *
805  *	Complete cancellation of a delayed work: transition from
806  *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
807  *	workqueue.  Caller must not dereference dw after this returns.
808  */
809 static void
810 cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
811 {
812 
813 	KASSERT(mutex_owned(&wq->wq_lock));
814 	KASSERT(work_queue(&dw->work) == wq);
815 	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
816 
817 	dw_callout_destroy(wq, dw);
818 	release_work(&dw->work, wq);
819 	/* Can't dereference dw after this point.  */
820 }
821 
822 /*
823  * queue_delayed_work(wq, dw, ticks)
824  *
825  *	If it is not currently scheduled, schedule dw to run after
826  *	ticks on wq.  If currently queued, remove it from the queue
827  *	first.
828  *
829  *	If ticks == 0, queue it to run as soon as the worker can,
830  *	without waiting for the next callout tick to run.
831  */
832 bool
833 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
834     unsigned long ticks)
835 {
836 	bool newly_queued;
837 
838 	mutex_enter(&wq->wq_lock);
839 	if (__predict_true(acquire_work(&dw->work, wq))) {
840 		/*
841 		 * It wasn't on any workqueue at all.  Schedule it to
842 		 * run on this one.
843 		 */
844 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
845 		if (ticks == 0) {
846 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
847 			    work_entry);
848 			cv_broadcast(&wq->wq_cv);
849 		} else {
850 			/*
851 			 * Initialize a callout and schedule to run
852 			 * after a delay.
853 			 */
854 			dw_callout_init(wq, dw);
855 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
856 		}
857 		newly_queued = true;
858 	} else {
859 		/* It was already on this workqueue.  */
860 		switch (dw->dw_state) {
861 		case DELAYED_WORK_IDLE:
862 		case DELAYED_WORK_SCHEDULED:
863 		case DELAYED_WORK_RESCHEDULED:
864 			/* On the queue or already scheduled.  Leave it.  */
865 			newly_queued = false;
866 			break;
867 		case DELAYED_WORK_CANCELLED:
868 			/*
869 			 * Scheduled and the callout began, but it was
870 			 * cancelled.  Reschedule it.
871 			 */
872 			if (ticks == 0) {
873 				dw->dw_state = DELAYED_WORK_SCHEDULED;
874 			} else {
875 				dw->dw_state = DELAYED_WORK_RESCHEDULED;
876 				dw->dw_resched = MIN(INT_MAX, ticks);
877 			}
878 			newly_queued = true;
879 			break;
880 		default:
881 			panic("invalid delayed work state: %d",
882 			    dw->dw_state);
883 		}
884 	}
885 	mutex_exit(&wq->wq_lock);
886 
887 	return newly_queued;
888 }
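
/*
 * Example (illustrative; sc_wq and sc_poll are hypothetical): queue
 * sc_poll to run on a private workqueue about a tenth of a second
 * from now:
 *
 *	queue_delayed_work(sc->sc_wq, &sc->sc_poll, hz / 10);
 */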
889 
890 /*
891  * mod_delayed_work(wq, dw, ticks)
892  *
893  *	Schedule dw to run after ticks.  If scheduled or queued,
894  *	reschedule.  If ticks == 0, run without delay.
895  *
896  *	True if it modified the timer of an already scheduled work,
897  *	false if it newly scheduled the work.
898  */
899 bool
900 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
901     unsigned long ticks)
902 {
903 	bool timer_modified;
904 
905 	mutex_enter(&wq->wq_lock);
906 	if (acquire_work(&dw->work, wq)) {
907 		/*
908 		 * It wasn't on any workqueue at all.  Schedule it to
909 		 * run on this one.
910 		 */
911 		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
912 		if (ticks == 0) {
913 			/*
914 			 * Run immediately: put it on the queue and
915 			 * signal the worker thread.
916 			 */
917 			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
918 			    work_entry);
919 			cv_broadcast(&wq->wq_cv);
920 		} else {
921 			/*
922 			 * Initialize a callout and schedule to run
923 			 * after a delay.
924 			 */
925 			dw_callout_init(wq, dw);
926 			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
927 		}
928 		timer_modified = false;
929 	} else {
930 		/* It was already on this workqueue.  */
931 		switch (dw->dw_state) {
932 		case DELAYED_WORK_IDLE:
933 			/* On the queue.  */
934 			if (ticks == 0) {
935 				/* Leave it be.  */
936 			} else {
937 				/* Remove from the queue and schedule.  */
938 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
939 				    work_entry);
940 				dw_callout_init(wq, dw);
941 				callout_schedule(&dw->dw_callout,
942 				    MIN(INT_MAX, ticks));
943 			}
944 			timer_modified = true;
945 			break;
946 		case DELAYED_WORK_SCHEDULED:
947 			/*
948 			 * It is scheduled to run after a delay.  Try
949 			 * to stop it and reschedule it; if we can't,
950 			 * either reschedule it or cancel it to put it
951 			 * on the queue, and inform the callout.
952 			 */
953 			if (callout_stop(&dw->dw_callout)) {
954 				/* Can't stop, callout has begun.  */
955 				if (ticks == 0) {
956 					/*
957 					 * We don't actually need to do
958 					 * anything.  The callout will
959 					 * queue it as soon as it gets
960 					 * the lock.
961 					 */
962 				} else {
963 					/* Ask the callout to reschedule.  */
964 					dw->dw_state = DELAYED_WORK_RESCHEDULED;
965 					dw->dw_resched = MIN(INT_MAX, ticks);
966 				}
967 			} else {
968 				/* We stopped the callout before it began.  */
969 				if (ticks == 0) {
970 					/*
971 					 * Run immediately: destroy the
972 					 * callout, put it on the
973 					 * queue, and signal the worker
974 					 * thread.
975 					 */
976 					dw_callout_destroy(wq, dw);
977 					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
978 					    &dw->work, work_entry);
979 					cv_broadcast(&wq->wq_cv);
980 				} else {
981 					/*
982 					 * Reschedule the callout.  No
983 					 * state change.
984 					 */
985 					callout_schedule(&dw->dw_callout,
986 					    MIN(INT_MAX, ticks));
987 				}
988 			}
989 			timer_modified = true;
990 			break;
991 		case DELAYED_WORK_RESCHEDULED:
992 			/*
993 			 * Someone rescheduled it after the callout
994 			 * started but before the poor thing even had a
995 			 * chance to acquire the lock.
996 			 */
997 			if (ticks == 0) {
998 				/*
999 				 * We can just switch back to
1000 				 * DELAYED_WORK_SCHEDULED so that the
1001 				 * callout will queue the work as soon
1002 				 * as it gets the lock.
1003 				 */
1004 				dw->dw_state = DELAYED_WORK_SCHEDULED;
1005 				dw->dw_resched = -1;
1006 			} else {
1007 				/* Change the rescheduled time.  */
1008 				dw->dw_resched = MIN(INT_MAX, ticks);
1009 			}
1010 			timer_modified = true;
1011 			break;
1012 		case DELAYED_WORK_CANCELLED:
1013 			/*
1014 			 * Someone cancelled it after the callout
1015 			 * started but before the poor thing even had a
1016 			 * chance to acquire the lock.
1017 			 */
1018 			if (ticks == 0) {
1019 				/*
1020 				 * We can just switch back to
1021 				 * DELAYED_WORK_SCHEDULED so that the
1022 				 * callout will queue the work as soon
1023 				 * as it gets the lock.
1024 				 */
1025 				dw->dw_state = DELAYED_WORK_SCHEDULED;
1026 			} else {
1027 				/* Ask it to reschedule.  */
1028 				dw->dw_state = DELAYED_WORK_RESCHEDULED;
1029 				dw->dw_resched = MIN(INT_MAX, ticks);
1030 			}
1031 			timer_modified = false;
1032 			break;
1033 		default:
1034 			panic("invalid delayed work state: %d", dw->dw_state);
1035 		}
1036 	}
1037 	mutex_exit(&wq->wq_lock);
1038 
1039 	return timer_modified;
1040 }
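
/*
 * Example (illustrative; sc_watchdog is hypothetical): a debounce or
 * watchdog pattern.  Each call pushes the deadline back, so the
 * handler runs only once things have been quiet for about a second:
 *
 *	mod_delayed_work(system_wq, &sc->sc_watchdog, mstohz(1000));
 */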
1041 
1042 /*
1043  * cancel_delayed_work(dw)
1044  *
1045  *	If work was scheduled or queued, remove it from the schedule or
1046  *	queue and return true.  If work was not scheduled or queued,
1047  *	return false.  Note that work may already be running; if it
1048  *	hasn't been rescheduled or requeued, then cancel_delayed_work
1049  *	will return false, and either way, cancel_delayed_work will NOT
1050  *	wait for the work to complete.
1051  */
1052 bool
1053 cancel_delayed_work(struct delayed_work *dw)
1054 {
1055 	struct workqueue_struct *wq;
1056 	bool cancelled_p;
1057 
1058 	/* If there's no workqueue, nothing to cancel.   */
1059 	if ((wq = work_queue(&dw->work)) == NULL)
1060 		return false;
1061 
1062 	mutex_enter(&wq->wq_lock);
1063 	if (__predict_false(work_queue(&dw->work) != wq)) {
1064 		cancelled_p = false;
1065 	} else {
1066 		switch (dw->dw_state) {
1067 		case DELAYED_WORK_IDLE:
1068 			/*
1069 			 * It is either on the queue or already running
1070 			 * or both.
1071 			 */
1072 			if (work_claimed(&dw->work, wq)) {
1073 				/* On the queue.  Remove and release.  */
1074 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1075 				    work_entry);
1076 				release_work(&dw->work, wq);
1077 				/* Can't dereference dw after this point.  */
1078 				cancelled_p = true;
1079 			} else {
1080 				/* Not on the queue, so didn't cancel.  */
1081 				cancelled_p = false;
1082 			}
1083 			break;
1084 		case DELAYED_WORK_SCHEDULED:
1085 			/*
1086 			 * If it is scheduled, mark it cancelled and
1087 			 * try to stop the callout before it starts.
1088 			 *
1089 			 * If it's too late and the callout has already
1090 			 * begun to execute, tough.
1091 			 *
1092 			 * If we stopped the callout before it started,
1093 			 * however, then destroy the callout and
1094 			 * dissociate it from the workqueue ourselves.
1095 			 */
1096 			dw->dw_state = DELAYED_WORK_CANCELLED;
1097 			cancelled_p = true;
1098 			if (!callout_stop(&dw->dw_callout))
1099 				cancel_delayed_work_done(wq, dw);
1100 			break;
1101 		case DELAYED_WORK_RESCHEDULED:
1102 			/*
1103 			 * If it is being rescheduled, the callout has
1104 			 * already fired.  We must ask it to cancel.
1105 			 */
1106 			dw->dw_state = DELAYED_WORK_CANCELLED;
1107 			dw->dw_resched = -1;
1108 			cancelled_p = true;
1109 			break;
1110 		case DELAYED_WORK_CANCELLED:
1111 			/*
1112 			 * If it is being cancelled, the callout has
1113 			 * already fired.  There is nothing more for us
1114 			 * to do.  Someone else claims credit for
1115 			 * cancelling it.
1116 			 */
1117 			cancelled_p = false;
1118 			break;
1119 		default:
1120 			panic("invalid delayed work state: %d",
1121 			    dw->dw_state);
1122 		}
1123 	}
1124 	mutex_exit(&wq->wq_lock);
1125 
1126 	return cancelled_p;
1127 }
1128 
1129 /*
1130  * cancel_delayed_work_sync(dw)
1131  *
1132  *	If work was scheduled or queued, remove it from the schedule or
1133  *	queue and return true.  If work was not scheduled or queued,
1134  *	return false.  Note that work may already be running; if it
1135  *	hasn't been rescheduled or requeued, then cancel_delayed_work
1136  *	will return false; either way, wait for it to complete.
1137  */
1138 bool
1139 cancel_delayed_work_sync(struct delayed_work *dw)
1140 {
1141 	struct workqueue_struct *wq;
1142 	bool cancelled_p;
1143 
1144 	/* If there's no workqueue, nothing to cancel.  */
1145 	if ((wq = work_queue(&dw->work)) == NULL)
1146 		return false;
1147 
1148 	mutex_enter(&wq->wq_lock);
1149 	if (__predict_false(work_queue(&dw->work) != wq)) {
1150 		cancelled_p = false;
1151 	} else {
1152 		switch (dw->dw_state) {
1153 		case DELAYED_WORK_IDLE:
1154 			/*
1155 			 * It is either on the queue or already running
1156 			 * or both.
1157 			 */
1158 			if (work_claimed(&dw->work, wq)) {
1159 				/* On the queue.  Remove and release.  */
1160 				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1161 				    work_entry);
1162 				release_work(&dw->work, wq);
1163 				/* Can't dereference dw after this point.  */
1164 				cancelled_p = true;
1165 			} else {
1166 				/* Not on the queue, so didn't cancel. */
1167 				cancelled_p = false;
1168 			}
1169 			/* If it's still running, wait for it to complete.  */
1170 			if (wq->wq_current_work == &dw->work)
1171 				wait_for_current_work(&dw->work, wq);
1172 			break;
1173 		case DELAYED_WORK_SCHEDULED:
1174 			/*
1175 			 * If it is scheduled, mark it cancelled and
1176 			 * try to stop the callout before it starts.
1177 			 *
1178 			 * If it's too late and the callout has already
1179 			 * begun to execute, we must wait for it to
1180 			 * complete.  But we got in soon enough to ask
1181 			 * the callout not to run, so we successfully
1182 			 * cancelled it in that case.
1183 			 *
1184 			 * If we stopped the callout before it started,
1185 			 * then we must destroy the callout and
1186 			 * dissociate it from the workqueue ourselves.
1187 			 */
1188 			dw->dw_state = DELAYED_WORK_CANCELLED;
1189 			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
1190 				cancel_delayed_work_done(wq, dw);
1191 			cancelled_p = true;
1192 			break;
1193 		case DELAYED_WORK_RESCHEDULED:
1194 			/*
1195 			 * If it is being rescheduled, the callout has
1196 			 * already fired.  We must ask it to cancel and
1197 			 * wait for it to complete.
1198 			 */
1199 			dw->dw_state = DELAYED_WORK_CANCELLED;
1200 			dw->dw_resched = -1;
1201 			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1202 			cancelled_p = true;
1203 			break;
1204 		case DELAYED_WORK_CANCELLED:
1205 			/*
1206 			 * If it is being cancelled, the callout has
1207 			 * already fired.  We need only wait for it to
1208 			 * complete.  Someone else, however, claims
1209 			 * credit for cancelling it.
1210 			 */
1211 			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1212 			cancelled_p = false;
1213 			break;
1214 		default:
1215 			panic("invalid delayed work state: %d",
1216 			    dw->dw_state);
1217 		}
1218 	}
1219 	mutex_exit(&wq->wq_lock);
1220 
1221 	return cancelled_p;
1222 }
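
/*
 * Example teardown sketch (illustrative; sc_ticker is hypothetical):
 *
 *	cancel_delayed_work_sync(&sc->sc_ticker);
 *
 * After this returns the callout is stopped and the handler is not
 * running, so it is safe to free the enclosing structure, provided the
 * handler does not requeue itself concurrently.
 */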
1223 
1224 /*
1225  * Flush
1226  */
1227 
1228 /*
1229  * flush_scheduled_work()
1230  *
1231  *	Wait for all work queued on system_wq to complete.  This does
1232  *	not include delayed work.
1233  */
1234 void
1235 flush_scheduled_work(void)
1236 {
1237 
1238 	flush_workqueue(system_wq);
1239 }
1240 
1241 /*
1242  * flush_workqueue_locked(wq)
1243  *
1244  *	Wait for all work queued on wq to complete.  This does not
1245  *	include delayed work.
1246  *
1247  *	Caller must hold wq's lock.
1248  */
1249 static void
1250 flush_workqueue_locked(struct workqueue_struct *wq)
1251 {
1252 	uint64_t gen;
1253 
1254 	KASSERT(mutex_owned(&wq->wq_lock));
1255 
1256 	/* Get the current generation number.  */
1257 	gen = wq->wq_gen;
1258 
1259 	/*
1260 	 * If there's a batch of work in progress, we must wait for the
1261 	 * worker thread to finish that batch.
1262 	 */
1263 	if (wq->wq_current_work != NULL)
1264 		gen++;
1265 
1266 	/*
1267 	 * If there's any work yet to be claimed from the queue by the
1268 	 * worker thread, we must wait for it to finish one more batch
1269 	 * too.
1270 	 */
1271 	if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue))
1272 		gen++;
1273 
1274 	/* Wait until the generation number has caught up.  */
1275 	while (wq->wq_gen < gen)
1276 		cv_wait(&wq->wq_cv, &wq->wq_lock);
1277 }
1278 
1279 /*
1280  * flush_workqueue(wq)
1281  *
1282  *	Wait for all work queued on wq to complete.  This does not
1283  *	include delayed work.
1284  */
1285 void
1286 flush_workqueue(struct workqueue_struct *wq)
1287 {
1288 
1289 	mutex_enter(&wq->wq_lock);
1290 	flush_workqueue_locked(wq);
1291 	mutex_exit(&wq->wq_lock);
1292 }
1293 
1294 /*
1295  * flush_work(work)
1296  *
1297  *	If work is queued or currently executing, wait for it to
1298  *	complete.
1299  */
1300 void
1301 flush_work(struct work_struct *work)
1302 {
1303 	struct workqueue_struct *wq;
1304 
1305 	/* If there's no workqueue, nothing to flush.  */
1306 	if ((wq = work_queue(work)) == NULL)
1307 		return;
1308 
1309 	flush_workqueue(wq);
1310 }
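
/*
 * Example (illustrative; sc_work is hypothetical):
 *
 *	flush_work(&sc->sc_work);
 *
 * waits for any pending or in-progress invocation of sc_work's handler
 * to finish, e.g. before tearing down resources the handler uses.
 */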
1311 
1312 /*
1313  * flush_delayed_work(dw)
1314  *
1315  *	If dw is scheduled to run after a delay, queue it immediately
1316  *	instead.  Then, if dw is queued or currently executing, wait
1317  *	for it to complete.
1318  */
1319 void
1320 flush_delayed_work(struct delayed_work *dw)
1321 {
1322 	struct workqueue_struct *wq;
1323 
1324 	/* If there's no workqueue, nothing to flush.  */
1325 	if ((wq = work_queue(&dw->work)) == NULL)
1326 		return;
1327 
1328 	mutex_enter(&wq->wq_lock);
1329 	if (__predict_false(work_queue(&dw->work) != wq)) {
1330 		/*
1331 		 * Moved off the queue already (and possibly to another
1332 		 * queue, though that would be ill-advised), so it must
1333 		 * have completed, and we have nothing more to do.
1334 		 */
1335 	} else {
1336 		switch (dw->dw_state) {
1337 		case DELAYED_WORK_IDLE:
1338 			/*
1339 			 * It has a workqueue assigned and the callout
1340 			 * is idle, so it must be in progress or on the
1341 			 * queue.  In that case, we'll wait for it to
1342 			 * complete.
1343 			 */
1344 			break;
1345 		case DELAYED_WORK_SCHEDULED:
1346 		case DELAYED_WORK_RESCHEDULED:
1347 		case DELAYED_WORK_CANCELLED:
1348 			/*
1349 			 * The callout is scheduled, and may have even
1350 			 * started.  Mark it as scheduled so that if
1351 			 * the callout has fired it will queue the work
1352 			 * itself.  Try to stop the callout -- if we
1353 			 * can, queue the work now; if we can't, wait
1354 			 * for the callout to complete, which entails
1355 			 * queueing it.
1356 			 */
1357 			dw->dw_state = DELAYED_WORK_SCHEDULED;
1358 			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
1359 				/*
1360 				 * We stopped it before it ran.  No
1361 				 * state change in the interim is
1362 				 * possible.  Destroy the callout and
1363 				 * queue it ourselves.
1364 				 */
1365 				KASSERT(dw->dw_state ==
1366 				    DELAYED_WORK_SCHEDULED);
1367 				dw_callout_destroy(wq, dw);
1368 				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1369 				    work_entry);
1370 				cv_broadcast(&wq->wq_cv);
1371 			}
1372 			break;
1373 		default:
1374 			panic("invalid delayed work state: %d", dw->dw_state);
1375 		}
1376 		/*
1377 		 * Waiting for the whole queue to flush is overkill,
1378 		 * but doesn't hurt.
1379 		 */
1380 		flush_workqueue_locked(wq);
1381 	}
1382 	mutex_exit(&wq->wq_lock);
1383 }
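
/*
 * Example (illustrative; sc_ticker is hypothetical):
 *
 *	flush_delayed_work(&sc->sc_ticker);
 *
 * forces a scheduled-but-not-yet-due sc_ticker to run now and waits
 * for it to complete.
 */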
1384