/*	$NetBSD: linux_work.c,v 1.47 2021/12/19 00:49:00 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.47 2021/12/19 00:49:00 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#ifndef _MODULE
#include <sys/once.h>
#endif
#include <sys/queue.h>
#include <sys/sdt.h>

#include <linux/workqueue.h>

TAILQ_HEAD(work_head, work_struct);
TAILQ_HEAD(dwork_head, delayed_work);

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	struct dwork_head	wq_delayed; /* delayed work scheduled */
	struct work_head	wq_queue;   /* work to run */
	struct work_head	wq_dqueue;  /* delayed work to run now */
	struct work_struct	*wq_current_work;
	int			wq_flags;
	bool			wq_dying;
	uint64_t		wq_gen;
	struct lwp		*wq_lwp;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static bool		work_claimed(struct work_struct *,
			    struct workqueue_struct *);
static struct workqueue_struct *
			work_queue(struct work_struct *);
static bool		acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		wait_for_current_work(struct work_struct *,
			    struct workqueue_struct *);
static void		dw_callout_init(struct workqueue_struct *,
			    struct delayed_work *);
static void		dw_callout_destroy(struct workqueue_struct *,
			    struct delayed_work *);
static void		cancel_delayed_work_done(struct workqueue_struct *,
			    struct delayed_work *);

SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, release,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, queue,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
    "unsigned long"/*ticks*/);
SDT_PROBE_DEFINE2(sdt, linux, work, timer,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__start,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__done,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, run,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, done,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__done,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__done,
    "struct workqueue_struct *"/*wq*/);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;
struct workqueue_struct	*system_unbound_wq __read_mostly;

static inline uintptr_t
atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
{

	return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
}

/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue subsystem.  Return 0 on success,
 *	NetBSD error on failure.
 */
static int
linux_workqueue_init0(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	system_unbound_wq = alloc_ordered_workqueue("lnxubdwq", 0);
	if (system_unbound_wq == NULL) {
		error = ENOMEM;
		goto fail4;
	}

	return 0;

fail5: __unused
	destroy_workqueue(system_unbound_wq);
fail4:	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_workqueue_fini()
 *
 *	Destroy the Linux workqueue subsystem.  Never fails.
 */
static void
linux_workqueue_fini0(void)
{

	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

#ifndef _MODULE
static ONCE_DECL(linux_workqueue_init_once);
#endif

int
linux_workqueue_init(void)
{
#ifdef _MODULE
	return linux_workqueue_init0();
#else
	return INIT_ONCE(&linux_workqueue_init_once, &linux_workqueue_init0);
#endif
}

void
linux_workqueue_fini(void)
{
#ifdef _MODULE
	return linux_workqueue_fini0();
#else
	return FINI_ONCE(&linux_workqueue_init_once, &linux_workqueue_fini0);
#endif
}
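
/*
 * Usage sketch (illustrative, not part of the original file): a
 * consumer of this API is expected to initialize the subsystem once
 * before touching system_wq or allocating its own workqueues:
 *
 *	error = linux_workqueue_init();
 *	if (error)
 *		return error;
 *	... now system_wq et al. are safe to use ...
 */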

/*
 * Workqueues
 */

/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue of the given name.  No flags are currently
 *	defined.  Return NULL on failure, pointer to struct
 *	workqueue_struct object on success.
 */
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	TAILQ_INIT(&wq->wq_dqueue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_dying = false;
	wq->wq_gen = 0;
	wq->wq_lwp = NULL;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

/*
 * destroy_workqueue(wq)
 *
 *	Destroy a workqueue created with alloc_ordered_workqueue.
 *	Cancel any pending delayed work.  Wait for all queued work to
 *	complete.
 *
 *	May sleep.
 */
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	mutex_enter(&wq->wq_lock);
	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);

		KASSERT(work_queue(&dw->work) == wq);
		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
			dw->dw_state == DELAYED_WORK_CANCELLED),
		    "delayed work %p in bad state: %d",
		    dw, dw->dw_state);

		/*
		 * Mark it cancelled and try to stop the callout before
		 * it starts.
		 *
		 * If it's too late and the callout has already begun
		 * to execute, then it will notice that we asked to
		 * cancel it and remove itself from the queue before
		 * returning.
		 *
		 * If we stopped the callout before it started,
		 * however, then we can safely destroy the callout and
		 * dissociate it from the workqueue ourselves.
		 */
		SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
		dw->dw_state = DELAYED_WORK_CANCELLED;
		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
			cancel_delayed_work_done(wq, dw);
	}
	mutex_exit(&wq->wq_lock);

	/*
	 * At this point, no new work can be put on the queue.
	 */

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_dying);
	KASSERT(wq->wq_flags == 0);
	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}
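
/*
 * Usage sketch (illustrative, not part of the original file): the
 * typical lifecycle of a private ordered workqueue in a driver.
 * destroy_workqueue cancels any pending delayed work and waits for
 * queued work, so nothing runs after detach returns.  The mydev_*
 * names are hypothetical.
 *
 *	static int
 *	mydev_attach(struct mydev_softc *sc)
 *	{
 *
 *		sc->sc_wq = alloc_ordered_workqueue("mydevwq", 0);
 *		if (sc->sc_wq == NULL)
 *			return ENOMEM;
 *		return 0;
 *	}
 *
 *	static void
 *	mydev_detach(struct mydev_softc *sc)
 *	{
 *
 *		destroy_workqueue(sc->sc_wq);
 *	}
 */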

/*
 * Work thread and callout
 */

/*
 * linux_workqueue_thread(cookie)
 *
 *	Main function for a workqueue's worker thread.  Waits until
 *	there is work queued, grabs a batch of work off the queue,
 *	executes it all, bumps the generation number, and repeats,
 *	until dying.
 */
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	struct work_head *const q[2] = { &wq->wq_queue, &wq->wq_dqueue };
	struct work_struct marker, *work;
	unsigned i;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/*
		 * Wait until there's activity.  If there's no work and
		 * we're dying, stop here.
		 */
		if (TAILQ_EMPTY(&wq->wq_queue) &&
		    TAILQ_EMPTY(&wq->wq_dqueue)) {
			if (wq->wq_dying)
				break;
			cv_wait(&wq->wq_cv, &wq->wq_lock);
			continue;
		}

		/*
		 * Start a batch of work.  Use a marker to delimit when
		 * the batch ends so we can advance the generation
		 * after the batch.
		 */
		SDT_PROBE1(sdt, linux, work, batch__start,  wq);
		for (i = 0; i < 2; i++) {
			if (TAILQ_EMPTY(q[i]))
				continue;
			TAILQ_INSERT_TAIL(q[i], &marker, work_entry);
			while ((work = TAILQ_FIRST(q[i])) != &marker) {
				void (*func)(struct work_struct *);

				KASSERT(work_queue(work) == wq);
				KASSERT(work_claimed(work, wq));
				KASSERTMSG((q[i] != &wq->wq_dqueue ||
					container_of(work, struct delayed_work,
					    work)->dw_state ==
					DELAYED_WORK_IDLE),
				    "delayed work %p queued and scheduled",
				    work);

				TAILQ_REMOVE(q[i], work, work_entry);
				KASSERT(wq->wq_current_work == NULL);
				wq->wq_current_work = work;
				func = work->func;
				release_work(work, wq);
				/* Can't dereference work after this point.  */

				mutex_exit(&wq->wq_lock);
				SDT_PROBE2(sdt, linux, work, run,  work, wq);
				(*func)(work);
				SDT_PROBE2(sdt, linux, work, done,  work, wq);
				mutex_enter(&wq->wq_lock);

				KASSERT(wq->wq_current_work == work);
				wq->wq_current_work = NULL;
				cv_broadcast(&wq->wq_cv);
			}
			TAILQ_REMOVE(q[i], &marker, work_entry);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE1(sdt, linux, work, batch__done,  wq);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

/*
 * linux_workqueue_timeout(cookie)
 *
 *	Delayed work timeout callback.
 *
 *	- If scheduled, queue it.
 *	- If rescheduled, callout_schedule ourselves again.
 *	- If cancelled, destroy the callout and release the work from
 *	  the workqueue.
 */
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = work_queue(&dw->work);

	KASSERTMSG(wq != NULL,
	    "delayed work %p state %d resched %d",
	    dw, dw->dw_state, dw->dw_resched);

	SDT_PROBE2(sdt, linux, work, timer,  dw, wq);

	mutex_enter(&wq->wq_lock);
	KASSERT(work_queue(&dw->work) == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw_callout_destroy(wq, dw);
		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		break;
	case DELAYED_WORK_RESCHEDULED:
		KASSERT(dw->dw_resched >= 0);
		callout_schedule(&dw->dw_callout, dw->dw_resched);
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		dw->dw_resched = -1;
		break;
	case DELAYED_WORK_CANCELLED:
		cancel_delayed_work_done(wq, dw);
		/* Can't dereference dw after this point.  */
		goto out;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
out:	mutex_exit(&wq->wq_lock);
}

/*
 * current_work()
 *
 *	If in a workqueue worker thread, return the work it is
 *	currently executing.  Otherwise return NULL.
 */
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}
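
/*
 * Usage sketch (illustrative, not part of the original file): a work
 * function can use current_work() to assert that it is running as
 * workqueue work.  The mydev_* names are hypothetical.
 *
 *	static void
 *	mydev_task(struct work_struct *work)
 *	{
 *		struct mydev_softc *sc =
 *		    container_of(work, struct mydev_softc, sc_work);
 *
 *		KASSERT(current_work() == &sc->sc_work);
 *		... do the deferred work ...
 *	}
 */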

/*
 * Work
 */

/*
 * INIT_WORK(work, fn)
 *
 *	Initialize work for use with a workqueue to call fn in a worker
 *	thread.  There is no corresponding destruction operation.
 */
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_owner = 0;
	work->func = fn;
}

/*
 * work_claimed(work, wq)
 *
 *	True if work is currently claimed by a workqueue, meaning it is
 *	either on the queue or scheduled in a callout.  The workqueue
 *	must be wq, and caller must hold wq's lock.
 */
static bool
work_claimed(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	return work->work_owner & 1;
}

/*
 * work_queue(work)
 *
 *	Return the last queue that work was queued on, or NULL if it
 *	was never queued.
 */
static struct workqueue_struct *
work_queue(struct work_struct *work)
{

	return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
}
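
/*
 * Note (added commentary, not in the original file): work_owner is a
 * tagged pointer.  Since struct workqueue_struct is at least
 * pointer-aligned, bit 0 is free to serve as the "claimed" flag:
 *
 *	work_owner == (uintptr_t)wq | 1	claimed: queued or scheduled on wq
 *	work_owner == (uintptr_t)wq	last ran on wq, not claimed
 *	work_owner == 0			never queued anywhere
 *
 * work_claimed() tests bit 0; work_queue() masks it off to recover
 * the workqueue pointer.
 */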

/*
 * acquire_work(work, wq)
 *
 *	Try to claim work for wq.  If work is already claimed, it must
 *	be claimed by wq; return false.  If work is not already
 *	claimed, claim it, issue a memory barrier to match any prior
 *	release_work, and return true.
 *
 *	Caller must hold wq's lock.
 */
static bool
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uintptr_t owner0, owner;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(((uintptr_t)wq & 1) == 0);

	owner = (uintptr_t)wq | 1;
	do {
		owner0 = work->work_owner;
		if (owner0 & 1) {
			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
			return false;
		}
		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
	    owner0);

	KASSERT(work_queue(work) == wq);
	membar_enter();
	SDT_PROBE2(sdt, linux, work, acquire,  work, wq);
	return true;
}

/*
 * release_work(work, wq)
 *
 *	Issue a memory barrier to match any subsequent acquire_work and
 *	dissociate work from wq.
 *
 *	Caller must hold wq's lock and work must be associated with wq.
 */
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	SDT_PROBE2(sdt, linux, work, release,  work, wq);
	membar_exit();

	/*
	 * Non-interlocked r/m/w is safe here because nobody else can
	 * write to this while the claimed bit is set and the
	 * workqueue lock is held.
	 */
	work->work_owner &= ~(uintptr_t)1;
}

/*
 * schedule_work(work)
 *
 *	If work is not already queued on system_wq, queue it to be run
 *	by system_wq's worker thread when it next can.  True if it was
 *	newly queued, false if it was already queued.  If the work was
 *	already running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

/*
 * queue_work(wq, work)
 *
 *	If work is not already queued on wq, queue it to be run by wq's
 *	worker thread when it next can.  True if it was newly queued,
 *	false if it was already queued.  If the work was already
 *	running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Put it on this
		 * one, and signal the worker thread that there is work
		 * to do.
		 */
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue,  work, wq);
		newly_queued = true;
	} else {
		/*
		 * It was already on this workqueue.  Nothing to do
		 * since it is already queued.
		 */
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}
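
/*
 * Usage sketch (illustrative, not part of the original file):
 * deferring interrupt work to a worker thread.  The mydev_* names
 * are hypothetical; sc_work would be set up with INIT_WORK at attach
 * time.
 *
 *	static int
 *	mydev_intr(void *cookie)
 *	{
 *		struct mydev_softc *sc = cookie;
 *
 *		... acknowledge the interrupt ...
 *		(void)queue_work(sc->sc_wq, &sc->sc_work);
 *		return 1;
 *	}
 */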

/*
 * cancel_work(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Work may still be
 *	running when this returns.
 */
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * cancel_work_sync(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Either way, if work is
 *	currently running, wait for it to complete.
 *
 *	May sleep.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel,  work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
		/* If it's still running, wait for it to complete.  */
		if (wq->wq_current_work == work)
			wait_for_current_work(work, wq);
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}
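
/*
 * Usage sketch (illustrative, not part of the original file):
 * detach-time teardown.  After cancel_work_sync returns, sc_work is
 * neither queued nor running, so it is safe to free whatever the
 * work function touches.  The mydev_* names are hypothetical.
 *
 *	static void
 *	mydev_detach(struct mydev_softc *sc)
 *	{
 *
 *		(void)cancel_work_sync(&sc->sc_work);
 *		kmem_free(sc->sc_buf, sc->sc_buflen);
 *	}
 */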

/*
 * wait_for_current_work(work, wq)
 *
 *	wq must be currently executing work.  Wait for it to finish.
 *
 *	Does not dereference work.
 */
static void
wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(wq->wq_current_work == work);

	/* Wait only one generation in case it gets requeued quickly.  */
	SDT_PROBE2(sdt, linux, work, wait__start,  work, wq);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (wq->wq_current_work == work && wq->wq_gen == gen);
	SDT_PROBE2(sdt, linux, work, wait__done,  work, wq);
}

/*
 * Delayed work
 */

/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *	Initialize dw for use with a workqueue to call fn in a worker
 *	thread after a delay.  There is no corresponding destruction
 *	operation.
 */
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;
	dw->dw_resched = -1;

	/*
	 * Defer callout_init until we are going to schedule the
	 * callout, which can then callout_destroy it: since there's
	 * no DESTROY_DELAYED_WORK or anything like it, that is the
	 * only opportunity anyone gets to call callout_destroy.
	 */
}

/*
 * schedule_delayed_work(dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on system_wq.  If currently executing and not already
 *	rescheduled, reschedule it.  True if it was newly scheduled,
 *	false if it was already scheduled.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}
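
/*
 * Usage sketch (illustrative, not part of the original file): a
 * self-rearming poll loop, started at attach time with
 * INIT_DELAYED_WORK(&sc->sc_poll, &mydev_poll) followed by
 * schedule_delayed_work(&sc->sc_poll, hz).  The mydev_* names, the
 * sc_dying flag, and the one-second period are hypothetical.
 *
 *	static void
 *	mydev_poll(struct work_struct *work)
 *	{
 *		struct mydev_softc *sc = container_of(work,
 *		    struct mydev_softc, sc_poll.work);
 *
 *		... poll the device ...
 *		if (!sc->sc_dying)
 *			(void)schedule_delayed_work(&sc->sc_poll, hz);
 *	}
 */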

/*
 * dw_callout_init(wq, dw)
 *
 *	Initialize the callout of dw and transition to
 *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
 */
static void
dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);

	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
	dw->dw_state = DELAYED_WORK_SCHEDULED;
}

/*
 * dw_callout_destroy(wq, dw)
 *
 *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
 */
static void
dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
	    dw->dw_state == DELAYED_WORK_CANCELLED);

	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);
	dw->dw_resched = -1;
	dw->dw_state = DELAYED_WORK_IDLE;
}

/*
 * cancel_delayed_work_done(wq, dw)
 *
 *	Complete cancellation of a delayed work: transition from
 *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
 *	workqueue.  Caller must not dereference dw after this returns.
 */
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);

	dw_callout_destroy(wq, dw);
	release_work(&dw->work, wq);
	/* Can't dereference dw after this point.  */
}

/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on wq, and return true.  If it is already scheduled or
 *	queued, leave it alone and return false -- unless its callout
 *	has fired and a cancellation is pending, in which case
 *	re-schedule it and return true.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(&dw->work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
		}
		newly_queued = true;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
			/* On the queue or already scheduled.  Leave it.  */
			newly_queued = false;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Scheduled and the callout began, but it was
			 * cancelled.  Reschedule it.
			 */
			if (ticks == 0) {
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			newly_queued = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run after ticks.  If scheduled or queued,
 *	reschedule.  If ticks == 0, run without delay.
 *
 *	True if it modified the timer of an already scheduled work,
 *	false if it newly scheduled the work.
 */
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if (acquire_work(&dw->work, wq)) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			/*
			 * Run immediately: put it on the queue and
			 * signal the worker thread.
			 */
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue,  &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule,  dw, wq, ticks);
		}
		timer_modified = false;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/* On the queue.  */
			if (ticks == 0) {
				/* Leave it be.  */
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Remove from the queue and schedule.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				dw_callout_init(wq, dw);
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * It is scheduled to run after a delay.  Try
			 * to stop it and reschedule it; if we can't,
			 * either reschedule it or cancel it to put it
			 * on the queue, and inform the callout.
			 */
			if (callout_stop(&dw->dw_callout)) {
				/* Can't stop, callout has begun.  */
				if (ticks == 0) {
					/*
					 * We don't actually need to do
					 * anything.  The callout will
					 * queue it as soon as it gets
					 * the lock.
					 */
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/* Ask the callout to reschedule.  */
					dw->dw_state = DELAYED_WORK_RESCHEDULED;
					dw->dw_resched = MIN(INT_MAX, ticks);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			} else {
				/* We stopped the callout before it began.  */
				if (ticks == 0) {
					/*
					 * Run immediately: destroy the
					 * callout, put it on the
					 * queue, and signal the worker
					 * thread.
					 */
					dw_callout_destroy(wq, dw);
					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
					    &dw->work, work_entry);
					cv_broadcast(&wq->wq_cv);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/*
					 * Reschedule the callout.  No
					 * state change.
					 */
					callout_schedule(&dw->dw_callout,
					    MIN(INT_MAX, ticks));
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * Someone rescheduled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				dw->dw_resched = -1;
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Change the rescheduled time.  */
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone cancelled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Ask it to reschedule.  */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = false;
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}
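
/*
 * Usage sketch (illustrative, not part of the original file): a
 * watchdog timeout that is pushed back whenever the device shows
 * signs of life, relying on mod_delayed_work to restart the timer
 * whether or not it is already scheduled.  The mydev_* names and
 * the five-second timeout are hypothetical.
 *
 *	static void
 *	mydev_heartbeat(struct mydev_softc *sc)
 *	{
 *
 *		(void)mod_delayed_work(system_wq, &sc->sc_watchdog, 5*hz);
 *	}
 */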

/*
 * cancel_delayed_work(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then cancel_delayed_work
 *	will return false, and either way, cancel_delayed_work will NOT
 *	wait for the work to complete.
 */
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_stop(&dw->dw_callout))
				cancel_delayed_work_done(wq, dw);
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  There is nothing more for us
			 * to do.  Someone else claims credit for
			 * cancelling it.
			 */
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * cancel_delayed_work_sync(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then
 *	cancel_delayed_work_sync will return false; either way, wait
 *	for it to complete.
 */
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			/* If it's still running, wait for it to complete.  */
			if (wq->wq_current_work == &dw->work)
				wait_for_current_work(&dw->work, wq);
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  But we got in soon enough to ask
			 * the callout not to run, so we successfully
			 * cancelled it in that case.
			 *
			 * If we stopped the callout before it started,
			 * then we must destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			cancelled_p = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel and
			 * wait for it to complete.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			SDT_PROBE2(sdt, linux, work, cancel,  &dw->work, wq);
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  We need only wait for it to
			 * complete.  Someone else, however, claims
			 * credit for cancelling it.
			 */
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}
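
/*
 * Usage sketch (illustrative, not part of the original file):
 * tearing down the hypothetical poll loop sketched after
 * schedule_delayed_work above.  Setting sc_dying first keeps the
 * work function from re-arming itself if it is already running.
 *
 *	static void
 *	mydev_detach(struct mydev_softc *sc)
 *	{
 *
 *		sc->sc_dying = true;
 *		(void)cancel_delayed_work_sync(&sc->sc_poll);
 *	}
 */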

/*
 * Flush
 */

/*
 * flush_scheduled_work()
 *
 *	Wait for all work queued on system_wq to complete.  This does
 *	not include delayed work.
 */
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

/*
 * flush_workqueue_locked(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 *
 *	Caller must hold wq's lock.
 */
static void
flush_workqueue_locked(struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));

	/* Get the current generation number.  */
	gen = wq->wq_gen;

	/*
	 * If there's a batch of work in progress, we must wait for the
	 * worker thread to finish that batch.
	 */
	if (wq->wq_current_work != NULL)
		gen++;

	/*
	 * If there's any work yet to be claimed from the queue by the
	 * worker thread, we must wait for it to finish one more batch
	 * too.
	 */
	if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue))
		gen++;

	/* Wait until the generation number has caught up.  */
	SDT_PROBE1(sdt, linux, work, flush__start,  wq);
	while (wq->wq_gen < gen)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	SDT_PROBE1(sdt, linux, work, flush__done,  wq);
}

/*
 * flush_workqueue(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 */
void
flush_workqueue(struct workqueue_struct *wq)
{

	mutex_enter(&wq->wq_lock);
	flush_workqueue_locked(wq);
	mutex_exit(&wq->wq_lock);
}
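
/*
 * Usage sketch (illustrative, not part of the original file):
 * flush_workqueue as a completion barrier -- everything queued on
 * sc->sc_wq before the call has finished by the time it returns.
 * It must not be called from the workqueue's own worker thread,
 * which could never finish the batch it would be waiting on.
 *
 *	(void)queue_work(sc->sc_wq, &sc->sc_work);
 *	flush_workqueue(sc->sc_wq);
 *	... sc_work has completed here, unless someone requeued it ...
 */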

/*
 * flush_work(work)
 *
 *	If work is queued or currently executing, wait for it to
 *	complete.
 */
void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(work)) == NULL)
		return;

	flush_workqueue(wq);
}

/*
 * flush_delayed_work(dw)
 *
 *	If dw is scheduled to run after a delay, queue it immediately
 *	instead.  Then, if dw is queued or currently executing, wait
 *	for it to complete.
 */
void
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		/*
		 * Moved off the queue already (and possibly to another
		 * queue, though that would be ill-advised), so it must
		 * have completed, and we have nothing more to do.
		 */
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It has a workqueue assigned and the callout
			 * is idle, so it must be in progress or on the
			 * queue.  In that case, we'll wait for it to
			 * complete.
			 */
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is scheduled, and may have even
			 * started.  Mark it as scheduled so that if
			 * the callout has fired it will queue the work
			 * itself.  Try to stop the callout -- if we
			 * can, queue the work now; if we can't, wait
			 * for the callout to complete, which entails
			 * queueing it.
			 */
			dw->dw_state = DELAYED_WORK_SCHEDULED;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
				/*
				 * We stopped it before it ran.  No
				 * state change in the interim is
				 * possible.  Destroy the callout and
				 * queue it ourselves.
				 */
				KASSERT(dw->dw_state ==
				    DELAYED_WORK_SCHEDULED);
				dw_callout_destroy(wq, dw);
				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
				    work_entry);
				cv_broadcast(&wq->wq_cv);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			}
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
		/*
		 * Waiting for the whole queue to flush is overkill,
		 * but doesn't hurt.
		 */
		flush_workqueue_locked(wq);
	}
	mutex_exit(&wq->wq_lock);
}
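
/*
 * Usage sketch (illustrative, not part of the original file):
 * forcing a deferred update to happen now, e.g. on suspend.  If the
 * callout is still ticking, flush_delayed_work queues the work
 * immediately and then waits for it.  The mydev_* names are
 * hypothetical.
 *
 *	static void
 *	mydev_suspend(struct mydev_softc *sc)
 *	{
 *
 *		flush_delayed_work(&sc->sc_update);
 *	}
 */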
1520