/*	$NetBSD: linux_work.c,v 1.51 2021/12/19 01:24:13 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.51 2021/12/19 01:24:13 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#ifndef _MODULE
#include <sys/once.h>
#endif
#include <sys/queue.h>
#include <sys/sdt.h>

#include <linux/workqueue.h>

TAILQ_HEAD(work_head, work_struct);
TAILQ_HEAD(dwork_head, delayed_work);

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	struct dwork_head	wq_delayed;	/* delayed work scheduled */
	struct work_head	wq_queue;	/* work to run */
	struct work_head	wq_dqueue;	/* delayed work to run now */
	struct work_struct	*wq_current_work;
	int			wq_flags;
	bool			wq_dying;
	uint64_t		wq_gen;
	struct lwp		*wq_lwp;
	const char		*wq_name;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static bool		work_claimed(struct work_struct *,
			    struct workqueue_struct *);
static struct workqueue_struct *
			work_queue(struct work_struct *);
static bool		acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		wait_for_current_work(struct work_struct *,
			    struct workqueue_struct *);
static void		dw_callout_init(struct workqueue_struct *,
			    struct delayed_work *);
static void		dw_callout_destroy(struct workqueue_struct *,
			    struct delayed_work *);
static void		cancel_delayed_work_done(struct workqueue_struct *,
			    struct delayed_work *);

SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, release,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
*"/*work*/, "struct workqueue_struct *"/*wq*/); 92 SDT_PROBE_DEFINE2(sdt, linux, work, queue, 93 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/); 94 SDT_PROBE_DEFINE2(sdt, linux, work, cancel, 95 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/); 96 SDT_PROBE_DEFINE3(sdt, linux, work, schedule, 97 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/, 98 "unsigned long"/*ticks*/); 99 SDT_PROBE_DEFINE2(sdt, linux, work, timer, 100 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/); 101 SDT_PROBE_DEFINE2(sdt, linux, work, wait__start, 102 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/); 103 SDT_PROBE_DEFINE2(sdt, linux, work, wait__done, 104 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/); 105 SDT_PROBE_DEFINE2(sdt, linux, work, run, 106 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/); 107 SDT_PROBE_DEFINE2(sdt, linux, work, done, 108 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/); 109 SDT_PROBE_DEFINE1(sdt, linux, work, batch__start, 110 "struct workqueue_struct *"/*wq*/); 111 SDT_PROBE_DEFINE1(sdt, linux, work, batch__done, 112 "struct workqueue_struct *"/*wq*/); 113 SDT_PROBE_DEFINE1(sdt, linux, work, flush__start, 114 "struct workqueue_struct *"/*wq*/); 115 SDT_PROBE_DEFINE1(sdt, linux, work, flush__done, 116 "struct workqueue_struct *"/*wq*/); 117 118 static specificdata_key_t workqueue_key __read_mostly; 119 120 struct workqueue_struct *system_wq __read_mostly; 121 struct workqueue_struct *system_long_wq __read_mostly; 122 struct workqueue_struct *system_power_efficient_wq __read_mostly; 123 struct workqueue_struct *system_unbound_wq __read_mostly; 124 125 static inline uintptr_t 126 atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new) 127 { 128 129 return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new); 130 } 131 132 /* 133 * linux_workqueue_init() 134 * 135 * Initialize the Linux workqueue subsystem. Return 0 on success, 136 * NetBSD error on failure. 137 */ 138 static int 139 linux_workqueue_init0(void) 140 { 141 int error; 142 143 error = lwp_specific_key_create(&workqueue_key, NULL); 144 if (error) 145 goto fail0; 146 147 system_wq = alloc_ordered_workqueue("lnxsyswq", 0); 148 if (system_wq == NULL) { 149 error = ENOMEM; 150 goto fail1; 151 } 152 153 system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0); 154 if (system_long_wq == NULL) { 155 error = ENOMEM; 156 goto fail2; 157 } 158 159 system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0); 160 if (system_power_efficient_wq == NULL) { 161 error = ENOMEM; 162 goto fail3; 163 } 164 165 system_unbound_wq = alloc_ordered_workqueue("lnxubdwq", 0); 166 if (system_unbound_wq == NULL) { 167 error = ENOMEM; 168 goto fail4; 169 } 170 171 return 0; 172 173 fail5: __unused 174 destroy_workqueue(system_unbound_wq); 175 fail4: destroy_workqueue(system_power_efficient_wq); 176 fail3: destroy_workqueue(system_long_wq); 177 fail2: destroy_workqueue(system_wq); 178 fail1: lwp_specific_key_delete(workqueue_key); 179 fail0: KASSERT(error); 180 return error; 181 } 182 183 /* 184 * linux_workqueue_fini() 185 * 186 * Destroy the Linux workqueue subsystem. Never fails. 

/*
 * linux_workqueue_fini()
 *
 *	Destroy the Linux workqueue subsystem.  Never fails.
 */
static void
linux_workqueue_fini0(void)
{

	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

#ifndef _MODULE
static ONCE_DECL(linux_workqueue_init_once);
#endif

int
linux_workqueue_init(void)
{
#ifdef _MODULE
	return linux_workqueue_init0();
#else
	return INIT_ONCE(&linux_workqueue_init_once, &linux_workqueue_init0);
#endif
}

void
linux_workqueue_fini(void)
{
#ifdef _MODULE
	return linux_workqueue_fini0();
#else
	return FINI_ONCE(&linux_workqueue_init_once, &linux_workqueue_fini0);
#endif
}

/*
 * Workqueues
 */

/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue of the given name.  No flags are currently
 *	defined.  Return NULL on failure, pointer to struct
 *	workqueue_struct object on success.
 */
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	TAILQ_INIT(&wq->wq_dqueue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_dying = false;
	wq->wq_gen = 0;
	wq->wq_lwp = NULL;
	wq->wq_name = name;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}
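
/*
 * Example (illustrative sketch only; the softc and "mydrvwq" names are
 * hypothetical): a driver typically creates one ordered workqueue at
 * attach time and tears it down at detach time, after cancelling its
 * own work items:
 *
 *	sc->sc_wq = alloc_ordered_workqueue("mydrvwq", 0);
 *	if (sc->sc_wq == NULL)
 *		return ENOMEM;
 *	...
 *	destroy_workqueue(sc->sc_wq);
 */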

/*
 * destroy_workqueue(wq)
 *
 *	Destroy a workqueue created with alloc_ordered_workqueue.
 *	Cancel any pending delayed work.  Wait for all queued work to
 *	complete.
 *
 *	May sleep.
 */
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	mutex_enter(&wq->wq_lock);
	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);

		KASSERT(work_queue(&dw->work) == wq);
		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
			dw->dw_state == DELAYED_WORK_CANCELLED),
		    "delayed work %p in bad state: %d",
		    dw, dw->dw_state);

		/*
		 * Mark it cancelled and try to stop the callout before
		 * it starts.
		 *
		 * If it's too late and the callout has already begun
		 * to execute, then it will notice that we asked to
		 * cancel it and remove itself from the queue before
		 * returning.
		 *
		 * If we stopped the callout before it started,
		 * however, then we can safely destroy the callout and
		 * dissociate it from the workqueue ourselves.
		 */
		SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
		dw->dw_state = DELAYED_WORK_CANCELLED;
		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
			cancel_delayed_work_done(wq, dw);
	}
	mutex_exit(&wq->wq_lock);

	/*
	 * At this point, no new work can be put on the queue.
	 */

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_dying);
	KASSERT(wq->wq_flags == 0);
	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

/*
 * linux_workqueue_thread(cookie)
 *
 *	Main function for a workqueue's worker thread.  Waits until
 *	there is work queued, grabs a batch of work off the queue,
 *	executes it all, bumps the generation number, and repeats,
 *	until dying.
 */
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	struct work_head *const q[2] = { &wq->wq_queue, &wq->wq_dqueue };
	struct work_struct marker, *work;
	unsigned i;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/*
		 * Wait until there's activity.  If there's no work and
		 * we're dying, stop here.
		 */
		if (TAILQ_EMPTY(&wq->wq_queue) &&
		    TAILQ_EMPTY(&wq->wq_dqueue)) {
			if (wq->wq_dying)
				break;
			cv_wait(&wq->wq_cv, &wq->wq_lock);
			continue;
		}

		/*
		 * Start a batch of work.  Use a marker to delimit when
		 * the batch ends so we can advance the generation
		 * after the batch.
		 */
		SDT_PROBE1(sdt, linux, work, batch__start, wq);
		for (i = 0; i < 2; i++) {
			if (TAILQ_EMPTY(q[i]))
				continue;
			TAILQ_INSERT_TAIL(q[i], &marker, work_entry);
			while ((work = TAILQ_FIRST(q[i])) != &marker) {
				void (*func)(struct work_struct *);

				KASSERT(work_queue(work) == wq);
				KASSERT(work_claimed(work, wq));
				KASSERTMSG((q[i] != &wq->wq_dqueue ||
					container_of(work, struct delayed_work,
					    work)->dw_state ==
					DELAYED_WORK_IDLE),
				    "delayed work %p queued and scheduled",
				    work);

				TAILQ_REMOVE(q[i], work, work_entry);
				KASSERT(wq->wq_current_work == NULL);
				wq->wq_current_work = work;
				func = work->func;
				release_work(work, wq);
				/* Can't dereference work after this point.  */

				mutex_exit(&wq->wq_lock);
				SDT_PROBE2(sdt, linux, work, run, work, wq);
				(*func)(work);
				SDT_PROBE2(sdt, linux, work, done, work, wq);
				mutex_enter(&wq->wq_lock);

				KASSERT(wq->wq_current_work == work);
				wq->wq_current_work = NULL;
				cv_broadcast(&wq->wq_cv);
			}
			TAILQ_REMOVE(q[i], &marker, work_entry);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE1(sdt, linux, work, batch__done, wq);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

/*
 * linux_workqueue_timeout(cookie)
 *
 *	Delayed work timeout callback.
 *
 *	- If scheduled, queue it.
 *	- If rescheduled, callout_schedule ourselves again.
 *	- If cancelled, destroy the callout and release the work from
 *	  the workqueue.
 */
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = work_queue(&dw->work);

	KASSERTMSG(wq != NULL,
	    "delayed work %p state %d resched %d",
	    dw, dw->dw_state, dw->dw_resched);

	SDT_PROBE2(sdt, linux, work, timer, dw, wq);

	mutex_enter(&wq->wq_lock);
	KASSERT(work_queue(&dw->work) == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw_callout_destroy(wq, dw);
		TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
		break;
	case DELAYED_WORK_RESCHEDULED:
		KASSERT(dw->dw_resched >= 0);
		callout_schedule(&dw->dw_callout, dw->dw_resched);
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		dw->dw_resched = -1;
		break;
	case DELAYED_WORK_CANCELLED:
		cancel_delayed_work_done(wq, dw);
		/* Can't dereference dw after this point.  */
		goto out;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
out:	mutex_exit(&wq->wq_lock);
}

/*
 * current_work()
 *
 *	If in a workqueue worker thread, return the work it is
 *	currently executing.  Otherwise return NULL.
 */
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

/*
 * INIT_WORK(work, fn)
 *
 *	Initialize work for use with a workqueue to call fn in a worker
 *	thread.  There is no corresponding destruction operation.
 */
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_owner = 0;
	work->func = fn;
}

/*
 * work_claimed(work, wq)
 *
 *	True if work is currently claimed by a workqueue, meaning it is
 *	either on the queue or scheduled in a callout.  The workqueue
 *	must be wq, and caller must hold wq's lock.
 */
static bool
work_claimed(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	return work->work_owner & 1;
}

/*
 * work_pending(work)
 *
 *	True if work is currently claimed by any workqueue, scheduled
 *	to run on that workqueue.
 */
bool
work_pending(const struct work_struct *work)
{

	return work->work_owner & 1;
}

/*
 * work_queue(work)
 *
 *	Return the last queue that work was queued on, or NULL if it
 *	was never queued.
 */
static struct workqueue_struct *
work_queue(struct work_struct *work)
{

	return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
}

/*
 * acquire_work(work, wq)
 *
 *	Try to claim work for wq.  If work is already claimed, it must
 *	be claimed by wq; return false.  If work is not already
 *	claimed, claim it, issue a memory barrier to match any prior
 *	release_work, and return true.
 *
 *	Caller must hold wq's lock.
 */
static bool
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uintptr_t owner0, owner;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(((uintptr_t)wq & 1) == 0);

	owner = (uintptr_t)wq | 1;
	do {
		owner0 = work->work_owner;
		if (owner0 & 1) {
			KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
			return false;
		}
		KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
	    owner0);

	KASSERT(work_queue(work) == wq);
	membar_enter();
	SDT_PROBE2(sdt, linux, work, acquire, work, wq);
	return true;
}

/*
 * release_work(work, wq)
 *
 *	Issue a memory barrier to match any subsequent acquire_work and
 *	dissociate work from wq.
 *
 *	Caller must hold wq's lock and work must be associated with wq.
 */
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work_queue(work) == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	SDT_PROBE2(sdt, linux, work, release, work, wq);
	membar_exit();

	/*
	 * Non-interlocked r/m/w is safe here because nobody else can
	 * write to this while the claimed bit is set and the workqueue
	 * lock is held.
	 */
	work->work_owner &= ~(uintptr_t)1;
}

/*
 * schedule_work(work)
 *
 *	If work is not already queued on system_wq, queue it to be run
 *	by system_wq's worker thread when it next can.  True if it was
 *	newly queued, false if it was already queued.  If the work was
 *	already running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

/*
 * queue_work(wq, work)
 *
 *	If work is not already queued on wq, queue it to be run by wq's
 *	worker thread when it next can.  True if it was newly queued,
 *	false if it was already queued.  If the work was already
 *	running, queue it to run again.
 *
 *	Caller must ensure work is not queued to run on a different
 *	workqueue.
 */
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Put it on this
		 * one, and signal the worker thread that there is work
		 * to do.
		 */
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		cv_broadcast(&wq->wq_cv);
		SDT_PROBE2(sdt, linux, work, queue, work, wq);
		newly_queued = true;
	} else {
		/*
		 * It was already on this workqueue.  Nothing to do
		 * since it is already queued.
		 */
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}
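
/*
 * Example (illustrative sketch; the sc and mydrv_* names are
 * hypothetical): callers initialize a work item once, queue it
 * whenever there is something to do, and cancel it before tearing
 * down the state the work function touches:
 *
 *	INIT_WORK(&sc->sc_work, mydrv_work);
 *	...
 *	(void)queue_work(sc->sc_wq, &sc->sc_work);
 *	...
 *	(void)cancel_work_sync(&sc->sc_work);
 *
 * schedule_work(&sc->sc_work) is the same thing on system_wq.
 */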

/*
 * cancel_work(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Work may still be
 *	running when this returns.
 */
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel, work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * cancel_work_sync(work)
 *
 *	If work was queued, remove it from the queue and return true.
 *	If work was not queued, return false.  Either way, if work is
 *	currently running, wait for it to complete.
 *
 *	May sleep.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(work)) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(work) != wq)) {
		/*
		 * It has finished execution or been cancelled by
		 * another thread, and has been moved off the
		 * workqueue, so it's too late to cancel.
		 */
		cancelled_p = false;
	} else {
		/* Check whether it's on the queue.  */
		if (work_claimed(work, wq)) {
			/*
			 * It is still on the queue.  Take it off the
			 * queue and report successful cancellation.
			 */
			TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
			SDT_PROBE2(sdt, linux, work, cancel, work, wq);
			release_work(work, wq);
			/* Can't dereference work after this point.  */
			cancelled_p = true;
		} else {
			/* Not on the queue.  Couldn't cancel it.  */
			cancelled_p = false;
		}
		/* If it's still running, wait for it to complete.  */
		if (wq->wq_current_work == work)
			wait_for_current_work(work, wq);
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * wait_for_current_work(work, wq)
 *
 *	wq must be currently executing work.  Wait for it to finish.
 *
 *	Does not dereference work.
 */
static void
wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(wq->wq_current_work == work);

	/* Wait only one generation in case it gets requeued quickly.  */
	SDT_PROBE2(sdt, linux, work, wait__start, work, wq);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (wq->wq_current_work == work && wq->wq_gen == gen);
	SDT_PROBE2(sdt, linux, work, wait__done, work, wq);
}

/*
 * Delayed work
 */

/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *	Initialize dw for use with a workqueue to call fn in a worker
 *	thread after a delay.  There is no corresponding destruction
 *	operation.
 */
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;
	dw->dw_resched = -1;

	/*
	 * Defer callout_init until we are going to schedule the
	 * callout, which can then callout_destroy it; since there is
	 * no DESTROY_DELAYED_WORK or the like, we have no other
	 * opportunity to call callout_destroy.
	 */
}

/*
 * schedule_delayed_work(dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on system_wq.  If currently executing and not already
 *	rescheduled, reschedule it.  True if it was newly scheduled,
 *	false if it was already scheduled.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}
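
/*
 * Example (illustrative sketch; the sc and mydrv_* names are
 * hypothetical): delayed work is initialized once and then scheduled
 * in units of hardclock ticks, e.g. roughly one second from now:
 *
 *	INIT_DELAYED_WORK(&sc->sc_dwork, mydrv_tick);
 *	...
 *	(void)schedule_delayed_work(&sc->sc_dwork, mstohz(1000));
 *	...
 *	(void)cancel_delayed_work_sync(&sc->sc_dwork);
 */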

/*
 * dw_callout_init(wq, dw)
 *
 *	Initialize the callout of dw and transition to
 *	DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
 */
static void
dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE);

	callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
	callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
	TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
	dw->dw_state = DELAYED_WORK_SCHEDULED;
}

/*
 * dw_callout_destroy(wq, dw)
 *
 *	Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
 */
static void
dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
	    dw->dw_state == DELAYED_WORK_RESCHEDULED ||
	    dw->dw_state == DELAYED_WORK_CANCELLED);

	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);
	dw->dw_resched = -1;
	dw->dw_state = DELAYED_WORK_IDLE;
}

/*
 * cancel_delayed_work_done(wq, dw)
 *
 *	Complete cancellation of a delayed work: transition from
 *	DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
 *	workqueue.  Caller must not dereference dw after this returns.
 */
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(work_queue(&dw->work) == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);

	dw_callout_destroy(wq, dw);
	release_work(&dw->work, wq);
	/* Can't dereference dw after this point.  */
}

/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If it is not currently scheduled, schedule dw to run after
 *	ticks on wq.  If currently queued, remove it from the queue
 *	first.
 *
 *	If ticks == 0, queue it to run as soon as the worker can,
 *	without waiting for the next callout tick to run.
 */
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true(acquire_work(&dw->work, wq))) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
		}
		newly_queued = true;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
			/* On the queue or already scheduled.  Leave it.  */
			newly_queued = false;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Scheduled and the callout began, but it was
			 * cancelled.  Reschedule it.
			 */
			if (ticks == 0) {
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			newly_queued = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run after ticks.  If scheduled or queued,
 *	reschedule.  If ticks == 0, run without delay.
 *
 *	True if it modified the timer of an already scheduled work,
 *	false if it newly scheduled the work.
 */
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if (acquire_work(&dw->work, wq)) {
		/*
		 * It wasn't on any workqueue at all.  Schedule it to
		 * run on this one.
		 */
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		if (ticks == 0) {
			/*
			 * Run immediately: put it on the queue and
			 * signal the worker thread.
			 */
			TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
			    work_entry);
			cv_broadcast(&wq->wq_cv);
			SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
		} else {
			/*
			 * Initialize a callout and schedule to run
			 * after a delay.
			 */
			dw_callout_init(wq, dw);
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
		}
		timer_modified = false;
	} else {
		/* It was already on this workqueue.  */
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/* On the queue.  */
			if (ticks == 0) {
				/* Leave it be.  */
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Remove from the queue and schedule.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				dw_callout_init(wq, dw);
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * It is scheduled to run after a delay.  Try
			 * to stop it and reschedule it; if we can't,
			 * either reschedule it or cancel it to put it
			 * on the queue, and inform the callout.
			 */
			if (callout_stop(&dw->dw_callout)) {
				/* Can't stop, callout has begun.  */
				if (ticks == 0) {
					/*
					 * We don't actually need to do
					 * anything.  The callout will
					 * queue it as soon as it gets
					 * the lock.
					 */
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/* Ask the callout to reschedule.  */
					dw->dw_state = DELAYED_WORK_RESCHEDULED;
					dw->dw_resched = MIN(INT_MAX, ticks);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			} else {
				/* We stopped the callout before it began.  */
				if (ticks == 0) {
					/*
					 * Run immediately: destroy the
					 * callout, put it on the
					 * queue, and signal the worker
					 * thread.
					 */
					dw_callout_destroy(wq, dw);
					TAILQ_INSERT_TAIL(&wq->wq_dqueue,
					    &dw->work, work_entry);
					cv_broadcast(&wq->wq_cv);
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE2(sdt, linux, work, queue,
					    &dw->work, wq);
				} else {
					/*
					 * Reschedule the callout.  No
					 * state change.
					 */
					callout_schedule(&dw->dw_callout,
					    MIN(INT_MAX, ticks));
					SDT_PROBE2(sdt, linux, work, cancel,
					    &dw->work, wq);
					SDT_PROBE3(sdt, linux, work, schedule,
					    dw, wq, ticks);
				}
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * Someone rescheduled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				dw->dw_resched = -1;
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Change the rescheduled time.  */
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone cancelled it after the callout
			 * started but before the poor thing even had a
			 * chance to acquire the lock.
			 */
			if (ticks == 0) {
				/*
				 * We can just switch back to
				 * DELAYED_WORK_SCHEDULED so that the
				 * callout will queue the work as soon
				 * as it gets the lock.
				 */
				dw->dw_state = DELAYED_WORK_SCHEDULED;
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			} else {
				/* Ask it to reschedule.  */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				dw->dw_resched = MIN(INT_MAX, ticks);
				SDT_PROBE3(sdt, linux, work, schedule,
				    dw, wq, ticks);
			}
			timer_modified = false;
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}
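
/*
 * Example (illustrative sketch; the sc names are hypothetical): a
 * watchdog that should fire only after a period of inactivity can push
 * its deadline forward on every event, whether or not it is already
 * scheduled:
 *
 *	(void)mod_delayed_work(sc->sc_wq, &sc->sc_watchdog, mstohz(5000));
 */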

/*
 * cancel_delayed_work(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then cancel_delayed_work
 *	will return false, and either way, cancel_delayed_work will NOT
 *	wait for the work to complete.
 */
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
			if (!callout_stop(&dw->dw_callout))
				cancel_delayed_work_done(wq, dw);
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			cancelled_p = true;
			SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  There is nothing more for us
			 * to do.  Someone else claims credit for
			 * cancelling it.
			 */
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * cancel_delayed_work_sync(dw)
 *
 *	If work was scheduled or queued, remove it from the schedule or
 *	queue and return true.  If work was not scheduled or queued,
 *	return false.  Note that work may already be running; if it
 *	hasn't been rescheduled or requeued, then cancel_delayed_work
 *	will return false; either way, wait for it to complete.
 */
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It is either on the queue or already running
			 * or both.
			 */
			if (work_claimed(&dw->work, wq)) {
				/* On the queue.  Remove and release.  */
				TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
				    work_entry);
				SDT_PROBE2(sdt, linux, work, cancel,
				    &dw->work, wq);
				release_work(&dw->work, wq);
				/* Can't dereference dw after this point.  */
				cancelled_p = true;
			} else {
				/* Not on the queue, so didn't cancel.  */
				cancelled_p = false;
			}
			/* If it's still running, wait for it to complete.  */
			if (wq->wq_current_work == &dw->work)
				wait_for_current_work(&dw->work, wq);
			break;
		case DELAYED_WORK_SCHEDULED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  But we got in soon enough to ask
			 * the callout not to run, so we successfully
			 * cancelled it in that case.
			 *
			 * If we stopped the callout before it started,
			 * then we must destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			cancelled_p = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
			/*
			 * If it is being rescheduled, the callout has
			 * already fired.  We must ask it to cancel and
			 * wait for it to complete.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			dw->dw_resched = -1;
			SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = true;
			break;
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is being cancelled, the callout has
			 * already fired.  We need only wait for it to
			 * complete.  Someone else, however, claims
			 * credit for cancelling it.
			 */
			(void)callout_halt(&dw->dw_callout, &wq->wq_lock);
			cancelled_p = false;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

/*
 * flush_scheduled_work()
 *
 *	Wait for all work queued on system_wq to complete.  This does
 *	not include delayed work.
 */
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

/*
 * flush_workqueue_locked(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.  True if there was work to be flushed,
 *	false if the queue was empty.
 *
 *	Caller must hold wq's lock.
 */
static bool
flush_workqueue_locked(struct workqueue_struct *wq)
{
	uint64_t gen;
	bool work_queued = false;

	KASSERT(mutex_owned(&wq->wq_lock));

	/* Get the current generation number.  */
	gen = wq->wq_gen;

	/*
	 * If there's a batch of work in progress, we must wait for the
	 * worker thread to finish that batch.
	 */
	if (wq->wq_current_work != NULL) {
		gen++;
		work_queued = true;
	}

	/*
	 * If there's any work yet to be claimed from the queue by the
	 * worker thread, we must wait for it to finish one more batch
	 * too.
	 */
	if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue)) {
		gen++;
		work_queued = true;
	}

	/* Wait until the generation number has caught up.  */
	SDT_PROBE1(sdt, linux, work, flush__start, wq);
	while (wq->wq_gen < gen)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	SDT_PROBE1(sdt, linux, work, flush__done, wq);

	/* Return whether we had to wait for anything.  */
	return work_queued;
}

/*
 * flush_workqueue(wq)
 *
 *	Wait for all work queued on wq to complete.  This does not
 *	include delayed work.
 */
void
flush_workqueue(struct workqueue_struct *wq)
{

	mutex_enter(&wq->wq_lock);
	(void)flush_workqueue_locked(wq);
	mutex_exit(&wq->wq_lock);
}

/*
 * drain_workqueue(wq)
 *
 *	Repeatedly flush wq until there is no more work.
 */
void
drain_workqueue(struct workqueue_struct *wq)
{
	unsigned ntries = 0;

	mutex_enter(&wq->wq_lock);
	while (flush_workqueue_locked(wq)) {
		if (ntries++ == 10 || (ntries % 100) == 0)
			printf("linux workqueue %s"
			    ": still clogged after %u flushes\n",
			    wq->wq_name, ntries);
	}
	mutex_exit(&wq->wq_lock);
}
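
/*
 * Example (illustrative sketch; the sc name is hypothetical):
 * flush_workqueue waits for one round of already-queued work, while
 * drain_workqueue keeps flushing until the queue stays empty, which is
 * the safer choice before destroy_workqueue when work items may
 * requeue themselves:
 *
 *	drain_workqueue(sc->sc_wq);
 *	destroy_workqueue(sc->sc_wq);
 */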

/*
 * flush_work(work)
 *
 *	If work is queued or currently executing, wait for it to
 *	complete.
 *
 *	Return true if we waited to flush it, false if it was already
 *	idle.
 */
bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(work)) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

/*
 * flush_delayed_work(dw)
 *
 *	If dw is scheduled to run after a delay, queue it immediately
 *	instead.  Then, if dw is queued or currently executing, wait
 *	for it to complete.
 */
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool waited = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work_queue(&dw->work)) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work_queue(&dw->work) != wq)) {
		/*
		 * Moved off the queue already (and possibly to another
		 * queue, though that would be ill-advised), so it must
		 * have completed, and we have nothing more to do.
		 */
		waited = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It has a workqueue assigned and the callout
			 * is idle, so it must be in progress or on the
			 * queue.  In that case, we'll wait for it to
			 * complete.
			 */
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is scheduled, and may have even
			 * started.  Mark it as scheduled so that if
			 * the callout has fired it will queue the work
			 * itself.  Try to stop the callout -- if we
			 * can, queue the work now; if we can't, wait
			 * for the callout to complete, which entails
			 * queueing it.
			 */
			dw->dw_state = DELAYED_WORK_SCHEDULED;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
				/*
				 * We stopped it before it ran.  No
				 * state change in the interim is
				 * possible.  Destroy the callout and
				 * queue it ourselves.
				 */
				KASSERT(dw->dw_state ==
				    DELAYED_WORK_SCHEDULED);
				dw_callout_destroy(wq, dw);
				TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
				    work_entry);
				cv_broadcast(&wq->wq_cv);
				SDT_PROBE2(sdt, linux, work, queue,
				    &dw->work, wq);
			}
			break;
		default:
			panic("invalid delayed work state: %d", dw->dw_state);
		}
		/*
		 * Waiting for the whole queue to flush is overkill,
		 * but doesn't hurt.
		 */
		(void)flush_workqueue_locked(wq);
		waited = true;
	}
	mutex_exit(&wq->wq_lock);

	return waited;
}

/*
 * delayed_work_pending(dw)
 *
 *	True if dw is currently scheduled to execute, false if not.
 */
bool
delayed_work_pending(const struct delayed_work *dw)
{

	return work_pending(&dw->work);
}