/*	$NetBSD: kern_timeout.c,v 1.66 2020/06/27 01:26:32 rin Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.66 2020/06/27 01:26:32 rin Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called.  There are
 * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - c->c_cpu->cc_ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
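
/*
 * Illustrative sketch (added commentary, not from the original sources):
 * a worked example of the wrap-safe arithmetic described above.  Assume
 * cc_ticks has counted all the way up to INT_MAX and a callout is then
 * scheduled 10 ticks into the future:
 *
 *	c->c_time = 10 + INT_MAX;	wraps to a negative value
 *	c->c_time - cc_ticks == 10	still recognized as a future timeout
 *
 * Comparing c->c_time against cc_ticks directly would misclassify the
 * timeout; only the subtraction is meaningful, which is exactly how
 * callout_softclock() computes its `delta'.
 */
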
#define	_CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_cpu.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
	    : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
	    : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
	&(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
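
/*
 * Illustrative note (added commentary, not from the original sources):
 * BUCKET() selects a wheel level from the relative expiry time `rel'
 * and indexes into it with bits of the absolute time `abs':
 *
 *	rel <= 2^8	wheel 0, indexed by bits  0..7  of abs
 *	rel <= 2^16	wheel 1, indexed by bits  8..15 of abs
 *	rel <= 2^24	wheel 2, indexed by bits 16..23 of abs
 *	otherwise	wheel 3, indexed by bits 24..31 of abs
 *
 * For example, a callout due in 70000 ticks lands in wheel 2 and is
 * cascaded toward wheel 0 by MOVEBUCKET() as cc_ticks advances.
 */
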
/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem,list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))

struct callout_cpu {
	kmutex_t	*cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};

#ifdef DDB
static struct callout_cpu ccb;
#endif

#ifndef CRASH	/* _KERNEL */
static void	callout_softclock(void *);
static void	callout_wait(callout_impl_t *, void *, kmutex_t *);

static struct callout_cpu callout_cpu0 __cacheline_aligned;
static void *callout_sih __read_mostly;

static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}

/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERTMSG(c->c_magic == CALLOUT_MAGIC,
	    "callout %p: c_magic (%#x) != CALLOUT_MAGIC (%#x)",
	    c, c->c_magic, CALLOUT_MAGIC);
	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERTMSG((c->c_flags & CALLOUT_PENDING) == 0,
	    "pending callout %p: c_func (%p) c_flags (%#x) destroyed from %p",
	    c, c->c_func, c->c_flags, __builtin_return_address(0));
	KASSERTMSG(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c,
	    "running callout %p: c_func (%p) c_flags (%#x) destroyed from %p",
	    c, c->c_func, c->c_flags, __builtin_return_address(0));
	c->c_magic = 0;
}
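
/*
 * Usage sketch (added commentary, hypothetical consumer code, not part
 * of the original file): the typical lifetime of a callout as seen by
 * a caller of this API.
 *
 *	callout_t sc_tick;
 *
 *	callout_init(&sc_tick, CALLOUT_MPSAFE);
 *	callout_reset(&sc_tick, hz, foo_tick, sc);	fire in ~1 second
 *	...
 *	callout_halt(&sc_tick, NULL);			cancel and wait
 *	callout_destroy(&sc_tick);			must be stopped first
 *
 * Here `foo_tick' and `sc' are hypothetical; `hz' is the system tick
 * rate.
 */
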
/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	callout_lock.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Initialize the time here, it won't change. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(cc->cc_lock);
	}
	mutex_spin_exit(lock);
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}

/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	int flags;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());
	KASSERT(interlock == NULL || mutex_owned(interlock));

	/* Fast path. */
	lock = callout_lock(c);
	flags = c->c_flags;
	if ((flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags = flags & ~(CALLOUT_PENDING|CALLOUT_FIRED);
	if (__predict_false(flags & CALLOUT_FIRED)) {
		callout_wait(c, interlock, lock);
		return true;
	}
	mutex_spin_exit(lock);
	return false;
}
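
/*
 * Usage sketch (added commentary, hypothetical caller, not part of the
 * original file): passing an interlock to callout_halt() when the
 * callout handler takes the same lock as the code stopping it.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	callout_halt(&sc->sc_tick, &sc->sc_lock);
 *	mutex_exit(&sc->sc_lock);
 *
 * If the handler is in flight, sc_lock is dropped and re-acquired while
 * we wait, as described above; `sc' and its members are hypothetical.
 */
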
/*
 * callout_wait:
 *
 *	Slow path for callout_halt().  Deliberately marked __noinline to
 *	prevent unneeded overhead in the caller.
 */
static void __noinline
callout_wait(callout_impl_t *c, void *interlock, kmutex_t *lock)
{
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *relock;

	l = curlwp;
	relock = NULL;
	for (;;) {
		/*
		 * At this point we know the callout is not pending, but it
		 * could be running on a CPU somewhere.  That can be curcpu
		 * in a few cases:
		 *
		 * - curlwp is a higher priority soft interrupt
		 * - the callout blocked on a lock and is currently asleep
		 * - the callout itself has called callout_halt() (nice!)
		 */
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;

		/* It's running - need to wait for it to complete. */
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held; then retry.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj, false);
			sleepq_block(0, false);
		}

		/*
		 * Re-lock the callout and check the state of play again.
		 * It's a common design pattern for callouts to re-schedule
		 * themselves so put a stop to it again if needed.
		 */
		lock = callout_lock(c);
		if ((c->c_flags & CALLOUT_PENDING) != 0)
			CIRCQ_REMOVE(&c->c_list);
		c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);
}
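
/*
 * Illustrative sketch (added commentary, hypothetical consumer, not
 * part of the original file): the self-rescheduling pattern mentioned
 * above.  A periodic callout commonly re-arms itself from its own
 * handler:
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_poll(sc);
 *		if (!sc->sc_dying)
 *			callout_schedule(&sc->sc_tick, hz);
 *	}
 *
 * The retry loop in callout_wait() exists so that such a handler cannot
 * slip back onto the wheel while a caller is trying to halt it.
 */
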
#ifdef notyet
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
#endif

void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}
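
/*
 * Usage sketch (added commentary, hypothetical consumer, not part of
 * the original file): callout_setfunc() pairs with callout_schedule()
 * when the same function and argument are used for the lifetime of the
 * callout, so they need not be passed on every rearm:
 *
 *	callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
 *	callout_setfunc(&sc->sc_tick, foo_tick, sc);
 *	...
 *	callout_schedule(&sc->sc_tick, hz);
 */
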
bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}

/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}
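
/*
 * Illustrative walk-through (added commentary, not from the original
 * sources): suppose cc_ticks advances to 0x00010000.  Bits 0..7 of the
 * new value are zero, so the wheel-1 bucket selected by bits 8..15 is
 * cascaded onto cc_todo; those bits are zero as well, so a wheel-2
 * bucket is cascaded too.  callout_softclock() below then runs whatever
 * is due and re-files the rest into closer buckets via BUCKET().
 */
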
/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due run it, otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = (int)((unsigned)c->c_time - (unsigned)ticks);
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(cc->cc_lock);
		KASSERT(func != NULL);
		if (__predict_false(!mpsafe)) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might be
		 * freed already.  If any LWPs are waiting for the
		 * callout to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
			mutex_spin_enter(cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(cc->cc_lock);
}
#endif	/* !CRASH */

#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *kbucket,
    struct callout_circq *bucket)
{
	callout_impl_t *c, ci;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_LAST(bucket, kbucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_read_bytes((db_addr_t)c, sizeof(ci), (char *)&ci);
		c = &ci;
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx  %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, kbucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	struct callout_cpu *cc;
	struct cpu_info *ci;
	int b;

#ifndef CRASH
	db_printf("hardclock_ticks now: %d\n", getticks());
#endif
	db_printf("    ticks  wheel               arg  func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
		db_read_bytes((db_addr_t)ci +
		    offsetof(struct cpu_info, ci_data.cpu_callout),
		    sizeof(cc), (char *)&cc);
		db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
		db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
			db_read_bytes((db_addr_t)ci +
			    offsetof(struct cpu_info, ci_data.cpu_callout),
			    sizeof(cc), (char *)&cc);
			db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
			db_show_callout_bucket(&ccb, &cc->cc_wheel[b],
			    &ccb.cc_wheel[b]);
		}
	}
}
#endif /* DDB */