/*	$NetBSD: kern_timeout.c,v 1.26 2007/08/01 23:23:41 ad Exp $	*/

/*-
 * Copyright (c) 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.26 2007/08/01 23:23:41 ad Exp $");

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of the global variable "hardclock_ticks" when the timeout should
 * be called.  There are four levels with 256 buckets each.  See 'Scheme 7'
 * in "Hashed and Hierarchical Timing Wheels: Efficient Data Structures
 * for Implementing a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - hardclock_ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
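
/*
 * A worked example of the above (illustrative only; assumes 32-bit,
 * two's-complement ints): if hardclock_ticks has counted up to
 * 0x7ffffff0 and a callout is scheduled 0x20 ticks into the future,
 * c->c_time becomes 0x80000010, which as a signed int is a large
 * negative value, so comparing it directly against hardclock_ticks
 * would give the wrong answer.  The difference
 * "c->c_time - hardclock_ticks", however, wraps back to 0x20,
 * correctly describing a timeout 32 ticks away; once the clock reaches
 * the deadline the same difference becomes zero, and negative
 * thereafter.
 */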

#define	_CALLOUT_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/callout.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>

#include <machine/intr.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#define	BUCKETS		1024
#define	WHEELSIZE	256
#define	WHEELMASK	255
#define	WHEELBITS	8

static struct callout_circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
static struct callout_circq timeout_todo;		/* Worklist */

#define	MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define	BUCKET(rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &timeout_wheel[MASKWHEEL(0, (abs))]			\
	    : &timeout_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &timeout_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]		\
	    : &timeout_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define	MOVEBUCKET(wheel, time)						\
    CIRCQ_APPEND(&timeout_todo,						\
	&timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
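
/*
 * To make the macros above concrete (an illustrative summary): with
 * WHEELBITS == 8, BUCKET() hashes a timeout due within 2^8 ticks into
 * the lowest-level wheel, within 2^16 ticks into the second level,
 * within 2^24 ticks into the third, and anything later into the
 * fourth.  MOVEBUCKET() cascades a higher-level bucket onto the work
 * list each time the bits below that level roll over to zero; see
 * callout_hardclock().
 */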

/*
 * Circular queue definitions.
 */

#define	CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)

#define	CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)

#define	CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define	CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)

#define	CIRCQ_FIRST(list)	((list)->cq_next_e)
#define	CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define	CIRCQ_LAST(elem, list)	((elem)->cq_next_l == (list))
#define	CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))

static void	callout_softclock(void *);

/*
 * All wheels are locked with the same lock (which must also block out
 * all interrupts).  Eventually this should become per-CPU.
 */
kmutex_t callout_lock;
sleepq_t callout_sleepq;
void *callout_si;

static struct evcnt callout_ev_late;
static struct evcnt callout_ev_block;

/*
 * callout_barrier:
 *
 *	If the callout is already running, wait until it completes.
 *	XXX This should do priority inheritance.
 */
static void
callout_barrier(callout_impl_t *c)
{
	extern syncobj_t sleep_syncobj;
	struct cpu_info *ci;
	struct lwp *l;

	l = curlwp;

	if ((c->c_flags & CALLOUT_MPSAFE) == 0) {
		/*
		 * Note: we must be called with the kernel lock held,
		 * as we use it to synchronize with callout_softclock().
		 */
		ci = c->c_oncpu;
		ci->ci_data.cpu_callout_cancel = c;
		return;
	}

	while ((ci = c->c_oncpu) != NULL && ci->ci_data.cpu_callout == c) {
		KASSERT(l->l_wchan == NULL);

		ci->ci_data.cpu_callout_nwait++;
		callout_ev_block.ev_count++;

		sleepq_enter(&callout_sleepq, l);
		sleepq_enqueue(&callout_sleepq, sched_kpri(l), ci,
		    "callout", &sleep_syncobj);
		sleepq_block(0, false);
		mutex_spin_enter(&callout_lock);
	}
}

/*
 * callout_running:
 *
 *	Return true if callout 'c' is currently executing (on an LWP
 *	other than the current one).
 */
static inline bool
callout_running(callout_impl_t *c)
{
	struct cpu_info *ci;

	if ((ci = c->c_oncpu) == NULL)
		return false;
	if (ci->ci_data.cpu_callout != c)
		return false;
	if (c->c_onlwp == curlwp)
		return false;
	return true;
}

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 */
void
callout_startup(void)
{
	int b;

	KASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	CIRCQ_INIT(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&timeout_wheel[b]);

	mutex_init(&callout_lock, MUTEX_SPIN, IPL_SCHED);
	sleepq_init(&callout_sleepq, &callout_lock);

	evcnt_attach_dynamic(&callout_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", "late");
	evcnt_attach_dynamic(&callout_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", "block waiting");
}

/*
 * callout_startup2:
 *
 *	Complete initialization once soft interrupts are available.
 */
void
callout_startup2(void)
{

	callout_si = softintr_establish(IPL_SOFTCLOCK,
	    callout_softclock, NULL);
	if (callout_si == NULL)
		panic("callout_startup2: unable to register softclock intr");
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	memset(c, 0, sizeof(*c));
	c->c_flags = flags;
	c->c_magic = CALLOUT_MAGIC;
}

/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	if (c->c_oncpu != NULL) {
		KASSERT(
		    ((struct cpu_info *)c->c_oncpu)->ci_data.cpu_callout != c);
	}
	KASSERT(c->c_magic == CALLOUT_MAGIC);

	c->c_magic = 0;
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	mutex_spin_enter(&callout_lock);

	/* Initialize the time here, it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~CALLOUT_FIRED;

	c->c_func = func;
	c->c_arg = arg;

	/*
	 * If this timeout is already scheduled and is now being moved
	 * earlier, reschedule it now.  Otherwise leave it in place and
	 * let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	mutex_spin_exit(&callout_lock);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(c->c_func != NULL);

	mutex_spin_enter(&callout_lock);

	/* Initialize the time here, it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~CALLOUT_FIRED;

	/*
	 * If this timeout is already scheduled and is now being moved
	 * earlier, reschedule it now.  Otherwise leave it in place and
	 * let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	mutex_spin_exit(&callout_lock);
}

/*
 * callout_stop:
 *
 *	Cancel a pending callout.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);

	if (callout_running(c))
		callout_barrier(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	mutex_spin_exit(&callout_lock);

	return expired;
}
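
/*
 * Typical use of the public interface, sketched here for illustration
 * only.  The "example" driver, its softc and example_tick() handler are
 * hypothetical and are not part of this file:
 *
 *	struct example_softc {
 *		callout_t	sc_tick_ch;
 *		...
 *	};
 *
 *	void
 *	example_attach(struct example_softc *sc)
 *	{
 *		callout_init(&sc->sc_tick_ch, 0);
 *		callout_reset(&sc->sc_tick_ch, hz, example_tick, sc);
 *	}
 *
 *	void
 *	example_detach(struct example_softc *sc)
 *	{
 *		callout_stop(&sc->sc_tick_ch);
 *		callout_destroy(&sc->sc_tick_ch);
 *	}
 *
 * callout_stop() cancels a pending invocation and, via callout_barrier(),
 * synchronizes with a handler that is already running, after which
 * callout_destroy() is safe.
 */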

/*
 * callout_setfunc:
 *
 *	Set the function and argument for a callout without scheduling it.
 */
void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(&callout_lock);
}
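
/*
 * callout_setfunc() pairs naturally with callout_schedule(): the
 * function and argument are installed once and the callout is then
 * re-armed with only a tick count.  An illustrative fragment (the
 * names are hypothetical):
 *
 *	callout_init(&sc->sc_tick_ch, 0);
 *	callout_setfunc(&sc->sc_tick_ch, example_tick, sc);
 *	...
 *	callout_schedule(&sc->sc_tick_ch, hz / 10);
 */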

/*
 * callout_expired:
 *
 *	Return true if the callout has fired.
 */
bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

/*
 * callout_active:
 *
 *	Return true if the callout is pending or has fired.
 */
bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

/*
 * callout_pending:
 *
 *	Return true if the callout is still waiting to fire.
 */
bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

/*
 * callout_invoking:
 *
 *	Return true if the CALLOUT_INVOKING flag is set.
 */
bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(&callout_lock);

	return rv;
}

/*
 * callout_ack:
 *
 *	Clear the CALLOUT_INVOKING flag.
 */
void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	mutex_spin_enter(&callout_lock);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(&callout_lock);
}

/*
 * This is called from hardclock() once every tick.  We schedule
 * callout_softclock() if there is work to be done.
 */
void
callout_hardclock(void)
{
	int needsoftclock;

	mutex_spin_enter(&callout_lock);

	MOVEBUCKET(0, hardclock_ticks);
	if (MASKWHEEL(0, hardclock_ticks) == 0) {
		MOVEBUCKET(1, hardclock_ticks);
		if (MASKWHEEL(1, hardclock_ticks) == 0) {
			MOVEBUCKET(2, hardclock_ticks);
			if (MASKWHEEL(2, hardclock_ticks) == 0)
				MOVEBUCKET(3, hardclock_ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&timeout_todo);
	mutex_spin_exit(&callout_lock);

	if (needsoftclock)
		softintr_schedule(callout_si);
}

/* ARGSUSED */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct cpu_info *ci;
	void (*func)(void *);
	void *arg;
	u_int mpsafe, count;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;

	mutex_spin_enter(&callout_lock);

	while (!CIRCQ_EMPTY(&timeout_todo)) {
		c = CIRCQ_FIRST(&timeout_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due, run it; otherwise insert it into the right bucket. */
		if (c->c_time - hardclock_ticks > 0) {
			CIRCQ_INSERT(&c->c_list,
			    BUCKET((c->c_time - hardclock_ticks), c->c_time));
		} else {
			if (c->c_time - hardclock_ticks < 0)
				callout_ev_late.ev_count++;

			c->c_flags ^= (CALLOUT_PENDING | CALLOUT_FIRED);
			mpsafe = (c->c_flags & CALLOUT_MPSAFE);
			func = c->c_func;
			arg = c->c_arg;
			c->c_oncpu = ci;
			c->c_onlwp = l;

			mutex_spin_exit(&callout_lock);
			if (!mpsafe) {
				KERNEL_LOCK(1, curlwp);
				if (ci->ci_data.cpu_callout_cancel != c)
					(*func)(arg);
				KERNEL_UNLOCK_ONE(curlwp);
			} else
				(*func)(arg);
			mutex_spin_enter(&callout_lock);

			/*
			 * We can't touch 'c' here because it might be
			 * freed already.  If LWPs are waiting for the
			 * callout to complete, awaken them.
			 */
			ci->ci_data.cpu_callout_cancel = NULL;
			ci->ci_data.cpu_callout = NULL;
			if ((count = ci->ci_data.cpu_callout_nwait) != 0) {
				ci->ci_data.cpu_callout_nwait = 0;
				/* sleepq_wake() drops the lock. */
				sleepq_wake(&callout_sleepq, ci, count);
				mutex_spin_enter(&callout_lock);
			}
		}
	}

	mutex_spin_exit(&callout_lock);
}

#ifdef DDB
static void
db_show_callout_bucket(struct callout_circq *bucket)
{
	callout_impl_t *c;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";

	if (CIRCQ_EMPTY(bucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
#ifdef _LP64
#define	POINTER_WIDTH	"%16lx"
#else
#define	POINTER_WIDTH	"%8lx"
#endif
		db_printf("%9d %2d/%-4d " POINTER_WIDTH " %s\n",
		    c->c_time - hardclock_ticks,
		    (int)((bucket - timeout_wheel) / WHEELSIZE),
		    (int)(bucket - timeout_wheel), (u_long) c->c_arg, name);

		if (CIRCQ_LAST(&c->c_list, bucket))
			break;
	}
}

void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	int b;

	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
#ifdef _LP64
	db_printf("    ticks  wheel               arg  func\n");
#else
	db_printf("    ticks  wheel       arg  func\n");
#endif

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */

	db_show_callout_bucket(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		db_show_callout_bucket(&timeout_wheel[b]);
}
#endif /* DDB */