/*	$OpenBSD: kern_timeout.c,v 1.77 2020/08/01 08:40:20 anton Exp $	*/
/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/queue.h>			/* _Q_INVALIDATE */
#include <sys/sysctl.h>
#include <sys/witness.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

/*
 * Locks used to protect global variables in this file:
 *
 *	I	immutable after initialization
 *	T	timeout_mutex
 */
struct mutex timeout_mutex = MUTEX_INITIALIZER(IPL_HIGH);

void *softclock_si;			/* [I] softclock() interrupt handle */
struct timeoutstat tostat;		/* [T] statistics and totals */

/*
 * Timeouts are kept in a hierarchical timing wheel. The to_time is the value
 * of the global variable "ticks" when the timeout should be called. There are
 * four levels with 256 buckets each.
 */
#define BUCKETS 1024
#define WHEELSIZE 256
#define WHEELMASK 255
#define WHEELBITS 8

struct circq timeout_wheel[BUCKETS];	/* [T] Queues of timeouts */
struct circq timeout_new;		/* [T] New, unscheduled timeouts */
struct circq timeout_todo;		/* [T] Due or needs rescheduling */
struct circq timeout_proc;		/* [T] Due + needs process context */

#define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(rel, abs)						\
    (timeout_wheel[							\
	((rel) <= (1 << (2*WHEELBITS)))					\
	    ? ((rel) <= (1 << WHEELBITS))				\
		? MASKWHEEL(0, (abs))					\
		: MASKWHEEL(1, (abs)) + WHEELSIZE			\
	    : ((rel) <= (1 << (3*WHEELBITS)))				\
		? MASKWHEEL(2, (abs)) + 2*WHEELSIZE			\
		: MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define MOVEBUCKET(wheel, time)						\
    CIRCQ_CONCAT(&timeout_todo,						\
        &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
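
/*
 * Worked example of the bucket math above (illustrative numbers): with
 * ticks = 1000 and a timeout due at to_time = 1300, rel = 300.  Since
 * 256 < 300 <= 65536 the timeout lands on wheel 1, in bucket
 * MASKWHEEL(1, 1300) + WHEELSIZE = ((1300 >> 8) & 255) + 256 = 261.
 */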

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(elem) do {			\
	(elem)->next = (elem);			\
	(elem)->prev = (elem);			\
} while (0)

#define CIRCQ_INSERT_TAIL(list, elem) do {	\
	(elem)->prev = (list)->prev;		\
	(elem)->next = (list);			\
	(list)->prev->next = (elem);		\
	(list)->prev = (elem);			\
	tostat.tos_pending++;			\
} while (0)

#define CIRCQ_CONCAT(fst, snd) do {		\
	if (!CIRCQ_EMPTY(snd)) {		\
		(fst)->prev->next = (snd)->next;\
		(snd)->next->prev = (fst)->prev;\
		(snd)->prev->next = (fst);      \
		(fst)->prev = (snd)->prev;      \
		CIRCQ_INIT(snd);		\
	}					\
} while (0)

#define CIRCQ_REMOVE(elem) do {			\
	(elem)->next->prev = (elem)->prev;      \
	(elem)->prev->next = (elem)->next;      \
	_Q_INVALIDATE((elem)->prev);		\
	_Q_INVALIDATE((elem)->next);		\
	tostat.tos_pending--;			\
} while (0)

#define CIRCQ_FIRST(elem) ((elem)->next)

#define CIRCQ_EMPTY(elem) (CIRCQ_FIRST(elem) == (elem))

#define CIRCQ_FOREACH(elem, list)		\
	for ((elem) = CIRCQ_FIRST(list);	\
	    (elem) != (list);			\
	    (elem) = CIRCQ_FIRST(elem))

#ifdef WITNESS
struct lock_object timeout_sleeplock_obj = {
	.lo_name = "timeout",
	.lo_flags = LO_WITNESS | LO_INITIALIZED | LO_SLEEPABLE |
	    (LO_CLASS_RWLOCK << LO_CLASSSHIFT)
};
struct lock_object timeout_spinlock_obj = {
	.lo_name = "timeout",
	.lo_flags = LO_WITNESS | LO_INITIALIZED |
	    (LO_CLASS_MUTEX << LO_CLASSSHIFT)
};
struct lock_type timeout_sleeplock_type = {
	.lt_name = "timeout"
};
struct lock_type timeout_spinlock_type = {
	.lt_name = "timeout"
};
#define TIMEOUT_LOCK_OBJ(needsproc) \
	((needsproc) ? &timeout_sleeplock_obj : &timeout_spinlock_obj)
#endif

void softclock(void *);
void softclock_create_thread(void *);
void softclock_thread(void *);
void timeout_proc_barrier(void *);

/*
 * The first thing in a struct timeout is its struct circq, so we
 * can get back from a pointer to the latter to a pointer to the
 * whole timeout with just a cast.
 */
static inline struct timeout *
timeout_from_circq(struct circq *p)
{
	return ((struct timeout *)(p));
}

static inline void
timeout_sync_order(int needsproc)
{
	WITNESS_CHECKORDER(TIMEOUT_LOCK_OBJ(needsproc), LOP_NEWORDER, NULL);
}

static inline void
timeout_sync_enter(int needsproc)
{
	timeout_sync_order(needsproc);
	WITNESS_LOCK(TIMEOUT_LOCK_OBJ(needsproc), 0);
}

static inline void
timeout_sync_leave(int needsproc)
{
	WITNESS_UNLOCK(TIMEOUT_LOCK_OBJ(needsproc), 0);
}

/*
 * Some of the "math" in here is a bit tricky.
 *
 * We have to beware of wrapping ints.
 * We use the fact that any element added to the queue must be added with a
 * positive time. That means that any element `to' on the queue cannot be
 * scheduled to timeout further in time than INT_MAX, but to->to_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the to->to_time value in any predictable way
 * is when we calculate how far in the future `to' will timeout -
 * "to->to_time - ticks". The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
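
/*
 * For example (illustrative, relying on the two's complement wraparound
 * of "ticks"): if ticks = 0x7ffffff6 (INT_MAX - 9) and a timeout is
 * added with to_ticks = 20, to_time wraps to 0x8000000a, a negative
 * value.  Comparing it against ticks directly would misclassify it, but
 * to->to_time - ticks = 20, correctly placing it 20 ticks in the future.
 */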

void
timeout_startup(void)
{
	int b;

	CIRCQ_INIT(&timeout_new);
	CIRCQ_INIT(&timeout_todo);
	CIRCQ_INIT(&timeout_proc);
	for (b = 0; b < nitems(timeout_wheel); b++)
		CIRCQ_INIT(&timeout_wheel[b]);
}

void
timeout_proc_init(void)
{
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("%s: unable to register softclock interrupt", __func__);

	WITNESS_INIT(&timeout_sleeplock_obj, &timeout_sleeplock_type);
	WITNESS_INIT(&timeout_spinlock_obj, &timeout_spinlock_type);

	kthread_create_deferred(softclock_create_thread, NULL);
}

void
timeout_set(struct timeout *new, void (*fn)(void *), void *arg)
{
	timeout_set_flags(new, fn, arg, 0);
}

void
timeout_set_flags(struct timeout *to, void (*fn)(void *), void *arg, int flags)
{
	to->to_func = fn;
	to->to_arg = arg;
	to->to_flags = flags | TIMEOUT_INITIALIZED;
}

void
timeout_set_proc(struct timeout *new, void (*fn)(void *), void *arg)
{
	timeout_set_flags(new, fn, arg, TIMEOUT_PROC);
}

int
timeout_add(struct timeout *new, int to_ticks)
{
	int old_time;
	int ret = 1;

	KASSERT(ISSET(new->to_flags, TIMEOUT_INITIALIZED));
	KASSERT(to_ticks >= 0);

	mtx_enter(&timeout_mutex);

	/* Initialize the time here; it won't change. */
	old_time = new->to_time;
	new->to_time = to_ticks + ticks;
	CLR(new->to_flags, TIMEOUT_TRIGGERED);

	/*
	 * If this timeout already is scheduled and now is moved
	 * earlier, reschedule it now. Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if (ISSET(new->to_flags, TIMEOUT_ONQUEUE)) {
		if (new->to_time - ticks < old_time - ticks) {
			CIRCQ_REMOVE(&new->to_list);
			CIRCQ_INSERT_TAIL(&timeout_new, &new->to_list);
		}
		tostat.tos_readded++;
		ret = 0;
	} else {
		SET(new->to_flags, TIMEOUT_ONQUEUE);
		CIRCQ_INSERT_TAIL(&timeout_new, &new->to_list);
	}
#if NKCOV > 0
	new->to_process = curproc->p_p;
#endif
	tostat.tos_added++;
	mtx_leave(&timeout_mutex);

	return ret;
}
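
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * a timeout is initialized once with timeout_set() and then (re)armed
 * with timeout_add(), here for roughly one second (hz ticks):
 *
 *	struct timeout tmo;
 *
 *	timeout_set(&tmo, mydrv_tick, sc);
 *	timeout_add(&tmo, hz);
 */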

int
timeout_add_tv(struct timeout *to, const struct timeval *tv)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * tv->tv_sec + tv->tv_usec / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && tv->tv_usec > 0)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_ts(struct timeout *to, const struct timespec *ts)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * ts->tv_sec + ts->tv_nsec / (tick * 1000);
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && ts->tv_nsec > 0)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_bt(struct timeout *to, const struct bintime *bt)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * bt->sec + (long)(((uint64_t)1000000 *
	    (uint32_t)(bt->frac >> 32)) >> 32) / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && bt->frac > 0)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_sec(struct timeout *to, int secs)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * secs;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_msec(struct timeout *to, int msecs)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)msecs * 1000 / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && msecs > 0)
		to_ticks = 1;

	return timeout_add(to, (int)to_ticks);
}

int
timeout_add_usec(struct timeout *to, int usecs)
{
	int to_ticks = usecs / tick;

	if (to_ticks == 0 && usecs > 0)
		to_ticks = 1;

	return timeout_add(to, to_ticks);
}

int
timeout_add_nsec(struct timeout *to, int nsecs)
{
	int to_ticks = nsecs / (tick * 1000);

	if (to_ticks == 0 && nsecs > 0)
		to_ticks = 1;

	return timeout_add(to, to_ticks);
}
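
/*
 * Worked example for the conversions above (assuming hz = 100, so
 * tick = 10000 microseconds): timeout_add_msec(to, 250) computes
 * 250 * 1000 / 10000 = 25 ticks, while timeout_add_usec(to, 50)
 * computes 50 / 10000 = 0 ticks and rounds up to 1 so that a nonzero
 * request never degenerates into a zero-tick timeout.
 */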

int
timeout_del(struct timeout *to)
{
	int ret = 0;

	mtx_enter(&timeout_mutex);
	if (ISSET(to->to_flags, TIMEOUT_ONQUEUE)) {
		CIRCQ_REMOVE(&to->to_list);
		CLR(to->to_flags, TIMEOUT_ONQUEUE);
		tostat.tos_cancelled++;
		ret = 1;
	}
	CLR(to->to_flags, TIMEOUT_TRIGGERED);
	tostat.tos_deleted++;
	mtx_leave(&timeout_mutex);

	return ret;
}

int
timeout_del_barrier(struct timeout *to)
{
	int removed;

	timeout_sync_order(ISSET(to->to_flags, TIMEOUT_PROC));

	removed = timeout_del(to);
	if (!removed)
		timeout_barrier(to);

	return removed;
}
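
/*
 * Illustrative use of the barrier (hypothetical caller, not part of
 * this file): cancel a timeout and wait out any handler that may still
 * be running before freeing its argument.
 *
 *	timeout_del_barrier(&sc->sc_tmo);
 *	free(sc, M_DEVBUF, sizeof(*sc));
 */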

void
timeout_barrier(struct timeout *to)
{
	int needsproc = ISSET(to->to_flags, TIMEOUT_PROC);

	timeout_sync_order(needsproc);

	if (!needsproc) {
		KERNEL_LOCK();
		splx(splsoftclock());
		KERNEL_UNLOCK();
	} else {
		struct cond c = COND_INITIALIZER();
		struct timeout barrier;

		timeout_set_proc(&barrier, timeout_proc_barrier, &c);

		mtx_enter(&timeout_mutex);
		SET(barrier.to_flags, TIMEOUT_ONQUEUE);
		CIRCQ_INSERT_TAIL(&timeout_proc, &barrier.to_list);
		mtx_leave(&timeout_mutex);

		wakeup_one(&timeout_proc);

		cond_wait(&c, "tmobar");
	}
}

void
timeout_proc_barrier(void *arg)
{
	struct cond *c = arg;

	cond_signal(c);
}

/*
 * This is called from hardclock() on the primary CPU at the start of
 * every tick.
 */
void
timeout_hardclock_update(void)
{
	int need_softclock = 1;

	mtx_enter(&timeout_mutex);

	MOVEBUCKET(0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(3, ticks);
		}
	}

	if (CIRCQ_EMPTY(&timeout_new) && CIRCQ_EMPTY(&timeout_todo))
		need_softclock = 0;

	mtx_leave(&timeout_mutex);

	if (need_softclock)
		softintr_schedule(softclock_si);
}
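
/*
 * Note on the cascade above: wheel 0 is emptied into timeout_todo on
 * every tick, wheel 1 only when the low 8 bits of ticks roll over
 * (every 256 ticks), wheel 2 every 2^16 ticks, and wheel 3 every 2^24
 * ticks, so the higher wheels are touched progressively less often.
 */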

void
timeout_run(struct timeout *to)
{
	void (*fn)(void *);
	void *arg;
	int needsproc;

	MUTEX_ASSERT_LOCKED(&timeout_mutex);

	CLR(to->to_flags, TIMEOUT_ONQUEUE);
	SET(to->to_flags, TIMEOUT_TRIGGERED);

	fn = to->to_func;
	arg = to->to_arg;
	needsproc = ISSET(to->to_flags, TIMEOUT_PROC);

	mtx_leave(&timeout_mutex);
	timeout_sync_enter(needsproc);
#if NKCOV > 0
	kcov_remote_enter(KCOV_REMOTE_COMMON, to->to_process);
#endif
	fn(arg);
#if NKCOV > 0
	kcov_remote_leave(KCOV_REMOTE_COMMON, to->to_process);
#endif
	timeout_sync_leave(needsproc);
	mtx_enter(&timeout_mutex);
}

/*
 * Timeouts are processed here instead of timeout_hardclock_update()
 * to avoid doing any more work at IPL_CLOCK than absolutely necessary.
 * Down here at IPL_SOFTCLOCK other interrupts can be serviced promptly
 * so the system remains responsive even if there is a surge of timeouts.
 */
void
softclock(void *arg)
{
	struct circq *bucket;
	struct timeout *first_new, *to;
	int delta, needsproc, new;

	first_new = NULL;
	new = 0;

	mtx_enter(&timeout_mutex);
	if (!CIRCQ_EMPTY(&timeout_new))
		first_new = timeout_from_circq(CIRCQ_FIRST(&timeout_new));
	CIRCQ_CONCAT(&timeout_todo, &timeout_new);
	while (!CIRCQ_EMPTY(&timeout_todo)) {
		to = timeout_from_circq(CIRCQ_FIRST(&timeout_todo));
		CIRCQ_REMOVE(&to->to_list);
		if (to == first_new)
			new = 1;

		/*
		 * If due, run it or defer execution to the thread;
		 * otherwise insert it into the right bucket.
		 */
		delta = to->to_time - ticks;
		if (delta > 0) {
			bucket = &BUCKET(delta, to->to_time);
			CIRCQ_INSERT_TAIL(bucket, &to->to_list);
			tostat.tos_scheduled++;
			if (!new)
				tostat.tos_rescheduled++;
			continue;
		}
		if (!new && delta < 0)
			tostat.tos_late++;
		if (ISSET(to->to_flags, TIMEOUT_PROC)) {
			CIRCQ_INSERT_TAIL(&timeout_proc, &to->to_list);
			continue;
		}
		timeout_run(to);
		tostat.tos_run_softclock++;
	}
	tostat.tos_softclocks++;
	needsproc = !CIRCQ_EMPTY(&timeout_proc);
	mtx_leave(&timeout_mutex);

	if (needsproc)
		wakeup(&timeout_proc);
}

void
softclock_create_thread(void *arg)
{
	if (kthread_create(softclock_thread, NULL, NULL, "softclock"))
		panic("fork softclock");
}

void
softclock_thread(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct sleep_state sls;
	struct timeout *to;
	int s;

	KERNEL_ASSERT_LOCKED();

	/* Be conservative for the moment */
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_PRIMARY(ci))
			break;
	}
	KASSERT(ci != NULL);
	sched_peg_curproc(ci);

	s = splsoftclock();
	for (;;) {
		sleep_setup(&sls, &timeout_proc, PSWP, "bored");
		sleep_finish(&sls, CIRCQ_EMPTY(&timeout_proc));

		mtx_enter(&timeout_mutex);
		while (!CIRCQ_EMPTY(&timeout_proc)) {
			to = timeout_from_circq(CIRCQ_FIRST(&timeout_proc));
			CIRCQ_REMOVE(&to->to_list);
			timeout_run(to);
			tostat.tos_run_thread++;
		}
		tostat.tos_thread_wakeups++;
		mtx_leave(&timeout_mutex);
	}
	splx(s);
}

#ifndef SMALL_KERNEL
void
timeout_adjust_ticks(int adj)
{
	struct timeout *to;
	struct circq *p;
	int new_ticks, b;

	/* adjusting the monotonic clock backwards would be a Bad Thing */
	if (adj <= 0)
		return;

	mtx_enter(&timeout_mutex);
	new_ticks = ticks + adj;
	for (b = 0; b < nitems(timeout_wheel); b++) {
		p = CIRCQ_FIRST(&timeout_wheel[b]);
		while (p != &timeout_wheel[b]) {
			to = timeout_from_circq(p);
			p = CIRCQ_FIRST(p);

			/* when moving a timeout forward we need to reinsert it */
			if (to->to_time - ticks < adj)
				to->to_time = new_ticks;
			CIRCQ_REMOVE(&to->to_list);
			CIRCQ_INSERT_TAIL(&timeout_todo, &to->to_list);
		}
	}
	ticks = new_ticks;
	mtx_leave(&timeout_mutex);
}
#endif

int
timeout_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	struct timeoutstat status;

	mtx_enter(&timeout_mutex);
	memcpy(&status, &tostat, sizeof(status));
	mtx_leave(&timeout_mutex);

	return sysctl_rdstruct(oldp, oldlenp, newp, &status, sizeof(status));
}

#ifdef DDB
void db_show_callout_bucket(struct circq *);

void
db_show_callout_bucket(struct circq *bucket)
{
	char buf[8];
	struct timeout *to;
	struct circq *p;
	db_expr_t offset;
	char *name, *where;
	int width = sizeof(long) * 2;

	CIRCQ_FOREACH(p, bucket) {
		to = timeout_from_circq(p);
		db_find_sym_and_offset((vaddr_t)to->to_func, &name, &offset);
		name = name ? name : "?";
		if (bucket == &timeout_todo)
			where = "softint";
		else if (bucket == &timeout_proc)
			where = "thread";
		else if (bucket == &timeout_new)
			where = "new";
		else {
			snprintf(buf, sizeof(buf), "%3ld/%1ld",
			    (bucket - timeout_wheel) % WHEELSIZE,
			    (bucket - timeout_wheel) / WHEELSIZE);
			where = buf;
		}
		db_printf("%9d  %7s  0x%0*lx  %s\n",
		    to->to_time - ticks, where, width, (ulong)to->to_arg, name);
	}
}

void
db_show_callout(db_expr_t addr, int haddr, db_expr_t count, char *modif)
{
	int width = sizeof(long) * 2 + 2;
	int b;

	db_printf("ticks now: %d\n", ticks);
	db_printf("%9s  %7s  %*s  func\n", "ticks", "wheel", width, "arg");

	db_show_callout_bucket(&timeout_new);
	db_show_callout_bucket(&timeout_todo);
	db_show_callout_bucket(&timeout_proc);
	for (b = 0; b < nitems(timeout_wheel); b++)
		db_show_callout_bucket(&timeout_wheel[b]);
}
#endif