xref: /netbsd-src/sys/kern/kern_timeout.c (revision cac8e449158efc7261bebc8657cbb0125a2cfdde)
1 /*	$NetBSD: kern_timeout.c,v 1.41 2008/07/02 14:47:34 matt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe, and by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
34  * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
50  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
51  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
52  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
53  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
54  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
55  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
56  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
57  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
58  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.41 2008/07/02 14:47:34 matt Exp $");
63 
64 /*
65  * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
66  * value of c_cpu->cc_ticks when the timeout should be called.  There are
67  * four levels with 256 buckets each. See 'Scheme 7' in "Hashed and
68  * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
69  * a Timer Facility" by George Varghese and Tony Lauck.
70  *
71  * Some of the "math" in here is a bit tricky.  We have to beware of
72  * wrapping ints.
73  *
74  * We use the fact that any element added to the queue must be added with
75  * a positive time.  That means that any element `c' on the queue cannot
76  * be scheduled to time out more than INT_MAX ticks into the future, but
77  * c->c_time itself can be positive or negative, so comparing it with
78  * anything directly is dangerous.  The only predictable way to use
79  * c->c_time is to calculate how far in the future `c' will time out:
80  * "c->c_time - c->c_cpu->cc_ticks".  The result is always positive for
81  * future timeouts and 0 or negative for due timeouts.
82  */
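/*
 * Illustrative sketch (not part of the implementation): suppose ints are
 * 32 bits wide, cc_ticks has counted all the way up to INT_MAX, and a
 * callout is scheduled 10 ticks into the future.  c_time then wraps to a
 * large negative value, yet the signed subtraction still yields the
 * correct small delta:
 *
 *	int now    = INT_MAX;		// cc_ticks, about to wrap
 *	int c_time = now + 10;		// wraps negative on two's complement
 *	int delta  = c_time - now;	// == 10, still correct
 *
 *	if (delta <= 0)
 *		... the callout is due ...
 *	else
 *		... it fires `delta' ticks from now ...
 *
 * This is why the code below only ever compares tick counts by
 * subtracting them, never by comparing the raw values directly.
 */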
83 
84 #define	_CALLOUT_PRIVATE
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/callout.h>
90 #include <sys/mutex.h>
91 #include <sys/proc.h>
92 #include <sys/sleepq.h>
93 #include <sys/syncobj.h>
94 #include <sys/evcnt.h>
95 #include <sys/intr.h>
96 #include <sys/cpu.h>
97 #include <sys/kmem.h>
98 
99 #ifdef DDB
100 #include <machine/db_machdep.h>
101 #include <ddb/db_interface.h>
102 #include <ddb/db_access.h>
103 #include <ddb/db_sym.h>
104 #include <ddb/db_output.h>
105 #endif
106 
107 #define BUCKETS		1024
108 #define WHEELSIZE	256
109 #define WHEELMASK	255
110 #define WHEELBITS	8
111 
112 #define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)
113 
114 #define BUCKET(cc, rel, abs)						\
115     (((rel) <= (1 << (2*WHEELBITS)))					\
116     	? ((rel) <= (1 << WHEELBITS))					\
117             ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
118             : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
119         : ((rel) <= (1 << (3*WHEELBITS)))				\
120             ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
121             : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
122 
123 #define MOVEBUCKET(cc, wheel, time)					\
124     CIRCQ_APPEND(&(cc)->cc_todo,					\
125         &(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
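/*
 * Worked example (illustrative): with WHEELBITS == 8, BUCKET() places a
 * callout according to how far in the future it is due ('rel'), indexing
 * the chosen wheel with bits of its absolute expiry time ('abs'):
 *
 *	rel <= 2^8	wheel 0, indexed by bits  0..7  of abs
 *	rel <= 2^16	wheel 1, indexed by bits  8..15 of abs
 *	rel <= 2^24	wheel 2, indexed by bits 16..23 of abs
 *	otherwise	wheel 3, indexed by bits 24..31 of abs
 *
 * MOVEBUCKET() cascades an entire bucket of one wheel onto the cc_todo
 * work list; callout_hardclock() does this whenever a lower wheel rolls
 * over, and callout_softclock() then runs the entries that are due and
 * re-sorts the rest into their final buckets.
 */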
126 
127 /*
128  * Circular queue definitions.
129  */
130 
131 #define CIRCQ_INIT(list)						\
132 do {									\
133         (list)->cq_next_l = (list);					\
134         (list)->cq_prev_l = (list);					\
135 } while (/*CONSTCOND*/0)
136 
137 #define CIRCQ_INSERT(elem, list)					\
138 do {									\
139         (elem)->cq_prev_e = (list)->cq_prev_e;				\
140         (elem)->cq_next_l = (list);					\
141         (list)->cq_prev_l->cq_next_l = (elem);				\
142         (list)->cq_prev_l = (elem);					\
143 } while (/*CONSTCOND*/0)
144 
145 #define CIRCQ_APPEND(fst, snd)						\
146 do {									\
147         if (!CIRCQ_EMPTY(snd)) {					\
148                 (fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
149                 (snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
150                 (snd)->cq_prev_l->cq_next_l = (fst);			\
151                 (fst)->cq_prev_l = (snd)->cq_prev_l;			\
152                 CIRCQ_INIT(snd);					\
153         }								\
154 } while (/*CONSTCOND*/0)
155 
156 #define CIRCQ_REMOVE(elem)						\
157 do {									\
158         (elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
159         (elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
160 } while (/*CONSTCOND*/0)
161 
162 #define CIRCQ_FIRST(list)	((list)->cq_next_e)
163 #define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
164 #define CIRCQ_LAST(elem,list)	((elem)->cq_next_l == (list))
165 #define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
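/*
 * Note (illustrative): the queues are circular with the list head acting
 * as a sentinel, so there is no NULL terminator.  A non-empty bucket is
 * walked like this (see db_show_callout_bucket() at the end of this file
 * for the real thing):
 *
 *	for (c = CIRCQ_FIRST(bucket); ; c = CIRCQ_NEXT(&c->c_list)) {
 *		... examine c ...
 *		if (CIRCQ_LAST(&c->c_list, bucket))
 *			break;
 *	}
 */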
166 
167 static void	callout_softclock(void *);
168 
169 struct callout_cpu {
170 	kmutex_t	cc_lock;
171 	sleepq_t	cc_sleepq;
172 	u_int		cc_nwait;
173 	u_int		cc_ticks;
174 	lwp_t		*cc_lwp;
175 	callout_impl_t	*cc_active;
176 	callout_impl_t	*cc_cancel;
177 	struct evcnt	cc_ev_late;
178 	struct evcnt	cc_ev_block;
179 	struct callout_circq cc_todo;		/* Worklist */
180 	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
181 	char		cc_name1[12];
182 	char		cc_name2[12];
183 };
184 
185 static struct callout_cpu callout_cpu0;
186 static void *callout_sih;
187 
188 static inline kmutex_t *
189 callout_lock(callout_impl_t *c)
190 {
191 	kmutex_t *lock;
192 
193 	for (;;) {
194 		lock = &c->c_cpu->cc_lock;
195 		mutex_spin_enter(lock);
196 		if (__predict_true(lock == &c->c_cpu->cc_lock))
197 			return lock;
198 		mutex_spin_exit(lock);
199 	}
200 }
201 
202 /*
203  * callout_startup:
204  *
205  *	Initialize the callout facility, called at system startup time.
206  *	Do just enough to allow callouts to be safely registered.
207  */
208 void
209 callout_startup(void)
210 {
211 	struct callout_cpu *cc;
212 	int b;
213 
214 	KASSERT(curcpu()->ci_data.cpu_callout == NULL);
215 
216 	cc = &callout_cpu0;
217 	mutex_init(&cc->cc_lock, MUTEX_DEFAULT, IPL_SCHED);
218 	CIRCQ_INIT(&cc->cc_todo);
219 	for (b = 0; b < BUCKETS; b++)
220 		CIRCQ_INIT(&cc->cc_wheel[b]);
221 	curcpu()->ci_data.cpu_callout = cc;
222 }
223 
224 /*
225  * callout_init_cpu:
226  *
227  *	Per-CPU initialization.
228  */
229 void
230 callout_init_cpu(struct cpu_info *ci)
231 {
232 	struct callout_cpu *cc;
233 	int b;
234 
235 	CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));
236 
237 	if ((cc = ci->ci_data.cpu_callout) == NULL) {
238 		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
239 		if (cc == NULL)
240 			panic("callout_init_cpu (1)");
241 		mutex_init(&cc->cc_lock, MUTEX_DEFAULT, IPL_SCHED);
242 		CIRCQ_INIT(&cc->cc_todo);
243 		for (b = 0; b < BUCKETS; b++)
244 			CIRCQ_INIT(&cc->cc_wheel[b]);
245 	} else {
246 		/* Boot CPU, one time only. */
247 		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
248 		    callout_softclock, NULL);
249 		if (callout_sih == NULL)
250 			panic("callout_init_cpu (2)");
251 	}
252 
253 	sleepq_init(&cc->cc_sleepq);
254 
255 	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
256 	    cpu_index(ci));
257 	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
258 	    NULL, "callout", cc->cc_name1);
259 
260 	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
261 	    cpu_index(ci));
262 	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
263 	    NULL, "callout", cc->cc_name2);
264 
265 	ci->ci_data.cpu_callout = cc;
266 }
267 
268 /*
269  * callout_init:
270  *
271  *	Initialize a callout structure.  This must be quick, so we fill
272  *	only the minimum number of fields.
273  */
274 void
275 callout_init(callout_t *cs, u_int flags)
276 {
277 	callout_impl_t *c = (callout_impl_t *)cs;
278 	struct callout_cpu *cc;
279 
280 	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);
281 
282 	cc = curcpu()->ci_data.cpu_callout;
283 	c->c_func = NULL;
284 	c->c_magic = CALLOUT_MAGIC;
285 	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
286 		c->c_flags = flags;
287 		c->c_cpu = cc;
288 		return;
289 	}
290 	c->c_flags = flags | CALLOUT_BOUND;
291 	c->c_cpu = &callout_cpu0;
292 }
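/*
 * Typical set-up (illustrative only; `struct foo_softc', foo_tick() and
 * sc_callout are hypothetical driver names, not part of this file):
 *
 *	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
 *	callout_setfunc(&sc->sc_callout, foo_tick, sc);
 *	callout_schedule(&sc->sc_callout, hz);	// first tick in ~1 second
 */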
293 
294 /*
295  * callout_destroy:
296  *
297  *	Destroy a callout structure.  The callout must be stopped.
298  */
299 void
300 callout_destroy(callout_t *cs)
301 {
302 	callout_impl_t *c = (callout_impl_t *)cs;
303 
304 	/*
305 	 * It's not necessary to lock in order to see the correct value
306 	 * of c->c_flags.  If the callout could potentially have been
307 	 * running, the current thread should have stopped it.
308 	 */
309 	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
310 	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
311 	KASSERT(c->c_magic == CALLOUT_MAGIC);
312 	c->c_magic = 0;
313 }
314 
315 /*
316  * callout_schedule_locked:
317  *
318  *	Schedule a callout to run.  The function and argument must
319  *	already be set in the callout structure.  Must be called with
320  *	callout_lock.
321  */
322 static void
323 callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
324 {
325 	struct callout_cpu *cc, *occ;
326 	int old_time;
327 
328 	KASSERT(to_ticks >= 0);
329 	KASSERT(c->c_func != NULL);
330 
331 	/* Initialize the time here; it won't change. */
332 	occ = c->c_cpu;
333 	c->c_flags &= ~CALLOUT_FIRED;
334 
335 	/*
336 	 * If this timeout is already scheduled and is now being moved
337 	 * earlier, reschedule it now.  Otherwise leave it in place
338 	 * and let it be rescheduled later.
339 	 */
340 	if ((c->c_flags & CALLOUT_PENDING) != 0) {
341 		/* Leave on existing CPU. */
342 		old_time = c->c_time;
343 		c->c_time = to_ticks + occ->cc_ticks;
344 		if (c->c_time - old_time < 0) {
345 			CIRCQ_REMOVE(&c->c_list);
346 			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
347 		}
348 		mutex_spin_exit(lock);
349 		return;
350 	}
351 
352 	cc = curcpu()->ci_data.cpu_callout;
353 	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
354 	    !mutex_tryenter(&cc->cc_lock)) {
355 		/* Leave on existing CPU. */
356 		c->c_time = to_ticks + occ->cc_ticks;
357 		c->c_flags |= CALLOUT_PENDING;
358 		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
359 	} else {
360 		/* Move to this CPU. */
361 		c->c_cpu = cc;
362 		c->c_time = to_ticks + cc->cc_ticks;
363 		c->c_flags |= CALLOUT_PENDING;
364 		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
365 		mutex_spin_exit(&cc->cc_lock);
366 	}
367 	mutex_spin_exit(lock);
368 }
369 
370 /*
371  * callout_reset:
372  *
373  *	Reset a callout structure with a new function and argument, and
374  *	schedule it to run.
375  */
376 void
377 callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
378 {
379 	callout_impl_t *c = (callout_impl_t *)cs;
380 	kmutex_t *lock;
381 
382 	KASSERT(c->c_magic == CALLOUT_MAGIC);
383 
384 	lock = callout_lock(c);
385 	c->c_func = func;
386 	c->c_arg = arg;
387 	callout_schedule_locked(c, lock, to_ticks);
388 }
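/*
 * A common pattern (illustrative; foo_tick() and foo_softc are
 * hypothetical): a handler that re-arms itself to run periodically.
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... do the periodic work ...
 *
 *		callout_reset(&sc->sc_callout, hz / 10, foo_tick, sc);
 *	}
 *
 * Once the function and argument are set, callout_schedule() can be used
 * instead to avoid re-setting them on every re-arm.
 */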
389 
390 /*
391  * callout_schedule:
392  *
393  *	Schedule a callout to run.  The function and argument must
394  *	already be set in the callout structure.
395  */
396 void
397 callout_schedule(callout_t *cs, int to_ticks)
398 {
399 	callout_impl_t *c = (callout_impl_t *)cs;
400 	kmutex_t *lock;
401 
402 	KASSERT(c->c_magic == CALLOUT_MAGIC);
403 
404 	lock = callout_lock(c);
405 	callout_schedule_locked(c, lock, to_ticks);
406 }
407 
408 /*
409  * callout_stop:
410  *
411  *	Try to cancel a pending callout.  It may be too late: the callout
412  *	could be running on another CPU.  If called from interrupt context,
413  *	the callout could already be in progress at a lower priority.
414  */
415 bool
416 callout_stop(callout_t *cs)
417 {
418 	callout_impl_t *c = (callout_impl_t *)cs;
419 	struct callout_cpu *cc;
420 	kmutex_t *lock;
421 	bool expired;
422 
423 	KASSERT(c->c_magic == CALLOUT_MAGIC);
424 
425 	lock = callout_lock(c);
426 
427 	if ((c->c_flags & CALLOUT_PENDING) != 0)
428 		CIRCQ_REMOVE(&c->c_list);
429 	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
430 	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);
431 
432 	cc = c->c_cpu;
433 	if (cc->cc_active == c) {
434 		/*
435 		 * This is for non-MPSAFE callouts only.  To synchronize
436 		 * effectively we must be called with kernel_lock held.
437 		 * It's also taken in callout_softclock.
438 		 */
439 		cc->cc_cancel = c;
440 	}
441 
442 	mutex_spin_exit(lock);
443 
444 	return expired;
445 }
446 
447 /*
448  * callout_halt:
449  *
450  *	Cancel a pending callout.  If in-flight, block until it completes.
451  *	May not be called from a hard interrupt handler.  If the callout
452  *	can take locks, the caller of callout_halt() must not hold any of
453  *	those locks; otherwise the two could deadlock.  If 'interlock' is
454  *	non-NULL and we must wait for the callout to complete, it will be
455  *	released and re-acquired before returning.
456  */
457 bool
458 callout_halt(callout_t *cs, void *interlock)
459 {
460 	callout_impl_t *c = (callout_impl_t *)cs;
461 	struct callout_cpu *cc;
462 	struct lwp *l;
463 	kmutex_t *lock, *relock;
464 	bool expired;
465 
466 	KASSERT(c->c_magic == CALLOUT_MAGIC);
467 	KASSERT(!cpu_intr_p());
468 
469 	lock = callout_lock(c);
470 	relock = NULL;
471 
472 	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
473 	if ((c->c_flags & CALLOUT_PENDING) != 0)
474 		CIRCQ_REMOVE(&c->c_list);
475 	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);
476 
477 	l = curlwp;
478 	for (;;) {
479 		cc = c->c_cpu;
480 		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
481 			break;
482 		if (interlock != NULL) {
483 			/*
484 			 * Avoid potential scheduler lock order problems by
485 			 * dropping the interlock without the callout lock
486 			 * held.
487 			 */
488 			mutex_spin_exit(lock);
489 			mutex_exit(interlock);
490 			relock = interlock;
491 			interlock = NULL;
492 		} else {
493 			/* XXX Better to do priority inheritance. */
494 			KASSERT(l->l_wchan == NULL);
495 			cc->cc_nwait++;
496 			cc->cc_ev_block.ev_count++;
497 			l->l_kpriority = true;
498 			sleepq_enter(&cc->cc_sleepq, l, &cc->cc_lock);
499 			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
500 			    &sleep_syncobj);
501 			sleepq_block(0, false);
502 		}
503 		lock = callout_lock(c);
504 	}
505 
506 	mutex_spin_exit(lock);
507 	if (__predict_false(relock != NULL))
508 		mutex_enter(relock);
509 
510 	return expired;
511 }
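/*
 * Illustrative tear-down sequence for a driver detach path (sc_lock and
 * sc_callout are hypothetical; sc_lock is assumed to be the same mutex
 * the callout handler takes):
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	callout_halt(&sc->sc_callout, &sc->sc_lock);
 *	mutex_exit(&sc->sc_lock);
 *	callout_destroy(&sc->sc_callout);
 *
 * Passing sc_lock as the interlock lets callout_halt() drop it while it
 * sleeps, so an in-flight handler can take the same lock and finish
 * without deadlocking against us.
 */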
512 
513 #ifdef notyet
514 /*
515  * callout_bind:
516  *
517  *	Bind a callout so that it will only execute on one CPU.
518  *	The callout must be stopped, and must be MPSAFE.
519  *
520  *	XXX Disabled for now until it is decided how to handle
521  *	offlined CPUs.  We may want weak+strong binding.
522  */
523 void
524 callout_bind(callout_t *cs, struct cpu_info *ci)
525 {
526 	callout_impl_t *c = (callout_impl_t *)cs;
527 	struct callout_cpu *cc;
528 	kmutex_t *lock;
529 
530 	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
531 	KASSERT(c->c_cpu->cc_active != c);
532 	KASSERT(c->c_magic == CALLOUT_MAGIC);
533 	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);
534 
535 	lock = callout_lock(c);
536 	cc = ci->ci_data.cpu_callout;
537 	c->c_flags |= CALLOUT_BOUND;
538 	if (c->c_cpu != cc) {
539 		/*
540 		 * Assigning c_cpu effectively unlocks the callout
541 		 * structure, as we don't hold the new CPU's lock.
542 		 * Issue a memory barrier to prevent accesses from
543 		 * being reordered.
544 		 */
545 		membar_exit();
546 		c->c_cpu = cc;
547 	}
548 	mutex_spin_exit(lock);
549 }
550 #endif
551 
552 void
553 callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
554 {
555 	callout_impl_t *c = (callout_impl_t *)cs;
556 	kmutex_t *lock;
557 
558 	KASSERT(c->c_magic == CALLOUT_MAGIC);
559 
560 	lock = callout_lock(c);
561 	c->c_func = func;
562 	c->c_arg = arg;
563 	mutex_spin_exit(lock);
564 }
565 
566 bool
567 callout_expired(callout_t *cs)
568 {
569 	callout_impl_t *c = (callout_impl_t *)cs;
570 	kmutex_t *lock;
571 	bool rv;
572 
573 	KASSERT(c->c_magic == CALLOUT_MAGIC);
574 
575 	lock = callout_lock(c);
576 	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
577 	mutex_spin_exit(lock);
578 
579 	return rv;
580 }
581 
582 bool
583 callout_active(callout_t *cs)
584 {
585 	callout_impl_t *c = (callout_impl_t *)cs;
586 	kmutex_t *lock;
587 	bool rv;
588 
589 	KASSERT(c->c_magic == CALLOUT_MAGIC);
590 
591 	lock = callout_lock(c);
592 	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
593 	mutex_spin_exit(lock);
594 
595 	return rv;
596 }
597 
598 bool
599 callout_pending(callout_t *cs)
600 {
601 	callout_impl_t *c = (callout_impl_t *)cs;
602 	kmutex_t *lock;
603 	bool rv;
604 
605 	KASSERT(c->c_magic == CALLOUT_MAGIC);
606 
607 	lock = callout_lock(c);
608 	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
609 	mutex_spin_exit(lock);
610 
611 	return rv;
612 }
613 
614 bool
615 callout_invoking(callout_t *cs)
616 {
617 	callout_impl_t *c = (callout_impl_t *)cs;
618 	kmutex_t *lock;
619 	bool rv;
620 
621 	KASSERT(c->c_magic == CALLOUT_MAGIC);
622 
623 	lock = callout_lock(c);
624 	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
625 	mutex_spin_exit(lock);
626 
627 	return rv;
628 }
629 
630 void
631 callout_ack(callout_t *cs)
632 {
633 	callout_impl_t *c = (callout_impl_t *)cs;
634 	kmutex_t *lock;
635 
636 	KASSERT(c->c_magic == CALLOUT_MAGIC);
637 
638 	lock = callout_lock(c);
639 	c->c_flags &= ~CALLOUT_INVOKING;
640 	mutex_spin_exit(lock);
641 }
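/*
 * Sketch of the handler-side race check that callout_pending(),
 * callout_expired() and callout_ack() support, loosely after callout(9)
 * (foo_softc, foo_tick() and sc_lock are hypothetical):
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		mutex_enter(&sc->sc_lock);
 *		if (callout_pending(&sc->sc_callout)) {
 *			// rescheduled while we waited for sc_lock
 *			mutex_exit(&sc->sc_lock);
 *			return;
 *		}
 *		if (!callout_expired(&sc->sc_callout)) {
 *			// stopped while we waited for sc_lock
 *			mutex_exit(&sc->sc_lock);
 *			return;
 *		}
 *		callout_ack(&sc->sc_callout);
 *		... do the work ...
 *		mutex_exit(&sc->sc_lock);
 *	}
 */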
642 
643 /*
644  * callout_hardclock:
645  *
646  *	Called from hardclock() once every tick.  We schedule a soft
647  *	interrupt if there is work to be done.
648  */
649 void
650 callout_hardclock(void)
651 {
652 	struct callout_cpu *cc;
653 	int needsoftclock, ticks;
654 
655 	cc = curcpu()->ci_data.cpu_callout;
656 	mutex_spin_enter(&cc->cc_lock);
657 
658 	ticks = ++cc->cc_ticks;
659 
660 	MOVEBUCKET(cc, 0, ticks);
661 	if (MASKWHEEL(0, ticks) == 0) {
662 		MOVEBUCKET(cc, 1, ticks);
663 		if (MASKWHEEL(1, ticks) == 0) {
664 			MOVEBUCKET(cc, 2, ticks);
665 			if (MASKWHEEL(2, ticks) == 0)
666 				MOVEBUCKET(cc, 3, ticks);
667 		}
668 	}
669 
670 	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
671 	mutex_spin_exit(&cc->cc_lock);
672 
673 	if (needsoftclock)
674 		softint_schedule(callout_sih);
675 }
676 
677 /*
678  * callout_softclock:
679  *
680  *	Soft interrupt handler, scheduled above if there is work to
681  * 	be done.  Callouts are made in soft interrupt context.
682  */
683 static void
684 callout_softclock(void *v)
685 {
686 	callout_impl_t *c;
687 	struct callout_cpu *cc;
688 	void (*func)(void *);
689 	void *arg;
690 	int mpsafe, count, ticks, delta;
691 	lwp_t *l;
692 
693 	l = curlwp;
694 	KASSERT(l->l_cpu == curcpu());
695 	cc = l->l_cpu->ci_data.cpu_callout;
696 
697 	mutex_spin_enter(&cc->cc_lock);
698 	cc->cc_lwp = l;
699 	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
700 		c = CIRCQ_FIRST(&cc->cc_todo);
701 		KASSERT(c->c_magic == CALLOUT_MAGIC);
702 		KASSERT(c->c_func != NULL);
703 		KASSERT(c->c_cpu == cc);
704 		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
705 		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
706 		CIRCQ_REMOVE(&c->c_list);
707 
708 		/* If due, run it; otherwise insert it into the right bucket. */
709 		ticks = cc->cc_ticks;
710 		delta = c->c_time - ticks;
711 		if (delta > 0) {
712 			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
713 			continue;
714 		}
715 		if (delta < 0)
716 			cc->cc_ev_late.ev_count++;
717 
718 		c->c_flags ^= (CALLOUT_PENDING | CALLOUT_FIRED);
719 		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
720 		func = c->c_func;
721 		arg = c->c_arg;
722 		cc->cc_active = c;
723 
724 		mutex_spin_exit(&cc->cc_lock);
725 		if (!mpsafe) {
726 			KERNEL_LOCK(1, NULL);
727 			(*func)(arg);
728 			KERNEL_UNLOCK_ONE(NULL);
729 		} else
730 			(*func)(arg);
731 		mutex_spin_enter(&cc->cc_lock);
732 
733 		/*
734 		 * We can't touch 'c' here because it might already
735 		 * have been freed.  If any LWPs are waiting for the
736 		 * callout to complete, awaken them.
737 		 */
738 		cc->cc_active = NULL;
739 		if ((count = cc->cc_nwait) != 0) {
740 			cc->cc_nwait = 0;
741 			/* sleepq_wake() drops the lock. */
742 			sleepq_wake(&cc->cc_sleepq, cc, count, &cc->cc_lock);
743 			mutex_spin_enter(&cc->cc_lock);
744 		}
745 	}
746 	cc->cc_lwp = NULL;
747 	mutex_spin_exit(&cc->cc_lock);
748 }
749 
750 #ifdef DDB
751 static void
752 db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *bucket)
753 {
754 	callout_impl_t *c;
755 	db_expr_t offset;
756 	const char *name;
757 	static char question[] = "?";
758 	int b;
759 
760 	if (CIRCQ_EMPTY(bucket))
761 		return;
762 
763 	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
764 		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
765 		    &offset);
766 		name = name ? name : question;
767 		b = (bucket - cc->cc_wheel);
768 		if (b < 0)
769 			b = -WHEELSIZE;
770 		db_printf("%9d %2d/%-4d %16lx  %s\n",
771 		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
772 		    (u_long)c->c_arg, name);
773 		if (CIRCQ_LAST(&c->c_list, bucket))
774 			break;
775 	}
776 }
777 
778 void
779 db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
780 {
781 	CPU_INFO_ITERATOR cii;
782 	struct callout_cpu *cc;
783 	struct cpu_info *ci;
784 	int b;
785 
786 	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
787 	db_printf("    ticks  wheel               arg  func\n");
788 
789 	/*
790 	 * Don't lock the callwheel; all the other CPUs are paused
791 	 * anyhow, and we might be called in a circumstance where
792 	 * some other CPU was paused while holding the lock.
793 	 */
794 	for (CPU_INFO_FOREACH(cii, ci)) {
795 		cc = ci->ci_data.cpu_callout;
796 		db_show_callout_bucket(cc, &cc->cc_todo);
797 	}
798 	for (b = 0; b < BUCKETS; b++) {
799 		for (CPU_INFO_FOREACH(cii, ci)) {
800 			cc = ci->ci_data.cpu_callout;
801 			db_show_callout_bucket(cc, &cc->cc_wheel[b]);
802 		}
803 	}
804 }
805 #endif /* DDB */
806