xref: /netbsd-src/sys/kern/kern_timeout.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: kern_timeout.c,v 1.6 2003/05/17 15:53:42 mjl Exp $	*/
2 
3 /*-
4  * Copyright (c) 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
41  * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  *
48  * 1. Redistributions of source code must retain the above copyright
49  *    notice, this list of conditions and the following disclaimer.
50  * 2. Redistributions in binary form must reproduce the above copyright
51  *    notice, this list of conditions and the following disclaimer in the
52  *    documentation and/or other materials provided with the distribution.
53  * 3. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
57  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
58  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
59  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
60  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
61  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
62  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
63  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
64  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
65  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66  */
67 
68 /*
69  * Adapted from OpenBSD: kern_timeout.c,v 1.15 2002/12/08 04:21:07 art Exp,
70  * modified to match NetBSD's pre-existing callout API.
71  */
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/lock.h>
77 #include <sys/callout.h>
78 
79 #ifdef DDB
80 #include <machine/db_machdep.h>
81 #include <ddb/db_interface.h>
82 #include <ddb/db_access.h>
83 #include <ddb/db_sym.h>
84 #include <ddb/db_output.h>
85 #endif
86 
87 /*
88  * Timeouts are kept in a hierarchical timing wheel. The c_time is the value
89  * of the global variable "hardclock_ticks" when the timeout should be called.
90  * There are four levels with 256 buckets each. See 'Scheme 7' in
91  * "Hashed and Hierarchical Timing Wheels: Efficient Data Structures for
92  * Implementing a Timer Facility" by George Varghese and Tony Lauck.
93  */
94 #define BUCKETS 1024
95 #define WHEELSIZE 256
96 #define WHEELMASK 255
97 #define WHEELBITS 8
98 
99 static struct callout_circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
100 static struct callout_circq timeout_todo;		/* Worklist */
101 
102 #define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)
103 
104 #define BUCKET(rel, abs)						\
105     (((rel) <= (1 << (2*WHEELBITS)))					\
106     	? ((rel) <= (1 << WHEELBITS))					\
107             ? &timeout_wheel[MASKWHEEL(0, (abs))]			\
108             : &timeout_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
109         : ((rel) <= (1 << (3*WHEELBITS)))				\
110             ? &timeout_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]		\
111             : &timeout_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
112 
113 #define MOVEBUCKET(wheel, time)						\
114     CIRCQ_APPEND(&timeout_todo,						\
115         &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
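
/*
 * A worked example of the bucket selection above (the numbers are
 * illustrative only): the 1024 buckets form four wheels of 256 slots,
 * timeout_wheel[0..255] being wheel 0, [256..511] wheel 1, [512..767]
 * wheel 2 and [768..1023] wheel 3.  Suppose hardclock_ticks == 1000:
 *
 *	rel = 100,   abs = 1100  -> wheel 0, slot 1100 & 255 == 76
 *	rel = 5000,  abs = 6000  -> wheel 1, slot (6000 >> 8) & 255 == 23,
 *				    i.e. &timeout_wheel[23 + 256]
 *	rel = 69000, abs = 70000 -> wheel 2, slot (70000 >> 16) & 255 == 1,
 *				    i.e. &timeout_wheel[1 + 512]
 *
 * Callouts on the higher wheels are not yet in their final slot; once
 * MOVEBUCKET() dumps their bucket onto the worklist, softclock() either
 * runs them or re-buckets them closer to their expiry time.
 */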
116 
117 /*
118  * All wheels are locked with the same lock (which must also block out all
119  * interrupts).
120  */
121 static struct simplelock callout_slock;
122 
123 #define	CALLOUT_LOCK(s)							\
124 do {									\
125 	s = splsched();							\
126 	simple_lock(&callout_slock);					\
127 } while (/*CONSTCOND*/0)
128 
129 #define	CALLOUT_UNLOCK(s)						\
130 do {									\
131 	simple_unlock(&callout_slock);					\
132 	splx((s));							\
133 } while (/*CONSTCOND*/0)
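
/*
 * Every routine below that touches the wheels brackets the work with
 * this pair; a minimal sketch of the pattern:
 *
 *	int s;
 *
 *	CALLOUT_LOCK(s);
 *	... manipulate timeout_wheel[] / timeout_todo ...
 *	CALLOUT_UNLOCK(s);
 */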
134 
135 /*
136  * Circular queue definitions.
137  */
138 
139 #define CIRCQ_INIT(elem)						\
140 do {									\
141         (elem)->cq_next = (elem);					\
142         (elem)->cq_prev = (elem);					\
143 } while (/*CONSTCOND*/0)
144 
145 #define CIRCQ_INSERT(elem, list)					\
146 do {									\
147         (elem)->cq_prev = (list)->cq_prev;				\
148         (elem)->cq_next = (list);					\
149         (list)->cq_prev->cq_next = (elem);				\
150         (list)->cq_prev = (elem);					\
151 } while (/*CONSTCOND*/0)
152 
153 #define CIRCQ_APPEND(fst, snd)						\
154 do {									\
155         if (!CIRCQ_EMPTY(snd)) {					\
156                 (fst)->cq_prev->cq_next = (snd)->cq_next;		\
157                 (snd)->cq_next->cq_prev = (fst)->cq_prev;		\
158                 (snd)->cq_prev->cq_next = (fst);			\
159                 (fst)->cq_prev = (snd)->cq_prev;			\
160                 CIRCQ_INIT(snd);					\
161         }								\
162 } while (/*CONSTCOND*/0)
163 
164 #define CIRCQ_REMOVE(elem)						\
165 do {									\
166         (elem)->cq_next->cq_prev = (elem)->cq_prev;			\
167         (elem)->cq_prev->cq_next = (elem)->cq_next;			\
168 } while (/*CONSTCOND*/0)
169 
170 #define CIRCQ_FIRST(elem) ((elem)->cq_next)
171 
172 #define CIRCQ_EMPTY(elem) (CIRCQ_FIRST(elem) == (elem))
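
/*
 * Illustrative sketch of how these macros fit together (`head' and `c'
 * are hypothetical here; compare callout_reset() and softclock() below).
 * CIRCQ_INSERT() enqueues at the tail and CIRCQ_FIRST() peeks at the
 * head, so the queue is FIFO:
 *
 *	struct callout_circq head;
 *	struct callout *c;
 *
 *	CIRCQ_INIT(&head);
 *	CIRCQ_INSERT(&c->c_list, &head);
 *	while (!CIRCQ_EMPTY(&head)) {
 *		c = (struct callout *)CIRCQ_FIRST(&head);
 *		CIRCQ_REMOVE(&c->c_list);
 *		...
 *	}
 */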
173 
174 /*
175  * Some of the "math" in here is a bit tricky.
176  *
177  * We have to beware of wrapping ints.
178  * We use the fact that any element added to the queue must be added with a
179  * positive time. That means that no callout `c' on the queue can be
180  * scheduled to time out further in the future than INT_MAX ticks, but
181  * c->c_time itself can be positive or negative, so comparing it directly
182  * with anything is dangerous.  The only predictable way to use c->c_time
183  * is to compute how far in the future the callout will fire:
184  * "c->c_time - hardclock_ticks".  The result is always positive for
185  * future timeouts and 0 or negative for due timeouts.
186  */
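
/*
 * A concrete example of the above, assuming 32-bit ints and the usual
 * two's-complement wraparound (the numbers are illustrative only):
 *
 *	hardclock_ticks == 0x7ffffff6		(INT_MAX - 9)
 *	callout_reset(c, 100, ...) sets c->c_time = 0x8000005a, which read
 *	as a signed int is a large negative number.
 *
 * Comparing c->c_time against hardclock_ticks directly would make this
 * callout look long overdue, but the difference behaves:
 *
 *	c->c_time - hardclock_ticks == 100
 *
 * which is exactly the number of ticks until it fires, so the code below
 * only ever looks at such differences.
 */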
187 
188 #ifdef CALLOUT_EVENT_COUNTERS
189 static struct evcnt callout_ev_late;
190 #endif
191 
192 /*
193  * callout_startup:
194  *
195  *	Initialize the callout facility, called at system startup time.
196  */
197 void
198 callout_startup(void)
199 {
200 	int b;
201 
202 	CIRCQ_INIT(&timeout_todo);
203 	for (b = 0; b < BUCKETS; b++)
204 		CIRCQ_INIT(&timeout_wheel[b]);
205 	simple_lock_init(&callout_slock);
206 
207 #ifdef CALLOUT_EVENT_COUNTERS
208 	evcnt_attach_dynamic(&callout_ev_late, EVCNT_TYPE_MISC,
209 	    NULL, "callout", "late");
210 #endif
211 }
212 
213 /*
214  * callout_init:
215  *
216  *	Initialize a callout structure.
217  */
218 void
219 callout_init(struct callout *c)
220 {
221 
222 	memset(c, 0, sizeof(*c));
223 }
224 
225 /*
226  * callout_setfunc:
227  *
228  *	Initialize a callout structure and set the function and
229  *	argument.
230  */
231 void
232 callout_setfunc(struct callout *c, void (*func)(void *), void *arg)
233 {
234 
235 	memset(c, 0, sizeof(*c));
236 	c->c_func = func;
237 	c->c_arg = arg;
238 }
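
/*
 * Illustrative use of the setfunc/schedule pair from a hypothetical
 * driver (sc, struct foo_softc, sc_tick_ch, foo_tick and foo_attach
 * are made-up names):
 *
 *	void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		callout_setfunc(&sc->sc_tick_ch, foo_tick, sc);
 *		callout_schedule(&sc->sc_tick_ch, hz);
 *	}
 *
 * Once the function and argument are set, callout_schedule() alone is
 * enough each time the timeout needs to be re-armed.
 */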
239 
240 /*
241  * callout_reset:
242  *
243  *	Reset a callout structure with a new function and argument, and
244  *	schedule it to run.
245  */
246 void
247 callout_reset(struct callout *c, int to_ticks, void (*func)(void *), void *arg)
248 {
249 	int s, old_time;
250 
251 	KASSERT(to_ticks >= 0);
252 
253 	CALLOUT_LOCK(s);
254 
255 	/* Set the expiry time now; it won't change after this point. */
256 	old_time = c->c_time;
257 	c->c_time = to_ticks + hardclock_ticks;
258 	c->c_flags &= ~CALLOUT_FIRED;
259 
260 	c->c_func = func;
261 	c->c_arg = arg;
262 
263 	/*
264 	 * If this callout is already scheduled and is now being moved
265 	 * earlier, put it on the worklist so softclock() handles it promptly.
266 	 * Otherwise leave it in place and let it be re-bucketed later.
267 	 */
268 	if (callout_pending(c)) {
269 		if (c->c_time - old_time < 0) {
270 			CIRCQ_REMOVE(&c->c_list);
271 			CIRCQ_INSERT(&c->c_list, &timeout_todo);
272 		}
273 	} else {
274 		c->c_flags |= CALLOUT_PENDING;
275 		CIRCQ_INSERT(&c->c_list, &timeout_todo);
276 	}
277 
278 	CALLOUT_UNLOCK(s);
279 }
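
/*
 * Illustrative use of callout_reset() (again with made-up names): a
 * handler that re-arms itself once per second, assuming the callout
 * was set up with callout_init() at attach time, might look like
 *
 *	void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... do the periodic work ...
 *		callout_reset(&sc->sc_tick_ch, hz, foo_tick, sc);
 *	}
 *
 * Calling callout_reset() on a callout that is still pending simply
 * reschedules it, as described above.
 */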
280 
281 /*
282  * callout_schedule:
283  *
284  *	Schedule a callout to run.  The function and argument must
285  *	already be set in the callout structure.
286  */
287 void
288 callout_schedule(struct callout *c, int to_ticks)
289 {
290 	int s, old_time;
291 
292 	KASSERT(to_ticks >= 0);
293 
294 	CALLOUT_LOCK(s);
295 
296 	/* Set the expiry time now; it won't change after this point. */
297 	old_time = c->c_time;
298 	c->c_time = to_ticks + hardclock_ticks;
299 	c->c_flags &= ~CALLOUT_FIRED;
300 
301 	/*
302 	 * If this callout is already scheduled and is now being moved
303 	 * earlier, put it on the worklist so softclock() handles it promptly.
304 	 * Otherwise leave it in place and let it be re-bucketed later.
305 	 */
306 	if (callout_pending(c)) {
307 		if (c->c_time - old_time < 0) {
308 			CIRCQ_REMOVE(&c->c_list);
309 			CIRCQ_INSERT(&c->c_list, &timeout_todo);
310 		}
311 	} else {
312 		c->c_flags |= CALLOUT_PENDING;
313 		CIRCQ_INSERT(&c->c_list, &timeout_todo);
314 	}
315 
316 	CALLOUT_UNLOCK(s);
317 }
318 
319 /*
320  * callout_stop:
321  *
322  *	Cancel a pending callout.
323  */
324 void
325 callout_stop(struct callout *c)
326 {
327 	int s;
328 
329 	CALLOUT_LOCK(s);
330 
331 	if (callout_pending(c))
332 		CIRCQ_REMOVE(&c->c_list);
333 
334 	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);
335 
336 	CALLOUT_UNLOCK(s);
337 }
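
/*
 * Note that callout_stop() only removes a pending callout from its
 * queue; a sketch of the usual teardown order in a hypothetical detach
 * routine would be
 *
 *	callout_stop(&sc->sc_tick_ch);
 *	... free sc ...
 *
 * If the callout has already been handed to softclock(), its function
 * may still be running (the lock is dropped around the call below), so
 * callers needing a stronger guarantee must synchronize with the
 * handler themselves.
 */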
338 
339 /*
340  * This is called from hardclock() once every tick.
341  * We return !0 if we need to schedule a softclock.
342  */
343 int
344 callout_hardclock(void)
345 {
346 	int s;
347 	int needsoftclock;
348 
349 	CALLOUT_LOCK(s);
350 
351 	MOVEBUCKET(0, hardclock_ticks);
352 	if (MASKWHEEL(0, hardclock_ticks) == 0) {
353 		MOVEBUCKET(1, hardclock_ticks);
354 		if (MASKWHEEL(1, hardclock_ticks) == 0) {
355 			MOVEBUCKET(2, hardclock_ticks);
356 			if (MASKWHEEL(2, hardclock_ticks) == 0)
357 				MOVEBUCKET(3, hardclock_ticks);
358 		}
359 	}
360 
361 	needsoftclock = !CIRCQ_EMPTY(&timeout_todo);
362 	CALLOUT_UNLOCK(s);
363 
364 	return needsoftclock;
365 }
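
/*
 * A sketch of the caller's side (not the actual kern_clock.c code):
 * hardclock() advances hardclock_ticks and then does roughly
 *
 *	if (callout_hardclock())
 *		(schedule a softclock soft interrupt);
 *
 * so the wheels are advanced at hard-interrupt priority on every tick,
 * while the callout functions themselves run later from softclock().
 */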
366 
367 /* ARGSUSED */
368 void
369 softclock(void *v)
370 {
371 	struct callout *c;
372 	void (*func)(void *);
373 	void *arg;
374 	int s;
375 
376 	CALLOUT_LOCK(s);
377 
378 	while (!CIRCQ_EMPTY(&timeout_todo)) {
379 
380 		c = (struct callout *)CIRCQ_FIRST(&timeout_todo); /* XXX */
381 		CIRCQ_REMOVE(&c->c_list);
382 
383 		/* If due, run it; otherwise insert it into the right bucket. */
384 		if (c->c_time - hardclock_ticks > 0) {
385 			CIRCQ_INSERT(&c->c_list,
386 			    BUCKET((c->c_time - hardclock_ticks), c->c_time));
387 		} else {
388 #ifdef CALLOUT_EVENT_COUNTERS
389 			if (c->c_time - hardclock_ticks < 0)
390 				callout_ev_late.ev_count++;
391 #endif
392 			c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
393 			    CALLOUT_FIRED;
394 
395 			func = c->c_func;
396 			arg = c->c_arg;
397 
398 			CALLOUT_UNLOCK(s);
399 			(*func)(arg);
400 			CALLOUT_LOCK(s);
401 		}
402 	}
403 
404 	CALLOUT_UNLOCK(s);
405 }
406 
407 #ifdef DDB
408 static void
409 db_show_callout_bucket(struct callout_circq *bucket)
410 {
411 	struct callout *c;
412 	struct callout_circq *p;
413 	db_expr_t offset;
414 	char *name;
415 
416 	for (p = CIRCQ_FIRST(bucket); p != bucket; p = CIRCQ_FIRST(p)) {
417 		c = (struct callout *)p; /* XXX */
418 		db_find_sym_and_offset((db_addr_t)c->c_func, &name, &offset);
419 		name = name ? name : "?";
420 #ifdef _LP64
421 #define	POINTER_WIDTH	"%16lx"
422 #else
423 #define	POINTER_WIDTH	"%8lx"
424 #endif
425 		db_printf("%9d %2d/%-4d " POINTER_WIDTH "  %s\n",
426 		    c->c_time - hardclock_ticks,
427 		    (int)((bucket - timeout_wheel) / WHEELSIZE),
428 		    (int)(bucket - timeout_wheel), (u_long) c->c_arg, name);
429 	}
430 }
431 
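/*
 * db_show_callout:
 *
 *	Dump the callout wheels from DDB; typically reached with the
 *	debugger command "show callout", e.g.:
 *
 *		db> show callout
 */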
432 void
433 db_show_callout(db_expr_t addr, int haddr, db_expr_t count, char *modif)
434 {
435 	int b;
436 
437 	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
438 #ifdef _LP64
439 	db_printf("    ticks  wheel               arg  func\n");
440 #else
441 	db_printf("    ticks  wheel       arg  func\n");
442 #endif
443 
444 	/*
445 	 * Don't lock the callwheel; all the other CPUs are paused
446 	 * anyhow, and we might be called in a circumstance where
447 	 * some other CPU was paused while holding the lock.
448 	 */
449 
450 	db_show_callout_bucket(&timeout_todo);
451 	for (b = 0; b < BUCKETS; b++)
452 		db_show_callout_bucket(&timeout_wheel[b]);
453 }
454 #endif /* DDB */
455