/*	$OpenBSD: kern_timeout.c,v 1.50 2016/10/03 11:54:29 dlg Exp $	*/
/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/queue.h>			/* _Q_INVALIDATE */

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

/*
 * Timeouts are kept in a hierarchical timing wheel. The to_time is the value
 * of the global variable "ticks" when the timeout should be called. There are
 * four levels with 256 buckets each. See 'Scheme 7' in
 * "Hashed and Hierarchical Timing Wheels: Efficient Data Structures for
 * Implementing a Timer Facility" by George Varghese and Tony Lauck.
 */
#define BUCKETS 1024
#define WHEELSIZE 256
#define WHEELMASK 255
#define WHEELBITS 8

struct circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
struct circq timeout_todo;		/* Worklist */
struct circq timeout_proc;		/* Due timeouts needing proc. context */

#define MASKWHEEL(wheel, time) (((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define BUCKET(rel, abs)						\
    (timeout_wheel[							\
	((rel) <= (1 << (2*WHEELBITS)))					\
	    ? ((rel) <= (1 << WHEELBITS))				\
		? MASKWHEEL(0, (abs))					\
		: MASKWHEEL(1, (abs)) + WHEELSIZE			\
	    : ((rel) <= (1 << (3*WHEELBITS)))				\
		? MASKWHEEL(2, (abs)) + 2*WHEELSIZE			\
		: MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
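
/*
 * Worked example (illustrative, added alongside the original text):
 * with WHEELBITS == 8 the four levels cover relative distances of up
 * to 2^8, 2^16, 2^24 ticks and beyond, and MASKWHEEL(w, abs) simply
 * extracts byte `w' of the absolute expiry time.  For to_time ==
 * 0x31234 and rel == to_time - ticks == 0x300 (768 ticks out):
 *
 *	768 > (1 << WHEELBITS) but 768 <= (1 << 2*WHEELBITS), so
 *	BUCKET picks level 1: MASKWHEEL(1, 0x31234) == 0x12, giving
 *	timeout_wheel[WHEELSIZE + 0x12], i.e. bucket 274.
 */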

#define MOVEBUCKET(wheel, time)						\
    CIRCQ_APPEND(&timeout_todo,						\
        &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])

/*
 * The first thing in a struct timeout is its struct circq, so we
 * can get back from a pointer to the latter to a pointer to the
 * whole timeout with just a cast.
 */
static __inline struct timeout *
timeout_from_circq(struct circq *p)
{
	return ((struct timeout *)(p));
}
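
/*
 * Note (illustrative): this is the degenerate case of the usual
 * container-of idiom.  Because offsetof(struct timeout, to_list) is 0,
 * the cast above is equivalent to the more general form:
 *
 *	(struct timeout *)((char *)p - offsetof(struct timeout, to_list))
 */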

/*
 * All wheels are locked with the same mutex.
 *
 * We need locking since the timeouts are manipulated from hardclock,
 * which does not run behind the big lock.
 */
struct mutex timeout_mutex = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * Circular queue definitions.
 */

#define CIRCQ_INIT(elem) do {                   \
        (elem)->next = (elem);                  \
        (elem)->prev = (elem);                  \
} while (0)

#define CIRCQ_INSERT(elem, list) do {           \
        (elem)->prev = (list)->prev;            \
        (elem)->next = (list);                  \
        (list)->prev->next = (elem);            \
        (list)->prev = (elem);                  \
} while (0)

#define CIRCQ_APPEND(fst, snd) do {             \
        if (!CIRCQ_EMPTY(snd)) {                \
                (fst)->prev->next = (snd)->next;\
                (snd)->next->prev = (fst)->prev;\
                (snd)->prev->next = (fst);      \
                (fst)->prev = (snd)->prev;      \
                CIRCQ_INIT(snd);                \
        }                                       \
} while (0)

#define CIRCQ_REMOVE(elem) do {                 \
        (elem)->next->prev = (elem)->prev;      \
        (elem)->prev->next = (elem)->next;      \
	_Q_INVALIDATE((elem)->prev);		\
	_Q_INVALIDATE((elem)->next);		\
} while (0)

#define CIRCQ_FIRST(elem) ((elem)->next)

#define CIRCQ_EMPTY(elem) (CIRCQ_FIRST(elem) == (elem))
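
/*
 * A minimal sketch of how these macros compose (illustrative only;
 * "head" and "elem" are hypothetical locals, not used elsewhere):
 *
 *	struct circq head, elem;
 *
 *	CIRCQ_INIT(&head);		-- empty: head points at itself
 *	CIRCQ_INSERT(&elem, &head);	-- enqueue at the tail
 *	if (!CIRCQ_EMPTY(&head))
 *		CIRCQ_REMOVE(CIRCQ_FIRST(&head));
 */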

void softclock_thread(void *);
void softclock_create_thread(void *);

/*
 * Some of the "math" in here is a bit tricky.
 *
 * We have to beware of wrapping ints.
 * We use the fact that any element added to the queue must be added with a
 * positive time. That means that any element `to' on the queue cannot be
 * scheduled to timeout further in time than INT_MAX, but to->to_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the to->to_time value in any predictable way
 * is when we calculate how far in the future `to' will timeout -
 * "to->to_time - ticks". The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
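
/*
 * Worked example of why "to->to_time - ticks" is the only safe
 * comparison (illustrative numbers): suppose ticks == INT_MAX - 1 and
 * a timeout is added 10 ticks out, so to_time wraps to INT_MIN + 8.
 * Comparing to_time against ticks directly says the timeout is in the
 * distant past, but the subtraction wraps back:
 *
 *	to_time - ticks == (INT_MIN + 8) - (INT_MAX - 1) == 10
 *
 * which is the correct positive distance into the future.  (This
 * relies on the two's complement wrap-around the kernel assumes.)
 */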

void
timeout_startup(void)
{
	int b;

	CIRCQ_INIT(&timeout_todo);
	CIRCQ_INIT(&timeout_proc);
	for (b = 0; b < nitems(timeout_wheel); b++)
		CIRCQ_INIT(&timeout_wheel[b]);
}

void
timeout_proc_init(void)
{
	kthread_create_deferred(softclock_create_thread, NULL);
}

void
timeout_set(struct timeout *new, void (*fn)(void *), void *arg)
{
	new->to_func = fn;
	new->to_arg = arg;
	new->to_flags = TIMEOUT_INITIALIZED;
}

void
timeout_set_proc(struct timeout *new, void (*fn)(void *), void *arg)
{
	timeout_set(new, fn, arg);
	new->to_flags |= TIMEOUT_NEEDPROCCTX;
}

int
timeout_add(struct timeout *new, int to_ticks)
{
	int old_time;
	int ret = 1;

#ifdef DIAGNOSTIC
	if (!(new->to_flags & TIMEOUT_INITIALIZED))
		panic("timeout_add: not initialized");
	if (to_ticks < 0)
		panic("timeout_add: to_ticks (%d) < 0", to_ticks);
#endif

	mtx_enter(&timeout_mutex);
	/* Initialize the time here; it won't change. */
	old_time = new->to_time;
	new->to_time = to_ticks + ticks;
	new->to_flags &= ~TIMEOUT_TRIGGERED;

	/*
	 * If this timeout already is scheduled and now is moved
	 * earlier, reschedule it now. Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if (new->to_flags & TIMEOUT_ONQUEUE) {
		if (new->to_time - ticks < old_time - ticks) {
			CIRCQ_REMOVE(&new->to_list);
			CIRCQ_INSERT(&new->to_list, &timeout_todo);
		}
		ret = 0;
	} else {
		new->to_flags |= TIMEOUT_ONQUEUE;
		CIRCQ_INSERT(&new->to_list, &timeout_todo);
	}
	mtx_leave(&timeout_mutex);

	return (ret);
}
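
#if 0
/*
 * A minimal usage sketch (hypothetical names, not part of this file):
 * arm a timeout once at init time and let the handler rearm itself,
 * giving a periodic one-second callback run from softclock().
 */
struct timeout example_tmo;

void
example_tick(void *arg)
{
	/* ... periodic work goes here ... */
	timeout_add(&example_tmo, hz);		/* rearm: ~1 second out */
}

void
example_init(void)
{
	timeout_set(&example_tmo, example_tick, NULL);
	timeout_add(&example_tmo, hz);		/* first shot in ~1 second */
}
#endif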

int
timeout_add_tv(struct timeout *to, const struct timeval *tv)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * tv->tv_sec + tv->tv_usec / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && tv->tv_usec > 0)
		to_ticks = 1;

	return (timeout_add(to, (int)to_ticks));
}

int
timeout_add_ts(struct timeout *to, const struct timespec *ts)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * ts->tv_sec + ts->tv_nsec / (tick * 1000);
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && ts->tv_nsec > 0)
		to_ticks = 1;

	return (timeout_add(to, (int)to_ticks));
}

int
timeout_add_bt(struct timeout *to, const struct bintime *bt)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * bt->sec + (long)(((uint64_t)1000000 *
	    (uint32_t)(bt->frac >> 32)) >> 32) / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && bt->frac > 0)
		to_ticks = 1;

	return (timeout_add(to, (int)to_ticks));
}

int
timeout_add_sec(struct timeout *to, int secs)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)hz * secs;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;

	return (timeout_add(to, (int)to_ticks));
}

int
timeout_add_msec(struct timeout *to, int msecs)
{
	uint64_t to_ticks;

	to_ticks = (uint64_t)msecs * 1000 / tick;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	if (to_ticks == 0 && msecs > 0)
		to_ticks = 1;

	return (timeout_add(to, (int)to_ticks));
}

int
timeout_add_usec(struct timeout *to, int usecs)
{
	int to_ticks = usecs / tick;

	if (to_ticks == 0 && usecs > 0)
		to_ticks = 1;

	return (timeout_add(to, to_ticks));
}

int
timeout_add_nsec(struct timeout *to, int nsecs)
{
	int to_ticks = nsecs / (tick * 1000);

	if (to_ticks == 0 && nsecs > 0)
		to_ticks = 1;

	return (timeout_add(to, to_ticks));
}
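
/*
 * Worked conversion example (illustrative numbers): with hz == 100,
 * tick == 1000000 / hz == 10000 microseconds.  timeout_add_msec(to, 25)
 * computes 25 * 1000 / 10000 == 2 ticks, while timeout_add_usec(to, 3)
 * computes 3 / 10000 == 0 and is bumped to 1 tick, so a nonzero delay
 * is never collapsed to "due immediately".  All variants clamp at
 * INT_MAX since timeout_add() takes an int.
 */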

int
timeout_del(struct timeout *to)
{
	int ret = 0;

	mtx_enter(&timeout_mutex);
	if (to->to_flags & TIMEOUT_ONQUEUE) {
		CIRCQ_REMOVE(&to->to_list);
		to->to_flags &= ~TIMEOUT_ONQUEUE;
		ret = 1;
	}
	to->to_flags &= ~TIMEOUT_TRIGGERED;
	mtx_leave(&timeout_mutex);

	return (ret);
}

/*
 * This is called from hardclock() once every tick.
 * We return !0 if we need to schedule a softclock.
 */
int
timeout_hardclock_update(void)
{
	int ret;

	mtx_enter(&timeout_mutex);

	MOVEBUCKET(0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(3, ticks);
		}
	}
	ret = !CIRCQ_EMPTY(&timeout_todo);
	mtx_leave(&timeout_mutex);

	return (ret);
}
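
/*
 * Cascade example (illustrative): buckets above level 0 hold timeouts
 * whose low-order time bytes haven't counted down yet, so they are
 * redistributed rather than run.  When ticks advances to 0x4500 the
 * low byte has just wrapped to 0, so MASKWHEEL(0, ticks) == 0 and
 * MOVEBUCKET(1, 0x4500) dumps level-1 bucket 0x45 onto timeout_todo;
 * softclock() then runs whatever is due and re-files the rest into
 * lower-level buckets via BUCKET().
 */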

void
timeout_run(struct timeout *to)
{
	void (*fn)(void *);
	void *arg;

	MUTEX_ASSERT_LOCKED(&timeout_mutex);

	to->to_flags &= ~TIMEOUT_ONQUEUE;
	to->to_flags |= TIMEOUT_TRIGGERED;

	fn = to->to_func;
	arg = to->to_arg;

	mtx_leave(&timeout_mutex);
	fn(arg);
	mtx_enter(&timeout_mutex);
}

void
softclock(void *arg)
{
	int delta;
	struct circq *bucket;
	struct timeout *to;
	int needsproc = 0;

	mtx_enter(&timeout_mutex);
	while (!CIRCQ_EMPTY(&timeout_todo)) {
		to = timeout_from_circq(CIRCQ_FIRST(&timeout_todo));
		CIRCQ_REMOVE(&to->to_list);

		/*
		 * If due, run it or defer execution to the thread;
		 * otherwise insert it into the right bucket.
		 */
		delta = to->to_time - ticks;
		if (delta > 0) {
			bucket = &BUCKET(delta, to->to_time);
			CIRCQ_INSERT(&to->to_list, bucket);
		} else if (to->to_flags & TIMEOUT_NEEDPROCCTX) {
			CIRCQ_INSERT(&to->to_list, &timeout_proc);
			needsproc = 1;
		} else {
#ifdef DEBUG
			if (delta < 0)
				printf("timeout delayed %d\n", delta);
#endif
			timeout_run(to);
		}
	}
	mtx_leave(&timeout_mutex);

	if (needsproc)
		wakeup(&timeout_proc);
}

void
softclock_create_thread(void *arg)
{
	if (kthread_create(softclock_thread, NULL, NULL, "softclock"))
		panic("fork softclock");
}

void
softclock_thread(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct sleep_state sls;
	struct timeout *to;

	KERNEL_ASSERT_LOCKED();

	/* Be conservative for the moment */
	CPU_INFO_FOREACH(cii, ci) {
		if (CPU_IS_PRIMARY(ci))
			break;
	}
	KASSERT(ci != NULL);
	sched_peg_curproc(ci);

	for (;;) {
		sleep_setup(&sls, &timeout_proc, PSWP, "bored");
		sleep_finish(&sls, CIRCQ_EMPTY(&timeout_proc));

		mtx_enter(&timeout_mutex);
		while (!CIRCQ_EMPTY(&timeout_proc)) {
			to = timeout_from_circq(CIRCQ_FIRST(&timeout_proc));
			CIRCQ_REMOVE(&to->to_list);
			timeout_run(to);
		}
		mtx_leave(&timeout_mutex);
	}
}

#ifndef SMALL_KERNEL
void
timeout_adjust_ticks(int adj)
{
	struct timeout *to;
	struct circq *p;
	int new_ticks, b;

	/* adjusting the monotonic clock backwards would be a Bad Thing */
	if (adj <= 0)
		return;

	mtx_enter(&timeout_mutex);
	new_ticks = ticks + adj;
	for (b = 0; b < nitems(timeout_wheel); b++) {
		p = CIRCQ_FIRST(&timeout_wheel[b]);
		while (p != &timeout_wheel[b]) {
			to = timeout_from_circq(p);
			p = CIRCQ_FIRST(p);

			/* when moving a timeout forward we need to reinsert it */
			if (to->to_time - ticks < adj)
				to->to_time = new_ticks;
			CIRCQ_REMOVE(&to->to_list);
			CIRCQ_INSERT(&to->to_list, &timeout_todo);
		}
	}
	ticks = new_ticks;
	mtx_leave(&timeout_mutex);
}
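
/*
 * Worked example (illustrative): timeout_adjust_ticks(100) advances
 * "ticks" by 100 in one step.  A timeout that was due 40 ticks out
 * satisfies "to->to_time - ticks < adj", so its to_time is clamped to
 * new_ticks and it runs on the next softclock(); one due 500 ticks out
 * keeps its to_time and is merely re-bucketed, now 400 ticks away.
 */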
#endif

#ifdef DDB
void db_show_callout_bucket(struct circq *);

void
db_show_callout_bucket(struct circq *bucket)
{
	struct timeout *to;
	struct circq *p;
	db_expr_t offset;
	char *name;

	for (p = CIRCQ_FIRST(bucket); p != bucket; p = CIRCQ_FIRST(p)) {
		to = timeout_from_circq(p);
		db_find_sym_and_offset((db_addr_t)to->to_func, &name, &offset);
		name = name ? name : "?";
		db_printf("%9d %2td/%-4td %p  %s\n", to->to_time - ticks,
		    (bucket - timeout_wheel) / WHEELSIZE,
		    bucket - timeout_wheel, to->to_arg, name);
	}
}

void
db_show_callout(db_expr_t addr, int haddr, db_expr_t count, char *modif)
{
	int b;

	db_printf("ticks now: %d\n", ticks);
	db_printf("    ticks  wheel       arg  func\n");

	db_show_callout_bucket(&timeout_todo);
	for (b = 0; b < nitems(timeout_wheel); b++)
		db_show_callout_bucket(&timeout_wheel[b]);
}
#endif