/*	$NetBSD: kern_timeout.c,v 1.9 2003/08/03 19:14:59 he Exp $	*/

/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.9 2003/08/03 19:14:59 he Exp $");

/*
 * Adapted from OpenBSD: kern_timeout.c,v 1.15 2002/12/08 04:21:07 art Exp,
 * modified to match NetBSD's pre-existing callout API.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/callout.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif

/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the value
 * of the global variable "hardclock_ticks" when the timeout should be called.
 * There are four levels with 256 buckets each.  See 'Scheme 7' in
 * "Hashed and Hierarchical Timing Wheels: Efficient Data Structures for
 * Implementing a Timer Facility" by George Varghese and Tony Lauck.
 */
#define	BUCKETS		1024
#define	WHEELSIZE	256
#define	WHEELMASK	255
#define	WHEELBITS	8

static struct callout_circq timeout_wheel[BUCKETS];	/* Queues of timeouts */
static struct callout_circq timeout_todo;		/* Worklist */

#define	MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)

#define	BUCKET(rel, abs)						\
	(((rel) <= (1 << (2*WHEELBITS)))				\
	    ? ((rel) <= (1 << WHEELBITS))				\
		? &timeout_wheel[MASKWHEEL(0, (abs))]			\
		: &timeout_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]	\
	    : ((rel) <= (1 << (3*WHEELBITS)))				\
		? &timeout_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
		: &timeout_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])

#define	MOVEBUCKET(wheel, time)						\
	CIRCQ_APPEND(&timeout_todo,					\
	    &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
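
/*
 * Worked example of the BUCKET() mapping above (the numbers are chosen
 * purely for illustration): suppose hardclock_ticks is 0x12345 and a
 * callout is scheduled with to_ticks == 300, so rel == 300 and
 * abs == c_time == 0x12471.  Since 300 is greater than (1 << WHEELBITS)
 * but no greater than (1 << (2*WHEELBITS)), the callout belongs on wheel
 * level 1.  MASKWHEEL(1, 0x12471) == (0x12471 >> 8) & 0xff == 0x24, so
 * BUCKET(300, 0x12471) yields &timeout_wheel[0x24 + WHEELSIZE], i.e.
 * bucket 292.  MOVEBUCKET() later cascades that bucket onto the worklist,
 * and softclock() re-sorts the callout into a level-0 bucket before it
 * finally expires.
 */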

/*
 * All wheels are locked with the same lock (which must also block out all
 * interrupts).
 */
static struct simplelock callout_slock;

#define	CALLOUT_LOCK(s)							\
do {									\
	s = splsched();							\
	simple_lock(&callout_slock);					\
} while (/*CONSTCOND*/0)

#define	CALLOUT_UNLOCK(s)						\
do {									\
	simple_unlock(&callout_slock);					\
	splx((s));							\
} while (/*CONSTCOND*/0)

/*
 * Circular queue definitions.
 */

#define	CIRCQ_INIT(elem)						\
do {									\
	(elem)->cq_next = (elem);					\
	(elem)->cq_prev = (elem);					\
} while (/*CONSTCOND*/0)

#define	CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev = (list)->cq_prev;				\
	(elem)->cq_next = (list);					\
	(list)->cq_prev->cq_next = (elem);				\
	(list)->cq_prev = (elem);					\
} while (/*CONSTCOND*/0)

#define	CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev->cq_next = (snd)->cq_next;		\
		(snd)->cq_next->cq_prev = (fst)->cq_prev;		\
		(snd)->cq_prev->cq_next = (fst);			\
		(fst)->cq_prev = (snd)->cq_prev;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)

#define	CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next->cq_prev = (elem)->cq_prev;			\
	(elem)->cq_prev->cq_next = (elem)->cq_next;			\
} while (/*CONSTCOND*/0)

#define	CIRCQ_FIRST(elem)	((elem)->cq_next)

#define	CIRCQ_EMPTY(elem)	(CIRCQ_FIRST(elem) == (elem))

/*
 * Some of the "math" in here is a bit tricky.
 *
 * We have to beware of wrapping ints.
 * We use the fact that any element added to the queue must be added with a
 * positive time.  That means that any element `to' on the queue cannot be
 * scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way
 * is when we calculate how far in the future `to' will timeout -
 * "c->c_time - hardclock_ticks".  The result will always be positive for
 * future timeouts and 0 or negative for due timeouts.
 */
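
/*
 * A concrete illustration of the wrap-around rule above (the values are
 * only an example, and assume the usual two's-complement wrap): suppose
 * hardclock_ticks has wrapped around and is now INT_MIN + 5, and a
 * callout became due 10 ticks ago, so its nominal expiry INT_MIN - 5 is
 * stored as the wrapped value INT_MAX - 4.  Comparing c_time against
 * hardclock_ticks directly would claim the callout lies far in the
 * future, but the difference c_time - hardclock_ticks also wraps and
 * evaluates to -10, correctly marking the callout as overdue.  This is
 * why the code below only ever tests the sign of such differences.
 */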

#ifdef CALLOUT_EVENT_COUNTERS
static struct evcnt callout_ev_late;
#endif

/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 */
void
callout_startup(void)
{
	int b;

	CIRCQ_INIT(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&timeout_wheel[b]);
	simple_lock_init(&callout_slock);

#ifdef CALLOUT_EVENT_COUNTERS
	evcnt_attach_dynamic(&callout_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", "late");
#endif
}

/*
 * callout_init:
 *
 *	Initialize a callout structure.
 */
void
callout_init(struct callout *c)
{

	memset(c, 0, sizeof(*c));
}

/*
 * callout_setfunc:
 *
 *	Initialize a callout structure and set the function and
 *	argument.
 */
void
callout_setfunc(struct callout *c, void (*func)(void *), void *arg)
{

	memset(c, 0, sizeof(*c));
	c->c_func = func;
	c->c_arg = arg;
}

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*func)(void *), void *arg)
{
	int s, old_time;

	KASSERT(to_ticks >= 0);

	CALLOUT_LOCK(s);

	/* Initialize the time here, it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~(CALLOUT_FIRED|CALLOUT_INVOKING);

	c->c_func = func;
	c->c_arg = arg;

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if (callout_pending(c)) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	CALLOUT_UNLOCK(s);
}

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(struct callout *c, int to_ticks)
{
	int s, old_time;

	KASSERT(to_ticks >= 0);

	CALLOUT_LOCK(s);

	/* Initialize the time here, it won't change. */
	old_time = c->c_time;
	c->c_time = to_ticks + hardclock_ticks;
	c->c_flags &= ~(CALLOUT_FIRED|CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if (callout_pending(c)) {
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &timeout_todo);
		}
	} else {
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &timeout_todo);
	}

	CALLOUT_UNLOCK(s);
}

/*
 * callout_stop:
 *
 *	Cancel a pending callout.
 */
void
callout_stop(struct callout *c)
{
	int s;

	CALLOUT_LOCK(s);

	if (callout_pending(c))
		CIRCQ_REMOVE(&c->c_list);

	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	CALLOUT_UNLOCK(s);
}
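
/*
 * Typical use of the interface above, sketched as a hypothetical driver
 * (the names "foo_softc", "foo_attach", "foo_tick" and "foo_detach", and
 * the one-second period, are illustrative only and do not appear in this
 * file):
 *
 *	struct foo_softc {
 *		struct callout sc_tick_ch;
 *		...
 *	};
 *
 *	void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		callout_init(&sc->sc_tick_ch);
 *		callout_reset(&sc->sc_tick_ch, hz, foo_tick, sc);
 *	}
 *
 *	void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... do periodic work ...
 *		callout_reset(&sc->sc_tick_ch, hz, foo_tick, sc);
 *	}
 *
 *	void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		callout_stop(&sc->sc_tick_ch);
 *	}
 */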

/*
 * This is called from hardclock() once every tick.
 * We return !0 if we need to schedule a softclock.
 */
int
callout_hardclock(void)
{
	int s;
	int needsoftclock;

	CALLOUT_LOCK(s);

	MOVEBUCKET(0, hardclock_ticks);
	if (MASKWHEEL(0, hardclock_ticks) == 0) {
		MOVEBUCKET(1, hardclock_ticks);
		if (MASKWHEEL(1, hardclock_ticks) == 0) {
			MOVEBUCKET(2, hardclock_ticks);
			if (MASKWHEEL(2, hardclock_ticks) == 0)
				MOVEBUCKET(3, hardclock_ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&timeout_todo);
	CALLOUT_UNLOCK(s);

	return needsoftclock;
}

/* ARGSUSED */
void
softclock(void *v)
{
	struct callout *c;
	void (*func)(void *);
	void *arg;
	int s;

	CALLOUT_LOCK(s);

	while (!CIRCQ_EMPTY(&timeout_todo)) {

		c = (struct callout *)CIRCQ_FIRST(&timeout_todo);	/* XXX */
		CIRCQ_REMOVE(&c->c_list);

		/* If due run it, otherwise insert it into the right bucket. */
		if (c->c_time - hardclock_ticks > 0) {
			CIRCQ_INSERT(&c->c_list,
			    BUCKET((c->c_time - hardclock_ticks), c->c_time));
		} else {
#ifdef CALLOUT_EVENT_COUNTERS
			if (c->c_time - hardclock_ticks < 0)
				callout_ev_late.ev_count++;
#endif
			c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
			    (CALLOUT_FIRED|CALLOUT_INVOKING);

			func = c->c_func;
			arg = c->c_arg;

			CALLOUT_UNLOCK(s);
			(*func)(arg);
			CALLOUT_LOCK(s);
		}
	}

	CALLOUT_UNLOCK(s);
}
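
/*
 * Worked example of the cascade performed by callout_hardclock() above
 * (tick values chosen for illustration): on every tick the current
 * level-0 bucket is flushed onto the worklist.  Whenever the low 8 bits
 * of hardclock_ticks roll over to zero (every 256 ticks) one level-1
 * bucket is moved down as well; every 65536 ticks a level-2 bucket
 * follows, and every 16777216 ticks a level-3 bucket.  The callout 300
 * ticks in the future from the BUCKET() example therefore waits in a
 * level-1 bucket until hardclock_ticks crosses the next multiple of 256,
 * at which point softclock() re-sorts it into a level-0 bucket and
 * eventually invokes its handler.  Note that softclock() drops
 * callout_slock around the handler invocation, so handlers run without
 * the wheel lock held.
 */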
name : "?"; 423 #ifdef _LP64 424 #define POINTER_WIDTH "%16lx" 425 #else 426 #define POINTER_WIDTH "%8lx" 427 #endif 428 db_printf("%9d %2d/%-4d " POINTER_WIDTH " %s\n", 429 c->c_time - hardclock_ticks, 430 (int)((bucket - timeout_wheel) / WHEELSIZE), 431 (int)(bucket - timeout_wheel), (u_long) c->c_arg, name); 432 } 433 } 434 435 void 436 db_show_callout(db_expr_t addr, int haddr, db_expr_t count, char *modif) 437 { 438 int b; 439 440 db_printf("hardclock_ticks now: %d\n", hardclock_ticks); 441 #ifdef _LP64 442 db_printf(" ticks wheel arg func\n"); 443 #else 444 db_printf(" ticks wheel arg func\n"); 445 #endif 446 447 /* 448 * Don't lock the callwheel; all the other CPUs are paused 449 * anyhow, and we might be called in a circumstance where 450 * some other CPU was paused while holding the lock. 451 */ 452 453 db_show_callout_bucket(&timeout_todo); 454 for (b = 0; b < BUCKETS; b++) 455 db_show_callout_bucket(&timeout_wheel[b]); 456 } 457 #endif /* DDB */ 458