/*	$NetBSD: kern_time.c,v 1.54 2000/09/19 23:26:25 bjh21 Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include "fs_nfs.h"
#include "opt_nfs.h"
#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#if defined(NFS) || defined(NFSSERVER)
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs_var.h>
#endif

#include <machine/cpu.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
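
/*
 * The timeval arithmetic throughout this file uses the timeradd() and
 * timersub() macros from <sys/time.h>, which carry and borrow between
 * the second and microsecond fields.  For example, subtracting
 * { 2, 700000 } from { 5, 200000 } with timersub() yields { 2, 500000 }.
 */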

/* This function is used by clock_settime and settimeofday */
int
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	struct cpu_info *ci;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	timersub(tv, &time, &delta);
	if ((delta.tv_sec < 0 || delta.tv_usec < 0) && securelevel > 1) {
		splx(s);
		return (EPERM);
	}
#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {
		splx(s);
		return (EPERM);
	}
#endif
	time = *tv;
	(void) spllowersoftclock();
	timeradd(&boottime, &delta, &boottime);
	/*
	 * XXXSMP
	 * This is wrong.  We should traverse a list of all
	 * CPUs and add the delta to the runtime of those
	 * CPUs which have a process on them.
	 */
	ci = curcpu();
	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
	    &ci->ci_schedstate.spc_runtime);
#if (defined(NFS) && !defined(NFS_V2_ONLY)) || defined(NFSSERVER)
	nqnfs_lease_updatetime(delta.tv_sec);
#endif
	splx(s);
	resettodr();
	return (0);
}

/* ARGSUSED */
int
sys_clock_gettime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timeval atv;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	microtime(&atv);
	TIMEVAL_TO_TIMESPEC(&atv, &ats);

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys_clock_settime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	TIMESPEC_TO_TIMEVAL(&atv, &ats);
	if ((error = settime(&atv)))
		return (error);

	return 0;
}

int
sys_clock_getres(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;

		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}

	return error;
}

/* ARGSUSED */
int
sys_nanosleep(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt;
	struct timespec rmt;
	struct timeval atv, utv;
	int error, s, timo;

	error = copyin((caddr_t)SCARG(uap, rqtp), (caddr_t)&rqt,
	    sizeof(struct timespec));
	if (error)
		return (error);

	TIMESPEC_TO_TIMEVAL(&atv, &rqt);
	if (itimerfix(&atv))
		return (EINVAL);

	s = splclock();
	timeradd(&atv, &time, &atv);
	timo = hzto(&atv);
	/*
	 * Avoid inadvertently sleeping forever.
	 */
	if (timo == 0)
		timo = 1;
	splx(s);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (SCARG(uap, rmtp)) {
		int error;

		s = splclock();
		utv = time;
		splx(s);

		timersub(&atv, &utv, &utv);
		if (utv.tv_sec < 0)
			timerclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &rmt);
		error = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap, rmtp),
		    sizeof(rmt));
		if (error)
			return (error);
	}

	return error;
}
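
/*
 * Note that the sleep above is quantized to the clock tick: itimerfix()
 * rounds a nonzero request shorter than one tick up to a full tick (for
 * example, with hz = 100 a 1 ms request becomes 10 ms), and the
 * timo == 0 check matters because tsleep() treats a zero timeout as
 * "sleep until explicitly woken".
 */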

/* ARGSUSED */
int
sys_gettimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys_settimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
	/* Verify all parameters before changing time. */
	if (SCARG(uap, tv) && (error = copyin(SCARG(uap, tv),
	    &atv, sizeof(atv))))
		return (error);
	/* XXX since we don't use tz, probably no point in doing copyin. */
	if (SCARG(uap, tzp) && (error = copyin(SCARG(uap, tzp),
	    &atz, sizeof(atz))))
		return (error);
	if (SCARG(uap, tv))
		if ((error = settime(&atv)))
			return (error);
	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (SCARG(uap, tzp))
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", p->p_pid);
	return (0);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */

/* ARGSUSED */
int
sys_adjtime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	long ndelta, ntickdelta, odelta;
	int s, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	error = copyin(SCARG(uap, delta), &atv, sizeof(struct timeval));
	if (error)
		return (error);
	if (SCARG(uap, olddelta) != NULL &&
	    uvm_useracc((caddr_t)SCARG(uap, olddelta), sizeof(struct timeval),
	    B_WRITE) == FALSE)
		return (EFAULT);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;
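
	/*
	 * For example, with a per-tick delta of 5 us, a requested
	 * correction of +1003 us is rounded down to 1000 us and then
	 * applied by hardclock() at 5 us per tick over 200 ticks.
	 */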

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (SCARG(uap, olddelta)) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout(&atv, SCARG(uap, olddelta),
		    sizeof(struct timeval));
	}
	return (0);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
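
/*
 * For example, with an .it_interval of 10 ms, a callout that fires
 * 25 ms late causes realitexpire() to advance .it_value by three
 * intervals before rescheduling: the late expirations are compressed
 * into a single SIGALRM, and later expirations stay on the original
 * 10 ms schedule instead of drifting by the latency.
 */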

/* ARGSUSED */
int
sys_getitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct itimerval aitv;
	int s;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in the .it_value
		 * part of the real time timer.  If the time for the real
		 * time timer has passed, return 0; else return the
		 * difference between the current time and the time the
		 * timer is set to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &time, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &time, &aitv.it_value);
		}
	} else
		aitv = p->p_stats->p_timer[which];
	splx(s);
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

/* ARGSUSED */
int
sys_setitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	int s, error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin(itvp, &aitv, sizeof(struct itimerval))))
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys_getitimer(p, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		callout_stop(&p->p_realit_ch);
		if (timerisset(&aitv.it_value)) {
			/*
			 * Don't need to check hzto()'s return value here;
			 * callout_reset() does it for us.
			 */
			timeradd(&aitv.it_value, &time, &aitv.it_value);
			callout_reset(&p->p_realit_ch, hzto(&aitv.it_value),
			    realitexpire, p);
		}
		p->p_realtimer = aitv;
	} else
		p->p_stats->p_timer[which] = aitv;
	splx(s);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(arg)
	void *arg;
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		s = splclock();
		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			/*
			 * Don't need to check hzto()'s return value here;
			 * callout_reset() does it for us.
			 */
			callout_reset(&p->p_realit_ch,
			    hzto(&p->p_realtimer.it_value), realitexpire, p);
			splx(s);
			return;
		}
		splx(s);
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}
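
/*
 * For example, with hz = 100 (tick = 10000 us), itimerfix() rounds a
 * 1000 us value up to 10000 us, while negative times, more than
 * 100000000 seconds, or a tv_usec of 1000000 or more are rejected
 * with EINVAL.
 */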

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(itp, usec)
	struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(lasttime, mininterval)
	struct timeval *lasttime;
	const struct timeval *mininterval;
{
	struct timeval tv, delta;
	int s, rv = 0;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(lasttime, curpps, maxpps)
	struct timeval *lasttime;
	int *curpps;
	int maxpps;	/* maximum pps allowed */
{
	struct timeval tv, delta;
	int s, rv;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for statistics as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not clear that the assumption holds, as it depends on the
	 * *caller's* behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
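
/*
 * Illustrative usage sketch (the names below are hypothetical, not part
 * of this file): a driver might rate-limit an error message to once
 * every five seconds, or log at most ten drop events per second.
 *
 *	static struct timeval drv_lasterr;
 *	static const struct timeval drv_errinterval = { 5, 0 };
 *
 *	if (ratecheck(&drv_lasterr, &drv_errinterval))
 *		log(LOG_ERR, "example device: transfer error\n");
 *
 *	static struct timeval drv_lastdrop;
 *	static int drv_droppps;
 *
 *	if (ppsratecheck(&drv_lastdrop, &drv_droppps, 10))
 *		log(LOG_INFO, "example device: dropping packets\n");
 */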