1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * @(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.11 2003/11/20 06:05:30 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>

/* Kernel's notion of the local timezone, read/written by {get,set}timeofday(). */
struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	nanosleep1 (struct timespec *rqt,
		    struct timespec *rmt);
static int	settime (struct timeval *);
static void	timevalfix (struct timeval *);
static void	no_lease_updatetime (int);

/*
 * Default no-op implementation for the lease_updatetime hook below;
 * a subsystem that cares about clock steps can install its own handler
 * by overwriting the function pointer.
 */
static void
no_lease_updatetime(deltat)
	int deltat;
{
}

void (*lease_updatetime) (int) = no_lease_updatetime;

/*
 * Step the system real time clock to *tv.
 *
 * When securelevel > 1 the step is clamped to at most one second in
 * either direction, and forward steps are limited to one per second
 * (see the comment in the body); *tv may be modified by the clamping.
 * Runs at splclock() around the timecounter update.
 *
 * Returns 0 on success or EPERM if a second forward step is attempted
 * within the same second at high securelevel.
 */
static int
settime(tv)
	struct timeval *tv;
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int s;

	s = splclock();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);	/* delta = requested time - current time */

	/*
	 * If the system is secure, we do not allow the time to be 
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen. The worst a miscreant can do in
	 * this circumstance is "freeze" time. He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second. This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				splx(s);
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timecounter(&ts);
	(void) splsoftclock();
	lease_updatetime(delta.tv_sec);
	splx(s);
	resettodr();	/* push the new time down to the RTC */
	return (0);
}

/*
 * clock_gettime() system call: copy out the current real time.
 * Only CLOCK_REALTIME is supported.
 */
/* ARGSUSED */
int
clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	nanotime(&ats);
	return (copyout(&ats, SCARG(uap, tp), sizeof(ats)));
}

/*
 * clock_settime() system call: set the real time clock.
 * Only CLOCK_REALTIME is supported; requires superuser privilege.
 */
/* ARGSUSED */
int
clock_settime(struct clock_settime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(td)) != 0)
		return (error);
	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);
	if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, &ats);
	if ((error = settime(&atv)))
		return (error);
	return (0);
}

/*
 * clock_getres() system call: report the resolution of CLOCK_REALTIME,
 * derived from the current timecounter's frequency.
 */
int
clock_getres(struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	error = 0;
	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts.tv_nsec = 1000000000 / timecounter->tc_frequency + 1;
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}
	return (error);
}

/* Dummy wait channel for nanosleep1()'s tsleep(); nothing ever wakes it up. */
static int nanowait;

/*
 * Synchronous guts of nanosleep().  Sleeps until the absolute deadline
 * (request converted to the uptime clock) has passed, re-arming the
 * tsleep() when it times out early.  If interrupted, the remaining time
 * is stored in *rmt (when non-NULL) and EINTR is returned.
 */
static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	getnanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = absolute deadline */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);
	for (;;) {
		error = tsleep(&nanowait, PCATCH, "nanslp",
		    tvtohz(&tv));
		getnanouptime(&ts2);
		if (error != EWOULDBLOCK) {
			/* Interrupted (or woken) rather than timed out. */
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		/* Timed out short of the deadline; sleep for the remainder. */
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

static void nanosleep_done(void *arg);
static void nanosleep_copyout(union sysunion *sysun);

/*
 * nanosleep() system call.  Synchronous requests use nanosleep1() above;
 * asynchronous (MSGF_ASYNC) requests arm a callout and return EASYNC,
 * with nanosleep_done()/nanosleep_copyout() completing the message.
 */
/* ARGSUSED */
int
nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	error = copyin(uap->rqtp, &smsleep->rqt, sizeof(smsleep->rqt));
	if (error)
		return (error);
	/*
	 * YYY clean this up to always use the callout, note that an abort
	 * implementation should record the residual in the async case.
	 */
	if (uap->sysmsg.lmsg.ms_flags & MSGF_ASYNC) {
		quad_t ticks;

		ticks = (quad_t)smsleep->rqt.tv_nsec * hz / 1000000000LL;
		if (smsleep->rqt.tv_sec)
			ticks += (quad_t)smsleep->rqt.tv_sec * hz;
		if (ticks <= 0) {
			if (ticks == 0)
				error = 0;
			else
				error = EINVAL;	/* negative request */
		} else {
			uap->sysmsg.copyout = nanosleep_copyout;
			callout_init(&smsleep->timer);
			callout_reset(&smsleep->timer, ticks, nanosleep_done, uap);
			error = EASYNC;
		}
	} else {
		/*
		 * Old synchronous sleep code, copyout the residual if
		 * nanosleep was interrupted.
		 */
		error = nanosleep1(&smsleep->rqt, &smsleep->rmt);
		if (error && SCARG(uap, rmtp))
			error = copyout(&smsleep->rmt, SCARG(uap, rmtp), sizeof(smsleep->rmt));
	}
	return (error);
}

/*
 * Asynch completion for the nanosleep() syscall.  This function may be
 * called from any context and cannot legally access the originating
 * thread, proc, or its user space.
 *
 * YYY change the callout interface API so we can simply assign the replymsg
 * function to it directly.
 */
static void
nanosleep_done(void *arg)
{
	struct nanosleep_args *uap = arg;

	lwkt_replymsg(&uap->sysmsg.lmsg, 0);
}

/*
 * Asynch return for the nanosleep() syscall, called in the context of the
 * originating thread when it pulls the message off the reply port.  This
 * function is responsible for any copyouts to userland.  Kernel threads
 * which do their own internal system calls will not usually call the return
 * function.
 */
static void
nanosleep_copyout(union sysunion *sysun)
{
	struct nanosleep_args *uap = &sysun->nanosleep;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	/* Only copy the residual out when the sleep did not complete cleanly. */
	if (sysun->lmsg.ms_error && uap->rmtp) {
		sysun->lmsg.ms_error =
		    copyout(&smsleep->rmt, uap->rmtp, sizeof(smsleep->rmt));
	}
}

/*
 * gettimeofday() system call: copy out the current time and/or the
 * kernel timezone.  Either pointer may be NULL to skip that item.
 */
/* ARGSUSED */
int
gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

/*
 * settimeofday() system call: set the real time clock and/or the kernel
 * timezone.  Requires superuser privilege; all parameters are validated
 * before anything is changed.
 */
/* ARGSUSED */
int
settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(td)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
static long	bigadj = 1000000;	/* use 10x skew above bigadj us. */

/*
 * adjtime() system call: gradually skew the clock by the requested
 * delta.  The correction is applied tickdelta microseconds per tick by
 * hardclock() until timedelta is consumed.  Requires superuser.
 * The previously outstanding correction is returned via olddelta.
 */
/* ARGSUSED */
int
adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	long ndelta, ntickdelta, odelta;
	int s, error;

	if ((error = suser(td)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below),
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timers .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
/* ARGSUSED */
int
getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;
	int s;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	s = splclock(); /* XXX still needed ? */
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else
		aitv = p->p_stats->p_timer[uap->which];
	splx(s);
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}

/*
 * setitimer() system call: set an interval timer, optionally returning
 * the previous value via oitv.  For ITIMER_REAL the new .it_value is
 * converted to an absolute uptime and a realitexpire() callout is armed.
 */
/* ARGSUSED */
int
setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int s, error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/*
	 * Temporarily repoint uap->itv at oitv so getitimer() copies the
	 * old timer value out for us; itvp still holds the new value.
	 */
	if ((uap->itv = uap->oitv) &&
	    (error = getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == 0)
		return (0);	/* caller only wanted the old value */
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock(); /* XXX: still needed ? */
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			untimeout(realitexpire, (caddr_t)p, p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			p->p_ithandle = timeout(realitexpire, (caddr_t)p,
			    tvtohz(&aitv.it_value));
		/* Store .it_value as an absolute uptime (see comment above). */
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else
		p->p_stats->p_timer[uap->which] = aitv;
	splx(s);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(arg)
	void *arg;
{
	struct proc *p;
	struct timeval ctv, ntv;
	int s;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		/* One-shot timer: mark it inactive and stop. */
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		s = splclock(); /* XXX: still needed ? */
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			p->p_ithandle = timeout(realitexpire, (caddr_t)p,
			    tvtohz(&ntv) - 1);
			splx(s);
			return;
		}
		/* Still in the past (we were delayed); advance another period. */
		splx(s);
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 *
 * Returns 1 if the timer is still running, 0 if it expired.
 */
int
itimerdecr(itp, usec)
	struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* borrow a second so the subtraction below stays positive */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (itp->it_interval.tv_sec != 0 || itp->it_interval.tv_usec != 0
	    ? timevalisset(&itp->it_interval) : timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;	/* carry the overshoot forward */
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

/*
 * Normalize a timeval so that tv_usec lies in [0, 1000000).
 * Assumes tv_usec is off by at most one second in either direction.
 */
static void
timevalfix(t1)
	struct timeval *t1;
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;	/* NB: tv_sec repurposed to hold ticks */
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}