/*	$NetBSD: kern_time.c,v 1.60 2001/12/09 16:10:43 manu Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.60 2001/12/09 16:10:43 manu Exp $");

#include "fs_nfs.h"
#include "opt_nfs.h"
#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#if defined(NFS) || defined(NFSSERVER)
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs_var.h>
#endif

#include <machine/cpu.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	struct cpu_info *ci;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	timersub(tv, &time, &delta);
	if ((delta.tv_sec < 0 || delta.tv_usec < 0) && securelevel > 1) {
		splx(s);
		return (EPERM);
	}
#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) {
		splx(s);
		return (EPERM);
	}
#endif
	time = *tv;
	(void) spllowersoftclock();
	timeradd(&boottime, &delta, &boottime);
	/*
	 * XXXSMP
	 * This is wrong.  We should traverse a list of all
	 * CPUs and add the delta to the runtime of those
	 * CPUs which have a process on them.
	 */
	ci = curcpu();
	timeradd(&ci->ci_schedstate.spc_runtime, &delta,
	    &ci->ci_schedstate.spc_runtime);
#	if (defined(NFS) && !defined (NFS_V2_ONLY)) || defined(NFSSERVER)
	nqnfs_lease_updatetime(delta.tv_sec);
#	endif
	splx(s);
	resettodr();
	return (0);
}

/* ARGSUSED */
int
sys_clock_gettime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timeval atv;
	struct timespec ats;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	microtime(&atv);
	TIMEVAL_TO_TIMESPEC(&atv, &ats);

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys_clock_settime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	return (clock_settime1(SCARG(uap, clock_id), SCARG(uap, tp)));
}


int
clock_settime1(clock_id, tp)
	clockid_t clock_id;
	const struct timespec *tp;
{
	struct timespec ats;
	struct timeval atv;
	int error;

	if ((error = copyin(tp, &ats, sizeof(ats))) != 0)
		return (error);

	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, &ats);
	if ((error = settime(&atv)) != 0)
		return (error);

	return 0;
}

int
sys_clock_getres(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);

	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;

		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}

	return error;
}

/* ARGSUSED */
int
sys_nanosleep(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt;
	struct timespec rmt;
	struct timeval atv, utv;
	int error, s, timo;

	error = copyin((caddr_t)SCARG(uap, rqtp), (caddr_t)&rqt,
	    sizeof(struct timespec));
	if (error)
		return (error);

	TIMESPEC_TO_TIMEVAL(&atv, &rqt)
	if (itimerfix(&atv) || atv.tv_sec > 1000000000)
		return (EINVAL);

	s = splclock();
	timeradd(&atv, &time, &atv);
	timo = hzto(&atv);
	/*
	 * Avoid inadvertently sleeping forever.
	 */
	if (timo == 0)
		timo = 1;
	splx(s);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (SCARG(uap, rmtp)) {
		int error;

		s = splclock();
		utv = time;
		splx(s);

		timersub(&atv, &utv, &utv);
		if (utv.tv_sec < 0)
			timerclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &rmt);
		error = copyout((caddr_t)&rmt, (caddr_t)SCARG(uap, rmtp),
		    sizeof(rmt));
		if (error)
			return (error);
	}

	return error;
}

/* ARGSUSED */
int
sys_gettimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys_settimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	return settimeofday1(SCARG(uap, tv), SCARG(uap, tzp), p);
}

int
settimeofday1(utv, utzp, p)
	const struct timeval *utv;
	const struct timezone *utzp;
	struct proc *p;
{
	struct timeval atv;
	struct timezone atz;
	struct timeval *tv = NULL;
	struct timezone *tzp = NULL;
	int error;

	/* Verify all parameters before changing time. */
	if (utv) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return (error);
		tv = &atv;
	}
	/* XXX since we don't use tz, probably no point in doing copyin. */
	if (utzp) {
		if ((error = copyin(utzp, &atz, sizeof(atz))) != 0)
			return (error);
		tzp = &atz;
	}

	if (tv)
		if ((error = settime(tv)) != 0)
			return (error);
	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (tzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", p->p_pid);
	return (0);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */

/* ARGSUSED */
int
sys_adjtime(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	return adjtime1(SCARG(uap, delta), SCARG(uap, olddelta), p);
}

int
adjtime1(delta, olddelta, p)
	const struct timeval *delta;
	struct timeval *olddelta;
	struct proc *p;
{
	struct timeval atv;
	struct timeval *oatv = NULL;
	long ndelta, ntickdelta, odelta;
	int error;
	int s;

	error = copyin(delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	if (olddelta != NULL) {
		if (uvm_useracc((caddr_t)olddelta,
		    sizeof(struct timeval), B_WRITE) == FALSE)
			return (EFAULT);
		oatv = olddelta;
	}

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (olddelta) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout(&atv, olddelta, sizeof(struct timeval));
	}
	return (0);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value from
 * the real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct itimerval aitv;
	int s;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &time, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &time,
				    &aitv.it_value);
		}
	} else
		aitv = p->p_stats->p_timer[which];
	splx(s);
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

/* ARGSUSED */
int
sys_setitimer(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	int s, error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys_getitimer(p, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (which == ITIMER_REAL) {
		callout_stop(&p->p_realit_ch);
		if (timerisset(&aitv.it_value)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			timeradd(&aitv.it_value, &time, &aitv.it_value);
			callout_reset(&p->p_realit_ch, hzto(&aitv.it_value),
			    realitexpire, p);
		}
		p->p_realtimer = aitv;
	} else
		p->p_stats->p_timer[which] = aitv;
	splx(s);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(arg)
	void *arg;
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		s = splclock();
		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			/*
			 * Don't need to check hzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&p->p_realit_ch,
			    hzto(&p->p_realtimer.it_value), realitexpire, p);
			splx(s);
			return;
		}
		splx(s);
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(itp, usec)
	struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(lasttime, mininterval)
	struct timeval *lasttime;
	const struct timeval *mininterval;
{
	struct timeval tv, delta;
	int s, rv = 0;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(lasttime, curpps, maxpps)
	struct timeval *lasttime;
	int *curpps;
	int maxpps;	/* maximum pps allowed */
{
	struct timeval tv, delta;
	int s, rv;

	s = splclock();
	tv = mono_time;
	splx(s);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We do increment *curpps even in the *curpps < maxpps case, as
	 * some may try to use *curpps for stat purposes as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * Not sure if the assumption holds, as it depends on the *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
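
/*
 * Illustrative sketch of how a kernel subsystem might call the two
 * rate-limiting helpers above, in the spirit of ratecheck(9).  The
 * "xyz" names, the five-second interval, and the 100 pps limit are
 * hypothetical examples, not part of this file, and the block is
 * never compiled.
 */
#if 0
static struct timeval xyz_lasterr;			/* time of last complaint */
static const struct timeval xyz_errintvl = { 5, 0 };	/* at most one per 5 s */

static struct timeval xyz_ppstime;			/* epoch of current second */
static int xyz_pps;					/* events seen this second */

void
xyz_example(void)
{

	/* Log the error message at most once every five seconds. */
	if (ratecheck(&xyz_lasterr, &xyz_errintvl))
		log(LOG_ERR, "xyz0: device timeout\n");

	/* Allow at most 100 events per second; drop the rest. */
	if (!ppsratecheck(&xyz_ppstime, &xyz_pps, 100))
		return;

	/* ... handle the event ... */
}
#endif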