/*	$OpenBSD: kern_time.c,v 1.75 2012/05/24 07:17:42 guenther Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#ifdef __HAVE_TIMECOUNTER
#include <sys/timetc.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <machine/cpu.h>

#ifdef __HAVE_TIMECOUNTER
struct timeval adjtimedelta;		/* unapplied time correction */
#else
int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
int64_t	ntp_tick_permanent;
int64_t	ntp_tick_acc;
#endif

void	itimerround(struct timeval *);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
#ifdef __HAVE_TIMECOUNTER
int
settime(struct timespec *ts)
{
	struct timespec now;

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock.  Cancel adjtime and then set new time.
	 */
	adjtimedelta.tv_usec = 0;
	adjtimedelta.tv_sec = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against INT_MAX since on 64-bit
	 *	platforms, sizeof(int) != sizeof(long) and
	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
	 */
	if (ts->tv_sec > INT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %ld\n",
		    ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %ld seconds\n",
		    now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
#else
int
settime(struct timespec *ts)
{
	struct timeval delta, tvv, *tv;
	int s;

	/* XXX - Ugh. */
	tv = &tvv;
	tvv.tv_sec = ts->tv_sec;
	tvv.tv_usec = ts->tv_nsec / 1000;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against INT_MAX since on 64-bit
	 *	platforms, sizeof(int) != sizeof(long) and
	 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
	 */
	if (tv->tv_sec > INT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %ld\n",
		    tv->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	if (securelevel > 1 && timercmp(tv, &time, <)) {
		printf("denied attempt to set clock back %ld seconds\n",
		    time_second - tv->tv_sec);
		return (EPERM);
	}

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	timersub(tv, &time, &delta);
	time = *tv;
	timeradd(&boottime, &delta, &boottime);

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock.
	 */
	tickdelta = 0;
	timedelta = 0;

	splx(s);
	resettodr();

	return (0);
}
#endif
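
/*
 * Illustrative sketch (not compiled into the kernel): how the checks in
 * settime() above surface to a userland caller of clock_settime(2).  The
 * helper name is hypothetical, but the EPERM behavior follows from the
 * code above: non-superuser callers, far-forward settings near the wrap
 * cutoff, and (at securelevel > 1) backward settings are all refused.
 */
#if 0
#include <time.h>
#include <err.h>

int
set_clock_example(time_t sec)
{
	struct timespec ts;

	ts.tv_sec = sec;
	ts.tv_nsec = 0;
	if (clock_settime(CLOCK_REALTIME, &ts) == -1) {
		/* EPERM if refused by one of the settime() checks above */
		warn("clock_settime");
		return (-1);
	}
	return (0);
}
#endif
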
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct timeval tv;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(tp);
		break;
	case CLOCK_PROF:
		microuptime(&tv);
		timersub(&tv, &curcpu()->ci_schedstate.spc_runtime, &tv);
		timeradd(&tv, &p->p_rtime, &tv);
		tp->tv_sec = tv.tv_sec;
		tp->tv_nsec = tv.tv_usec * 1000;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

/* ARGSUSED */
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}

/* ARGSUSED */
int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p, 0)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	int error = 0;

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &ts);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}

/* ARGSUSED */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	struct timeval tv;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	if (rmtp)
		getnanouptime(&sts);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}
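
/*
 * Illustrative sketch (not compiled into the kernel): the rmtp remainder
 * computed above lets userland resume an interrupted sleep.  This
 * hypothetical helper loops until the full interval has elapsed or a
 * real error occurs.
 */
#if 0
#include <time.h>
#include <errno.h>

int
sleep_full_interval(struct timespec req)
{
	struct timespec rem;

	while (nanosleep(&req, &rem) == -1) {
		if (errno != EINTR)
			return (-1);
		req = rem;	/* continue with the unslept remainder */
	}
	return (0);
}
#endif
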
/* ARGSUSED */
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

/* ARGSUSED */
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p, 0)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

/* ARGSUSED */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);
#ifndef __HAVE_TIMECOUNTER
	int s;

	if (oldfreq) {
		f = ntp_tick_permanent * hz;
		if ((error = copyout(&f, oldfreq, sizeof(int64_t))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(int64_t))))
			return (error);
		s = splclock();
		ntp_tick_permanent = f / hz;
		splx(s);
	}
#else
	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
#endif
	return (0);
}

/* ARGSUSED */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
#ifdef __HAVE_TIMECOUNTER
	int error;

	if (olddelta)
		if ((error = copyout(&adjtimedelta, olddelta,
		    sizeof(struct timeval))))
			return (error);

	if (delta) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin(delta, &adjtimedelta,
		    sizeof(struct timeval))))
			return (error);
	}

	/* Normalize the correction. */
	while (adjtimedelta.tv_usec >= 1000000) {
		adjtimedelta.tv_usec -= 1000000;
		adjtimedelta.tv_sec += 1;
	}
	while (adjtimedelta.tv_usec < 0) {
		adjtimedelta.tv_usec += 1000000;
		adjtimedelta.tv_sec -= 1;
	}
	return (0);
#else
	struct timeval atv;
	long ndelta, ntickdelta, odelta;
	int s, error;

	if (!delta) {
		s = splclock();
		odelta = timedelta;
		splx(s);
		goto out;
	}
	if ((error = suser(p, 0)))
		return (error);
	if ((error = copyin(delta, &atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	if (atv.tv_sec > LONG_MAX / 1000000L)
		ndelta = LONG_MAX;
	else if (atv.tv_sec < LONG_MIN / 1000000L)
		ndelta = LONG_MIN;
	else {
		ndelta = atv.tv_sec * 1000000L;
		odelta = ndelta;
		ndelta += atv.tv_usec;
		if (atv.tv_usec > 0 && ndelta <= odelta)
			ndelta = LONG_MAX;
		else if (atv.tv_usec < 0 && ndelta >= odelta)
			ndelta = LONG_MIN;
	}

	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

out:
	if (olddelta) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}
	return (0);
#endif
}
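
/*
 * Illustrative sketch (not compiled into the kernel): gradually slewing
 * the clock from userland with adjtime(2), as handled above.  The
 * correction value is hypothetical; a positive delta is absorbed
 * gradually rather than stepping the time.
 */
#if 0
#include <sys/time.h>
#include <err.h>

void
slew_clock_example(void)
{
	struct timeval delta, olddelta;

	delta.tv_sec = 0;
	delta.tv_usec = 500000;	/* make up half a second gradually */
	if (adjtime(&delta, &olddelta) == -1)
		err(1, "adjtime");
	/* olddelta now holds any previously unapplied correction */
}
#endif
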
/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int s;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	s = splclock();
	aitv = p->p_p->ps_timer[which];

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}
	splx(s);
	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}
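
/*
 * Illustrative sketch (not compiled into the kernel): a hypothetical
 * userland program using a periodic ITIMER_REAL timer.  Because the
 * kernel stores it_value as an absolute uptime and reloads it in
 * realitexpire() below, the SIGALRMs delivered here do not drift even
 * if individual expirations are processed late.
 */
#if 0
#include <sys/time.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void
on_alarm(int signo)
{
	ticks++;
}

void
periodic_example(void)
{
	struct itimerval itv;

	signal(SIGALRM, on_alarm);
	itv.it_value.tv_sec = 1;	/* first expiry in 1s */
	itv.it_value.tv_usec = 0;
	itv.it_interval = itv.it_value;	/* then every 1s, drift-free */
	setitimer(ITIMER_REAL, &itv, NULL);
	for (;;)
		pause();
}
#endif
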
/* ARGSUSED */
int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		int s;

		itimerround(&aitv.it_interval);
		s = splclock();
		pr->ps_timer[which] = aitv;
		if (which == ITIMER_VIRTUAL)
			timeout_del(&pr->ps_virt_to);
		if (which == ITIMER_PROF)
			timeout_del(&pr->ps_prof_to);
		splx(s);
	}

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	psignal(pr->ps_mainproc, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

/*
 * Check that a timespec value is legit
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_sec > 100000000 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
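
/*
 * Illustrative sketch (not compiled): typical in-kernel use of
 * ratecheck() above, limiting a hypothetical diagnostic to one printf
 * every 10 seconds no matter how often the event fires.
 */
#if 0
void
example_event(void)
{
	static struct timeval lasttime;
	static const struct timeval interval = { 10, 0 };

	if (ratecheck(&lasttime, &interval))
		printf("example: event occurred (rate-limited message)\n");
}
#endif
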
/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as some
	 * callers may want to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume there aren't too many calls to this function.
	 * Not sure if the assumption holds, as it depends on the *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
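
/*
 * Illustrative sketch (not compiled): typical in-kernel use of
 * ppsratecheck() above.  The event name and the limit of 5 log
 * messages per second are hypothetical.
 */
#if 0
void
example_packet_event(void)
{
	static struct timeval lasttime;
	static int curpps;

	if (ppsratecheck(&lasttime, &curpps, 5))
		printf("example: dropping packet (max 5 logs/sec)\n");
}
#endif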