/*	$OpenBSD: kern_time.c,v 1.99 2017/01/24 00:58:55 mpi Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>


int64_t adjtimedelta;		/* unapplied time correction (microseconds) */

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(struct timespec *ts)
{
	struct timespec now;

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock.  Cancel adjtime and then set new time.
	 */
	adjtimedelta = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}

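/*
 * Fetch the current value of the named clock into *tp.  Besides the
 * clock ids handled explicitly below, ids built by
 * pthread_getcpuclockid() encode a thread id and are decoded in the
 * default case.
 */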
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p, 0)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

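/*
 * All of the supported clocks advance at hz ticks per second, so the
 * resolution reported for each of them is 1000000000 / hz nanoseconds.
 */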
int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			ts.tv_sec = 0;
			ts.tv_nsec = 1000000000 / hz;
		} else
			return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &ts);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	struct timeval tv;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	TIMESPEC_TO_TIMEVAL(&tv, &rqt);
	if (itimerfix(&tv))
		return (EINVAL);

	if (rmtp)
		getnanouptime(&sts);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tvtohz(&tv)));
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		memset(&rmt, 0, sizeof(rmt));
		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

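/*
 * Set the time of day, and optionally the kernel's timezone variable.
 * Both pointers may be NULL; all arguments are copied in and verified
 * before any state is changed.
 */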
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p, 0)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p, 0)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p, 0)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}


struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from its .it_interval.  Rather, we compute the next time in absolute
 * time the timer should go off.
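 *
 * For example, if .it_interval is one second and the callout runs a
 * few ticks late, the next .it_value is the old .it_value plus one
 * second, not the current time plus one second, so the period of the
 * delivered SIGALRMs does not drift.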
483 */ 484 int 485 sys_getitimer(struct proc *p, void *v, register_t *retval) 486 { 487 struct sys_getitimer_args /* { 488 syscallarg(int) which; 489 syscallarg(struct itimerval *) itv; 490 } */ *uap = v; 491 struct itimerval aitv; 492 int which; 493 494 which = SCARG(uap, which); 495 496 if (which < ITIMER_REAL || which > ITIMER_PROF) 497 return (EINVAL); 498 memset(&aitv, 0, sizeof(aitv)); 499 mtx_enter(&itimer_mtx); 500 aitv.it_interval.tv_sec = p->p_p->ps_timer[which].it_interval.tv_sec; 501 aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec; 502 aitv.it_value.tv_sec = p->p_p->ps_timer[which].it_value.tv_sec; 503 aitv.it_value.tv_usec = p->p_p->ps_timer[which].it_value.tv_usec; 504 mtx_leave(&itimer_mtx); 505 506 if (which == ITIMER_REAL) { 507 struct timeval now; 508 509 getmicrouptime(&now); 510 /* 511 * Convert from absolute to relative time in .it_value 512 * part of real time timer. If time for real time timer 513 * has passed return 0, else return difference between 514 * current time and time for the timer to go off. 515 */ 516 if (timerisset(&aitv.it_value)) { 517 if (timercmp(&aitv.it_value, &now, <)) 518 timerclear(&aitv.it_value); 519 else 520 timersub(&aitv.it_value, &now, 521 &aitv.it_value); 522 } 523 } 524 525 return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval))); 526 } 527 528 int 529 sys_setitimer(struct proc *p, void *v, register_t *retval) 530 { 531 struct sys_setitimer_args /* { 532 syscallarg(int) which; 533 syscallarg(const struct itimerval *) itv; 534 syscallarg(struct itimerval *) oitv; 535 } */ *uap = v; 536 struct sys_getitimer_args getargs; 537 struct itimerval aitv; 538 const struct itimerval *itvp; 539 struct itimerval *oitv; 540 struct process *pr = p->p_p; 541 int error; 542 int timo; 543 int which; 544 545 which = SCARG(uap, which); 546 oitv = SCARG(uap, oitv); 547 548 if (which < ITIMER_REAL || which > ITIMER_PROF) 549 return (EINVAL); 550 itvp = SCARG(uap, itv); 551 if (itvp && (error = copyin((void *)itvp, (void *)&aitv, 552 sizeof(struct itimerval)))) 553 return (error); 554 if (oitv != NULL) { 555 SCARG(&getargs, which) = which; 556 SCARG(&getargs, itv) = oitv; 557 if ((error = sys_getitimer(p, &getargs, retval))) 558 return (error); 559 } 560 if (itvp == 0) 561 return (0); 562 if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) 563 return (EINVAL); 564 if (which == ITIMER_REAL) { 565 struct timeval ctv; 566 567 timeout_del(&pr->ps_realit_to); 568 getmicrouptime(&ctv); 569 if (timerisset(&aitv.it_value)) { 570 timo = tvtohz(&aitv.it_value); 571 timeout_add(&pr->ps_realit_to, timo); 572 timeradd(&aitv.it_value, &ctv, &aitv.it_value); 573 } 574 pr->ps_timer[ITIMER_REAL] = aitv; 575 } else { 576 itimerround(&aitv.it_interval); 577 mtx_enter(&itimer_mtx); 578 pr->ps_timer[which] = aitv; 579 mtx_leave(&itimer_mtx); 580 } 581 582 return (0); 583 } 584 585 /* 586 * Real interval timer expired: 587 * send process whose timer expired an alarm signal. 588 * If time is not set up to reload, then just return. 589 * Else compute next time timer should go off which is > current time. 590 * This is where delay in processing this timeout causes multiple 591 * SIGALRM calls to be compressed into one. 
592 */ 593 void 594 realitexpire(void *arg) 595 { 596 struct process *pr = arg; 597 struct itimerval *tp = &pr->ps_timer[ITIMER_REAL]; 598 599 prsignal(pr, SIGALRM); 600 if (!timerisset(&tp->it_interval)) { 601 timerclear(&tp->it_value); 602 return; 603 } 604 for (;;) { 605 struct timeval ctv, ntv; 606 int timo; 607 608 timeradd(&tp->it_value, &tp->it_interval, &tp->it_value); 609 getmicrouptime(&ctv); 610 if (timercmp(&tp->it_value, &ctv, >)) { 611 ntv = tp->it_value; 612 timersub(&ntv, &ctv, &ntv); 613 timo = tvtohz(&ntv) - 1; 614 if (timo <= 0) 615 timo = 1; 616 if ((pr->ps_flags & PS_EXITING) == 0) 617 timeout_add(&pr->ps_realit_to, timo); 618 return; 619 } 620 } 621 } 622 623 /* 624 * Check that a timespec value is legit 625 */ 626 int 627 timespecfix(struct timespec *ts) 628 { 629 if (ts->tv_sec < 0 || 630 ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000) 631 return (EINVAL); 632 if (ts->tv_sec > 100000000) 633 ts->tv_sec = 100000000; 634 return (0); 635 } 636 637 /* 638 * Check that a proposed value to load into the .it_value or 639 * .it_interval part of an interval timer is acceptable. 640 */ 641 int 642 itimerfix(struct timeval *tv) 643 { 644 645 if (tv->tv_sec < 0 || tv->tv_sec > 100000000 || 646 tv->tv_usec < 0 || tv->tv_usec >= 1000000) 647 return (EINVAL); 648 649 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick) 650 tv->tv_usec = tick; 651 652 return (0); 653 } 654 655 /* 656 * Nonzero timer interval smaller than the resolution of the 657 * system clock are rounded up. 658 */ 659 void 660 itimerround(struct timeval *tv) 661 { 662 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick) 663 tv->tv_usec = tick; 664 } 665 666 /* 667 * Decrement an interval timer by a specified number 668 * of microseconds, which must be less than a second, 669 * i.e. < 1000000. If the timer expires, then reload 670 * it. In this case, carry over (usec - old value) to 671 * reduce the value reloaded into the timer so that 672 * the timer does not drift. This routine assumes 673 * that it is called in a context where the timers 674 * on which it is operating cannot change in value. 675 */ 676 int 677 itimerdecr(struct itimerval *itp, int usec) 678 { 679 mtx_enter(&itimer_mtx); 680 if (itp->it_value.tv_usec < usec) { 681 if (itp->it_value.tv_sec == 0) { 682 /* expired, and already in next interval */ 683 usec -= itp->it_value.tv_usec; 684 goto expire; 685 } 686 itp->it_value.tv_usec += 1000000; 687 itp->it_value.tv_sec--; 688 } 689 itp->it_value.tv_usec -= usec; 690 usec = 0; 691 if (timerisset(&itp->it_value)) { 692 mtx_leave(&itimer_mtx); 693 return (1); 694 } 695 /* expired, exactly at end of interval */ 696 expire: 697 if (timerisset(&itp->it_interval)) { 698 itp->it_value = itp->it_interval; 699 itp->it_value.tv_usec -= usec; 700 if (itp->it_value.tv_usec < 0) { 701 itp->it_value.tv_usec += 1000000; 702 itp->it_value.tv_sec--; 703 } 704 } else 705 itp->it_value.tv_usec = 0; /* sec is already 0 */ 706 mtx_leave(&itimer_mtx); 707 return (0); 708 } 709 710 /* 711 * ratecheck(): simple time-based rate-limit checking. see ratecheck(9) 712 * for usage and rationale. 713 */ 714 int 715 ratecheck(struct timeval *lasttime, const struct timeval *mininterval) 716 { 717 struct timeval tv, delta; 718 int rv = 0; 719 720 getmicrouptime(&tv); 721 722 timersub(&tv, lasttime, &delta); 723 724 /* 725 * check for 0,0 is so that the message will be seen at least once, 726 * even if interval is huge. 
727 */ 728 if (timercmp(&delta, mininterval, >=) || 729 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) { 730 *lasttime = tv; 731 rv = 1; 732 } 733 734 return (rv); 735 } 736 737 /* 738 * ppsratecheck(): packets (or events) per second limitation. 739 */ 740 int 741 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) 742 { 743 struct timeval tv, delta; 744 int rv; 745 746 microuptime(&tv); 747 748 timersub(&tv, lasttime, &delta); 749 750 /* 751 * check for 0,0 is so that the message will be seen at least once. 752 * if more than one second have passed since the last update of 753 * lasttime, reset the counter. 754 * 755 * we do increment *curpps even in *curpps < maxpps case, as some may 756 * try to use *curpps for stat purposes as well. 757 */ 758 if (maxpps == 0) 759 rv = 0; 760 else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) || 761 delta.tv_sec >= 1) { 762 *lasttime = tv; 763 *curpps = 0; 764 rv = 1; 765 } else if (maxpps < 0) 766 rv = 1; 767 else if (*curpps < maxpps) 768 rv = 1; 769 else 770 rv = 0; 771 772 #if 1 /*DIAGNOSTIC?*/ 773 /* be careful about wrap-around */ 774 if (*curpps + 1 > *curpps) 775 *curpps = *curpps + 1; 776 #else 777 /* 778 * assume that there's not too many calls to this function. 779 * not sure if the assumption holds, as it depends on *caller's* 780 * behavior, not the behavior of this function. 781 * IMHO it is wrong to make assumption on the caller's behavior, 782 * so the above #if is #if 1, not #ifdef DIAGNOSTIC. 783 */ 784 *curpps = *curpps + 1; 785 #endif 786 787 return (rv); 788 } 789 790 791 #define RESETTODR_PERIOD 1800 792 793 void periodic_resettodr(void *); 794 void perform_resettodr(void *); 795 796 struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL); 797 struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL); 798 799 void 800 periodic_resettodr(void *arg __unused) 801 { 802 task_add(systq, &resettodr_task); 803 } 804 805 void 806 perform_resettodr(void *arg __unused) 807 { 808 resettodr(); 809 timeout_add_sec(&resettodr_to, RESETTODR_PERIOD); 810 } 811 812 void 813 start_periodic_resettodr(void) 814 { 815 timeout_add_sec(&resettodr_to, RESETTODR_PERIOD); 816 } 817 818 void 819 stop_periodic_resettodr(void) 820 { 821 timeout_del(&resettodr_to); 822 task_del(systq, &resettodr_task); 823 } 824