/*	$OpenBSD: kern_time.c,v 1.119 2019/06/03 01:27:30 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}
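/*
 * Worked example of the cutoff above (illustrative): UINT_MAX is
 * 4294967295, which as a Unix timestamp falls in February 2106, and
 * one year is 365*24*60*60 = 31536000 seconds, so the cutoff is
 *
 *	4294967295 - 31536000 = 4263431295
 *
 * i.e. roughly February 2105, one year before the 32-bit wrap.
 */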
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintimesub(&bt, &naptime, &bt);
		BINTIME_TO_TIMESPEC(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}
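/*
 * Example (userland sketch, not part of this file): exercising the
 * clock_gettime() paths above, including a thread CPU-time clock from
 * pthread_getcpuclockid(3), which is the __CLOCK_TYPE() case in the
 * default branch.  The helper name is made up for illustration.
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	void
 *	show_clocks(void)
 *	{
 *		struct timespec ts;
 *		clockid_t cid;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &ts);
 *		if (pthread_getcpuclockid(pthread_self(), &cid) == 0)
 *			clock_gettime(cid, &ts);
 *	}
 */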
int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else {
				ts.tv_sec = 0;
				ts.tv_nsec = 1000000000 / hz;
			}
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
		    MAX(1, tstohz(&request)));
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}
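/*
 * Example (userland sketch, not part of this file): sleeping for a
 * full duration across signal interruptions by resuming from the
 * remainder that the rmtp handling above reports.  The helper name
 * is made up for illustration.
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	void
 *	sleep_fully(time_t secs)
 *	{
 *		struct timespec req = { secs, 0 }, rem;
 *
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;
 *	}
 */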
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&f, NULL);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec >= 0) {
			if (atv.tv_sec > INT64_MAX / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000;
			if (atv.tv_usec > INT64_MAX - adjustment)
				return EINVAL;
			adjustment += atv.tv_usec;
		} else {
			if (atv.tv_sec < INT64_MIN / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000 + atv.tv_usec;
		}

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
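/*
 * Example (userland sketch, not part of this file): skewing the clock
 * forward by one second and reading back any adjustment still in
 * progress.  Requires root, per the suser() check above.  The helper
 * name is made up for illustration.
 *
 *	#include <sys/time.h>
 *	#include <err.h>
 *
 *	void
 *	skew_clock(void)
 *	{
 *		struct timeval delta = { 1, 0 }, olddelta;
 *
 *		if (adjtime(&delta, &olddelta) == -1)
 *			err(1, "adjtime");
 *	}
 */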

struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}
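/*
 * Example (userland sketch, not part of this file): arming a periodic
 * 100ms real-time interval timer.  Each expiry delivers SIGALRM,
 * raised on the kernel side by realitexpire() below.  The helper
 * names are made up for illustration.
 *
 *	#include <sys/time.h>
 *	#include <signal.h>
 *
 *	void
 *	on_alarm(int signo)
 *	{
 *	}
 *
 *	void
 *	arm_timer(void)
 *	{
 *		struct itimerval it = {
 *			.it_interval = { 0, 100000 },
 *			.it_value = { 0, 100000 }
 *		};
 *
 *		signal(SIGALRM, on_alarm);
 *		setitimer(ITIMER_REAL, &it, NULL);
 *	}
 */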
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

/*
 * Check that a timespec value is legit
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
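/*
 * Example (kernel-style sketch): limiting a diagnostic to at most one
 * message every 10 seconds, in the style documented in ratecheck(9).
 *
 *	static struct timeval lasttime;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &interval))
 *		printf("the sky is falling\n");
 */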
/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We do increment *curpps even in the *curpps < maxpps case, as
	 * some may try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain that the assumption holds, as it depends on
	 * the *caller's* behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumptions about the caller's
	 * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}


#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
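/*
 * Example (kernel-style sketch): bounding log output to at most five
 * messages per second with ppsratecheck() above.
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 5))
 *		printf("packet dropped\n");
 */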