/*	$OpenBSD: kern_time.c,v 1.114 2019/03/26 16:43:56 cheloha Exp $	*/
/*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday. */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
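
/*
 * For illustration, the cutoff above works out to
 *
 *	UINT_MAX - 365*24*60*60 = 4294967295 - 31536000 = 4263431295
 *
 * so a request to step the clock past tv_sec 4263431295, roughly one
 * year shy of the 32-bit wrap in 2106, fails with EPERM, and crossing
 * the remaining year via adjtime(2) would take a very long time.
 */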

int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only. */
		return (EINVAL);
	}

	return (0);
}
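
/*
 * Usage sketch (hypothetical userland program, not part of this file)
 * exercising sys_clock_gettime() above via clock_gettime(2):
 *
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
 *			err(1, "clock_gettime");
 *		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 *		return (0);
 *	}
 */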

int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else {
				ts.tv_sec = 0;
				ts.tv_nsec = 1000000000 / hz;
			}
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int nanowait;
	struct sys_nanosleep_args /* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
		    MAX(1, tstohz(&request)));
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}

int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof(atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof(tz));
	return (error);
}
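
/*
 * Usage sketch (hypothetical userland caller): sys_nanosleep() above
 * reports the unslept time through rmtp, so an interrupted sleep can
 * be resumed until the full interval has elapsed:
 *
 *	struct timespec req = { 2, 500000000 };		two and a half seconds
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */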

int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}

int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;	/* initialized: freq and oldfreq may both be NULL */
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&f, NULL);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
		if (!timerisvalid(&atv))
			return (EINVAL);

		if (atv.tv_sec >= 0) {
			if (atv.tv_sec > INT64_MAX / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000;
			if (atv.tv_usec > INT64_MAX - adjustment)
				return EINVAL;
			adjustment += atv.tv_usec;
		} else {
			if (atv.tv_sec < INT64_MIN / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000 + atv.tv_usec;
		}

		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}
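
/*
 * Usage sketch (hypothetical, requires root): slewing the clock back
 * half a second with adjtime(2) instead of stepping it, which settime()
 * refuses at securelevel > 1.  A negative half second is expressed with
 * the usual timeval normalization, tv_usec in [0, 1000000):
 *
 *	struct timeval delta = { -1, 500000 };		-0.5 seconds
 *	struct timeval olddelta;
 *
 *	if (adjtime(&delta, &olddelta) == -1)
 *		err(1, "adjtime");
 */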

struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}
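
/*
 * Usage sketch (hypothetical userland caller): arming ITIMER_REAL for a
 * SIGALRM every second.  Because sys_setitimer() stores it_value as an
 * absolute uptime, the period does not drift even if signal delivery is
 * delayed:
 *
 *	struct itimerval itv;
 *
 *	signal(SIGALRM, handler);		handler is assumed defined
 *	itv.it_value.tv_sec = 1;
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval = itv.it_value;
 *	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */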

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

/*
 * Check that a timespec value is legit.
 */
int
timespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Nonzero timer intervals smaller than the resolution of the
 * system clock are rounded up.
 */
void
itimerround(struct timeval *tv)
{
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
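
/*
 * Worked example for itimerdecr() (illustrative numbers): at hz = 100
 * one tick is usec = 10000.  With it_value = 400us and it_interval =
 * 10000us, the timer expires partway through the tick: the leftover
 * usec - old value = 9600 is carried into the reload, leaving
 * it_value = 10000 - 9600 = 400us, so successive expirations stay
 * anchored to the original phase instead of drifting by a partial
 * tick each period.
 */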

/*
 * ratecheck(): simple time-based rate-limit checking.  See ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once.  If more than one second has passed since the last update
	 * of lasttime, reset the counter.
	 *
	 * We do increment *curpps even in the *curpps < maxpps case, as
	 * some may try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * Not sure if the assumption holds, as it depends on the *caller's*
	 * behavior, not the behavior of this function.  IMHO it is wrong
	 * to make assumptions about the caller's behavior, so the above
	 * #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}

#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
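
/*
 * Usage sketch for ratecheck() (hypothetical driver code, after the
 * pattern in ratecheck(9)): let a diagnostic through at most once every
 * ten seconds.
 *
 *	static struct timeval fifo_lasttime;
 *	static const struct timeval fifo_mininterval = { 10, 0 };
 *
 *	if (ratecheck(&fifo_lasttime, &fifo_mininterval))
 *		printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);
 */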