1 /* $OpenBSD: kern_time.c,v 1.133 2020/07/15 21:20:08 cheloha Exp $ */ 2 /* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/timetc.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/clock_subr.h>

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/*
 * Set the UTC wall clock to *ts.  Shared validation/commit path used
 * by both clock_settime(2) and settimeofday(2).
 *
 * Returns 0 on success or EPERM when the request is rejected by one
 * of the sanity/security checks below.
 */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below.  The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back).  This feature prevents interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <=)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	tc_setrealtimeclock(ts);
	/* Push the new time down to the RTC chip, under the kernel lock. */
	KERNEL_LOCK();
	resettodr();
	KERNEL_UNLOCK();

	return (0);
}

/*
 * Kernel half of clock_gettime(2): read clock clock_id into *tp.
 *
 * Returns 0, EINVAL for an unrecognized clock, or ESRCH when a
 * pthread_getcpuclockid(3)-style thread clock names a thread that
 * does not exist or belongs to another process.
 */
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct proc *q;
	int error = 0;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		nanoruntime(tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		/*
		 * Accumulated process run time plus the portion of the
		 * current time slice consumed so far (uptime minus the
		 * time this CPU started running the thread).
		 */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		/* As above, but for this thread only. */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				*tp = q->p_tu.tu_runtime;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}
	return (error);
}

/*
 * clock_gettime(2) system call: fetch the clock value and copy it out
 * to userland, recording the result for ktrace if enabled.
 */
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrabstimespec(p, &ats);
#endif
	return (error);
}

/*
 * clock_settime(2) system call.  Only CLOCK_REALTIME may be set, and
 * only by root; all other clocks are read-only.
 */
int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		if (!timespecisvalid(&ats))
			return (EINVAL);
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}

/*
 * clock_getres(2) system call: report the resolution of clock_id.
 * Timecounter-backed clocks report the timecounter's precision;
 * CPU-time clocks report one statclock tick.  The result is clamped
 * to at least 1 nanosecond before copyout.
 */
int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct bintime bt;
	struct timespec ts;
	struct proc *q;
	u_int64_t scale;
	int error = 0, realstathz;

	memset(&ts, 0, sizeof(ts));
	realstathz = (stathz == 0) ? hz : stathz;
	clock_id = SCARG(uap, clock_id);

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
		memset(&bt, 0, sizeof(bt));
		/* Convert the timecounter's precision (in counter ticks)
		 * to a bintime fraction; tc_lock guards the timecounter. */
		rw_enter_read(&tc_lock);
		scale = ((1ULL << 63) / tc_getfrequency()) * 2;
		bt.frac = tc_getprecision() * scale;
		rw_exit_read(&tc_lock);
		BINTIME_TO_TIMESPEC(&bt, &ts);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_nsec = 1000000000 / realstathz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			KERNEL_LOCK();
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				error = ESRCH;
			else
				ts.tv_nsec = 1000000000 / realstathz;
			KERNEL_UNLOCK();
		} else
			error = EINVAL;
		break;
	}

	if (error == 0 && SCARG(uap, tp)) {
		ts.tv_nsec = MAX(ts.tv_nsec, 1);
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
	}

	return error;
}

/*
 * nanosleep(2) system call.  Sleeps are performed in chunks of at most
 * MAXTSLP nanoseconds, re-sleeping for the remainder, so that very
 * long requests don't overflow the timeout machinery.  On interruption
 * the unslept remainder is copied out via rmtp if the caller asked
 * for it.
 */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	static int chan;	/* dummy wait channel; nothing ever wakes it */
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec elapsed, remainder, request, start, stop;
	uint64_t nsecs;
	struct timespec *rmtp;
	int copyout_error, error;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &request, sizeof(request));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrreltimespec(p, &request);
#endif

	if (request.tv_sec < 0 || !timespecisvalid(&request))
		return (EINVAL);

	do {
		getnanouptime(&start);
		nsecs = MAX(1, MIN(TIMESPEC_TO_NSEC(&request), MAXTSLP));
		error = tsleep_nsec(&chan, PWAIT | PCATCH, "nanosleep", nsecs);
		getnanouptime(&stop);
		timespecsub(&stop, &start, &elapsed);
		timespecsub(&request, &elapsed, &request);
		if (request.tv_sec < 0)
			timespecclear(&request);
		/* EWOULDBLOCK means the chunk timed out: keep sleeping. */
		if (error != EWOULDBLOCK)
			break;
	} while (timespecisset(&request));

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		memset(&remainder, 0, sizeof(remainder));
		remainder = request;
		copyout_error = copyout(&remainder, rmtp, sizeof(remainder));
		if (copyout_error)
			error = copyout_error;
#ifdef KTRACE
		if (copyout_error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &remainder);
#endif
	}

	return error;
}

/*
 * gettimeofday(2) system call.  The timezone argument is obsolete:
 * a zeroed struct timezone is always returned.
 */
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	static const struct timezone zerotz = { 0, 0 };
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimeval(p, &atv);
#endif
	}
	if (tzp)
		error = copyout(&zerotz, tzp, sizeof(zerotz));
	return (error);
}

/*
 * settimeofday(2) system call.  Root only.  The timezone argument is
 * copied in for validation but otherwise ignored.
 */
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		if (!timerisvalid(&atv))
			return (EINVAL);
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}

	return (0);
}

/* Frequency adjustment limits: +/- 500 ppm, in 64.32 fixed point. */
#define ADJFREQ_MAX (500000000LL << 32)
#define ADJFREQ_MIN (-500000000LL << 32)

/*
 * adjfreq(2) system call: get and/or set the timecounter frequency
 * adjustment.  Reading requires only tc_lock shared; setting requires
 * root and tc_lock exclusive.
 */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error = 0;
	int64_t f, oldf;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);

	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if (f < ADJFREQ_MIN || f > ADJFREQ_MAX)
			return (EINVAL);
	}

	rw_enter(&tc_lock, (freq == NULL) ? RW_READ : RW_WRITE);
	if (oldfreq) {
		tc_adjfreq(&oldf, NULL);
		if ((error = copyout(&oldf, oldfreq, sizeof(oldf))))
			goto out;
	}
	if (freq)
		tc_adjfreq(NULL, &f);
out:
	rw_exit(&tc_lock);
	return (error);
}

/*
 * adjtime(2) system call: get and/or set the outstanding clock skew
 * (microseconds).  Setting requires root and is gated by pledge(2);
 * the delta is converted to a 64-bit microsecond count with explicit
 * overflow checks before being handed to the timecounter layer.
 */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	struct timeval atv;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	int64_t adjustment, remaining;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (delta) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);
		if (!timerisvalid(&atv))
			return (EINVAL);

		/* Convert seconds+usec to microseconds, guarding overflow. */
		if (atv.tv_sec >= 0) {
			if (atv.tv_sec > INT64_MAX / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000;
			if (atv.tv_usec > INT64_MAX - adjustment)
				return EINVAL;
			adjustment += atv.tv_usec;
		} else {
			if (atv.tv_sec < INT64_MIN / 1000000)
				return EINVAL;
			adjustment = atv.tv_sec * 1000000 + atv.tv_usec;
		}

		/* Writers need tc_lock exclusive across the copyout below. */
		rw_enter_write(&tc_lock);
	}

	if (olddelta) {
		tc_adjtime(&remaining, NULL);
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec =  remaining / 1000000;
		atv.tv_usec = remaining % 1000000;
		/* Normalize so tv_usec is non-negative. */
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			goto out;
	}

	if (delta)
		tc_adjtime(NULL, &adjustment);
out:
	if (delta)
		rw_exit_write(&tc_lock);
	return (error);
}


/* Protects the virtual/profiling slots of ps_timer[]. */
struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);

/*
 * Get value of an interval timer.
 * The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below),
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timers .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	struct itimerspec *itimer;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itimer = &p->p_p->ps_timer[which];
	memset(&aitv, 0, sizeof(aitv));
	/* Snapshot the timer under itimer_mtx, converting to timevals. */
	mtx_enter(&itimer_mtx);
	TIMESPEC_TO_TIMEVAL(&aitv.it_interval, &itimer->it_interval);
	TIMESPEC_TO_TIMEVAL(&aitv.it_value, &itimer->it_value);
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}

/*
 * setitimer(2) system call.  If oitv is non-NULL, the previous value
 * is fetched (via sys_getitimer()) before the new one is installed.
 * For ITIMER_REAL the relative it_value is converted to an absolute
 * uptime and armed via the process's ps_realit_to timeout; virtual
 * timers are stored under itimer_mtx and decremented from hardclock().
 */
int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerspec aits;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if (oitv != NULL) {
		/* Report the old value by reusing the getitimer path. */
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	TIMEVAL_TO_TIMESPEC(&aitv.it_value, &aits.it_value);
	TIMEVAL_TO_TIMESPEC(&aitv.it_interval, &aits.it_interval);
	if (which == ITIMER_REAL) {
		struct timespec cts;

		/* Disarm any pending expiry before rearming. */
		timeout_del(&pr->ps_realit_to);
		getnanouptime(&cts);
		if (timespecisset(&aits.it_value)) {
			timo = tstohz(&aits.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			/* Store it_value as an absolute uptime. */
			timespecadd(&aits.it_value, &cts, &aits.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aits;
	} else {
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aits;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}

/*
 * Real interval timer expired:
 * send process
 * whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerspec *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	/* One-shot timer: clear it and we're done. */
	if (!timespecisset(&tp->it_interval)) {
		timespecclear(&tp->it_value);
		return;
	}
	/*
	 * Periodic timer: step it_value forward by whole intervals
	 * until it lies in the future, then rearm the timeout.
	 */
	for (;;) {
		struct timespec cts, nts;
		int timo;

		timespecadd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getnanouptime(&cts);
		if (timespeccmp(&tp->it_value, &cts, >)) {
			nts = tp->it_value;
			timespecsub(&nts, &cts, &nts);
			timo = tstohz(&nts) - 1;
			if (timo <= 0)
				timo = 1;
			/* Don't rearm for a process that is exiting. */
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 * Sub-tick values are rounded up to one clock tick.
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);

	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;

	return (0);
}

/*
 * Decrement an interval timer by the given number of nanoseconds.
 * If the timer expires and it is periodic then reload it.  When reloading
 * the timer we subtract any overrun from the next period so that the timer
 * does not drift.
 */
int
itimerdecr(struct itimerspec *itp, long nsec)
{
	struct timespec decrement;

	NSEC_TO_TIMESPEC(nsec, &decrement);

	mtx_enter(&itimer_mtx);
	timespecsub(&itp->it_value, &decrement, &itp->it_value);
	/* Still time left: report "not expired". */
	if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* Expired one-shot timer: clear it. */
	if (!timespecisset(&itp->it_interval)) {
		timespecclear(&itp->it_value);
		mtx_leave(&itimer_mtx);
		return (0);
	}
	/* Expired periodic timer: reload, absorbing any overrun. */
	while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
		timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
	mtx_leave(&itimer_mtx);
	return (0);
}

/*
 * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
 * for usage and rationale.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timercmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microuptime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * check for 0,0 is so that the message will be seen at least once.
	 * if more than one second have passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * we do increment *curpps even in *curpps < maxpps case, as some may
	 * try to use *curpps for stat purposes as well.
	 */
	if (maxpps == 0)
		rv = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * assume that there's not too many calls to this function.
	 * not sure if the assumption holds, as it depends on *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumption on the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}

/* Attached time-of-day (RTC) chip, if any; see todr_attach() below. */
todr_chip_handle_t todr_handle;
/* Nonzero once inittodr() has run; gates resettodr(). */
int inittodr_done;

#define MINYEAR		((OpenBSD / 100) - 1)	/* minimum plausible year */

/*
 * inittodr:
 *
 *	Initialize time from the time-of-day register.
 */
void
inittodr(time_t base)
{
	time_t deltat;
	struct timeval rtctime;
	struct timespec ts;
	int badbase;

	inittodr_done = 1;

	if (base < (MINYEAR - 1970) * SECYR) {
		printf("WARNING: preposterous time in file system\n");
		/* read the system clock anyway */
		base = (MINYEAR - 1970) * SECYR;
		badbase = 1;
	} else
		badbase = 0;

	rtctime.tv_sec = base;
	rtctime.tv_usec = 0;

	if (todr_handle == NULL ||
	    todr_gettime(todr_handle, &rtctime) != 0 ||
	    rtctime.tv_sec < (MINYEAR - 1970) * SECYR) {
		/*
		 * Believe the time in the file system for lack of
		 * anything better, resetting the TODR.
		 */
		rtctime.tv_sec = base;
		rtctime.tv_usec = 0;
		if (todr_handle != NULL && !badbase)
			printf("WARNING: bad clock chip time\n");
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
		goto bad;
	} else {
		ts.tv_sec = rtctime.tv_sec;
		ts.tv_nsec = rtctime.tv_usec * 1000;
		tc_setclock(&ts);
	}

	if (!badbase) {
		/*
		 * See if we gained/lost two or more days; if
		 * so, assume something is amiss.
		 */
		deltat = rtctime.tv_sec - base;
		if (deltat < 0)
			deltat = -deltat;
		if (deltat < 2 * SECDAY)
			return;		/* all is well */
#ifndef SMALL_KERNEL
		printf("WARNING: clock %s %lld days\n",
		    rtctime.tv_sec < base ? "lost" : "gained",
		    (long long)(deltat / SECDAY));
#endif
	}
bad:
	printf("WARNING: CHECK AND RESET THE DATE!\n");
}

/*
 * resettodr:
 *
 *	Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Skip writing the RTC if inittodr(9) never ran.  We don't
	 * want to overwrite a reasonable value with a nonsense value.
	 */
	if (!inittodr_done)
		return;

	microtime(&rtctime);

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("WARNING: can't update clock chip time\n");
}

/* Register an RTC chip driver as the system time-of-day source. */
void
todr_attach(struct todr_chip_handle *todr)
{
	todr_handle = todr;
}

/* Interval, in seconds, between periodic RTC write-backs. */
#define RESETTODR_PERIOD	1800

void periodic_resettodr(void *);
void perform_resettodr(void *);

struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);

/*
 * Timeout handler: defer the actual RTC write to the systq task
 * queue, since todr_settime() may need a process context.
 */
void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}

/* Task handler: write the RTC and reschedule the periodic timeout. */
void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

/* Begin periodically writing the system time back to the RTC. */
void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}

/* Stop the periodic RTC write-back and cancel any pending task. */
void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}