/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/thread2.h>

extern struct spinlock ntp_spin;

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

/*
 * Nanosleep tries very hard to sleep for a precisely requested time
 * interval, down to 1uS.  The administrator can impose a minimum delay
 * and a delay below which we hard-loop instead of initiating a timer
 * interrupt and sleep.
 *
 * For machines under high loads it might be beneficial to increase min_us
 * to e.g. 1000uS (1ms) so spinning processes sleep meaningfully.
 */
static int     nanosleep_min_us = 10;
static int     nanosleep_hard_us = 100;
static int     gettimeofday_quick = 0;
SYSCTL_INT(_kern, OID_AUTO, nanosleep_min_us, CTLFLAG_RW,
           &nanosleep_min_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, nanosleep_hard_us, CTLFLAG_RW,
           &nanosleep_hard_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, gettimeofday_quick, CTLFLAG_RW,
           &gettimeofday_quick, 0, "");
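
/*
 * Illustrative note (not part of the original source): the knobs above
 * live under the standard "kern" sysctl tree, so the tuning suggested in
 * the comment can be done from userland, e.g.:
 *
 *	sysctl kern.nanosleep_min_us=1000
 *
 * Setting kern.gettimeofday_quick=1 likewise switches gettimeofday() to
 * the coarse getmicrotime() path implemented further below.
 */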

static struct lock masterclock_lock = LOCK_INITIALIZER("mstrclk", 0, 0);

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

static void
get_process_cputime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	timevaladd(&ru.ru_utime, &ru.ru_stime);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

static void
get_process_usertime(struct proc *p, struct timespec *ats)
{
	struct rusage ru;

	lwkt_gettoken(&p->p_token);
	calcru_proc(p, &ru);
	lwkt_reltoken(&p->p_token);
	TIMEVAL_TO_TIMESPEC(&ru.ru_utime, ats);
}

static void
get_curthread_cputime(struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval sys, user;

	calcru(td->td_lwp, &user, &sys);
	timevaladd(&user, &sys);
	TIMEVAL_TO_TIMESPEC(&user, ats);
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	struct proc *p;

	p = curproc;
	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_VIRTUAL:
		get_process_usertime(p, ats);
		break;
	case CLOCK_PROF:
	case CLOCK_PROCESS_CPUTIME_ID:
		get_process_cputime(p, ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_curthread_cputime(ats);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}
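
/*
 * Illustrative userland sketch (not part of the original source): the
 * switch above is reached via clock_gettime(2).  The *_FAST ids trade
 * precision for speed by reading the cached getnanotime()/getnanouptime()
 * values instead of sampling sys_cputimer, e.g.:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC_FAST, &ts);  // coarse, no timer read
 *	clock_gettime(CLOCK_MONOTONIC, &ts);       // precise, samples timer
 */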

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	lockmgr(&masterclock_lock, LK_EXCLUSIVE);
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	lockmgr(&masterclock_lock, LK_RELEASE);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	error = kern_clock_settime(uap->clock_id, &ats);

	return (error);
}

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	ts->tv_sec = 0;
	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
		ts->tv_nsec = 1000;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}
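
/*
 * Worked example (illustrative, not part of the original source): with a
 * 3.579545 MHz ACPI timer as sys_cputimer, 1000000000 / 3579545 truncates
 * to 279, so kern_clock_getres() reports 280ns.  The +1 also guarantees a
 * nonzero resolution for cputimers faster than 1GHz, where the division
 * alone would yield 0.
 */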

/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	lwkt_schedule(info->data);
}

int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX: imho this should return EINVAL at least for tv_sec < 0 */
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tv.tv_usec > 0 && tv.tv_usec < nanosleep_min_us)
				tv.tv_usec = nanosleep_min_us;
			if (tv.tv_usec < nanosleep_hard_us) {
				lwkt_user_yield();
				cpu_pause();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						      td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}
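
/*
 * Illustrative userland sketch (not part of the original source): the rmtp
 * residual copied out above supports the classic restart loop, e.g.:
 *
 *	struct timespec req = { 1, 500000000 }, rem;
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// resume with the unslept remainder
 */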

/*
 * The gettimeofday() system call is supposed to return a fine-grained
 * realtime stamp.  However, acquiring a fine-grained stamp can create a
 * bottleneck when multiple cpu cores are trying to access e.g. the
 * HPET hardware timer all at the same time, so we have a sysctl that
 * allows its behavior to be changed to a more coarse-grained timestamp
 * which does not have to access a hardware timer.
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		if (gettimeofday_quick)
			getmicrotime(&atv);
		else
			microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
				     sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
				sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/*
	 * Verify all parameters before changing time.
	 *
	 * XXX: We do not allow the time to be set to 0.0, which also by
	 *      happy coincidence works around a pkgsrc bulk build bug.
	 */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
				    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
		if (atv.tv_sec == 0 && atv.tv_usec == 0)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	lockmgr(&masterclock_lock, LK_EXCLUSIVE);
	if (uap->tv && (error = settime(&atv))) {
		lockmgr(&masterclock_lock, LK_RELEASE);
		return (error);
	}
	lockmgr(&masterclock_lock, LK_RELEASE);

	if (uap->tzp)
		tz = atz;
	return (0);
}

/*
 * WARNING! Run with ntp_spin held
 */
static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	spin_lock(&ntp_spin);
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	spin_unlock(&ntp_spin);
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	*delta = ntp_delta;
}

void
kern_reladjtime(int64_t delta)
{
	spin_lock(&ntp_spin);
	ntp_delta += delta;
	kern_adjtime_common();
	spin_unlock(&ntp_spin);
}

static void
kern_adjfreq(int64_t rate)
{
	spin_lock(&ntp_spin);
	ntp_tick_permanent = rate;
	spin_unlock(&ntp_spin);
}

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}
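
/*
 * Worked example (illustrative, not part of the original source): an
 * adjtime(2) request of { tv_sec = 1, tv_usec = 500000 } becomes
 * ndelta = 1 * 1000000000 + 500000 * 1000 = 1500000000ns, which
 * kern_adjtime() then slews out at ntp_tick_delta nanoseconds per
 * hardclock() tick instead of stepping the clock.
 */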

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
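
/*
 * Worked example (illustrative, not part of the original source): a
 * permanent frequency correction of +1ppm is 1000ns per second, so the
 * value written to kern.ntp.permanent is 1000 << 32 = 4294967296000.
 * With hz = 100, sysctl_adjfreq() divides by hz and hands kern_adjfreq()
 * 42949672960, i.e. 10ns per tick still shifted left 32.
 */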

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	lwkt_reltoken(&p->p_token);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop_sync(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
		switch(uap->which) {
		case ITIMER_VIRTUAL:
			p->p_flags &= ~P_SIGVTALRM;
			break;
		case ITIMER_PROF:
			p->p_flags &= ~P_SIGPROF;
			break;
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}
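
/*
 * Illustrative userland sketch (not part of the original source): a
 * periodic 100ms SIGALRM via the real time timer handled above, e.g.:
 *
 *	struct itimerval itv;
 *	itv.it_value.tv_sec = itv.it_interval.tv_sec = 0;
 *	itv.it_value.tv_usec = itv.it_interval.tv_usec = 100000;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *
 * Because the kernel stores it_value as an absolute uptime, the periodic
 * signals do not drift even when individual callouts run late.
 */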

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		goto done;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			goto done;
		}
	}
done:
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}

/*
 * Used to validate itimer timeouts and utimes*() timespecs.
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
		tv->tv_usec = ustick;
	return (0);
}

/*
 * Used to validate timeouts and utimes*() timespecs.
 */
int
itimespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000ULL)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < nstick)
		ts->tv_nsec = nstick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}
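
/*
 * Worked example (illustrative, not part of the original source): with
 * it_value = {0, 300}, it_interval = {0, 10000} and usec = 1000, the timer
 * expires 700us into the next interval; itimerdecr() carries
 * usec = 1000 - 300 = 700 into the reload, so the new it_value is
 * 10000 - 700 = 9300us and the long-run period stays at 10000us instead
 * of drifting.
 */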

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
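
/*
 * Illustrative usage sketch (not part of the original source): the
 * canonical pattern for both helpers is a static state variable per
 * message source, e.g. in a driver that wants at most 10 complaints
 * per second:
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 10))
 *		kprintf("foo0: input overrun\n");
 *
 * ratecheck() works the same way but takes a minimum interval between
 * events instead of an events-per-second budget.
 */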