/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);
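
/*
 * For illustration: the timeval helpers defined at the bottom of this
 * file keep tv_usec normalized to [0, 1000000).  Given the hypothetical
 * values t1 = { 1, 900000 } and t2 = { 0, 200000 }:
 *
 *	timevaladd(&t1, &t2);	- t1 becomes { 2, 100000 }
 *	timevalsub(&t1, &t2);	- t1 becomes { 1, 900000 } again
 */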

/*
 * Nanosleep tries very hard to sleep for a precisely requested time
 * interval, down to 1us.  The administrator can impose a minimum delay
 * and a delay below which we hard-loop instead of initiating a timer
 * interrupt and sleeping.
 *
 * For machines under high loads it might be beneficial to increase min_us
 * to e.g. 1000us (1ms) so spinning processes sleep meaningfully.
 */
static int nanosleep_min_us = 10;
static int nanosleep_hard_us = 100;
static int gettimeofday_quick = 0;
SYSCTL_INT(_kern, OID_AUTO, nanosleep_min_us, CTLFLAG_RW,
	   &nanosleep_min_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, nanosleep_hard_us, CTLFLAG_RW,
	   &nanosleep_hard_us, 0, "");
SYSCTL_INT(_kern, OID_AUTO, gettimeofday_quick, CTLFLAG_RW,
	   &gettimeofday_quick, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time; he cannot go back to
	 * the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}
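
/*
 * Worked example of the securelevel clamp above (hypothetical values):
 * with securelevel > 1, the current time at 1000.0 and maxtime at
 * 1000.0, a request to step back to 500.0 is clamped to 999.0, and a
 * request to step forward to 1500.0 is clamped to 1001.0.  A second
 * forward step within the same second fails with EPERM.
 */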

static void
get_curthread_cputime(struct timespec *ats)
{
	struct thread *td = curthread;

	crit_enter();
	/*
	 * These are 64-bit fields but the actual values should never
	 * reach the limit.  We don't care about overflows.
	 */
	ats->tv_sec = td->td_uticks / 1000000;
	ats->tv_sec += td->td_sticks / 1000000;
	ats->tv_sec += td->td_iticks / 1000000;
	ats->tv_nsec = (td->td_uticks % 1000000) * 1000;
	ats->tv_nsec += (td->td_sticks % 1000000) * 1000;
	ats->tv_nsec += (td->td_iticks % 1000000) * 1000;
	crit_exit();
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	int error = 0;
	struct proc *p;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_VIRTUAL:
		p = curproc;
		ats->tv_sec = p->p_timer[ITIMER_VIRTUAL].it_value.tv_sec;
		ats->tv_nsec = p->p_timer[ITIMER_VIRTUAL].it_value.tv_usec *
			       1000;
		break;
	case CLOCK_PROF:
		p = curproc;
		ats->tv_sec = p->p_timer[ITIMER_PROF].it_value.tv_sec;
		ats->tv_nsec = p->p_timer[ITIMER_PROF].it_value.tv_usec *
			       1000;
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_curthread_cputime(ats);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	get_mplock();
	error = kern_clock_settime(uap->clock_id, &ats);
	rel_mplock();
	return (error);
}
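
/*
 * Illustrative userland usage of the interfaces above (not kernel
 * code; assumes the standard POSIX prototypes from <time.h>):
 *
 *	struct timespec ts;
 *	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
 *		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */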

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	int error;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
	case CLOCK_THREAD_CPUTIME_ID:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_sec = 0;
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		error = 0;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_sec = 0;
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		error = 0;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}

/*
 * nanosleep1()
 *
 * This is a general helper function for nanosleep() (aka sleep() aka
 * usleep()).
 *
 * If there is less than one tick's worth of time left and
 * we haven't done a yield, or the remaining microseconds is
 * ridiculously low, do a yield.  This avoids having
 * to deal with systimer overheads when the system is under
 * heavy loads.  If we have done a yield already then use
 * a systimer and an uninterruptible thread wait.
 *
 * If there is more than a tick's worth of time left,
 * calculate the baseline ticks and use an interruptible
 * tsleep, then handle the fine-grained delay on the next
 * loop.  This usually results in two sleeps occurring, a long one
 * and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
	     struct intrframe *frame __unused)
{
	lwkt_schedule(info->data);
}

int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX: imho this should return EINVAL at least for tv_sec < 0 */
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tv.tv_usec > 0 && tv.tv_usec < nanosleep_min_us)
				tv.tv_usec = nanosleep_min_us;
			if (tv.tv_usec < nanosleep_hard_us) {
				lwkt_user_yield();
				cpu_pause();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
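
/*
 * Worked example of the loop above (hypothetical values; assumes
 * hz = 100, i.e. ustick = 10000us): a 25ms request first takes the
 * tsleep() path for roughly two ticks, then on the next loop the
 * ~5ms residual exceeds nanosleep_hard_us and is finished with a
 * one-shot systimer.  A residual below nanosleep_hard_us would
 * instead be burned with lwkt_user_yield()/cpu_pause().
 */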

/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * Copy out the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

/*
 * The gettimeofday() system call is supposed to return a fine-grained
 * realtime stamp.  However, acquiring a fine-grained stamp can create a
 * bottleneck when multiple cpu cores are trying to access e.g. the
 * HPET hardware timer all at the same time, so we have a sysctl that
 * allows its behavior to be changed to a more coarse-grained timestamp
 * which does not have to access a hardware timer.
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		if (gettimeofday_quick)
			getmicrotime(&atv);
		else
			microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
				     sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
				sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/*
	 * Verify all parameters before changing time.
	 *
	 * NOTE: We do not allow the time to be set to 0.0, which also by
	 *	 happy coincidence works around a pkgsrc bulk build bug.
	 */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
				    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
		if (atv.tv_sec == 0 && atv.tv_usec == 0)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	get_mplock();
	if (uap->tv && (error = settime(&atv))) {
		rel_mplock();
		return (error);
	}
	rel_mplock();
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
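
/*
 * Illustrative behavior of kern_adjtime()/kern_adjtime_common() above
 * (hypothetical magnitudes): deltas are in nanoseconds.  A pending
 * correction smaller than ntp_default_tick_delta is consumed in a
 * single tick; a moderate correction is slewed at ntp_default_tick_delta
 * per tick, so if ntp_default_tick_delta were 10000ns, a 1ms delta
 * would take 100 ticks to consume; a correction beyond ntp_big_delta
 * is slewed ten times faster.
 */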

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	get_mplock();
	kern_adjtime(ndelta, &odelta);
	rel_mplock();

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}
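
/*
 * Illustrative userland usage of the handler above (not kernel code;
 * the MIB name kern.ntp.delta is declared below):
 *
 *	int64_t delta = 500000000LL;	- 500ms, in nanoseconds
 *	sysctlbyname("kern.ntp.delta", NULL, NULL, &delta, sizeof(delta));
 */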

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer's .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
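
/*
 * Illustrative userland usage of the interval timer interfaces below
 * (not kernel code; assumes the standard POSIX setitimer(2) API):
 *
 *	struct itimerval itv;
 *	itv.it_value.tv_sec = 1;	- first SIGALRM after one second
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 0;	- then every 250 milliseconds
 *	itv.it_interval.tv_usec = 250000;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */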

/*
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If the time for the real
		 * time timer has passed return 0, else return the
		 * difference between the current time and the time
		 * the timer is set to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	lwkt_reltoken(&p->p_token);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop_sync(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
		switch(uap->which) {
		case ITIMER_VIRTUAL:
			p->p_flags &= ~P_SIGVTALRM;
			break;
		case ITIMER_PROF:
			p->p_flags &= ~P_SIGPROF;
			break;
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute the next time the timer should go off, which is > current
 * time.  This is where the delay in processing this timeout causes
 * multiple SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		goto done;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
			   &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
				      realitexpire, p);
			goto done;
		}
	}
done:
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}
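
/*
 * Worked example of the catch-up loop above (hypothetical values):
 * with a 10ms it_interval and a callout delayed by 35ms, a single
 * SIGALRM is delivered and it_value is advanced in 10ms steps until
 * it lies in the future; the intervening expirations are compressed
 * into that one signal and the timer stays in phase.
 */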

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).
 *
 * MPSAFE
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
		tv->tv_usec = ustick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: the subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
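
/*
 * Illustrative usage of ratecheck() (hypothetical caller): emit a
 * diagnostic at most once every 10 seconds:
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval onceper10s = { 10, 0 };
 *	if (ratecheck(&lastmsg, &onceper10s))
 *		kprintf("something happened\n");
 */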

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
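
/*
 * Illustrative usage of ppsratecheck() (hypothetical caller): drop
 * events beyond roughly 100 per second:
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *	if (!ppsratecheck(&lasttv, &curpps, 100))
 *		return;		- over the limit, drop the event
 */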