/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.40 2008/04/02 14:16:16 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	int error = 0;

	switch(clock_id) {
	case CLOCK_REALTIME:
		nanotime(ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ats);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	get_mplock();
	error = kern_clock_settime(uap->clock_id, &ats);
	rel_mplock();
	return (error);
}
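
/*
 * Illustrative sketch (not part of the build): an in-kernel caller can
 * obtain a timestamp directly through kern_clock_gettime(), skipping the
 * copyout() that the syscall wrapper performs.  The function name below
 * is hypothetical.
 */
#if 0
static void
example_stamp(void)
{
	struct timespec ts;

	/* CLOCK_MONOTONIC is immune to settime()/adjtime() steps. */
	if (kern_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		kprintf("uptime: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
}
#endif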

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	int error;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_sec = 0;
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}

/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}

static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						      td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
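
/*
 * Worked example (illustrative, assuming hz=100, i.e. a 10ms tick):
 * a 25ms request first takes the interruptible tsleep() path for the
 * two whole baseline ticks, then on a later loop iteration the ~5ms
 * residue falls below one tick and is handled by a one-shot systimer;
 * a residue below sleep_hard_us would instead be burned with a yield.
 */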

/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
				     sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
				sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
				    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	get_mplock();
	if (uap->tv && (error = settime(&atv))) {
		rel_mplock();
		return (error);
	}
	rel_mplock();
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
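
/*
 * Worked example (illustrative): after kern_adjtime(1000000, &od) the
 * pending correction is +1ms in ntp_delta.  Assuming that lies between
 * ntp_default_tick_delta and ntp_big_delta, kern_adjtime_common() slews
 * at ntp_default_tick_delta per tick; a correction beyond ntp_big_delta
 * slews ten times as fast; and once |ntp_delta| drops below one default
 * tick delta the whole remainder becomes the per-tick delta.
 */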

/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	get_mplock();
	kern_adjtime(ndelta, &odelta);
	rel_mplock();

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}
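
/*
 * Usage sketch (illustrative): the handlers above are registered below
 * under the kern.ntp node as kern.ntp.adjust (relative, kern_reladjtime)
 * and kern.ntp.delta (absolute, kern_adjtime), both quads in nanoseconds.
 * For example, "sysctl kern.ntp.delta=500000000" requests a one-time
 * 0.5 second slew, and reading the OID back reports the correction
 * still outstanding.
 */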

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	get_mplock();
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	crit_exit();
	rel_mplock();
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}
703 */ 704 aitv = p->p_realtimer; 705 if (timevalisset(&aitv.it_value)) { 706 getmicrouptime(&ctv); 707 if (timevalcmp(&aitv.it_value, &ctv, <)) 708 timevalclear(&aitv.it_value); 709 else 710 timevalsub(&aitv.it_value, &ctv); 711 } 712 } else { 713 aitv = p->p_timer[uap->which]; 714 } 715 crit_exit(); 716 rel_mplock(); 717 return (copyout(&aitv, uap->itv, sizeof (struct itimerval))); 718 } 719 720 /* 721 * MPALMOSTSAFE 722 */ 723 int 724 sys_setitimer(struct setitimer_args *uap) 725 { 726 struct itimerval aitv; 727 struct timeval ctv; 728 struct itimerval *itvp; 729 struct proc *p = curproc; 730 int error; 731 732 if (uap->which > ITIMER_PROF) 733 return (EINVAL); 734 itvp = uap->itv; 735 if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv, 736 sizeof(struct itimerval)))) 737 return (error); 738 if ((uap->itv = uap->oitv) && 739 (error = sys_getitimer((struct getitimer_args *)uap))) 740 return (error); 741 if (itvp == 0) 742 return (0); 743 if (itimerfix(&aitv.it_value)) 744 return (EINVAL); 745 if (!timevalisset(&aitv.it_value)) 746 timevalclear(&aitv.it_interval); 747 else if (itimerfix(&aitv.it_interval)) 748 return (EINVAL); 749 get_mplock(); 750 crit_enter(); 751 if (uap->which == ITIMER_REAL) { 752 if (timevalisset(&p->p_realtimer.it_value)) 753 callout_stop(&p->p_ithandle); 754 if (timevalisset(&aitv.it_value)) 755 callout_reset(&p->p_ithandle, 756 tvtohz_high(&aitv.it_value), realitexpire, p); 757 getmicrouptime(&ctv); 758 timevaladd(&aitv.it_value, &ctv); 759 p->p_realtimer = aitv; 760 } else { 761 p->p_timer[uap->which] = aitv; 762 } 763 crit_exit(); 764 rel_mplock(); 765 return (0); 766 } 767 768 /* 769 * Real interval timer expired: 770 * send process whose timer expired an alarm signal. 771 * If time is not set up to reload, then just return. 772 * Else compute next time timer should go off which is > current time. 773 * This is where delay in processing this timeout causes multiple 774 * SIGALRM calls to be compressed into one. 775 * tvtohz_high() always adds 1 to allow for the time until the next clock 776 * interrupt being strictly less than 1 clock tick, but we don't want 777 * that here since we want to appear to be in sync with the clock 778 * interrupt even when we're delayed. 779 */ 780 void 781 realitexpire(void *arg) 782 { 783 struct proc *p; 784 struct timeval ctv, ntv; 785 786 p = (struct proc *)arg; 787 ksignal(p, SIGALRM); 788 if (!timevalisset(&p->p_realtimer.it_interval)) { 789 timevalclear(&p->p_realtimer.it_value); 790 return; 791 } 792 for (;;) { 793 crit_enter(); 794 timevaladd(&p->p_realtimer.it_value, 795 &p->p_realtimer.it_interval); 796 getmicrouptime(&ctv); 797 if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) { 798 ntv = p->p_realtimer.it_value; 799 timevalsub(&ntv, &ctv); 800 callout_reset(&p->p_ithandle, tvtohz_low(&ntv), 801 realitexpire, p); 802 crit_exit(); 803 return; 804 } 805 crit_exit(); 806 } 807 } 808 809 /* 810 * Check that a proposed value to load into the .it_value or 811 * .it_interval part of an interval timer is acceptable, and 812 * fix it to have at least minimal value (i.e. if it is less 813 * than the resolution of the clock, round it up.) 
814 * 815 * MPSAFE 816 */ 817 int 818 itimerfix(struct timeval *tv) 819 { 820 821 if (tv->tv_sec < 0 || tv->tv_sec > 100000000 || 822 tv->tv_usec < 0 || tv->tv_usec >= 1000000) 823 return (EINVAL); 824 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick) 825 tv->tv_usec = ustick; 826 return (0); 827 } 828 829 /* 830 * Decrement an interval timer by a specified number 831 * of microseconds, which must be less than a second, 832 * i.e. < 1000000. If the timer expires, then reload 833 * it. In this case, carry over (usec - old value) to 834 * reduce the value reloaded into the timer so that 835 * the timer does not drift. This routine assumes 836 * that it is called in a context where the timers 837 * on which it is operating cannot change in value. 838 */ 839 int 840 itimerdecr(struct itimerval *itp, int usec) 841 { 842 843 if (itp->it_value.tv_usec < usec) { 844 if (itp->it_value.tv_sec == 0) { 845 /* expired, and already in next interval */ 846 usec -= itp->it_value.tv_usec; 847 goto expire; 848 } 849 itp->it_value.tv_usec += 1000000; 850 itp->it_value.tv_sec--; 851 } 852 itp->it_value.tv_usec -= usec; 853 usec = 0; 854 if (timevalisset(&itp->it_value)) 855 return (1); 856 /* expired, exactly at end of interval */ 857 expire: 858 if (timevalisset(&itp->it_interval)) { 859 itp->it_value = itp->it_interval; 860 itp->it_value.tv_usec -= usec; 861 if (itp->it_value.tv_usec < 0) { 862 itp->it_value.tv_usec += 1000000; 863 itp->it_value.tv_sec--; 864 } 865 } else 866 itp->it_value.tv_usec = 0; /* sec is already 0 */ 867 return (0); 868 } 869 870 /* 871 * Add and subtract routines for timevals. 872 * N.B.: subtract routine doesn't deal with 873 * results which are before the beginning, 874 * it just gets very confused in this case. 875 * Caveat emptor. 876 */ 877 void 878 timevaladd(struct timeval *t1, const struct timeval *t2) 879 { 880 881 t1->tv_sec += t2->tv_sec; 882 t1->tv_usec += t2->tv_usec; 883 timevalfix(t1); 884 } 885 886 void 887 timevalsub(struct timeval *t1, const struct timeval *t2) 888 { 889 890 t1->tv_sec -= t2->tv_sec; 891 t1->tv_usec -= t2->tv_usec; 892 timevalfix(t1); 893 } 894 895 static void 896 timevalfix(struct timeval *t1) 897 { 898 899 if (t1->tv_usec < 0) { 900 t1->tv_sec--; 901 t1->tv_usec += 1000000; 902 } 903 if (t1->tv_usec >= 1000000) { 904 t1->tv_sec++; 905 t1->tv_usec -= 1000000; 906 } 907 } 908 909 /* 910 * ratecheck(): simple time-based rate-limit checking. 911 */ 912 int 913 ratecheck(struct timeval *lasttime, const struct timeval *mininterval) 914 { 915 struct timeval tv, delta; 916 int rv = 0; 917 918 getmicrouptime(&tv); /* NB: 10ms precision */ 919 delta = tv; 920 timevalsub(&delta, lasttime); 921 922 /* 923 * check for 0,0 is so that the message will be seen at least once, 924 * even if interval is huge. 925 */ 926 if (timevalcmp(&delta, mininterval, >=) || 927 (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) { 928 *lasttime = tv; 929 rv = 1; 930 } 931 932 return (rv); 933 } 934 935 /* 936 * ppsratecheck(): packets (or events) per second limitation. 937 * 938 * Return 0 if the limit is to be enforced (e.g. the caller 939 * should drop a packet because of the rate limitation). 940 * 941 * maxpps of 0 always causes zero to be returned. maxpps of -1 942 * always causes 1 to be returned; this effectively defeats rate 943 * limiting. 944 * 945 * Note that we maintain the struct timeval for compatibility 946 * with other bsd systems. We reuse the storage and just monitor 947 * clock ticks for minimal overhead. 

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
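
/*
 * Usage sketch (illustrative fragment): dropping excess events with
 * ppsratecheck().  The statics and the process_packet()/drop_packet()
 * helpers are hypothetical; lasttime/curpps carry state between calls
 * and must start zeroed.
 */
#if 0
	static struct timeval lasttime;
	static int curpps;

	if (ppsratecheck(&lasttime, &curpps, 100))
		process_packet(m);	/* under 100 events/second */
	else
		drop_packet(m);		/* limit enforced */
#endif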