/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
int	nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	int error = 0;

	switch(clock_id) {
	case CLOCK_REALTIME:
		nanotime(ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ats);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	get_mplock();
	error = kern_clock_settime(uap->clock_id, &ats);
	rel_mplock();
	return (error);
}

/*
 * MPSAFE
 */
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	int error;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_sec = 0;
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
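/*
 * Worked example of the rounding above (hypothetical timer rate,
 * for illustration only): with sys_cputimer->freq = 3579545 Hz,
 * the classic ACPI timer rate, 1000000000 / 3579545 truncates to
 * 279, so the reported resolution is 280 nanoseconds.  The +1 also
 * guarantees a nonzero result for frequencies above 1 GHz.
 */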
/*
 * MPSAFE
 */
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}

/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 *
 * MPSAFE
 */
static void
ns1_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	lwkt_schedule(info->data);
}

int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX: imho this should return EINVAL at least for tv_sec < 0 */
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / ustick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tv.tv_usec < sleep_hard_us) {
				lwkt_user_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
				    td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
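/*
 * Example of the two-phase behaviour described above, assuming
 * hz = 100 (so ustick = 10000 microseconds), for illustration
 * only: a 25ms request first does an interruptible tsleep() of
 * roughly two ticks, then the next loop iteration sees ~5ms
 * remaining and finishes it with the one-shot systimer (or with a
 * simple yield once fewer than sleep_hard_us microseconds remain).
 */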
/*
 * MPSAFE
 */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);

	get_mplock();
	if (uap->tv && (error = settime(&atv))) {
		rel_mplock();
		return (error);
	}
	rel_mplock();
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
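/*
 * Example (hypothetical in-kernel caller, for illustration only):
 *
 *	int64_t odelta;
 *
 *	kern_adjtime(500000000LL, &odelta);
 *
 * would slew the clock 0.5 seconds forward (the delta is in
 * nanoseconds).  kern_adjtime_common() then bleeds the correction
 * off gradually: a residual smaller than ntp_default_tick_delta is
 * applied within a single tick, anything beyond ntp_big_delta is
 * slewed at ten times the default per-tick rate, and everything in
 * between moves at the default rate with the sign of the remaining
 * delta.
 */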
/*
 * MPALMOSTSAFE
 */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	error = copyin(uap->delta, &atv, sizeof(struct timeval));
	if (error)
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	get_mplock();
	kern_adjtime(ndelta, &odelta);
	rel_mplock();

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		copyout(&atv, uap->olddelta, sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}
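/*
 * Example (illustrative only): a one-time 100ms correction could
 * be injected from userland with
 *
 *	sysctl kern.ntp.delta=100000000
 *
 * (the value is in nanoseconds, per the comment above), which
 * funnels through sysctl_delta() and kern_adjtime().
 */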
/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 *
 * MPALMOSTSAFE
 */
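/*
 * Userland view of these timers (illustrative sketch only):
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 1;
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 0;
 *	itv.it_interval.tv_usec = 250000;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *
 * arms a real-time timer that first fires after one second and then
 * every 250ms.  Each ITIMER_REAL expiry delivers SIGALRM via
 * realitexpire() below; ITIMER_VIRTUAL and ITIMER_PROF deliver
 * SIGVTALRM and SIGPROF from the hardclock() path.
 */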
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	lwkt_reltoken(&p->p_token);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

/*
 * MPALMOSTSAFE
 */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	lwkt_gettoken(&p->p_token);
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	lwkt_gettoken(&p->p_token);
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		lwkt_reltoken(&p->p_token);
		return;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			lwkt_reltoken(&p->p_token);
			return;
		}
	}
	lwkt_reltoken(&p->p_token);
}
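/*
 * Worked example of the catch-up loop above (illustrative only):
 * with it_interval = 10ms and a callout that fires 35ms late, the
 * loop adds the interval four times before it_value exceeds the
 * current uptime, so the expirations that would have fired at
 * +10ms, +20ms and +30ms are folded into the single SIGALRM
 * already delivered.
 */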
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 *
 * MPSAFE
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < ustick)
		tv->tv_usec = ustick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
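/*
 * Typical usage (illustrative sketch only; the names are
 * hypothetical), throttling a warning to at most once every
 * 10 seconds:
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval onceper10s = { 10, 0 };
 *
 *	if (ratecheck(&lastmsg, &onceper10s))
 *		kprintf("device wedged, resetting\n");
 */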
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
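/*
 * Typical usage (illustrative sketch only; the names are
 * hypothetical), limiting an event to 100 occurrences per second:
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (!ppsratecheck(&lasttv, &curpps, 100))
 *		return;
 *
 * In practice the timeval and counter would live in per-subsystem
 * state rather than function-local statics.
 */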