/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_time.c 8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.40 2008/04/02 14:16:16 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int settime(struct timeval *);
static void timevalfix(struct timeval *);

static int sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static int
settime(struct timeval *tv)
{
        struct timeval delta, tv1, tv2;
        static struct timeval maxtime, laststep;
        struct timespec ts;
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        microtime(&tv1);
        delta = *tv;
        timevalsub(&delta, &tv1);

        /*
         * If the system is secure, we do not allow the time to be
         * set to a value earlier than 1 second less than the highest
         * time we have yet seen.  The worst a miscreant can do in
         * this circumstance is "freeze" time.  He cannot go
         * back to the past.
         *
         * We similarly do not allow the clock to be stepped more
         * than one second, nor more than once per second.  This allows
         * a miscreant to make the clock march double-time, but no worse.
         */
        if (securelevel > 1) {
                if (delta.tv_sec < 0 || delta.tv_usec < 0) {
                        /*
                         * Update maxtime to latest time we've seen.
                         */
                        if (tv1.tv_sec > maxtime.tv_sec)
                                maxtime = tv1;
                        tv2 = *tv;
                        timevalsub(&tv2, &maxtime);
                        if (tv2.tv_sec < -1) {
                                tv->tv_sec = maxtime.tv_sec - 1;
                                kprintf("Time adjustment clamped to -1 second\n");
                        }
                } else {
                        if (tv1.tv_sec == laststep.tv_sec) {
                                crit_exit();
                                return (EPERM);
                        }
                        if (delta.tv_sec > 1) {
                                tv->tv_sec = tv1.tv_sec + 1;
                                kprintf("Time adjustment clamped to +1 second\n");
                        }
                        laststep = *tv;
                }
        }

        ts.tv_sec = tv->tv_sec;
        ts.tv_nsec = tv->tv_usec * 1000;
        set_timeofday(&ts);
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));

        resettodr();
        return (0);
}
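
/*
 * Worked example of the securelevel > 1 clamping above (illustrative
 * numbers, not from the original sources): with the clock at t = 1000 s
 * and the highest time yet seen also 1000 s, a request to step back to
 * 900 s is clamped to maxtime - 1 = 999 s, so time can be "frozen" but
 * not rolled back by more than a second.  A request to step forward to
 * 1010 s is clamped to t + 1 = 1001 s, and a second step within the
 * same second fails with EPERM; at worst the clock can be made to march
 * double-time, gaining one extra second per real second.
 */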

int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
        int error = 0;

        switch(clock_id) {
        case CLOCK_REALTIME:
                nanotime(ats);
                break;
        case CLOCK_MONOTONIC:
                nanouptime(ats);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

/* ARGSUSED */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
        struct timespec ats;
        int error;

        error = kern_clock_gettime(uap->clock_id, &ats);
        if (error == 0)
                error = copyout(&ats, uap->tp, sizeof(ats));

        return (error);
}

int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
        struct thread *td = curthread;
        struct timeval atv;
        int error;

        if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
                return (error);
        if (clock_id != CLOCK_REALTIME)
                return (EINVAL);
        if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
                return (EINVAL);

        TIMESPEC_TO_TIMEVAL(&atv, ats);
        error = settime(&atv);
        return (error);
}

/* ARGSUSED */
int
sys_clock_settime(struct clock_settime_args *uap)
{
        struct timespec ats;
        int error;

        if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
                return (error);

        return (kern_clock_settime(uap->clock_id, &ats));
}

int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
        int error;

        switch(clock_id) {
        case CLOCK_REALTIME:
        case CLOCK_MONOTONIC:
                /*
                 * Round up the result of the division cheaply
                 * by adding 1.  Rounding up is especially important
                 * if rounding down would give 0.  Perfect rounding
                 * is unimportant.
                 */
                ts->tv_sec = 0;
                ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
                error = 0;
                break;
        default:
                error = EINVAL;
                break;
        }

        return(error);
}

int
sys_clock_getres(struct clock_getres_args *uap)
{
        int error;
        struct timespec ts;

        error = kern_clock_getres(uap->clock_id, &ts);
        if (error == 0)
                error = copyout(&ts, uap->tp, sizeof(ts));

        return (error);
}

/*
 * nanosleep1()
 *
 * This is a general helper function for nanosleep() (aka sleep() aka
 * usleep()).
 *
 * If there is less than one tick's worth of time left and
 * we haven't done a yield, or the remaining number of microseconds is
 * ridiculously low, do a yield.  This avoids having
 * to deal with systimer overheads when the system is under
 * heavy loads.  If we have done a yield already then use
 * a systimer and an uninterruptible thread wait.
 *
 * If there is more than a tick's worth of time left,
 * calculate the baseline ticks and use an interruptible
 * tsleep, then handle the fine-grained delay on the next
 * loop.  This usually results in two sleeps occurring, a long one
 * and a short one.
 */
static void
ns1_systimer(systimer_t info)
{
        lwkt_schedule(info->data);
}

static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
        static int nanowait;
        struct timespec ts, ts2, ts3;
        struct timeval tv;
        int error;
        int tried_yield;

        if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
                return (EINVAL);
        if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
                return (0);
        nanouptime(&ts);
        timespecadd(&ts, rqt);          /* ts = target timestamp compare */
        TIMESPEC_TO_TIMEVAL(&tv, rqt);  /* tv = sleep interval */
        tried_yield = 0;

        for (;;) {
                int ticks;
                struct systimer info;

                ticks = tv.tv_usec / tick;      /* approximate */

                if (tv.tv_sec == 0 && ticks == 0) {
                        thread_t td = curthread;
                        if (tried_yield || tv.tv_usec < sleep_hard_us) {
                                tried_yield = 0;
                                uio_yield();
                        } else {
                                crit_enter_quick(td);
                                systimer_init_oneshot(&info, ns1_systimer,
                                    td, tv.tv_usec);
                                lwkt_deschedule_self(td);
                                crit_exit_quick(td);
                                lwkt_switch();
                                systimer_del(&info); /* make sure it's gone */
                        }
                        error = iscaught(td->td_lwp);
                } else if (tv.tv_sec == 0) {
                        error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
                } else {
                        ticks = tvtohz_low(&tv); /* also handles overflow */
                        error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
                }
                nanouptime(&ts2);
                if (error && error != EWOULDBLOCK) {
                        if (error == ERESTART)
                                error = EINTR;
                        if (rmt != NULL) {
                                timespecsub(&ts, &ts2);
                                if (ts.tv_sec < 0)
                                        timespecclear(&ts);
                                *rmt = ts;
                        }
                        return (error);
                }
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
        }
}
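
/*
 * Illustrative walk-through of the loop above (assumes hz = 100, so
 * tick = 10000 us): a 25 ms nanosleep first computes ticks = 2 and does
 * an interruptible tsleep() for roughly two ticks.  On wakeup the
 * residual (a few ms at most) is recomputed from the absolute target
 * timestamp; since it is now less than one tick, the fine-grained path
 * runs: a one-shot systimer wakes the descheduled thread after the
 * remaining ~5000 us, or a simple uio_yield() suffices when fewer than
 * sleep_hard_us (default 100) microseconds remain.
 */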

/* ARGSUSED */
int
sys_nanosleep(struct nanosleep_args *uap)
{
        int error;
        struct timespec rqt;
        struct timespec rmt;

        error = copyin(uap->rqtp, &rqt, sizeof(rqt));
        if (error)
                return (error);

        error = nanosleep1(&rqt, &rmt);

        /*
         * copyout the residual if nanosleep was interrupted.
         */
        if (error && uap->rmtp) {
                int error2;

                error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
                if (error2)
                        error = error2;
        }
        return (error);
}

/* ARGSUSED */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
        struct timeval atv;
        int error = 0;

        if (uap->tp) {
                microtime(&atv);
                if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
                    sizeof (atv))))
                        return (error);
        }
        if (uap->tzp)
                error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
                    sizeof (tz));
        return (error);
}

/* ARGSUSED */
int
sys_settimeofday(struct settimeofday_args *uap)
{
        struct thread *td = curthread;
        struct timeval atv;
        struct timezone atz;
        int error;

        if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
                return (error);
        /* Verify all parameters before changing time. */
        if (uap->tv) {
                if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
                    sizeof(atv))))
                        return (error);
                if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
                        return (EINVAL);
        }
        if (uap->tzp &&
            (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
                return (error);
        if (uap->tv && (error = settime(&atv)))
                return (error);
        if (uap->tzp)
                tz = atz;
        return (0);
}

static void
kern_adjtime_common(void)
{
        if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
            (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
                ntp_tick_delta = ntp_delta;
        else if (ntp_delta > ntp_big_delta)
                ntp_tick_delta = 10 * ntp_default_tick_delta;
        else if (ntp_delta < -ntp_big_delta)
                ntp_tick_delta = -10 * ntp_default_tick_delta;
        else if (ntp_delta > 0)
                ntp_tick_delta = ntp_default_tick_delta;
        else
                ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        *odelta = ntp_delta;
        ntp_delta = delta;
        kern_adjtime_common();
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        *delta = ntp_delta;
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        ntp_delta += delta;
        kern_adjtime_common();
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
        int origcpu;

        if ((origcpu = mycpu->gd_cpuid) != 0)
                lwkt_setcpu_self(globaldata_find(0));

        crit_enter();
        ntp_tick_permanent = rate;
        crit_exit();

        if (origcpu != 0)
                lwkt_setcpu_self(globaldata_find(origcpu));
}
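
/*
 * Worked example for kern_adjtime_common() above (the numbers are
 * illustrative, not DragonFly defaults): with an outstanding ntp_delta
 * of +1 ms and a default per-tick step of 10 us, the clock is slewed by
 * ntp_default_tick_delta each tick and converges in 100 ticks; once the
 * remainder drops below one default step, the whole remainder is
 * applied in a single tick.  Corrections larger than ntp_big_delta slew
 * at ten times the default rate, and negative deltas mirror the same
 * thresholds with the opposite sign.
 */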

/* ARGSUSED */
int
sys_adjtime(struct adjtime_args *uap)
{
        struct thread *td = curthread;
        struct timeval atv;
        int64_t ndelta, odelta;
        int error;

        if ((error = priv_check(td, PRIV_ADJTIME)))
                return (error);
        if ((error =
            copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
                return (error);

        /*
         * Compute the total correction and the rate at which to apply it.
         * Round the adjustment down to a whole multiple of the per-tick
         * delta, so that after some number of incremental changes in
         * hardclock(), tickdelta will become zero, lest the correction
         * overshoot and start taking us away from the desired final time.
         */
        ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
        kern_adjtime(ndelta, &odelta);

        if (uap->olddelta) {
                atv.tv_sec = odelta / 1000000000;
                atv.tv_usec = odelta % 1000000000 / 1000;
                (void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
                    sizeof(struct timeval));
        }
        return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
        int64_t delta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &delta, sizeof(delta));
                if (error)
                        return (error);
                kern_reladjtime(delta);
        }

        if (req->oldptr)
                kern_get_ntp_delta(&delta);
        error = SYSCTL_OUT(req, &delta, sizeof(delta));
        return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
        int64_t delta, old_delta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &delta, sizeof(delta));
                if (error)
                        return (error);
                kern_adjtime(delta, &old_delta);
        }

        if (req->oldptr != NULL)
                kern_get_ntp_delta(&old_delta);
        error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
        return (error);
}
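
/*
 * Usage example (hypothetical values): writing 500000000 to the
 * kern.ntp.delta sysctl declared below requests a one-time +0.5 s
 * correction (root only); reading it back returns the previous
 * outstanding delta, also in nanoseconds.  From userland:
 *
 *      sysctl kern.ntp.delta=500000000
 */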

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
        int64_t freqdelta;
        int error;

        if (req->newptr != NULL) {
                if (priv_check(curthread, PRIV_ROOT))
                        return (EPERM);
                error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
                if (error)
                        return (error);

                freqdelta /= hz;
                kern_adjfreq(freqdelta);
        }

        if (req->oldptr != NULL)
                freqdelta = ntp_tick_permanent * hz;
        error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
        if (error)
                return (error);

        return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
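
/*
 * Worked example for the kern.ntp.permanent format (illustrative): a
 * permanent slew of +1 ppm is 1000 ns per second, which in the
 * fixed-point format above is written as 1000 << 32 = 4294967296000.
 * sysctl_adjfreq() divides that by hz to get the per-tick value that
 * kern_adjfreq() stores in ntp_tick_permanent; at hz = 100 this is
 * 10 << 32 per tick.
 */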

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct getitimer_args *uap)
{
        struct proc *p = curproc;
        struct timeval ctv;
        struct itimerval aitv;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);
        crit_enter();
        if (uap->which == ITIMER_REAL) {
                /*
                 * Convert from absolute to relative time in .it_value
                 * part of real time timer.  If time for real time timer
                 * has passed return 0, else return difference between
                 * current time and time for the timer to go off.
                 */
                aitv = p->p_realtimer;
                if (timevalisset(&aitv.it_value)) {
                        getmicrouptime(&ctv);
                        if (timevalcmp(&aitv.it_value, &ctv, <))
                                timevalclear(&aitv.it_value);
                        else
                                timevalsub(&aitv.it_value, &ctv);
                }
        } else {
                aitv = p->p_timer[uap->which];
        }
        crit_exit();
        return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
            sizeof (struct itimerval)));
}

/* ARGSUSED */
int
sys_setitimer(struct setitimer_args *uap)
{
        struct itimerval aitv;
        struct timeval ctv;
        struct itimerval *itvp;
        struct proc *p = curproc;
        int error;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);
        itvp = uap->itv;
        if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
            sizeof(struct itimerval))))
                return (error);
        if ((uap->itv = uap->oitv) &&
            (error = sys_getitimer((struct getitimer_args *)uap)))
                return (error);
        if (itvp == 0)
                return (0);
        if (itimerfix(&aitv.it_value))
                return (EINVAL);
        if (!timevalisset(&aitv.it_value))
                timevalclear(&aitv.it_interval);
        else if (itimerfix(&aitv.it_interval))
                return (EINVAL);
        crit_enter();
        if (uap->which == ITIMER_REAL) {
                if (timevalisset(&p->p_realtimer.it_value))
                        callout_stop(&p->p_ithandle);
                if (timevalisset(&aitv.it_value))
                        callout_reset(&p->p_ithandle,
                            tvtohz_high(&aitv.it_value), realitexpire, p);
                getmicrouptime(&ctv);
                timevaladd(&aitv.it_value, &ctv);
                p->p_realtimer = aitv;
        } else {
                p->p_timer[uap->which] = aitv;
        }
        crit_exit();
        return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
        struct proc *p;
        struct timeval ctv, ntv;

        p = (struct proc *)arg;
        ksignal(p, SIGALRM);
        if (!timevalisset(&p->p_realtimer.it_interval)) {
                timevalclear(&p->p_realtimer.it_value);
                return;
        }
        for (;;) {
                crit_enter();
                timevaladd(&p->p_realtimer.it_value,
                    &p->p_realtimer.it_interval);
                getmicrouptime(&ctv);
                if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
                        ntv = p->p_realtimer.it_value;
                        timevalsub(&ntv, &ctv);
                        callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
                            realitexpire, p);
                        crit_exit();
                        return;
                }
                crit_exit();
        }
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).
 */
int
itimerfix(struct timeval *tv)
{

        if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
            tv->tv_usec < 0 || tv->tv_usec >= 1000000)
                return (EINVAL);
        if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
                tv->tv_usec = tick;
        return (0);
}
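
/*
 * Example of the rounding above (assumes hz = 100, so tick = 10000):
 * an it_value of 1 us survives validation but is below the clock
 * resolution, so it is rounded up to one tick, i.e. tv_usec = 10000.
 * Values with tv_usec >= 1000000 or a negative field are rejected with
 * EINVAL rather than normalized.
 */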

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

        if (itp->it_value.tv_usec < usec) {
                if (itp->it_value.tv_sec == 0) {
                        /* expired, and already in next interval */
                        usec -= itp->it_value.tv_usec;
                        goto expire;
                }
                itp->it_value.tv_usec += 1000000;
                itp->it_value.tv_sec--;
        }
        itp->it_value.tv_usec -= usec;
        usec = 0;
        if (timevalisset(&itp->it_value))
                return (1);
        /* expired, exactly at end of interval */
expire:
        if (timevalisset(&itp->it_interval)) {
                itp->it_value = itp->it_interval;
                itp->it_value.tv_usec -= usec;
                if (itp->it_value.tv_usec < 0) {
                        itp->it_value.tv_usec += 1000000;
                        itp->it_value.tv_sec--;
                }
        } else
                itp->it_value.tv_usec = 0;      /* sec is already 0 */
        return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

        t1->tv_sec += t2->tv_sec;
        t1->tv_usec += t2->tv_usec;
        timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

        t1->tv_sec -= t2->tv_sec;
        t1->tv_usec -= t2->tv_usec;
        timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

        if (t1->tv_usec < 0) {
                t1->tv_sec--;
                t1->tv_usec += 1000000;
        }
        if (t1->tv_usec >= 1000000) {
                t1->tv_sec++;
                t1->tv_usec -= 1000000;
        }
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
        struct timeval tv, delta;
        int rv = 0;

        getmicrouptime(&tv);            /* NB: 10ms precision */
        delta = tv;
        timevalsub(&delta, lasttime);

        /*
         * The check for 0,0 is so that the message will be seen at least
         * once, even if interval is huge.
         */
        if (timevalcmp(&delta, mininterval, >=) ||
            (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
                *lasttime = tv;
                rv = 1;
        }

        return (rv);
}
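
/*
 * Typical usage sketch for ratecheck() (hypothetical caller, not from
 * the original sources): limit a diagnostic to at most one line every
 * 10 seconds, while guaranteeing the first event always logs because
 * lasttime starts at 0,0:
 *
 *      static struct timeval lasttime;
 *      static const struct timeval mininterval = { 10, 0 };
 *
 *      if (ratecheck(&lasttime, &mininterval))
 *              kprintf("widget%d: input overrun\n", unit);
 */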

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
        int now;

        /*
         * Reset the last time and counter if this is the first call
         * or more than a second has passed since the last update of
         * lasttime.
         */
        now = ticks;
        if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
                lasttime->tv_sec = now;
                *curpps = 1;
                return (maxpps != 0);
        } else {
                (*curpps)++;            /* NB: ignore potential overflow */
                return (maxpps < 0 || *curpps < maxpps);
        }
}
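
/*
 * Usage sketch for ppsratecheck() (hypothetical caller): allow at most
 * 100 events per second and drop the rest.  lasttime and curpps persist
 * across calls; a maxpps of -1 would disable the limit entirely.
 *
 *      static struct timeval lasttime;
 *      static int curpps;
 *
 *      if (!ppsratecheck(&lasttime, &curpps, 100))
 *              return;         -- over budget, drop this event
 */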