/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.22 2005/04/18 13:27:44 joerg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
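
/*
 * Illustrative example of the timeval normalization the helpers below
 * rely on: tv_usec is kept in [0, 1000000) by carrying into tv_sec, so
 * adding { 1, 900000 } and { 0, 200000 } first yields { 1, 1100000 },
 * which timevalfix() (defined later in this file) folds into
 * { 2, 100000 }.
 */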

static int	nanosleep1 (struct timespec *rqt,
		    struct timespec *rmt);
static int	settime (struct timeval *);
static void	timevalfix (struct timeval *);
static void	no_lease_updatetime (int);

static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static void
no_lease_updatetime(int deltat)
{
}

void (*lease_updatetime) (int) = no_lease_updatetime;

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He cannot go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	lease_updatetime(delta.tv_sec);
	crit_exit();
	resettodr();
	return (0);
}

/* ARGSUSED */
int
clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	default:
		return (EINVAL);
	}
}

/* ARGSUSED */
int
clock_settime(struct clock_settime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(td)) != 0)
		return (error);
	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
			return (error);
		if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
			return (EINVAL);
		/* XXX Don't convert nsec->usec and back */
		TIMESPEC_TO_TIMEVAL(&atv, &ats);
		error = settime(&atv);
		return (error);
	default:
		return (EINVAL);
	}
}

int
clock_getres(struct clock_getres_args *uap)
{
	struct timespec ts;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
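		/*
		 * Illustrative example (the frequency is hardware
		 * dependent): with cputimer_freq = 3579545 Hz,
		 * 1000000000 / 3579545 truncates to 279 and we report
		 * a resolution of 280 ns.  A timer faster than 1 GHz
		 * would otherwise report a resolution of 0.
		 */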
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / cputimer_freq + 1;
		return (copyout(&ts, uap->tp, sizeof(ts)));
	default:
		return (EINVAL);
	}
}

/*
 * nanosleep1()
 *
 * This is a general helper function for nanosleep() (aka sleep() aka
 * usleep()).
 *
 * If there is less than one tick's worth of time left and
 * we haven't done a yield, or the remaining microseconds is
 * ridiculously low, do a yield.  This avoids having
 * to deal with systimer overheads when the system is under
 * heavy loads.  If we have done a yield already then use
 * a systimer and an uninterruptible thread wait.
 *
 * If there is more than a tick's worth of time left,
 * calculate the baseline ticks and use an interruptible
 * tsleep, then handle the fine-grained delay on the next
 * loop.  This usually results in two sleeps occurring, a long one
 * and a short one.
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}

static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / tick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
				    td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_proc);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

static void nanosleep_done(void *arg);
static void nanosleep_copyout(union sysunion *sysun);

/* ARGSUSED */
int
nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	error = copyin(uap->rqtp, &smsleep->rqt, sizeof(smsleep->rqt));
	if (error)
		return (error);
	/*
	 * YYY clean this up to always use the callout, note that an abort
	 * implementation should record the residual in the async case.
	 */
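	/*
	 * For example, with hz = 100 (illustrative), a request of
	 * { 1, 500000000 } (1.5 seconds) converts below to
	 * 500000000 * 100 / 1000000000 + 1 * 100 = 150 ticks.
	 */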
	if (uap->sysmsg.lmsg.ms_flags & MSGF_ASYNC) {
		quad_t ticks;

		ticks = (quad_t)smsleep->rqt.tv_nsec * hz / 1000000000LL;
		if (smsleep->rqt.tv_sec)
			ticks += (quad_t)smsleep->rqt.tv_sec * hz;
		if (ticks <= 0) {
			if (ticks == 0)
				error = 0;
			else
				error = EINVAL;
		} else {
			uap->sysmsg.copyout = nanosleep_copyout;
			uap->sysmsg.lmsg.ms_flags &= ~MSGF_DONE;
			callout_init(&smsleep->timer);
			callout_reset(&smsleep->timer, ticks, nanosleep_done, uap);
			error = EASYNC;
		}
	} else {
		/*
		 * Old synchronous sleep code, copyout the residual if
		 * nanosleep was interrupted.
		 */
		error = nanosleep1(&smsleep->rqt, &smsleep->rmt);
		if (error && uap->rmtp)
			error = copyout(&smsleep->rmt, uap->rmtp, sizeof(smsleep->rmt));
	}
	return (error);
}

/*
 * Asynch completion for the nanosleep() syscall.  This function may be
 * called from any context and cannot legally access the originating
 * thread, proc, or its user space.
 *
 * YYY change the callout interface API so we can simply assign the replymsg
 * function to it directly.
 */
static void
nanosleep_done(void *arg)
{
	struct nanosleep_args *uap = arg;
	lwkt_msg_t msg = &uap->sysmsg.lmsg;

	lwkt_replymsg(msg, 0);
}

/*
 * Asynch return for the nanosleep() syscall, called in the context of the
 * originating thread when it pulls the message off the reply port.  This
 * function is responsible for any copyouts to userland.  Kernel threads
 * which do their own internal system calls will not usually call the return
 * function.
 */
static void
nanosleep_copyout(union sysunion *sysun)
{
	struct nanosleep_args *uap = &sysun->nanosleep;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	if (sysun->lmsg.ms_error && uap->rmtp) {
		sysun->lmsg.ms_error =
		    copyout(&smsleep->rmt, uap->rmtp, sizeof(smsleep->rmt));
	}
}

/* ARGSUSED */
int
gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

/* ARGSUSED */
int
settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(td)))
		return (error);
	/* Verify all parameters before changing time. */
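	/*
	 * For example, atv.tv_usec must lie in [0, 1000000); a value such
	 * as 1500000 is rejected below with EINVAL rather than being
	 * normalized into whole seconds.
	 */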
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}

static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0) {
		lwkt_setcpu_self(globaldata_find(0));
		cpu_mb1();
	}

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0) {
		lwkt_setcpu_self(globaldata_find(origcpu));
		cpu_mb1();
	}
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0) {
		lwkt_setcpu_self(globaldata_find(0));
		cpu_mb1();
	}

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0) {
		lwkt_setcpu_self(globaldata_find(origcpu));
		cpu_mb1();
	}
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0) {
		lwkt_setcpu_self(globaldata_find(0));
		cpu_mb1();
	}

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0) {
		lwkt_setcpu_self(globaldata_find(origcpu));
		cpu_mb1();
	}
}

/* ARGSUSED */
int
adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = suser(td)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
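	/*
	 * For example, delta = { 2, 500000 } (2.5 seconds) becomes
	 * ndelta = 2 * 1000000000 + 500000 * 1000 = 2500000000 ns.
	 */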
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}

static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->oldptr != NULL) {
		delta = 0;
		error = SYSCTL_OUT(req, &delta, sizeof(delta));
		if (error)
			return (error);
	}
	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}
	return (0);
}

static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->oldptr != NULL) {
		freqdelta = ntp_tick_permanent * hz;
		error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);
	}
	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_OPAQUE|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "LU", "permanent correction per second");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, delta, CTLFLAG_RD,
    &ntp_delta, sizeof(ntp_delta), "LU",
    "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "LU",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_OPAQUE|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "", "relative adjust for delta");

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
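/*
 * For example, a 10 second real-time timer armed at uptime 100 stores
 * 110 in p_realtimer.it_value; getitimer() below reports the remaining
 * time (110 minus the current uptime) rather than the stored absolute
 * value.
 */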
/* ARGSUSED */
int
getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_stats->p_timer[uap->which];
	}
	crit_exit();
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}

/* ARGSUSED */
int
setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_stats->p_timer[uap->which] = aitv;
	}
	crit_exit();
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
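/*
 * For example, if a 10 ms interval timer's callout fires 25 ms late,
 * the loop below advances it_value by it_interval until it passes the
 * current uptime, so the missed expirations collapse into this single
 * SIGALRM and the next one stays on the original schedule.
 */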
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		crit_enter();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			crit_exit();
			return;
		}
		crit_exit();
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
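/*
 * For example, with *lasttime zeroed and a mininterval of { 1, 0 },
 * the first call below returns 1 and stamps *lasttime; further calls
 * return 0 until at least one second of uptime has elapsed.
 */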
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;	/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
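
/*
 * Usage sketch for ppsratecheck() (illustrative; the caller-side state
 * names are hypothetical): limit an event, e.g. a log message, to at
 * most 5 per second.
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttime, &curpps, 5))
 *		printf("event occurred\n");
 */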