/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.33 2006/03/27 16:18:34 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
static int	nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");

static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time; he cannot set the clock
	 * back into the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}

/* ARGSUSED */
int
clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	default:
		return (EINVAL);
	}
}

/* ARGSUSED */
int
clock_settime(struct clock_settime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(td)) != 0)
		return (error);
	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
			return (error);
		if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
			return (EINVAL);
		/* XXX Don't convert nsec->usec and back */
		TIMESPEC_TO_TIMEVAL(&atv, &ats);
		error = settime(&atv);
		return (error);
	default:
		return (EINVAL);
	}
}

int
clock_getres(struct clock_getres_args *uap)
{
	struct timespec ts;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		return (copyout(&ts, uap->tp, sizeof(ts)));
	default:
		return (EINVAL);
	}
}
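/*
 * Worked example for the clock_getres() rounding above.  The figures
 * assume, purely for illustration, a cputimer running at 3579545 Hz
 * (an ACPI-timer-like frequency); the actual sys_cputimer->freq is
 * hardware dependent.
 *
 *	1000000000 / 3579545  = 279		(integer division truncates)
 *	reported resolution   = 279 + 1 = 280 ns
 *
 * For a hypothetical cputimer faster than 1 GHz the division would
 * truncate to 0, and the +1 keeps the reported resolution non-zero.
 */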
/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}

static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / tick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						      td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_proc);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

static void nanosleep_done(void *arg);
static void nanosleep_copyout(union sysunion *sysun);
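/*
 * Illustrative walk-through of the nanosleep1() loop above, assuming
 * hz = 100 (so tick = 10000 us):
 *
 *	request 25 ms:	tv = {0, 25000}, ticks = 2
 *			-> interruptible tsleep() for ~2 ticks, then the
 *			   loop recomputes tv from the residual and takes
 *			   the fine-grained path for the remainder.
 *	request 2.5 ms:	tv = {0, 2500}, ticks = 0
 *			-> fine-grained path: uio_yield() if the residual
 *			   is below sleep_hard_us, else a one-shot
 *			   systimer wakeup.
 *
 * These numbers are only a sketch; the exact split depends on hz and
 * on how far the tsleep() overshoots.
 */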
/* ARGSUSED */
int
nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	error = copyin(uap->rqtp, &smsleep->rqt, sizeof(smsleep->rqt));
	if (error)
		return (error);
	/*
	 * YYY clean this up to always use the callout, note that an abort
	 * implementation should record the residual in the async case.
	 */
	if (uap->sysmsg.lmsg.ms_flags & MSGF_ASYNC) {
		quad_t ticks;

		ticks = (quad_t)smsleep->rqt.tv_nsec * hz / 1000000000LL;
		if (smsleep->rqt.tv_sec)
			ticks += (quad_t)smsleep->rqt.tv_sec * hz;
		if (ticks <= 0) {
			if (ticks == 0)
				error = 0;
			else
				error = EINVAL;
		} else {
			uap->sysmsg.copyout = nanosleep_copyout;
			uap->sysmsg.lmsg.ms_flags &= ~MSGF_DONE;
			callout_init(&smsleep->timer);
			callout_reset(&smsleep->timer, ticks, nanosleep_done, uap);
			error = EASYNC;
		}
	} else {
		/*
		 * Old synchronous sleep code, copyout the residual if
		 * nanosleep was interrupted.
		 */
		error = nanosleep1(&smsleep->rqt, &smsleep->rmt);
		if (error && uap->rmtp)
			error = copyout(&smsleep->rmt, uap->rmtp, sizeof(smsleep->rmt));
	}
	return (error);
}

/*
 * Asynch completion for the nanosleep() syscall.  This function may be
 * called from any context and cannot legally access the originating
 * thread, proc, or its user space.
 *
 * YYY change the callout interface API so we can simply assign the replymsg
 * function to it directly.
 */
static void
nanosleep_done(void *arg)
{
	struct nanosleep_args *uap = arg;
	lwkt_msg_t msg = &uap->sysmsg.lmsg;

	lwkt_replymsg(msg, 0);
}

/*
 * Asynch return for the nanosleep() syscall, called in the context of the
 * originating thread when it pulls the message off the reply port.  This
 * function is responsible for any copyouts to userland.  Kernel threads
 * which do their own internal system calls will not usually call the return
 * function.
 */
static void
nanosleep_copyout(union sysunion *sysun)
{
	struct nanosleep_args *uap = &sysun->nanosleep;
	struct sysmsg_sleep *smsleep = &uap->sysmsg.sm.sleep;

	if (sysun->lmsg.ms_error && uap->rmtp) {
		sysun->lmsg.ms_error =
		    copyout(&smsleep->rmt, uap->rmtp, sizeof(smsleep->rmt));
	}
}

/* ARGSUSED */
int
gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

/* ARGSUSED */
int
settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(td)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}
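/*
 * Worked example of the MSGF_ASYNC tick conversion in nanosleep() above,
 * assuming hz = 100 for illustration:
 *
 *	rqt = { tv_sec = 1, tv_nsec = 500000000 }	(1.5 seconds)
 *	ticks  = 500000000 * 100 / 1000000000 = 50
 *	ticks += 1 * 100                      -> 150 total ticks
 *
 * The quad_t intermediate keeps tv_nsec * hz (up to ~1e11 here) from
 * overflowing a 32-bit integer.
 */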
static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}

void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}

/* ARGSUSED */
int
adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = suser(td)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}
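/*
 * Worked example of the adjtime() delta conversion above:
 *
 *	delta = { tv_sec = 2, tv_usec = 500000 }	(+2.5 seconds)
 *	ndelta = 2 * 1000000000 + 500000 * 1000 = 2500000000 ns
 *
 * and of the reverse conversion for olddelta, e.g. for an outstanding
 * correction of 2500000000 ns:
 *
 *	tv_sec  = 2500000000 / 1000000000        = 2
 *	tv_usec = 2500000000 % 1000000000 / 1000 = 500000
 */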
static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}

/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}

/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
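/*
 * Worked example of the kern.ntp.permanent scaling in sysctl_adjfreq()
 * above, assuming hz = 100 for illustration.  A request to speed the
 * clock up by 1000 ns per second arrives as 1000 << 32; dividing by hz
 * yields the per-tick value kern_adjfreq() stores:
 *
 *	(1000LL << 32) / 100 = 10 << 32		(10 ns per tick, <<32)
 *
 * The read path multiplies ntp_tick_permanent back by hz to report
 * nanoseconds per second, again shifted left 32.
 */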
/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.
 * It does not suffice, therefore, to reload the real timer .it_value
 * from the real time timer's .it_interval.  Rather, we compute the next
 * time in absolute time the timer should go off.
 */
/* ARGSUSED */
int
getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If the time for the real time
		 * timer has passed return 0, else return the difference
		 * between the current time and the time the timer is set
		 * to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	crit_exit();
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}

/* ARGSUSED */
int
setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/* Reuse uap to fetch the old value via getitimer() if requested */
	if ((uap->itv = uap->oitv) &&
	    (error = getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
	}
	crit_exit();
	return (0);
}
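/*
 * Illustrative timeline for the absolute it_value convention used by
 * ITIMER_REAL above (all times are getmicrouptime() uptimes; the
 * figures are hypothetical):
 *
 *	uptime 100s: setitimer() with it_value = {5, 0}
 *		     -> p_realtimer.it_value stored as {105, 0}
 *	uptime 102s: getitimer()
 *		     -> returns {105, 0} - {102, 0} = {3, 0} remaining
 *	uptime 105s: realitexpire() fires and, if it_interval is set,
 *		     advances it_value to the next absolute deadline.
 */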
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		crit_enter();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			crit_exit();
			return;
		}
		crit_exit();
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}
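/*
 * Worked example of the itimerdecr() carry logic above, decrementing
 * by one 1000 us tick with illustrative values:
 *
 *	it_value = {0, 500}, it_interval = {0, 10000}, usec = 1000
 *	tv_usec (500) < usec and tv_sec == 0 -> already in next interval,
 *	carry = 1000 - 500 = 500 us
 *	reload: it_value = {0, 10000} - carry = {0, 9500}
 *
 * The 500 us carry-over is what keeps a periodic timer from drifting
 * later by a fraction of a tick on every expiry.
 */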
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if the interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
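/*
 * Example usage of ppsratecheck(), a sketch only: the static state and
 * the 10 pps limit are hypothetical, not part of this file.
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttv, &curpps, 10) == 0)
 *		return;			(over 10 pps; drop the packet)
 *
 * ratecheck() is used the same way but takes a minimum struct timeval
 * interval instead of an events-per-second limit, which suits
 * rate-limited diagnostics such as once-per-minute log messages.
 */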