/*	$NetBSD: kern_time.c,v 1.175 2012/10/02 01:44:28 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.175 2012/10/02 01:44:28 christos Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}
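/*
 * The boottime adjustment in settime1() keeps boottime consistent with
 * the stepped clock: tc_setclock() moves only the realtime clock, so
 * boottime absorbs the same delta and "realtime = boottime + uptime"
 * continues to hold.  Illustrative userland sketch of stepping the
 * clock (not part of this file; needs <time.h> and <err.h>, requires
 * appropriate privilege, and the timestamp is an arbitrary example):
 *
 *	struct timespec ts = { .tv_sec = 1349138668, .tv_nsec = 0 };
 *
 *	if (clock_settime(CLOCK_REALTIME, &ts) == -1)
 *		err(1, "clock_settime");
 */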
/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}

int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error = 0;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
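/*
 * The resolution reported above is one timecounter tick, in whole
 * nanoseconds.  For example (illustrative figures): a 1 MHz timecounter
 * yields 1000000000 / 1000000 = 1000 ns, while any counter faster than
 * 1 GHz is reported as 1 ns.  A userland caller might query it like
 * this (sketch; needs <stdio.h> and <time.h>):
 *
 *	struct timespec res;
 *
 *	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
 *		printf("resolution: %ld ns\n", res.tv_nsec);
 */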
/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	/*
	 * A clock we cannot read is a clock we cannot sleep on;
	 * report ENOTSUP rather than the raw clock_gettime1() error.
	 */
	if ((error = clock_gettime1(clock_id, &rmtstart)) != 0)
		return ENOTSUP;

	if (flags & TIMER_ABSTIME)
		timespecsub(rqt, &rmtstart, rqt);

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
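/*
 * Absolute sleeps are handled above by converting the deadline to a
 * relative interval once, up front; the remaining-time loop then guards
 * against kpause() waking a tick early.  Illustrative userland use
 * (sketch; needs <time.h>), sleeping until the next whole second on
 * CLOCK_MONOTONIC; note that clock_nanosleep() returns the error number
 * directly instead of setting errno:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 1;
 *	deadline.tv_nsec = 0;
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *	    &deadline, NULL) == EINTR)
 *		continue;
 */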
/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}
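/*
 * time_adjtime is kept in signed microseconds.  The normalization in
 * adjtime1() converts it back to a canonical timeval with
 * 0 <= tv_usec < 1000000; e.g. a pending slew of -1.5 s
 * (time_adjtime == -1500000) reads back as
 * { tv_sec = -2, tv_usec = 500000 }.  Illustrative userland use
 * (sketch; needs <sys/time.h> and <err.h>), slewing the clock back by
 * half a second:
 *
 *	struct timeval delta = { 0, -500000 }, old;
 *
 *	if (adjtime(&delta, &old) == -1)
 *		err(1, "adjtime");
 */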
/*
 * Interval timer support.  Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the timer's
 * .it_value from its .it_interval.  Rather, we compute the next time
 * in absolute time the timer should go off.
 */

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			 pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/*
	 * Find a free timer slot, skipping those reserved for setitimer().
	 * Slots 0-3 belong to the BSD timers (including ITIMER_MONOTONIC),
	 * so the scan starts at 4.
	 */
	mutex_spin_enter(&timer_lock);
	for (timerid = 4; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}
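/*
 * Illustrative userland use of the POSIX timer API implemented above
 * (sketch only, not part of this file; needs <signal.h>, <string.h>,
 * <time.h> and <err.h>; the sival_int value 42 is arbitrary):
 *
 *	struct sigevent ev;
 *	timer_t tid;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGALRM;
 *	ev.sigev_value.sival_int = 42;
 *	if (timer_create(CLOCK_MONOTONIC, &ev, &tid) == -1)
 *		err(1, "timer_create");
 */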
/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}
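/*
 * Worked example of the delta-list representation used above
 * (illustrative values): with virtual timers due in 10, 25 and 60 ms
 * of process virtual time, pts_virtual stores the deltas 10, 15, 35.
 * Inserting a timer due at 30 ms walks past the 10 and 15 entries
 * (leaving 30 - 10 - 15 = 5), is linked in there, and the following
 * delta shrinks from 35 to 30, giving 10, 15, 5, 30.  timer_gettime()
 * reverses this by summing the deltas up to the timer of interest.
 */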
768 */ 769 if (timespecisset(&aits->it_value)) { 770 if (pt->pt_type == CLOCK_REALTIME) { 771 getnanotime(&now); 772 } else { /* CLOCK_MONOTONIC */ 773 getnanouptime(&now); 774 } 775 if (timespeccmp(&aits->it_value, &now, <)) 776 timespecclear(&aits->it_value); 777 else 778 timespecsub(&aits->it_value, &now, 779 &aits->it_value); 780 } 781 } else if (pt->pt_active) { 782 if (pt->pt_type == CLOCK_VIRTUAL) 783 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual); 784 else 785 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof); 786 for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list)) 787 timespecadd(&aits->it_value, 788 &ptn->pt_time.it_value, &aits->it_value); 789 KASSERT(ptn != NULL); /* pt should be findable on the list */ 790 } else 791 timespecclear(&aits->it_value); 792 } 793 794 795 796 /* Set and arm a POSIX realtime timer */ 797 int 798 sys___timer_settime50(struct lwp *l, 799 const struct sys___timer_settime50_args *uap, 800 register_t *retval) 801 { 802 /* { 803 syscallarg(timer_t) timerid; 804 syscallarg(int) flags; 805 syscallarg(const struct itimerspec *) value; 806 syscallarg(struct itimerspec *) ovalue; 807 } */ 808 int error; 809 struct itimerspec value, ovalue, *ovp = NULL; 810 811 if ((error = copyin(SCARG(uap, value), &value, 812 sizeof(struct itimerspec))) != 0) 813 return (error); 814 815 if (SCARG(uap, ovalue)) 816 ovp = &ovalue; 817 818 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp, 819 SCARG(uap, flags), l->l_proc)) != 0) 820 return error; 821 822 if (ovp) 823 return copyout(&ovalue, SCARG(uap, ovalue), 824 sizeof(struct itimerspec)); 825 return 0; 826 } 827 828 int 829 dotimer_settime(int timerid, struct itimerspec *value, 830 struct itimerspec *ovalue, int flags, struct proc *p) 831 { 832 struct timespec now; 833 struct itimerspec val, oval; 834 struct ptimers *pts; 835 struct ptimer *pt; 836 int error; 837 838 pts = p->p_timers; 839 840 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 841 return EINVAL; 842 val = *value; 843 if ((error = itimespecfix(&val.it_value)) != 0 || 844 (error = itimespecfix(&val.it_interval)) != 0) 845 return error; 846 847 mutex_spin_enter(&timer_lock); 848 if ((pt = pts->pts_timers[timerid]) == NULL) { 849 mutex_spin_exit(&timer_lock); 850 return EINVAL; 851 } 852 853 oval = pt->pt_time; 854 pt->pt_time = val; 855 856 /* 857 * If we've been passed a relative time for a realtime timer, 858 * convert it to absolute; if an absolute time for a virtual 859 * timer, convert it to relative and make sure we don't set it 860 * to zero, which would cancel the timer, or let it go 861 * negative, which would confuse the comparison tests. 862 */ 863 if (timespecisset(&pt->pt_time.it_value)) { 864 if (!CLOCK_VIRTUAL_P(pt->pt_type)) { 865 if ((flags & TIMER_ABSTIME) == 0) { 866 if (pt->pt_type == CLOCK_REALTIME) { 867 getnanotime(&now); 868 } else { /* CLOCK_MONOTONIC */ 869 getnanouptime(&now); 870 } 871 timespecadd(&pt->pt_time.it_value, &now, 872 &pt->pt_time.it_value); 873 } 874 } else { 875 if ((flags & TIMER_ABSTIME) != 0) { 876 getnanotime(&now); 877 timespecsub(&pt->pt_time.it_value, &now, 878 &pt->pt_time.it_value); 879 if (!timespecisset(&pt->pt_time.it_value) || 880 pt->pt_time.it_value.tv_sec < 0) { 881 pt->pt_time.it_value.tv_sec = 0; 882 pt->pt_time.it_value.tv_nsec = 1; 883 } 884 } 885 } 886 } 887 888 timer_settime(pt); 889 mutex_spin_exit(&timer_lock); 890 891 if (ovalue) 892 *ovalue = oval; 893 894 return (0); 895 } 896 897 /* Return the time remaining until a POSIX timer fires. 
/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, pt->pt_type == CLOCK_MONOTONIC ?
	    tshztoup(&pt->pt_time.it_value) : tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
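/*
 * Worked example of the catch-up path above (illustrative figures, all
 * in nanoseconds): with it_value last scheduled at 100, an interval of
 * 50, and the callout actually running at now = 230, the timer is 130
 * late.  pt_overruns is credited with (230 - 100) / 50 = 2 missed
 * expirations, and the next expiry becomes
 * 230 + ((230 - 100 + 49) % 50) = 259, i.e. within one interval past
 * "now", rather than replaying the missed expirations back-to-back.
 */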
/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		/*
		 * ITIMER_MONOTONIC needs the callout as well; only the
		 * virtual timers live on the delta lists.
		 */
		if (!CLOCK_VIRTUAL_P(pt->pt_type))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
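/*
 * Illustrative userland use of the BSD interface above (sketch; needs
 * <sys/time.h> and <err.h>): a profiling timer firing SIGPROF every
 * 10 ms (10000 us).
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = itv.it_interval.tv_sec = 0;
 *	itv.it_value.tv_usec = itv.it_interval.tv_usec = 10000;
 *	if (setitimer(ITIMER_PROF, &itv, NULL) == -1)
 *		err(1, "setitimer");
 */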
/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}
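/*
 * timers_alloc() and the "spare" logic in dosetitimer() follow the same
 * optimistic pattern: allocate while unlocked, because pool_get() with
 * PR_WAITOK may sleep and sleeping is not allowed under the spin lock,
 * then take timer_lock, re-check, and let the loser of any race free
 * its copy.  The general shape (sketch; names are illustrative):
 *
 *	new = pool_get(&pool, PR_WAITOK);	// may sleep
 *	mutex_spin_enter(&timer_lock);
 *	if (shared == NULL) {
 *		shared = new;			// we won the race
 *		new = NULL;
 *	}
 *	mutex_spin_exit(&timer_lock);
 *	if (new != NULL)
 *		pool_put(&pool, new);		// we lost; discard
 */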
/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		/* Slots 0-3 are the BSD timers; POSIX timers start at 4. */
		i = 4;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}
/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}
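/*
 * Worked example of the anti-drift reload in itimerdecr() (illustrative
 * figures, hz = 100 so each tick is 10000000 ns): a virtual timer with
 * it_value = 4 ms and it_interval = 25 ms is decremented by a 10 ms
 * tick.  It expires 6 ms into the tick, so nsec carries 6000000 into
 * the reload and it_value becomes 25000000 - 6000000 = 19 ms, keeping
 * expirations on the 25 ms grid instead of drifting by up to a tick
 * per period.
 */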
/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}
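/*
 * Sizing the margin above (illustrative arithmetic, assuming 64-bit
 * time_t): the cutoff is LLONG_MAX - 365*24*60*60, i.e. LLONG_MAX -
 * 31536000 seconds.  At a slew rate on the order of 500 us per second
 * of real time (an assumed conventional figure; the precise limit
 * belongs to the NTP code), adjtime(2) would need roughly
 * 31536000 / 0.0005 seconds - about 2000 years - to cross the last
 * year before tv_sec wraps, which is what makes the margin safe.
 */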