1 /* $NetBSD: kern_time.c,v 1.168 2011/04/08 10:35:37 yamt Exp $ */ 2 3 /*- 4 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Christopher G. Demetriou, and by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 1982, 1986, 1989, 1993 34 * The Regents of the University of California. All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. Neither the name of the University nor the names of its contributors 45 * may be used to endorse or promote products derived from this software 46 * without specific prior written permission. 47 * 48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 58 * SUCH DAMAGE. 59 * 60 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95 61 */ 62 63 #include <sys/cdefs.h> 64 __KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.168 2011/04/08 10:35:37 yamt Exp $"); 65 66 #include <sys/param.h> 67 #include <sys/resourcevar.h> 68 #include <sys/kernel.h> 69 #include <sys/systm.h> 70 #include <sys/proc.h> 71 #include <sys/vnode.h> 72 #include <sys/signalvar.h> 73 #include <sys/syslog.h> 74 #include <sys/timetc.h> 75 #include <sys/timex.h> 76 #include <sys/kauth.h> 77 #include <sys/mount.h> 78 #include <sys/sa.h> 79 #include <sys/savar.h> 80 #include <sys/syscallargs.h> 81 #include <sys/cpu.h> 82 83 #include <uvm/uvm_extern.h> 84 85 #include "opt_sa.h" 86 87 static void timer_intr(void *); 88 static void itimerfire(struct ptimer *); 89 static void itimerfree(struct ptimers *, int); 90 91 kmutex_t timer_lock; 92 93 static void *timer_sih; 94 static TAILQ_HEAD(, ptimer) timer_queue; 95 96 struct pool ptimer_pool, ptimers_pool; 97 98 #define CLOCK_VIRTUAL_P(clockid) \ 99 ((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF) 100 101 CTASSERT(ITIMER_REAL == CLOCK_REALTIME); 102 CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL); 103 CTASSERT(ITIMER_PROF == CLOCK_PROF); 104 105 /* 106 * Initialize timekeeping. 107 */ 108 void 109 time_init(void) 110 { 111 112 pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl", 113 &pool_allocator_nointr, IPL_NONE); 114 pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl", 115 &pool_allocator_nointr, IPL_NONE); 116 } 117 118 void 119 time_init2(void) 120 { 121 122 TAILQ_INIT(&timer_queue); 123 mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED); 124 timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE, 125 timer_intr, NULL); 126 } 127 128 /* Time of day and interval timer support. 129 * 130 * These routines provide the kernel entry points to get and set 131 * the time-of-day and per-process interval timers. Subroutines 132 * here provide support for adding and subtracting timeval structures 133 * and decrementing interval timers, optionally reloading the interval 134 * timers when they expire. 135 */ 136 137 /* This function is used by clock_settime and settimeofday */ 138 static int 139 settime1(struct proc *p, const struct timespec *ts, bool check_kauth) 140 { 141 struct timespec delta, now; 142 int s; 143 144 /* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */ 145 s = splclock(); 146 nanotime(&now); 147 timespecsub(ts, &now, &delta); 148 149 if (check_kauth && kauth_authorize_system(kauth_cred_get(), 150 KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts), 151 &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) { 152 splx(s); 153 return (EPERM); 154 } 155 156 #ifdef notyet 157 if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */ 158 splx(s); 159 return (EPERM); 160 } 161 #endif 162 163 tc_setclock(ts); 164 165 timespecadd(&boottime, &delta, &boottime); 166 167 resettodr(); 168 splx(s); 169 170 return (0); 171 } 172 173 int 174 settime(struct proc *p, struct timespec *ts) 175 { 176 return (settime1(p, ts, true)); 177 } 178 179 /* ARGSUSED */ 180 int 181 sys___clock_gettime50(struct lwp *l, 182 const struct sys___clock_gettime50_args *uap, register_t *retval) 183 { 184 /* { 185 syscallarg(clockid_t) clock_id; 186 syscallarg(struct timespec *) tp; 187 } */ 188 int error; 189 struct timespec ats; 190 191 error = clock_gettime1(SCARG(uap, clock_id), &ats); 192 if (error != 0) 193 return error; 194 195 return copyout(&ats, SCARG(uap, tp), sizeof(ats)); 196 } 197 198 int 199 clock_gettime1(clockid_t clock_id, struct timespec *ts) 200 { 201 202 switch (clock_id) { 203 case CLOCK_REALTIME: 204 nanotime(ts); 205 break; 206 case CLOCK_MONOTONIC: 207 nanouptime(ts); 208 break; 209 default: 210 return EINVAL; 211 } 212 213 return 0; 214 } 215 216 /* ARGSUSED */ 217 int 218 sys___clock_settime50(struct lwp *l, 219 const struct sys___clock_settime50_args *uap, register_t *retval) 220 { 221 /* { 222 syscallarg(clockid_t) clock_id; 223 syscallarg(const struct timespec *) tp; 224 } */ 225 int error; 226 struct timespec ats; 227 228 if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0) 229 return error; 230 231 return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true); 232 } 233 234 235 int 236 clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp, 237 bool check_kauth) 238 { 239 int error; 240 241 switch (clock_id) { 242 case CLOCK_REALTIME: 243 if ((error = settime1(p, tp, check_kauth)) != 0) 244 return (error); 245 break; 246 case CLOCK_MONOTONIC: 247 return (EINVAL); /* read-only clock */ 248 default: 249 return (EINVAL); 250 } 251 252 return 0; 253 } 254 255 int 256 sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap, 257 register_t *retval) 258 { 259 /* { 260 syscallarg(clockid_t) clock_id; 261 syscallarg(struct timespec *) tp; 262 } */ 263 struct timespec ts; 264 int error = 0; 265 266 if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0) 267 return error; 268 269 if (SCARG(uap, tp)) 270 error = copyout(&ts, SCARG(uap, tp), sizeof(ts)); 271 272 return error; 273 } 274 275 int 276 clock_getres1(clockid_t clock_id, struct timespec *ts) 277 { 278 279 switch (clock_id) { 280 case CLOCK_REALTIME: 281 case CLOCK_MONOTONIC: 282 ts->tv_sec = 0; 283 if (tc_getfrequency() > 1000000000) 284 ts->tv_nsec = 1; 285 else 286 ts->tv_nsec = 1000000000 / tc_getfrequency(); 287 break; 288 default: 289 return EINVAL; 290 } 291 292 return 0; 293 } 294 295 /* ARGSUSED */ 296 int 297 sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap, 298 register_t *retval) 299 { 300 /* { 301 syscallarg(struct timespec *) rqtp; 302 syscallarg(struct timespec *) rmtp; 303 } */ 304 struct timespec rmt, rqt; 305 int error, error1; 306 307 error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec)); 308 if (error) 309 return (error); 310 311 error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL); 312 if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR)) 313 return error; 314 315 error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt)); 316 return error1 ? error1 : error; 317 } 318 319 int 320 nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt) 321 { 322 struct timespec rmtstart; 323 int error, timo; 324 325 if ((error = itimespecfix(rqt)) != 0) 326 return error; 327 328 timo = tstohz(rqt); 329 /* 330 * Avoid inadvertantly sleeping forever 331 */ 332 if (timo == 0) 333 timo = 1; 334 getnanouptime(&rmtstart); 335 again: 336 error = kpause("nanoslp", true, timo, NULL); 337 if (rmt != NULL || error == 0) { 338 struct timespec rmtend; 339 struct timespec t0; 340 struct timespec *t; 341 342 getnanouptime(&rmtend); 343 t = (rmt != NULL) ? rmt : &t0; 344 timespecsub(&rmtend, &rmtstart, t); 345 timespecsub(rqt, t, t); 346 if (t->tv_sec < 0) 347 timespecclear(t); 348 if (error == 0) { 349 timo = tstohz(t); 350 if (timo > 0) 351 goto again; 352 } 353 } 354 355 if (error == ERESTART) 356 error = EINTR; 357 if (error == EWOULDBLOCK) 358 error = 0; 359 360 return error; 361 } 362 363 /* ARGSUSED */ 364 int 365 sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap, 366 register_t *retval) 367 { 368 /* { 369 syscallarg(struct timeval *) tp; 370 syscallarg(void *) tzp; really "struct timezone *"; 371 } */ 372 struct timeval atv; 373 int error = 0; 374 struct timezone tzfake; 375 376 if (SCARG(uap, tp)) { 377 microtime(&atv); 378 error = copyout(&atv, SCARG(uap, tp), sizeof(atv)); 379 if (error) 380 return (error); 381 } 382 if (SCARG(uap, tzp)) { 383 /* 384 * NetBSD has no kernel notion of time zone, so we just 385 * fake up a timezone struct and return it if demanded. 386 */ 387 tzfake.tz_minuteswest = 0; 388 tzfake.tz_dsttime = 0; 389 error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake)); 390 } 391 return (error); 392 } 393 394 /* ARGSUSED */ 395 int 396 sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap, 397 register_t *retval) 398 { 399 /* { 400 syscallarg(const struct timeval *) tv; 401 syscallarg(const void *) tzp; really "const struct timezone *"; 402 } */ 403 404 return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true); 405 } 406 407 int 408 settimeofday1(const struct timeval *utv, bool userspace, 409 const void *utzp, struct lwp *l, bool check_kauth) 410 { 411 struct timeval atv; 412 struct timespec ts; 413 int error; 414 415 /* Verify all parameters before changing time. */ 416 417 /* 418 * NetBSD has no kernel notion of time zone, and only an 419 * obsolete program would try to set it, so we log a warning. 420 */ 421 if (utzp) 422 log(LOG_WARNING, "pid %d attempted to set the " 423 "(obsolete) kernel time zone\n", l->l_proc->p_pid); 424 425 if (utv == NULL) 426 return 0; 427 428 if (userspace) { 429 if ((error = copyin(utv, &atv, sizeof(atv))) != 0) 430 return error; 431 utv = &atv; 432 } 433 434 TIMEVAL_TO_TIMESPEC(utv, &ts); 435 return settime1(l->l_proc, &ts, check_kauth); 436 } 437 438 int time_adjusted; /* set if an adjustment is made */ 439 440 /* ARGSUSED */ 441 int 442 sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap, 443 register_t *retval) 444 { 445 /* { 446 syscallarg(const struct timeval *) delta; 447 syscallarg(struct timeval *) olddelta; 448 } */ 449 int error = 0; 450 struct timeval atv, oldatv; 451 452 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME, 453 KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0) 454 return error; 455 456 if (SCARG(uap, delta)) { 457 error = copyin(SCARG(uap, delta), &atv, 458 sizeof(*SCARG(uap, delta))); 459 if (error) 460 return (error); 461 } 462 adjtime1(SCARG(uap, delta) ? &atv : NULL, 463 SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc); 464 if (SCARG(uap, olddelta)) 465 error = copyout(&oldatv, SCARG(uap, olddelta), 466 sizeof(*SCARG(uap, olddelta))); 467 return error; 468 } 469 470 void 471 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p) 472 { 473 extern int64_t time_adjtime; /* in kern_ntptime.c */ 474 475 if (olddelta) { 476 mutex_spin_enter(&timecounter_lock); 477 olddelta->tv_sec = time_adjtime / 1000000; 478 olddelta->tv_usec = time_adjtime % 1000000; 479 if (olddelta->tv_usec < 0) { 480 olddelta->tv_usec += 1000000; 481 olddelta->tv_sec--; 482 } 483 mutex_spin_exit(&timecounter_lock); 484 } 485 486 if (delta) { 487 mutex_spin_enter(&timecounter_lock); 488 time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec; 489 490 if (time_adjtime) { 491 /* We need to save the system time during shutdown */ 492 time_adjusted |= 1; 493 } 494 mutex_spin_exit(&timecounter_lock); 495 } 496 } 497 498 /* 499 * Interval timer support. Both the BSD getitimer() family and the POSIX 500 * timer_*() family of routines are supported. 501 * 502 * All timers are kept in an array pointed to by p_timers, which is 503 * allocated on demand - many processes don't use timers at all. The 504 * first three elements in this array are reserved for the BSD timers: 505 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element 506 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create() 507 * syscall. 508 * 509 * Realtime timers are kept in the ptimer structure as an absolute 510 * time; virtual time timers are kept as a linked list of deltas. 511 * Virtual time timers are processed in the hardclock() routine of 512 * kern_clock.c. The real time timer is processed by a callout 513 * routine, called from the softclock() routine. Since a callout may 514 * be delayed in real time due to interrupt processing in the system, 515 * it is possible for the real time timeout routine (realtimeexpire, 516 * given below), to be delayed in real time past when it is supposed 517 * to occur. It does not suffice, therefore, to reload the real timer 518 * .it_value from the real time timers .it_interval. Rather, we 519 * compute the next time in absolute time the timer should go off. */ 520 521 /* Allocate a POSIX realtime timer. */ 522 int 523 sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap, 524 register_t *retval) 525 { 526 /* { 527 syscallarg(clockid_t) clock_id; 528 syscallarg(struct sigevent *) evp; 529 syscallarg(timer_t *) timerid; 530 } */ 531 532 return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id), 533 SCARG(uap, evp), copyin, l); 534 } 535 536 int 537 timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp, 538 copyin_t fetch_event, struct lwp *l) 539 { 540 int error; 541 timer_t timerid; 542 struct ptimers *pts; 543 struct ptimer *pt; 544 struct proc *p; 545 546 p = l->l_proc; 547 548 if (id != CLOCK_REALTIME && id != CLOCK_VIRTUAL && 549 id != CLOCK_PROF && id != CLOCK_MONOTONIC) 550 return (EINVAL); 551 552 if ((pts = p->p_timers) == NULL) 553 pts = timers_alloc(p); 554 555 pt = pool_get(&ptimer_pool, PR_WAITOK); 556 if (evp != NULL) { 557 if (((error = 558 (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) || 559 ((pt->pt_ev.sigev_notify < SIGEV_NONE) || 560 (pt->pt_ev.sigev_notify > SIGEV_SA)) || 561 (pt->pt_ev.sigev_notify == SIGEV_SIGNAL && 562 (pt->pt_ev.sigev_signo <= 0 || 563 pt->pt_ev.sigev_signo >= NSIG))) { 564 pool_put(&ptimer_pool, pt); 565 return (error ? error : EINVAL); 566 } 567 } 568 569 /* Find a free timer slot, skipping those reserved for setitimer(). */ 570 mutex_spin_enter(&timer_lock); 571 for (timerid = 3; timerid < TIMER_MAX; timerid++) 572 if (pts->pts_timers[timerid] == NULL) 573 break; 574 if (timerid == TIMER_MAX) { 575 mutex_spin_exit(&timer_lock); 576 pool_put(&ptimer_pool, pt); 577 return EAGAIN; 578 } 579 if (evp == NULL) { 580 pt->pt_ev.sigev_notify = SIGEV_SIGNAL; 581 switch (id) { 582 case CLOCK_REALTIME: 583 case CLOCK_MONOTONIC: 584 pt->pt_ev.sigev_signo = SIGALRM; 585 break; 586 case CLOCK_VIRTUAL: 587 pt->pt_ev.sigev_signo = SIGVTALRM; 588 break; 589 case CLOCK_PROF: 590 pt->pt_ev.sigev_signo = SIGPROF; 591 break; 592 } 593 pt->pt_ev.sigev_value.sival_int = timerid; 594 } 595 pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo; 596 pt->pt_info.ksi_errno = 0; 597 pt->pt_info.ksi_code = 0; 598 pt->pt_info.ksi_pid = p->p_pid; 599 pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred); 600 pt->pt_info.ksi_value = pt->pt_ev.sigev_value; 601 pt->pt_type = id; 602 pt->pt_proc = p; 603 pt->pt_overruns = 0; 604 pt->pt_poverruns = 0; 605 pt->pt_entry = timerid; 606 pt->pt_queued = false; 607 timespecclear(&pt->pt_time.it_value); 608 if (!CLOCK_VIRTUAL_P(id)) 609 callout_init(&pt->pt_ch, CALLOUT_MPSAFE); 610 else 611 pt->pt_active = 0; 612 613 pts->pts_timers[timerid] = pt; 614 mutex_spin_exit(&timer_lock); 615 616 return copyout(&timerid, tid, sizeof(timerid)); 617 } 618 619 /* Delete a POSIX realtime timer */ 620 int 621 sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap, 622 register_t *retval) 623 { 624 /* { 625 syscallarg(timer_t) timerid; 626 } */ 627 struct proc *p = l->l_proc; 628 timer_t timerid; 629 struct ptimers *pts; 630 struct ptimer *pt, *ptn; 631 632 timerid = SCARG(uap, timerid); 633 pts = p->p_timers; 634 635 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 636 return (EINVAL); 637 638 mutex_spin_enter(&timer_lock); 639 if ((pt = pts->pts_timers[timerid]) == NULL) { 640 mutex_spin_exit(&timer_lock); 641 return (EINVAL); 642 } 643 if (CLOCK_VIRTUAL_P(pt->pt_type)) { 644 if (pt->pt_active) { 645 ptn = LIST_NEXT(pt, pt_list); 646 LIST_REMOVE(pt, pt_list); 647 for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list)) 648 timespecadd(&pt->pt_time.it_value, 649 &ptn->pt_time.it_value, 650 &ptn->pt_time.it_value); 651 pt->pt_active = 0; 652 } 653 } 654 itimerfree(pts, timerid); 655 656 return (0); 657 } 658 659 /* 660 * Set up the given timer. The value in pt->pt_time.it_value is taken 661 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and 662 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers. 663 */ 664 void 665 timer_settime(struct ptimer *pt) 666 { 667 struct ptimer *ptn, *pptn; 668 struct ptlist *ptl; 669 670 KASSERT(mutex_owned(&timer_lock)); 671 672 if (!CLOCK_VIRTUAL_P(pt->pt_type)) { 673 callout_halt(&pt->pt_ch, &timer_lock); 674 if (timespecisset(&pt->pt_time.it_value)) { 675 /* 676 * Don't need to check tshzto() return value, here. 677 * callout_reset() does it for us. 678 */ 679 callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value), 680 realtimerexpire, pt); 681 } 682 } else { 683 if (pt->pt_active) { 684 ptn = LIST_NEXT(pt, pt_list); 685 LIST_REMOVE(pt, pt_list); 686 for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list)) 687 timespecadd(&pt->pt_time.it_value, 688 &ptn->pt_time.it_value, 689 &ptn->pt_time.it_value); 690 } 691 if (timespecisset(&pt->pt_time.it_value)) { 692 if (pt->pt_type == CLOCK_VIRTUAL) 693 ptl = &pt->pt_proc->p_timers->pts_virtual; 694 else 695 ptl = &pt->pt_proc->p_timers->pts_prof; 696 697 for (ptn = LIST_FIRST(ptl), pptn = NULL; 698 ptn && timespeccmp(&pt->pt_time.it_value, 699 &ptn->pt_time.it_value, >); 700 pptn = ptn, ptn = LIST_NEXT(ptn, pt_list)) 701 timespecsub(&pt->pt_time.it_value, 702 &ptn->pt_time.it_value, 703 &pt->pt_time.it_value); 704 705 if (pptn) 706 LIST_INSERT_AFTER(pptn, pt, pt_list); 707 else 708 LIST_INSERT_HEAD(ptl, pt, pt_list); 709 710 for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list)) 711 timespecsub(&ptn->pt_time.it_value, 712 &pt->pt_time.it_value, 713 &ptn->pt_time.it_value); 714 715 pt->pt_active = 1; 716 } else 717 pt->pt_active = 0; 718 } 719 } 720 721 void 722 timer_gettime(struct ptimer *pt, struct itimerspec *aits) 723 { 724 struct timespec now; 725 struct ptimer *ptn; 726 727 KASSERT(mutex_owned(&timer_lock)); 728 729 *aits = pt->pt_time; 730 if (!CLOCK_VIRTUAL_P(pt->pt_type)) { 731 /* 732 * Convert from absolute to relative time in .it_value 733 * part of real time timer. If time for real time 734 * timer has passed return 0, else return difference 735 * between current time and time for the timer to go 736 * off. 737 */ 738 if (timespecisset(&aits->it_value)) { 739 if (pt->pt_type == CLOCK_REALTIME) { 740 getnanotime(&now); 741 } else { /* CLOCK_MONOTONIC */ 742 getnanouptime(&now); 743 } 744 if (timespeccmp(&aits->it_value, &now, <)) 745 timespecclear(&aits->it_value); 746 else 747 timespecsub(&aits->it_value, &now, 748 &aits->it_value); 749 } 750 } else if (pt->pt_active) { 751 if (pt->pt_type == CLOCK_VIRTUAL) 752 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual); 753 else 754 ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof); 755 for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list)) 756 timespecadd(&aits->it_value, 757 &ptn->pt_time.it_value, &aits->it_value); 758 KASSERT(ptn != NULL); /* pt should be findable on the list */ 759 } else 760 timespecclear(&aits->it_value); 761 } 762 763 764 765 /* Set and arm a POSIX realtime timer */ 766 int 767 sys___timer_settime50(struct lwp *l, 768 const struct sys___timer_settime50_args *uap, 769 register_t *retval) 770 { 771 /* { 772 syscallarg(timer_t) timerid; 773 syscallarg(int) flags; 774 syscallarg(const struct itimerspec *) value; 775 syscallarg(struct itimerspec *) ovalue; 776 } */ 777 int error; 778 struct itimerspec value, ovalue, *ovp = NULL; 779 780 if ((error = copyin(SCARG(uap, value), &value, 781 sizeof(struct itimerspec))) != 0) 782 return (error); 783 784 if (SCARG(uap, ovalue)) 785 ovp = &ovalue; 786 787 if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp, 788 SCARG(uap, flags), l->l_proc)) != 0) 789 return error; 790 791 if (ovp) 792 return copyout(&ovalue, SCARG(uap, ovalue), 793 sizeof(struct itimerspec)); 794 return 0; 795 } 796 797 int 798 dotimer_settime(int timerid, struct itimerspec *value, 799 struct itimerspec *ovalue, int flags, struct proc *p) 800 { 801 struct timespec now; 802 struct itimerspec val, oval; 803 struct ptimers *pts; 804 struct ptimer *pt; 805 int error; 806 807 pts = p->p_timers; 808 809 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 810 return EINVAL; 811 val = *value; 812 if ((error = itimespecfix(&val.it_value)) != 0 || 813 (error = itimespecfix(&val.it_interval)) != 0) 814 return error; 815 816 mutex_spin_enter(&timer_lock); 817 if ((pt = pts->pts_timers[timerid]) == NULL) { 818 mutex_spin_exit(&timer_lock); 819 return EINVAL; 820 } 821 822 oval = pt->pt_time; 823 pt->pt_time = val; 824 825 /* 826 * If we've been passed a relative time for a realtime timer, 827 * convert it to absolute; if an absolute time for a virtual 828 * timer, convert it to relative and make sure we don't set it 829 * to zero, which would cancel the timer, or let it go 830 * negative, which would confuse the comparison tests. 831 */ 832 if (timespecisset(&pt->pt_time.it_value)) { 833 if (!CLOCK_VIRTUAL_P(pt->pt_type)) { 834 if ((flags & TIMER_ABSTIME) == 0) { 835 if (pt->pt_type == CLOCK_REALTIME) { 836 getnanotime(&now); 837 } else { /* CLOCK_MONOTONIC */ 838 getnanouptime(&now); 839 } 840 timespecadd(&pt->pt_time.it_value, &now, 841 &pt->pt_time.it_value); 842 } 843 } else { 844 if ((flags & TIMER_ABSTIME) != 0) { 845 getnanotime(&now); 846 timespecsub(&pt->pt_time.it_value, &now, 847 &pt->pt_time.it_value); 848 if (!timespecisset(&pt->pt_time.it_value) || 849 pt->pt_time.it_value.tv_sec < 0) { 850 pt->pt_time.it_value.tv_sec = 0; 851 pt->pt_time.it_value.tv_nsec = 1; 852 } 853 } 854 } 855 } 856 857 timer_settime(pt); 858 mutex_spin_exit(&timer_lock); 859 860 if (ovalue) 861 *ovalue = oval; 862 863 return (0); 864 } 865 866 /* Return the time remaining until a POSIX timer fires. */ 867 int 868 sys___timer_gettime50(struct lwp *l, 869 const struct sys___timer_gettime50_args *uap, register_t *retval) 870 { 871 /* { 872 syscallarg(timer_t) timerid; 873 syscallarg(struct itimerspec *) value; 874 } */ 875 struct itimerspec its; 876 int error; 877 878 if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc, 879 &its)) != 0) 880 return error; 881 882 return copyout(&its, SCARG(uap, value), sizeof(its)); 883 } 884 885 int 886 dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its) 887 { 888 struct ptimer *pt; 889 struct ptimers *pts; 890 891 pts = p->p_timers; 892 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 893 return (EINVAL); 894 mutex_spin_enter(&timer_lock); 895 if ((pt = pts->pts_timers[timerid]) == NULL) { 896 mutex_spin_exit(&timer_lock); 897 return (EINVAL); 898 } 899 timer_gettime(pt, its); 900 mutex_spin_exit(&timer_lock); 901 902 return 0; 903 } 904 905 /* 906 * Return the count of the number of times a periodic timer expired 907 * while a notification was already pending. The counter is reset when 908 * a timer expires and a notification can be posted. 909 */ 910 int 911 sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap, 912 register_t *retval) 913 { 914 /* { 915 syscallarg(timer_t) timerid; 916 } */ 917 struct proc *p = l->l_proc; 918 struct ptimers *pts; 919 int timerid; 920 struct ptimer *pt; 921 922 timerid = SCARG(uap, timerid); 923 924 pts = p->p_timers; 925 if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX) 926 return (EINVAL); 927 mutex_spin_enter(&timer_lock); 928 if ((pt = pts->pts_timers[timerid]) == NULL) { 929 mutex_spin_exit(&timer_lock); 930 return (EINVAL); 931 } 932 *retval = pt->pt_poverruns; 933 mutex_spin_exit(&timer_lock); 934 935 return (0); 936 } 937 938 #ifdef KERN_SA 939 /* Glue function that triggers an upcall; called from userret(). */ 940 void 941 timerupcall(struct lwp *l) 942 { 943 struct ptimers *pt = l->l_proc->p_timers; 944 struct proc *p = l->l_proc; 945 unsigned int i, fired, done; 946 947 KDASSERT(l->l_proc->p_sa); 948 /* Bail out if we do not own the virtual processor */ 949 if (l->l_savp->savp_lwp != l) 950 return ; 951 952 mutex_enter(p->p_lock); 953 954 fired = pt->pts_fired; 955 done = 0; 956 while ((i = ffs(fired)) != 0) { 957 siginfo_t *si; 958 int mask = 1 << --i; 959 int f; 960 961 f = ~l->l_pflag & LP_SA_NOBLOCK; 962 l->l_pflag |= LP_SA_NOBLOCK; 963 si = siginfo_alloc(PR_WAITOK); 964 si->_info = pt->pts_timers[i]->pt_info.ksi_info; 965 if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l, 966 sizeof(*si), si, siginfo_free) != 0) { 967 siginfo_free(si); 968 /* XXX What do we do here?? */ 969 } else 970 done |= mask; 971 fired &= ~mask; 972 l->l_pflag ^= f; 973 } 974 pt->pts_fired &= ~done; 975 if (pt->pts_fired == 0) 976 l->l_proc->p_timerpend = 0; 977 978 mutex_exit(p->p_lock); 979 } 980 #endif /* KERN_SA */ 981 982 /* 983 * Real interval timer expired: 984 * send process whose timer expired an alarm signal. 985 * If time is not set up to reload, then just return. 986 * Else compute next time timer should go off which is > current time. 987 * This is where delay in processing this timeout causes multiple 988 * SIGALRM calls to be compressed into one. 989 */ 990 void 991 realtimerexpire(void *arg) 992 { 993 uint64_t last_val, next_val, interval, now_ns; 994 struct timespec now, next; 995 struct ptimer *pt; 996 int backwards; 997 998 pt = arg; 999 1000 mutex_spin_enter(&timer_lock); 1001 itimerfire(pt); 1002 1003 if (!timespecisset(&pt->pt_time.it_interval)) { 1004 timespecclear(&pt->pt_time.it_value); 1005 mutex_spin_exit(&timer_lock); 1006 return; 1007 } 1008 1009 getnanotime(&now); 1010 backwards = (timespeccmp(&pt->pt_time.it_value, &now, >)); 1011 timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next); 1012 /* Handle the easy case of non-overflown timers first. */ 1013 if (!backwards && timespeccmp(&next, &now, >)) { 1014 pt->pt_time.it_value = next; 1015 } else { 1016 now_ns = timespec2ns(&now); 1017 last_val = timespec2ns(&pt->pt_time.it_value); 1018 interval = timespec2ns(&pt->pt_time.it_interval); 1019 1020 next_val = now_ns + 1021 (now_ns - last_val + interval - 1) % interval; 1022 1023 if (backwards) 1024 next_val += interval; 1025 else 1026 pt->pt_overruns += (now_ns - last_val) / interval; 1027 1028 pt->pt_time.it_value.tv_sec = next_val / 1000000000; 1029 pt->pt_time.it_value.tv_nsec = next_val % 1000000000; 1030 } 1031 1032 /* 1033 * Don't need to check tshzto() return value, here. 1034 * callout_reset() does it for us. 1035 */ 1036 callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value), 1037 realtimerexpire, pt); 1038 mutex_spin_exit(&timer_lock); 1039 } 1040 1041 /* BSD routine to get the value of an interval timer. */ 1042 /* ARGSUSED */ 1043 int 1044 sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap, 1045 register_t *retval) 1046 { 1047 /* { 1048 syscallarg(int) which; 1049 syscallarg(struct itimerval *) itv; 1050 } */ 1051 struct proc *p = l->l_proc; 1052 struct itimerval aitv; 1053 int error; 1054 1055 error = dogetitimer(p, SCARG(uap, which), &aitv); 1056 if (error) 1057 return error; 1058 return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval))); 1059 } 1060 1061 int 1062 dogetitimer(struct proc *p, int which, struct itimerval *itvp) 1063 { 1064 struct ptimers *pts; 1065 struct ptimer *pt; 1066 struct itimerspec its; 1067 1068 if ((u_int)which > ITIMER_PROF) 1069 return (EINVAL); 1070 1071 mutex_spin_enter(&timer_lock); 1072 pts = p->p_timers; 1073 if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) { 1074 timerclear(&itvp->it_value); 1075 timerclear(&itvp->it_interval); 1076 } else { 1077 timer_gettime(pt, &its); 1078 TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value); 1079 TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval); 1080 } 1081 mutex_spin_exit(&timer_lock); 1082 1083 return 0; 1084 } 1085 1086 /* BSD routine to set/arm an interval timer. */ 1087 /* ARGSUSED */ 1088 int 1089 sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap, 1090 register_t *retval) 1091 { 1092 /* { 1093 syscallarg(int) which; 1094 syscallarg(const struct itimerval *) itv; 1095 syscallarg(struct itimerval *) oitv; 1096 } */ 1097 struct proc *p = l->l_proc; 1098 int which = SCARG(uap, which); 1099 struct sys___getitimer50_args getargs; 1100 const struct itimerval *itvp; 1101 struct itimerval aitv; 1102 int error; 1103 1104 if ((u_int)which > ITIMER_PROF) 1105 return (EINVAL); 1106 itvp = SCARG(uap, itv); 1107 if (itvp && 1108 (error = copyin(itvp, &aitv, sizeof(struct itimerval)) != 0)) 1109 return (error); 1110 if (SCARG(uap, oitv) != NULL) { 1111 SCARG(&getargs, which) = which; 1112 SCARG(&getargs, itv) = SCARG(uap, oitv); 1113 if ((error = sys___getitimer50(l, &getargs, retval)) != 0) 1114 return (error); 1115 } 1116 if (itvp == 0) 1117 return (0); 1118 1119 return dosetitimer(p, which, &aitv); 1120 } 1121 1122 int 1123 dosetitimer(struct proc *p, int which, struct itimerval *itvp) 1124 { 1125 struct timespec now; 1126 struct ptimers *pts; 1127 struct ptimer *pt, *spare; 1128 1129 KASSERT(which == CLOCK_REALTIME || which == CLOCK_VIRTUAL || 1130 which == CLOCK_PROF); 1131 if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval)) 1132 return (EINVAL); 1133 1134 /* 1135 * Don't bother allocating data structures if the process just 1136 * wants to clear the timer. 1137 */ 1138 spare = NULL; 1139 pts = p->p_timers; 1140 retry: 1141 if (!timerisset(&itvp->it_value) && (pts == NULL || 1142 pts->pts_timers[which] == NULL)) 1143 return (0); 1144 if (pts == NULL) 1145 pts = timers_alloc(p); 1146 mutex_spin_enter(&timer_lock); 1147 pt = pts->pts_timers[which]; 1148 if (pt == NULL) { 1149 if (spare == NULL) { 1150 mutex_spin_exit(&timer_lock); 1151 spare = pool_get(&ptimer_pool, PR_WAITOK); 1152 goto retry; 1153 } 1154 pt = spare; 1155 spare = NULL; 1156 pt->pt_ev.sigev_notify = SIGEV_SIGNAL; 1157 pt->pt_ev.sigev_value.sival_int = which; 1158 pt->pt_overruns = 0; 1159 pt->pt_proc = p; 1160 pt->pt_type = which; 1161 pt->pt_entry = which; 1162 pt->pt_queued = false; 1163 if (pt->pt_type == CLOCK_REALTIME) 1164 callout_init(&pt->pt_ch, CALLOUT_MPSAFE); 1165 else 1166 pt->pt_active = 0; 1167 1168 switch (which) { 1169 case ITIMER_REAL: 1170 pt->pt_ev.sigev_signo = SIGALRM; 1171 break; 1172 case ITIMER_VIRTUAL: 1173 pt->pt_ev.sigev_signo = SIGVTALRM; 1174 break; 1175 case ITIMER_PROF: 1176 pt->pt_ev.sigev_signo = SIGPROF; 1177 break; 1178 } 1179 pts->pts_timers[which] = pt; 1180 } 1181 1182 TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value); 1183 TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval); 1184 1185 if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) { 1186 /* Convert to absolute time */ 1187 /* XXX need to wrap in splclock for timecounters case? */ 1188 getnanotime(&now); 1189 timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value); 1190 } 1191 timer_settime(pt); 1192 mutex_spin_exit(&timer_lock); 1193 if (spare != NULL) 1194 pool_put(&ptimer_pool, spare); 1195 1196 return (0); 1197 } 1198 1199 /* Utility routines to manage the array of pointers to timers. */ 1200 struct ptimers * 1201 timers_alloc(struct proc *p) 1202 { 1203 struct ptimers *pts; 1204 int i; 1205 1206 pts = pool_get(&ptimers_pool, PR_WAITOK); 1207 LIST_INIT(&pts->pts_virtual); 1208 LIST_INIT(&pts->pts_prof); 1209 for (i = 0; i < TIMER_MAX; i++) 1210 pts->pts_timers[i] = NULL; 1211 pts->pts_fired = 0; 1212 mutex_spin_enter(&timer_lock); 1213 if (p->p_timers == NULL) { 1214 p->p_timers = pts; 1215 mutex_spin_exit(&timer_lock); 1216 return pts; 1217 } 1218 mutex_spin_exit(&timer_lock); 1219 pool_put(&ptimers_pool, pts); 1220 return p->p_timers; 1221 } 1222 1223 /* 1224 * Clean up the per-process timers. If "which" is set to TIMERS_ALL, 1225 * then clean up all timers and free all the data structures. If 1226 * "which" is set to TIMERS_POSIX, only clean up the timers allocated 1227 * by timer_create(), not the BSD setitimer() timers, and only free the 1228 * structure if none of those remain. 1229 */ 1230 void 1231 timers_free(struct proc *p, int which) 1232 { 1233 struct ptimers *pts; 1234 struct ptimer *ptn; 1235 struct timespec ts; 1236 int i; 1237 1238 if (p->p_timers == NULL) 1239 return; 1240 1241 pts = p->p_timers; 1242 mutex_spin_enter(&timer_lock); 1243 if (which == TIMERS_ALL) { 1244 p->p_timers = NULL; 1245 i = 0; 1246 } else { 1247 timespecclear(&ts); 1248 for (ptn = LIST_FIRST(&pts->pts_virtual); 1249 ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL]; 1250 ptn = LIST_NEXT(ptn, pt_list)) { 1251 KASSERT(ptn->pt_type == CLOCK_VIRTUAL); 1252 timespecadd(&ts, &ptn->pt_time.it_value, &ts); 1253 } 1254 LIST_FIRST(&pts->pts_virtual) = NULL; 1255 if (ptn) { 1256 KASSERT(ptn->pt_type == CLOCK_VIRTUAL); 1257 timespecadd(&ts, &ptn->pt_time.it_value, 1258 &ptn->pt_time.it_value); 1259 LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list); 1260 } 1261 timespecclear(&ts); 1262 for (ptn = LIST_FIRST(&pts->pts_prof); 1263 ptn && ptn != pts->pts_timers[ITIMER_PROF]; 1264 ptn = LIST_NEXT(ptn, pt_list)) { 1265 KASSERT(ptn->pt_type == CLOCK_PROF); 1266 timespecadd(&ts, &ptn->pt_time.it_value, &ts); 1267 } 1268 LIST_FIRST(&pts->pts_prof) = NULL; 1269 if (ptn) { 1270 KASSERT(ptn->pt_type == CLOCK_PROF); 1271 timespecadd(&ts, &ptn->pt_time.it_value, 1272 &ptn->pt_time.it_value); 1273 LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list); 1274 } 1275 i = 3; 1276 } 1277 for ( ; i < TIMER_MAX; i++) { 1278 if (pts->pts_timers[i] != NULL) { 1279 itimerfree(pts, i); 1280 mutex_spin_enter(&timer_lock); 1281 } 1282 } 1283 if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL && 1284 pts->pts_timers[2] == NULL) { 1285 p->p_timers = NULL; 1286 mutex_spin_exit(&timer_lock); 1287 pool_put(&ptimers_pool, pts); 1288 } else 1289 mutex_spin_exit(&timer_lock); 1290 } 1291 1292 static void 1293 itimerfree(struct ptimers *pts, int index) 1294 { 1295 struct ptimer *pt; 1296 1297 KASSERT(mutex_owned(&timer_lock)); 1298 1299 pt = pts->pts_timers[index]; 1300 pts->pts_timers[index] = NULL; 1301 if (!CLOCK_VIRTUAL_P(pt->pt_type)) 1302 callout_halt(&pt->pt_ch, &timer_lock); 1303 if (pt->pt_queued) 1304 TAILQ_REMOVE(&timer_queue, pt, pt_chain); 1305 mutex_spin_exit(&timer_lock); 1306 if (!CLOCK_VIRTUAL_P(pt->pt_type)) 1307 callout_destroy(&pt->pt_ch); 1308 pool_put(&ptimer_pool, pt); 1309 } 1310 1311 /* 1312 * Decrement an interval timer by a specified number 1313 * of nanoseconds, which must be less than a second, 1314 * i.e. < 1000000000. If the timer expires, then reload 1315 * it. In this case, carry over (nsec - old value) to 1316 * reduce the value reloaded into the timer so that 1317 * the timer does not drift. This routine assumes 1318 * that it is called in a context where the timers 1319 * on which it is operating cannot change in value. 1320 */ 1321 static int 1322 itimerdecr(struct ptimer *pt, int nsec) 1323 { 1324 struct itimerspec *itp; 1325 1326 KASSERT(mutex_owned(&timer_lock)); 1327 KASSERT(CLOCK_VIRTUAL_P(pt->pt_type)); 1328 1329 itp = &pt->pt_time; 1330 if (itp->it_value.tv_nsec < nsec) { 1331 if (itp->it_value.tv_sec == 0) { 1332 /* expired, and already in next interval */ 1333 nsec -= itp->it_value.tv_nsec; 1334 goto expire; 1335 } 1336 itp->it_value.tv_nsec += 1000000000; 1337 itp->it_value.tv_sec--; 1338 } 1339 itp->it_value.tv_nsec -= nsec; 1340 nsec = 0; 1341 if (timespecisset(&itp->it_value)) 1342 return (1); 1343 /* expired, exactly at end of interval */ 1344 expire: 1345 if (timespecisset(&itp->it_interval)) { 1346 itp->it_value = itp->it_interval; 1347 itp->it_value.tv_nsec -= nsec; 1348 if (itp->it_value.tv_nsec < 0) { 1349 itp->it_value.tv_nsec += 1000000000; 1350 itp->it_value.tv_sec--; 1351 } 1352 timer_settime(pt); 1353 } else 1354 itp->it_value.tv_nsec = 0; /* sec is already 0 */ 1355 return (0); 1356 } 1357 1358 static void 1359 itimerfire(struct ptimer *pt) 1360 { 1361 1362 KASSERT(mutex_owned(&timer_lock)); 1363 1364 /* 1365 * XXX Can overrun, but we don't do signal queueing yet, anyway. 1366 * XXX Relying on the clock interrupt is stupid. 1367 */ 1368 if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) || 1369 (pt->pt_ev.sigev_notify != SIGEV_SIGNAL && 1370 pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued) 1371 return; 1372 TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain); 1373 pt->pt_queued = true; 1374 softint_schedule(timer_sih); 1375 } 1376 1377 void 1378 timer_tick(lwp_t *l, bool user) 1379 { 1380 struct ptimers *pts; 1381 struct ptimer *pt; 1382 proc_t *p; 1383 1384 p = l->l_proc; 1385 if (p->p_timers == NULL) 1386 return; 1387 1388 mutex_spin_enter(&timer_lock); 1389 if ((pts = l->l_proc->p_timers) != NULL) { 1390 /* 1391 * Run current process's virtual and profile time, as needed. 1392 */ 1393 if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL) 1394 if (itimerdecr(pt, tick * 1000) == 0) 1395 itimerfire(pt); 1396 if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL) 1397 if (itimerdecr(pt, tick * 1000) == 0) 1398 itimerfire(pt); 1399 } 1400 mutex_spin_exit(&timer_lock); 1401 } 1402 1403 #ifdef KERN_SA 1404 /* 1405 * timer_sa_intr: 1406 * 1407 * SIGEV_SA handling for timer_intr(). We are called (and return) 1408 * with the timer lock held. We know that the process had SA enabled 1409 * when this timer was enqueued. As timer_intr() is a soft interrupt 1410 * handler, SA should still be enabled by the time we get here. 1411 */ 1412 static void 1413 timer_sa_intr(struct ptimer *pt, proc_t *p) 1414 { 1415 unsigned int i; 1416 struct sadata *sa; 1417 struct sadata_vp *vp; 1418 1419 /* Cause the process to generate an upcall when it returns. */ 1420 if (!p->p_timerpend) { 1421 /* 1422 * XXX stop signals can be processed inside tsleep, 1423 * which can be inside sa_yield's inner loop, which 1424 * makes testing for sa_idle alone insuffucent to 1425 * determine if we really should call setrunnable. 1426 */ 1427 pt->pt_poverruns = pt->pt_overruns; 1428 pt->pt_overruns = 0; 1429 i = 1 << pt->pt_entry; 1430 p->p_timers->pts_fired = i; 1431 p->p_timerpend = 1; 1432 1433 sa = p->p_sa; 1434 mutex_enter(&sa->sa_mutex); 1435 SLIST_FOREACH(vp, &sa->sa_vps, savp_next) { 1436 struct lwp *vp_lwp = vp->savp_lwp; 1437 lwp_lock(vp_lwp); 1438 lwp_need_userret(vp_lwp); 1439 if (vp_lwp->l_flag & LW_SA_IDLE) { 1440 vp_lwp->l_flag &= ~LW_SA_IDLE; 1441 lwp_unsleep(vp_lwp, true); 1442 break; 1443 } 1444 lwp_unlock(vp_lwp); 1445 } 1446 mutex_exit(&sa->sa_mutex); 1447 } else { 1448 i = 1 << pt->pt_entry; 1449 if ((p->p_timers->pts_fired & i) == 0) { 1450 pt->pt_poverruns = pt->pt_overruns; 1451 pt->pt_overruns = 0; 1452 p->p_timers->pts_fired |= i; 1453 } else 1454 pt->pt_overruns++; 1455 } 1456 } 1457 #endif /* KERN_SA */ 1458 1459 static void 1460 timer_intr(void *cookie) 1461 { 1462 ksiginfo_t ksi; 1463 struct ptimer *pt; 1464 proc_t *p; 1465 1466 mutex_enter(proc_lock); 1467 mutex_spin_enter(&timer_lock); 1468 while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) { 1469 TAILQ_REMOVE(&timer_queue, pt, pt_chain); 1470 KASSERT(pt->pt_queued); 1471 pt->pt_queued = false; 1472 1473 if (pt->pt_proc->p_timers == NULL) { 1474 /* Process is dying. */ 1475 continue; 1476 } 1477 p = pt->pt_proc; 1478 #ifdef KERN_SA 1479 if (pt->pt_ev.sigev_notify == SIGEV_SA) { 1480 timer_sa_intr(pt, p); 1481 continue; 1482 } 1483 #endif /* KERN_SA */ 1484 if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) 1485 continue; 1486 if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) { 1487 pt->pt_overruns++; 1488 continue; 1489 } 1490 1491 KSI_INIT(&ksi); 1492 ksi.ksi_signo = pt->pt_ev.sigev_signo; 1493 ksi.ksi_code = SI_TIMER; 1494 ksi.ksi_value = pt->pt_ev.sigev_value; 1495 pt->pt_poverruns = pt->pt_overruns; 1496 pt->pt_overruns = 0; 1497 mutex_spin_exit(&timer_lock); 1498 kpsignal(p, &ksi, NULL); 1499 mutex_spin_enter(&timer_lock); 1500 } 1501 mutex_spin_exit(&timer_lock); 1502 mutex_exit(proc_lock); 1503 } 1504 1505 /* 1506 * Check if the time will wrap if set to ts. 1507 * 1508 * ts - timespec describing the new time 1509 * delta - the delta between the current time and ts 1510 */ 1511 bool 1512 time_wraps(struct timespec *ts, struct timespec *delta) 1513 { 1514 1515 /* 1516 * Don't allow the time to be set forward so far it 1517 * will wrap and become negative, thus allowing an 1518 * attacker to bypass the next check below. The 1519 * cutoff is 1 year before rollover occurs, so even 1520 * if the attacker uses adjtime(2) to move the time 1521 * past the cutoff, it will take a very long time 1522 * to get to the wrap point. 1523 */ 1524 if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) || 1525 (delta->tv_sec < 0 || delta->tv_nsec < 0)) 1526 return true; 1527 1528 return false; 1529 } 1530