/*	$NetBSD: kern_time.c,v 1.165 2010/04/08 11:51:13 njoly Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.165 2010/04/08 11:51:13 njoly Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
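/*
 * Note added for clarity (not in the original source): settime1() below
 * steps the clock with tc_setclock() and applies the same delta to
 * boottime, so that the recorded boot time remains consistent with the
 * newly set wall-clock time.
 */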
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

int
clock_gettime1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(ts);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ts);
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error = 0;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
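/*
 * Illustrative example, not from the original source: with a timecounter
 * running at 1000 Hz, clock_getres1() above reports a resolution of
 * 1000000000 / 1000 = 1000000 ns (1 ms); any frequency above 1 GHz is
 * reported as the finest expressible resolution, 1 ns.
 */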
/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

int
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = itimespecfix(rqt)) != 0)
		return error;

	timo = tstohz(rqt);
	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
	getnanouptime(&rmtstart);
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		getnanouptime(&rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		timespecsub(&rmtend, &rmtstart, t);
		timespecsub(rqt, t, t);
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error = 0;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support. Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all. The
 * first three elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
 * 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
 * syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below), to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timers .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */
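/*
 * Illustrative sketch, not part of the original source, of the p_timers
 * array described above:
 *
 *	pts_timers[0]			ITIMER_REAL	(BSD, SIGALRM)
 *	pts_timers[1]			ITIMER_VIRTUAL	(BSD, SIGVTALRM)
 *	pts_timers[2]			ITIMER_PROF	(BSD, SIGPROF)
 *	pts_timers[3..TIMER_MAX-1]	allocated by timer_create()
 */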

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if (id < CLOCK_REALTIME || id > CLOCK_PROF)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
		    (pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
		    (pt->pt_ev.sigev_signo <= 0 ||
		    pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = 3; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (id == CLOCK_REALTIME)
		callout_init(&pt->pt_ch, 0);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (pt->pt_type != CLOCK_REALTIME) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}
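/*
 * Worked example, not from the original source, of the delta
 * representation used for CLOCK_VIRTUAL and CLOCK_PROF timers: three
 * timers due to expire after 5, 12 and 20 seconds of process time are
 * kept on the list as the deltas 5, 7 and 8, so that only the head
 * entry has to be decremented as time passes; timer_settime() below
 * maintains this invariant when inserting and removing entries.
 */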

/*
 * Set up the given timer. The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME timers and a relative
 * time for virtual timers.
 * Must be called at splclock().
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (pt->pt_type == CLOCK_REALTIME) {
		callout_stop(&pt->pt_ch);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (pt->pt_type == CLOCK_REALTIME) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			getnanotime(&now);
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}



/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (pt->pt_type == CLOCK_REALTIME) {
			if ((flags & TIMER_ABSTIME) == 0) {
				getnanotime(&now);
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
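/*
 * Illustrative example, not from the original source: arming a
 * CLOCK_REALTIME timer without TIMER_ABSTIME and an it_value of 1.5s
 * while the clock reads 100.0s stores 101.5s in pt_time.it_value; with
 * TIMER_ABSTIME the caller's value is stored unchanged.
 */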

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	mutex_spin_exit(&timer_lock);

	return (0);
}

#ifdef KERN_SA
/* Glue function that triggers an upcall; called from userret(). */
void
timerupcall(struct lwp *l)
{
	struct ptimers *pt = l->l_proc->p_timers;
	struct proc *p = l->l_proc;
	unsigned int i, fired, done;

	KDASSERT(l->l_proc->p_sa);
	/* Bail out if we do not own the virtual processor */
	if (l->l_savp->savp_lwp != l)
		return ;

	mutex_enter(p->p_lock);

	fired = pt->pts_fired;
	done = 0;
	while ((i = ffs(fired)) != 0) {
		siginfo_t *si;
		int mask = 1 << --i;
		int f;

		f = ~l->l_pflag & LP_SA_NOBLOCK;
		l->l_pflag |= LP_SA_NOBLOCK;
		si = siginfo_alloc(PR_WAITOK);
		si->_info = pt->pts_timers[i]->pt_info.ksi_info;
		if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
		    sizeof(*si), si, siginfo_free) != 0) {
			siginfo_free(si);
			/* XXX What do we do here?? */
		} else
			done |= mask;
		fired &= ~mask;
		l->l_pflag ^= f;
	}
	pt->pts_fired &= ~done;
	if (pt->pts_fired == 0)
		l->l_proc->p_timerpend = 0;

	mutex_exit(p->p_lock);
}
#endif /* KERN_SA */
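/*
 * Note added for clarity (not in the original source): pt_overruns
 * counts expirations that occur while the previous notification is
 * still pending; when a notification is finally posted the count is
 * copied to pt_poverruns, which is what sys_timer_getoverrun() reports,
 * and pt_overruns starts again from zero.
 */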

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ms;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	getnanotime(&now);
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ms = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ms +
		    (now_ms - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ms - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}
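/*
 * Illustrative userland usage, not part of the original source:
 * setitimer(ITIMER_REAL, &it, NULL) with it.it_value = it.it_interval =
 * { 1, 0 } reaches dosetitimer() below and delivers SIGALRM to the
 * process roughly once per second.
 */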

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (pt->pt_type == CLOCK_REALTIME)
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		getnanotime(&now);
		timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}
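/*
 * Note added for clarity (not in the original source): pool allocations
 * with PR_WAITOK may sleep, so dosetitimer() above and timers_alloc()
 * below both allocate with timer_lock dropped and re-check under the
 * lock, freeing the spare object if another thread won the race.
 */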

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	pts->pts_fired = 0;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type != CLOCK_REALTIME);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = 3;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (pt->pt_type == CLOCK_REALTIME)
		callout_halt(&pt->pt_ch, &timer_lock);
	else if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (pt->pt_type == CLOCK_REALTIME)
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}
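/*
 * Note added for clarity (not in the original source): itimerdecr()
 * below is the hardclock-driven path for the virtual and profiling
 * timers; timer_tick() calls it once per tick with the tick length in
 * nanoseconds (tick * 1000).
 */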

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;

	KASSERT(mutex_owned(&timer_lock));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		timer_settime(pt);
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
	    (pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
	    pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
		return;
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

#ifdef KERN_SA
/*
 * timer_sa_intr:
 *
 *	SIGEV_SA handling for timer_intr(). We are called (and return)
 *	with the timer lock held. We know that the process had SA enabled
 *	when this timer was enqueued. As timer_intr() is a soft interrupt
 *	handler, SA should still be enabled by the time we get here.
 */
static void
timer_sa_intr(struct ptimer *pt, proc_t *p)
{
	unsigned int i;
	struct sadata *sa;
	struct sadata_vp *vp;

	/* Cause the process to generate an upcall when it returns. */
	if (!p->p_timerpend) {
		/*
		 * XXX stop signals can be processed inside tsleep,
		 * which can be inside sa_yield's inner loop, which
		 * makes testing for sa_idle alone insufficient to
		 * determine if we really should call setrunnable.
		 */
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		i = 1 << pt->pt_entry;
		p->p_timers->pts_fired = i;
		p->p_timerpend = 1;

		sa = p->p_sa;
		mutex_enter(&sa->sa_mutex);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			struct lwp *vp_lwp = vp->savp_lwp;
			lwp_lock(vp_lwp);
			lwp_need_userret(vp_lwp);
			if (vp_lwp->l_flag & LW_SA_IDLE) {
				vp_lwp->l_flag &= ~LW_SA_IDLE;
				lwp_unsleep(vp_lwp, true);
				break;
			}
			lwp_unlock(vp_lwp);
		}
		mutex_exit(&sa->sa_mutex);
	} else {
		i = 1 << pt->pt_entry;
		if ((p->p_timers->pts_fired & i) == 0) {
			pt->pt_poverruns = pt->pt_overruns;
			pt->pt_overruns = 0;
			p->p_timers->pts_fired |= i;
		} else
			pt->pt_overruns++;
	}
}
#endif /* KERN_SA */

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
#ifdef KERN_SA
		if (pt->pt_ev.sigev_notify == SIGEV_SA) {
			timer_sa_intr(pt, p);
			continue;
		}
#endif /* KERN_SA */
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
			continue;
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}