/*	$NetBSD: kern_time.c,v 1.189 2016/11/11 15:29:36 njoly Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.189 2016/11/11 15:29:36 njoly Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}

int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
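
/*
 * Worked example (illustrative, not part of the original source):
 * clock_getres1() reports the period of the active timecounter,
 * rounded down to whole nanoseconds.  For a hypothetical 1 MHz
 * timecounter:
 *
 *	struct timespec res;
 *	clock_getres1(CLOCK_MONOTONIC, &res);
 *	// res = { .tv_sec = 0, .tv_nsec = 1000000000 / 1000000 = 1000 }
 *
 * Any timecounter faster than 1 GHz is clamped to 1 ns, the finest
 * resolution a struct timespec can express.
 */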
/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever.
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}

int
sys_clock_getcpuclockid2(struct lwp *l,
    const struct sys_clock_getcpuclockid2_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(clockid_t *) clock_id;
	} */
	pid_t pid;
	lwpid_t lid;
	clockid_t clock_id;
	id_t id = SCARG(uap, id);

	switch (SCARG(uap, idtype)) {
	case P_PID:
		pid = id == 0 ? l->l_proc->p_pid : id;
		clock_id = CLOCK_PROCESS_CPUTIME_ID | pid;
		break;
	case P_LWPID:
		lid = id == 0 ? l->l_lid : id;
		clock_id = CLOCK_THREAD_CPUTIME_ID | lid;
		break;
	default:
		return EINVAL;
	}
	return copyout(&clock_id, SCARG(uap, clock_id), sizeof(clock_id));
}
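
/*
 * Illustrative example (not in the original source): the returned
 * clockid encodes the target in its low bits, e.g. for pid 1234
 *
 *	clock_id = CLOCK_PROCESS_CPUTIME_ID | 1234;
 *
 * so that a later clock_gettime() on that clockid can recover which
 * process (or, with CLOCK_THREAD_CPUTIME_ID, which LWP) to charge.
 */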
/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support.  Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below), to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timers .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */
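
/*
 * Worked example (illustrative, not part of the original source) of
 * the delta-list encoding used for virtual time timers: if a process
 * arms three CLOCK_VIRTUAL timers 5s, 8s and 10s of virtual time away,
 * pts_virtual stores them as the deltas
 *
 *	5s -> 3s -> 2s
 *
 * where each entry holds the time remaining after its predecessor
 * fires.  timer_tick() then only has to decrement the head of the
 * list, and timer_settime() below maintains this invariant when
 * inserting or removing an entry.
 */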
/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			 pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}
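
/*
 * Illustrative userland usage sketch (not part of this file), showing
 * the path into timer_create1() and dotimer_settime() above; the names
 * used are the standard POSIX timer API:
 *
 *	struct sigevent ev = { .sigev_notify = SIGEV_SIGNAL,
 *			       .sigev_signo = SIGALRM };
 *	struct itimerspec its = { .it_value = { 2, 0 },	    // fire in 2s
 *				  .it_interval = { 1, 0 } };// then every 1s
 *	timer_t tid;
 *	timer_create(CLOCK_MONOTONIC, &ev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */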
int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	timer_settime(pt);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
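
/*
 * Worked example (illustrative, not from the original source): if the
 * relevant clock currently reads 100s, arming a CLOCK_MONOTONIC timer
 * with a relative it_value of 2s stores the absolute value 102s, while
 * arming a CLOCK_VIRTUAL timer with TIMER_ABSTIME and an absolute
 * it_value of 102s stores the 2s difference.  The virtual case is
 * clamped up to 1 ns if the result would be zero or negative, since a
 * zero it_value means "disarmed".
 */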
/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	callout_reset(&pt->pt_ch, pt->pt_type == CLOCK_MONOTONIC ?
	    tshztoup(&pt->pt_time.it_value) : tshzto(&pt->pt_time.it_value),
	    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
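
/*
 * Worked example (illustrative, not from the original source) of the
 * overrun arithmetic above: a timer scheduled for t=10s with a 2s
 * interval whose callout is delayed until t=15s gives
 *
 *	next_val = 15s + ((15s - 10s) + 2s - 1ns) % 2s  ~= 16s
 *	pt_overruns += (15s - 10s) / 2s = 2
 *
 * i.e. the expiries due at 12s and 14s are compressed into the one
 * signal and counted as overruns, and the timer realigns on its
 * original 2s grid at 16s.
 */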
/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
 retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		/* CLOCK_REALTIME and CLOCK_MONOTONIC both need a callout. */
		if (!CLOCK_VIRTUAL_P(which))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	timer_settime(pt);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}
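
/*
 * Note (added commentary, not in the original source): dosetitimer()
 * and timers_alloc() above share a lock-ordering idiom.  A
 * pool_get(PR_WAITOK) may sleep, which is forbidden while holding the
 * timer_lock spin mutex, so each routine allocates with the lock
 * dropped and then rechecks under the lock whether another thread won
 * the race, freeing its own allocation if so.
 */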
/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			itimerfree(pts, i);
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];
	pts->pts_timers[index] = NULL;
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
	mutex_spin_exit(&timer_lock);
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number of nanoseconds,
 * which must be less than a second, i.e. < 1000000000.  If the timer
 * expires, then reload it.  In this case, carry over (nsec - old value)
 * to reduce the value reloaded into the timer so that the timer does
 * not drift.  This routine assumes that it is called in a context where
 * the timers on which it is operating cannot change in value.
 */
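/*
 * Worked example (illustrative, not part of the original source) of
 * the drift compensation described above: with hz=100 each call
 * decrements by nsec = 10000000 (10ms).  If it_value has 4ms left and
 * it_interval is 50ms, the timer expires with nsec = 10ms - 4ms = 6ms
 * of the tick left over, so it is reloaded to 50ms - 6ms = 44ms and
 * the next expiry stays on the original 50ms grid instead of drifting
 * late by up to a tick per period.
 */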
1346 */ 1347 static int 1348 itimerdecr(struct ptimer *pt, int nsec) 1349 { 1350 struct itimerspec *itp; 1351 1352 KASSERT(mutex_owned(&timer_lock)); 1353 KASSERT(CLOCK_VIRTUAL_P(pt->pt_type)); 1354 1355 itp = &pt->pt_time; 1356 if (itp->it_value.tv_nsec < nsec) { 1357 if (itp->it_value.tv_sec == 0) { 1358 /* expired, and already in next interval */ 1359 nsec -= itp->it_value.tv_nsec; 1360 goto expire; 1361 } 1362 itp->it_value.tv_nsec += 1000000000; 1363 itp->it_value.tv_sec--; 1364 } 1365 itp->it_value.tv_nsec -= nsec; 1366 nsec = 0; 1367 if (timespecisset(&itp->it_value)) 1368 return (1); 1369 /* expired, exactly at end of interval */ 1370 expire: 1371 if (timespecisset(&itp->it_interval)) { 1372 itp->it_value = itp->it_interval; 1373 itp->it_value.tv_nsec -= nsec; 1374 if (itp->it_value.tv_nsec < 0) { 1375 itp->it_value.tv_nsec += 1000000000; 1376 itp->it_value.tv_sec--; 1377 } 1378 timer_settime(pt); 1379 } else 1380 itp->it_value.tv_nsec = 0; /* sec is already 0 */ 1381 return (0); 1382 } 1383 1384 static void 1385 itimerfire(struct ptimer *pt) 1386 { 1387 1388 KASSERT(mutex_owned(&timer_lock)); 1389 1390 /* 1391 * XXX Can overrun, but we don't do signal queueing yet, anyway. 1392 * XXX Relying on the clock interrupt is stupid. 1393 */ 1394 if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) { 1395 return; 1396 } 1397 TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain); 1398 pt->pt_queued = true; 1399 softint_schedule(timer_sih); 1400 } 1401 1402 void 1403 timer_tick(lwp_t *l, bool user) 1404 { 1405 struct ptimers *pts; 1406 struct ptimer *pt; 1407 proc_t *p; 1408 1409 p = l->l_proc; 1410 if (p->p_timers == NULL) 1411 return; 1412 1413 mutex_spin_enter(&timer_lock); 1414 if ((pts = l->l_proc->p_timers) != NULL) { 1415 /* 1416 * Run current process's virtual and profile time, as needed. 1417 */ 1418 if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL) 1419 if (itimerdecr(pt, tick * 1000) == 0) 1420 itimerfire(pt); 1421 if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL) 1422 if (itimerdecr(pt, tick * 1000) == 0) 1423 itimerfire(pt); 1424 } 1425 mutex_spin_exit(&timer_lock); 1426 } 1427 1428 static void 1429 timer_intr(void *cookie) 1430 { 1431 ksiginfo_t ksi; 1432 struct ptimer *pt; 1433 proc_t *p; 1434 1435 mutex_enter(proc_lock); 1436 mutex_spin_enter(&timer_lock); 1437 while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) { 1438 TAILQ_REMOVE(&timer_queue, pt, pt_chain); 1439 KASSERT(pt->pt_queued); 1440 pt->pt_queued = false; 1441 1442 if (pt->pt_proc->p_timers == NULL) { 1443 /* Process is dying. */ 1444 continue; 1445 } 1446 p = pt->pt_proc; 1447 if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) { 1448 continue; 1449 } 1450 if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) { 1451 pt->pt_overruns++; 1452 continue; 1453 } 1454 1455 KSI_INIT(&ksi); 1456 ksi.ksi_signo = pt->pt_ev.sigev_signo; 1457 ksi.ksi_code = SI_TIMER; 1458 ksi.ksi_value = pt->pt_ev.sigev_value; 1459 pt->pt_poverruns = pt->pt_overruns; 1460 pt->pt_overruns = 0; 1461 mutex_spin_exit(&timer_lock); 1462 kpsignal(p, &ksi, NULL); 1463 mutex_spin_enter(&timer_lock); 1464 } 1465 mutex_spin_exit(&timer_lock); 1466 mutex_exit(proc_lock); 1467 } 1468 1469 /* 1470 * Check if the time will wrap if set to ts. 
1471 * 1472 * ts - timespec describing the new time 1473 * delta - the delta between the current time and ts 1474 */ 1475 bool 1476 time_wraps(struct timespec *ts, struct timespec *delta) 1477 { 1478 1479 /* 1480 * Don't allow the time to be set forward so far it 1481 * will wrap and become negative, thus allowing an 1482 * attacker to bypass the next check below. The 1483 * cutoff is 1 year before rollover occurs, so even 1484 * if the attacker uses adjtime(2) to move the time 1485 * past the cutoff, it will take a very long time 1486 * to get to the wrap point. 1487 */ 1488 if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) || 1489 (delta->tv_sec < 0 || delta->tv_nsec < 0)) 1490 return true; 1491 1492 return false; 1493 } 1494