/*	$NetBSD: kern_time.c,v 1.199 2019/08/07 07:22:12 mrg Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.199 2019/08/07 07:22:12 mrg Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/* Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (error == EWOULDBLOCK)
		error = 0;
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;

	return error;
}

int
sys_clock_getcpuclockid2(struct lwp *l,
    const struct sys_clock_getcpuclockid2_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(clockid_t *) clock_id;
	} */
	pid_t pid;
	lwpid_t lid;
	clockid_t clock_id;
	id_t id = SCARG(uap, id);

	switch (SCARG(uap, idtype)) {
	case P_PID:
		pid = id == 0 ? l->l_proc->p_pid : id;
		clock_id = CLOCK_PROCESS_CPUTIME_ID | pid;
		break;
	case P_LWPID:
		lid = id == 0 ? l->l_lid : id;
		clock_id = CLOCK_THREAD_CPUTIME_ID | lid;
		break;
	default:
		return EINVAL;
	}
	return copyout(&clock_id, SCARG(uap, clock_id), sizeof(clock_id));
}

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		memset(olddelta, 0, sizeof(*olddelta));
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support.  Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timers .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			 pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
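	/*
	 * The slot index doubles as the timer id handed back to userland,
	 * so ids below TIMER_MIN always denote the reserved setitimer()
	 * slots described above.
	 */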
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}

	/* Free the timer and release the lock. */
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 *
 * If the callout had already fired but not yet run, fails with
 * ERESTART -- caller must restart from the top to look up a timer.
 */
int
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Try to stop the callout.  However, if it had already
		 * fired, we have to drop the lock to wait for it, so
		 * the world may have changed and pt may not be there
		 * any more.  In that case, tell the caller to start
		 * over from the top.
		 */
		if (callout_halt(&pt->pt_ch, &timer_lock))
			return ERESTART;

		/* Now we can touch pt and start it up again. */
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
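			 *
			 * tshzto() converts an absolute CLOCK_REALTIME
			 * timespec into a callout delay in ticks, and
			 * tshztoup() does the same against the uptime
			 * (CLOCK_MONOTONIC) clock.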
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}

	/* Success! */
	return 0;
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}



/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
restart:
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
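 *
 * The result is clamped to DELAYTIMER_MAX, the largest overrun count
 * POSIX requires to be reported.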
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Reset the callout, if it's not going away.
	 *
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	if (!pt->pt_dying)
		callout_reset(&pt->pt_ch,
		    (pt->pt_type == CLOCK_MONOTONIC
			? tshztoup(&pt->pt_time.it_value)
			: tshzto(&pt->pt_time.it_value)),
		    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	memset(&aitv, 0, sizeof(aitv));
	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == 0)
		return (0);

	return dosetitimer(p, which, &aitv);
}

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;
	int error;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
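	 *
	 * If a new ptimer is needed, a spare is allocated with the timer
	 * lock dropped (pool_get() may sleep) and the lookup is retried.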
	 */
	spare = NULL;
	pts = p->p_timers;
retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
restart:
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (!CLOCK_VIRTUAL_P(which))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
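 *
 * TIMERS_POSIX matches the semantics needed at exec() time: POSIX
 * timers are deleted across an exec, while the BSD interval timers
 * are preserved.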
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			/* Free the timer and release the lock. */
			itimerfree(pts, i);
			/* Reacquire the lock for the next one. */
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];

	/*
	 * Prevent new references, and notify the callout not to
	 * restart itself.
	 */
	pts->pts_timers[index] = NULL;
	pt->pt_dying = true;

	/*
	 * For non-virtual timers, stop the callout, or wait for it to
	 * run if it has already fired.  It cannot restart again after
	 * this point: the callout won't restart itself when dying, no
	 * other users holding the lock can restart it, and any other
	 * users waiting for callout_halt concurrently (timer_settime)
	 * will restart from the top.
	 */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);

	/* Remove it from the queue to be signalled. */
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);

	/* All done with the global state. */
	mutex_spin_exit(&timer_lock);

	/* Destroy the callout, if needed, and free the ptimer. */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.
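 *
 * For example, with 100 ns left on the timer, nsec == 250 and a
 * 1 ms interval, the 150 ns overshoot is carried over and the
 * timer is reloaded with 1 ms minus 150 ns.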
 * This routine assumes that it is called in a context where the
 * timers on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;
	int error __diagused;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		error = timer_settime(pt);
		KASSERT(error == 0); /* virtual, never fails */
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}