/*	$NetBSD: kern_time.c,v 1.206 2020/10/27 00:07:18 nia Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.206 2020/10/27 00:07:18 nia Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;

	/*
	 * The time being set to an unreasonable value will cause
	 * unreasonable system behaviour.
	 */
	if (ts->tv_sec < 0 || ts->tv_sec > (1LL << 36))
		return (EINVAL);

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	resettodr();

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

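/*
 * Editorial note (not from the original source): a worked example of
 * the settime1() upper bound above.  1LL << 36 is 68719476736 seconds,
 * roughly 2177 years past the epoch, i.e. around the year 4147 -- any
 * plausible wall-clock time passes, while garbage such as a corrupt
 * RTC handing back a huge tv_sec is rejected with EINVAL before it
 * can reach tc_setclock().
 */
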
/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}

int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	if (tp->tv_nsec < 0 || tp->tv_nsec >= 1000000000L)
		return EINVAL;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}

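/*
 * Editorial note: worked examples of the clock_getres1() computation
 * above.  A timecounter with tc_getfrequency() == 100 reports a
 * resolution of 1000000000 / 100 = 10000000 ns (10 ms); at 1 MHz the
 * result is 1000 ns.  Above 1 GHz the division would truncate to 0,
 * so the resolution is pinned to 1 ns, the smallest nonzero value a
 * struct timespec can represent.
 */
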
/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (error == EWOULDBLOCK)
		error = 0;
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;
		int err;

		err = clock_gettime1(clock_id, &rmtend);
		if (err != 0)
			return err;

		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;

	return error;
}

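/*
 * Editorial sketch (not part of the original file): the TIMER_ABSTIME
 * path of nanosleep1() gives userland drift-free periodic wakeups via
 * clock_nanosleep(2), because an absolute deadline can simply be
 * retried after EINTR; do_periodic_work() is a hypothetical callback:
 *
 *	struct timespec next;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &next);
 *	for (;;) {
 *		next.tv_sec += 1;	// one wakeup per second
 *		while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *		    &next, NULL) == EINTR)
 *			continue;	// retry the same absolute deadline
 *		do_periodic_work();
 *	}
 */
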
444 */ 445 tzfake.tz_minuteswest = 0; 446 tzfake.tz_dsttime = 0; 447 error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake)); 448 } 449 return (error); 450 } 451 452 /* ARGSUSED */ 453 int 454 sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap, 455 register_t *retval) 456 { 457 /* { 458 syscallarg(const struct timeval *) tv; 459 syscallarg(const void *) tzp; really "const struct timezone *"; 460 } */ 461 462 return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true); 463 } 464 465 int 466 settimeofday1(const struct timeval *utv, bool userspace, 467 const void *utzp, struct lwp *l, bool check_kauth) 468 { 469 struct timeval atv; 470 struct timespec ts; 471 int error; 472 473 /* Verify all parameters before changing time. */ 474 475 /* 476 * NetBSD has no kernel notion of time zone, and only an 477 * obsolete program would try to set it, so we log a warning. 478 */ 479 if (utzp) 480 log(LOG_WARNING, "pid %d attempted to set the " 481 "(obsolete) kernel time zone\n", l->l_proc->p_pid); 482 483 if (utv == NULL) 484 return 0; 485 486 if (userspace) { 487 if ((error = copyin(utv, &atv, sizeof(atv))) != 0) 488 return error; 489 utv = &atv; 490 } 491 492 if (utv->tv_usec < 0 || utv->tv_usec >= 1000000) 493 return EINVAL; 494 495 TIMEVAL_TO_TIMESPEC(utv, &ts); 496 return settime1(l->l_proc, &ts, check_kauth); 497 } 498 499 int time_adjusted; /* set if an adjustment is made */ 500 501 /* ARGSUSED */ 502 int 503 sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap, 504 register_t *retval) 505 { 506 /* { 507 syscallarg(const struct timeval *) delta; 508 syscallarg(struct timeval *) olddelta; 509 } */ 510 int error; 511 struct timeval atv, oldatv; 512 513 if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME, 514 KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0) 515 return error; 516 517 if (SCARG(uap, delta)) { 518 error = copyin(SCARG(uap, delta), &atv, 519 sizeof(*SCARG(uap, delta))); 520 if (error) 521 return (error); 522 } 523 adjtime1(SCARG(uap, delta) ? &atv : NULL, 524 SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc); 525 if (SCARG(uap, olddelta)) 526 error = copyout(&oldatv, SCARG(uap, olddelta), 527 sizeof(*SCARG(uap, olddelta))); 528 return error; 529 } 530 531 void 532 adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p) 533 { 534 extern int64_t time_adjtime; /* in kern_ntptime.c */ 535 536 if (olddelta) { 537 memset(olddelta, 0, sizeof(*olddelta)); 538 mutex_spin_enter(&timecounter_lock); 539 olddelta->tv_sec = time_adjtime / 1000000; 540 olddelta->tv_usec = time_adjtime % 1000000; 541 if (olddelta->tv_usec < 0) { 542 olddelta->tv_usec += 1000000; 543 olddelta->tv_sec--; 544 } 545 mutex_spin_exit(&timecounter_lock); 546 } 547 548 if (delta) { 549 mutex_spin_enter(&timecounter_lock); 550 time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec; 551 552 if (time_adjtime) { 553 /* We need to save the system time during shutdown */ 554 time_adjusted |= 1; 555 } 556 mutex_spin_exit(&timecounter_lock); 557 } 558 } 559 560 /* 561 * Interval timer support. Both the BSD getitimer() family and the POSIX 562 * timer_*() family of routines are supported. 563 * 564 * All timers are kept in an array pointed to by p_timers, which is 565 * allocated on demand - many processes don't use timers at all. The 566 * first four elements in this array are reserved for the BSD timers: 567 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element 568 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC. 
/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp; really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	if (utv->tv_usec < 0 || utv->tv_usec >= 1000000)
		return EINVAL;

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		memset(olddelta, 0, sizeof(*olddelta));
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

/*
 * Interval timer support.  Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer's
 * .it_value from the real time timer's .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */

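/*
 * Editorial sketch: the POSIX half of the interface described above is
 * typically driven from userland as follows (signal choice and values
 * are arbitrary):
 *
 *	struct sigevent ev;
 *	struct itimerspec its;
 *	timer_t tid;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGALRM;
 *	timer_create(CLOCK_MONOTONIC, &ev, &tid);
 *
 *	its.it_value.tv_sec = 1;		// first expiry after 1 s
 *	its.it_value.tv_nsec = 0;
 *	its.it_interval.tv_sec = 0;		// then every 250 ms
 *	its.it_interval.tv_nsec = 250000000;
 *	timer_settime(tid, 0, &its, NULL);
 */
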
/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
			(pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			 (pt->pt_ev.sigev_signo <= 0 ||
			  pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}

	/* Free the timer and release the lock.  */
	itimerfree(pts, timerid);

	return (0);
}

/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 *
 * If the callout had already fired but not yet run, fails with
 * ERESTART -- caller must restart from the top to look up a timer.
 */
int
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Try to stop the callout.  However, if it had already
		 * fired, we have to drop the lock to wait for it, so
		 * the world may have changed and pt may not be there
		 * any more.  In that case, tell the caller to start
		 * over from the top.
		 */
		if (callout_halt(&pt->pt_ch, &timer_lock))
			return ERESTART;

		/* Now we can touch pt and start it up again.  */
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}

	/* Success!  */
	return 0;
}

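/*
 * Editorial note: a worked example of the delta encoding maintained
 * above.  Virtual timers due in 5, 8 and 12 seconds of virtual time
 * are kept on the list as 5, 3, 4 -- each entry stores its distance
 * from its predecessor.  Inserting a timer due in 6 seconds walks
 * past the 5 (shrinking the new timer to 1), inserts it there, and
 * shrinks the following 3 to 2, giving 5, 1, 2, 4.  hardclock() then
 * only ever needs to decrement the head of the list.
 */
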
void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time
		 * timer has passed return 0, else return difference
		 * between current time and time for the timer to go
		 * off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}

/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
restart:
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}

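/*
 * Editorial note: a worked example of the conversion above.  Arming a
 * CLOCK_REALTIME timer with a relative it_value of 3 s when
 * getnanotime() reads 1000 s stores an absolute 1003 s.  Conversely,
 * an absolute time handed to a CLOCK_VIRTUAL/CLOCK_PROF timer becomes
 * a delta, and a delta of <= 0 is forced up to 1 ns, since a zero
 * it_value would read as "disarmed" and a negative one would break
 * the ordered delta list.
 */
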
/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Reset the callout, if it's not going away.
	 *
	 * Don't need to check tshzto() return value, here.
	 * callout_reset() does it for us.
	 */
	if (!pt->pt_dying)
		callout_reset(&pt->pt_ch,
		    (pt->pt_type == CLOCK_MONOTONIC
			? tshztoup(&pt->pt_time.it_value)
			: tshzto(&pt->pt_time.it_value)),
		    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}

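/*
 * Editorial note: a worked example of the catch-up arithmetic in
 * realtimerexpire().  With a 100 ms interval, a timer scheduled for
 * time T whose callout only runs at T + 250 ms computes
 *
 *	next_val = now + (250 ms + 100 ms - 1 ns) % 100 ms
 *	         ~= now + 50 ms,
 *
 * i.e. the next 100 ms gridpoint after "now" (to within a nanosecond),
 * and credits 250 ms / 100 ms = 2 missed periods to pt_overruns
 * instead of delivering back-to-back signals for them.
 */
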
/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	memset(&aitv, 0, sizeof(aitv));
	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}

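/*
 * Editorial sketch: the classic BSD interface wrapped above is driven
 * from userland as follows; after this call the process gets SIGALRM
 * every 50 ms until the timer is cleared with an all-zero itimerval:
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 0;	// first expiry after 50 ms
 *	itv.it_value.tv_usec = 50000;
 *	itv.it_interval = itv.it_value;	// then every 50 ms
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */
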
int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;
	int error;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
restart:
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (!CLOCK_VIRTUAL_P(which))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}

/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			/* Free the timer and release the lock.  */
			itimerfree(pts, i);
			/* Reacquire the lock for the next one.  */
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];

	/*
	 * Prevent new references, and notify the callout not to
	 * restart itself.
	 */
	pts->pts_timers[index] = NULL;
	pt->pt_dying = true;

	/*
	 * For non-virtual timers, stop the callout, or wait for it to
	 * run if it has already fired.  It cannot restart again after
	 * this point: the callout won't restart itself when dying, no
	 * other users holding the lock can restart it, and any other
	 * users waiting for callout_halt concurrently (timer_settime)
	 * will restart from the top.
	 */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);

	/* Remove it from the queue to be signalled.  */
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);

	/* All done with the global state.  */
	mutex_spin_exit(&timer_lock);

	/* Destroy the callout, if needed, and free the ptimer.  */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number
 * of nanoseconds, which must be less than a second,
 * i.e. < 1000000000.  If the timer expires, then reload
 * it.  In this case, carry over (nsec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;
	int error __diagused;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		error = timer_settime(pt);
		KASSERT(error == 0); /* virtual, never fails */
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}

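/*
 * Editorial note: a worked example of the carry logic above, assuming
 * hz = 100 so that timer_tick() decrements by tick * 1000 = 10000000
 * ns per call.  A virtual timer with 2 ms left and a 10 ms interval
 * expires on this tick with nsec = 8 ms left over, so it is reloaded
 * with 10 ms - 8 ms = 2 ms -- keeping expirations on their original
 * 10 ms grid instead of drifting by the in-tick error.
 */
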
static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(&proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(&proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}