/*	$NetBSD: kern_time.c,v 1.203 2020/01/02 15:42:27 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christopher G. Demetriou, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.203 2020/01/02 15:42:27 thorpej Exp $");

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/kauth.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

static void	timer_intr(void *);
static void	itimerfire(struct ptimer *);
static void	itimerfree(struct ptimers *, int);

kmutex_t	timer_lock;

static void	*timer_sih;
static TAILQ_HEAD(, ptimer) timer_queue;

struct pool ptimer_pool, ptimers_pool;

#define	CLOCK_VIRTUAL_P(clockid)	\
	((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)

CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
CTASSERT(ITIMER_PROF == CLOCK_PROF);
CTASSERT(ITIMER_MONOTONIC == CLOCK_MONOTONIC);

#define	DELAYTIMER_MAX	32

/*
 * Initialize timekeeping.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
time_init2(void)
{

	TAILQ_INIT(&timer_queue);
	mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
	timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    timer_intr, NULL);
}

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
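
/*
 * Illustrative sketch (not part of the build): the timespecadd()/
 * timespecsub() macros from <sys/time.h> used throughout this file do
 * component-wise arithmetic with carry/borrow on the nanosecond field:
 *
 *	struct timespec a = { .tv_sec = 2, .tv_nsec = 100000000 };
 *	struct timespec b = { .tv_sec = 0, .tv_nsec = 900000000 };
 *	struct timespec d;
 *
 *	timespecsub(&a, &b, &d);	-> d == 1.200000000 s
 *	timespecadd(&d, &b, &d);	-> d == 2.100000000 s again
 */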

/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	resettodr();

	return (0);
}

int
settime(struct proc *p, struct timespec *ts)
{
	return (settime1(p, ts, true));
}

/* ARGSUSED */
int
sys___clock_gettime50(struct lwp *l,
    const struct sys___clock_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	error = clock_gettime1(SCARG(uap, clock_id), &ats);
	if (error != 0)
		return error;

	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
}

/* ARGSUSED */
int
sys___clock_settime50(struct lwp *l,
    const struct sys___clock_settime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */
	int error;
	struct timespec ats;

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return error;

	return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
}


int
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
    bool check_kauth)
{
	int error;

	if (tp->tv_nsec < 0 || tp->tv_nsec >= 1000000000L)
		return EINVAL;

	switch (clock_id) {
	case CLOCK_REALTIME:
		if ((error = settime1(p, tp, check_kauth)) != 0)
			return (error);
		break;
	case CLOCK_MONOTONIC:
		return (EINVAL);	/* read-only clock */
	default:
		return (EINVAL);
	}

	return 0;
}

int
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */
	struct timespec ts;
	int error;

	if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
		return error;

	if (SCARG(uap, tp))
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));

	return error;
}

int
clock_getres1(clockid_t clock_id, struct timespec *ts)
{

	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		ts->tv_sec = 0;
		if (tc_getfrequency() > 1000000000)
			ts->tv_nsec = 1;
		else
			ts->tv_nsec = 1000000000 / tc_getfrequency();
		break;
	default:
		return EINVAL;
	}

	return 0;
}
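
/*
 * Worked example for clock_getres1() above, with a hypothetical
 * timecounter ticking at 14318180 Hz: the reported resolution is
 * 1000000000 / 14318180 = 69 ns (integer division).  A timecounter
 * faster than 1 GHz is reported as 1 ns, the finest resolution a
 * struct timespec can express.
 */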

/* ARGSUSED */
int
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);

	error = nanosleep1(l, CLOCK_MONOTONIC, 0, &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		return error;

	error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
	return error1 ? error1 : error;
}

/* ARGSUSED */
int
sys_clock_nanosleep(struct lwp *l, const struct sys_clock_nanosleep_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */
	struct timespec rmt, rqt;
	int error, error1;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		goto out;

	error = nanosleep1(l, SCARG(uap, clock_id), SCARG(uap, flags), &rqt,
	    SCARG(uap, rmtp) ? &rmt : NULL);
	if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
		goto out;

	if ((SCARG(uap, flags) & TIMER_ABSTIME) == 0 &&
	    (error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt))) != 0)
		error = error1;
out:
	*retval = error;
	return 0;
}

int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever.
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (error == EWOULDBLOCK)
		error = 0;
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;

	return error;
}

int
sys_clock_getcpuclockid2(struct lwp *l,
    const struct sys_clock_getcpuclockid2_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(idtype_t) idtype;
		syscallarg(id_t) id;
		syscallarg(clockid_t *) clock_id;
	} */
	pid_t pid;
	lwpid_t lid;
	clockid_t clock_id;
	id_t id = SCARG(uap, id);

	switch (SCARG(uap, idtype)) {
	case P_PID:
		pid = id == 0 ? l->l_proc->p_pid : id;
		clock_id = CLOCK_PROCESS_CPUTIME_ID | pid;
		break;
	case P_LWPID:
		lid = id == 0 ? l->l_lid : id;
		clock_id = CLOCK_THREAD_CPUTIME_ID | lid;
		break;
	default:
		return EINVAL;
	}
	return copyout(&clock_id, SCARG(uap, clock_id), sizeof(clock_id));
}
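
/*
 * Example encoding (illustrative): a request for the CPU-time clock of
 * pid 1234 produces clock_id = CLOCK_PROCESS_CPUTIME_ID | 1234, i.e.
 * the process (or LWP) id is OR'ed into the reserved clock id
 * constant, and the consumer of the clock id later masks it back out
 * to locate the target process or LWP.
 */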

/* ARGSUSED */
int
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct timeval *) tp;
		syscallarg(void *) tzp;		really "struct timezone *";
	} */
	struct timeval atv;
	int error = 0;
	struct timezone tzfake;

	if (SCARG(uap, tp)) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
		if (error)
			return (error);
	}
	if (SCARG(uap, tzp)) {
		/*
		 * NetBSD has no kernel notion of time zone, so we just
		 * fake up a timezone struct and return it if demanded.
		 */
		tzfake.tz_minuteswest = 0;
		tzfake.tz_dsttime = 0;
		error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
	}
	return (error);
}

/* ARGSUSED */
int
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const void *) tzp;	really "const struct timezone *";
	} */

	return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
}

int
settimeofday1(const struct timeval *utv, bool userspace,
    const void *utzp, struct lwp *l, bool check_kauth)
{
	struct timeval atv;
	struct timespec ts;
	int error;

	/* Verify all parameters before changing time. */

	/*
	 * NetBSD has no kernel notion of time zone, and only an
	 * obsolete program would try to set it, so we log a warning.
	 */
	if (utzp)
		log(LOG_WARNING, "pid %d attempted to set the "
		    "(obsolete) kernel time zone\n", l->l_proc->p_pid);

	if (utv == NULL)
		return 0;

	if (userspace) {
		if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
			return error;
		utv = &atv;
	}

	if (utv->tv_usec < 0 || utv->tv_usec >= 1000000)
		return EINVAL;

	TIMEVAL_TO_TIMESPEC(utv, &ts);
	return settime1(l->l_proc, &ts, check_kauth);
}

int	time_adjusted;			/* set if an adjustment is made */

/* ARGSUSED */
int
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */
	int error;
	struct timeval atv, oldatv;

	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
	    KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
		return error;

	if (SCARG(uap, delta)) {
		error = copyin(SCARG(uap, delta), &atv,
		    sizeof(*SCARG(uap, delta)));
		if (error)
			return (error);
	}
	adjtime1(SCARG(uap, delta) ? &atv : NULL,
	    SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
	if (SCARG(uap, olddelta))
		error = copyout(&oldatv, SCARG(uap, olddelta),
		    sizeof(*SCARG(uap, olddelta)));
	return error;
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;  /* in kern_ntptime.c */

	if (olddelta) {
		memset(olddelta, 0, sizeof(*olddelta));
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;

		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}
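
/*
 * Worked example for the normalization in adjtime1() above: a pending
 * correction of time_adjtime = -1500000 (microseconds, i.e. -1.5 s)
 * first yields tv_sec = -1, tv_usec = -500000 under C's truncating
 * division; the fixup then produces tv_sec = -2, tv_usec = 500000,
 * which denotes the same -1.5 s with tv_usec in the canonical
 * [0, 1000000) range.
 */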

/*
 * Interval timer support.  Both the BSD getitimer() family and the POSIX
 * timer_*() family of routines are supported.
 *
 * All timers are kept in an array pointed to by p_timers, which is
 * allocated on demand - many processes don't use timers at all.  The
 * first four elements in this array are reserved for the BSD timers:
 * element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, element
 * 2 is ITIMER_PROF, and element 3 is ITIMER_MONOTONIC.  The rest may be
 * allocated by the timer_create() syscall.
 *
 * Realtime timers are kept in the ptimer structure as an absolute
 * time; virtual time timers are kept as a linked list of deltas.
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a callout
 * routine, called from the softclock() routine.  Since a callout may
 * be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realtimerexpire,
 * given below) to be delayed in real time past when it is supposed
 * to occur.  It does not suffice, therefore, to reload the real timer
 * .it_value from the real time timer's .it_interval.  Rather, we
 * compute the next time in absolute time the timer should go off.
 */
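
/*
 * Userland sketch (illustrative only) of the POSIX interface the
 * following syscalls implement; it_value arms the first expiry and
 * it_interval the periodic reload:
 *
 *	struct sigevent ev;
 *	struct itimerspec its;
 *	timer_t tid;
 *
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGALRM;
 *	timer_create(CLOCK_MONOTONIC, &ev, &tid);
 *
 *	its.it_value.tv_sec = 5;	(first expiry 5s from now)
 *	its.it_value.tv_nsec = 0;
 *	its.it_interval.tv_sec = 1;	(then every second)
 *	its.it_interval.tv_nsec = 0;
 *	timer_settime(tid, 0, &its, NULL);
 *	...
 *	timer_delete(tid);
 */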

/* Allocate a POSIX realtime timer. */
int
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct sigevent *) evp;
		syscallarg(timer_t *) timerid;
	} */

	return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
	    SCARG(uap, evp), copyin, l);
}

int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	pt = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
	if (evp != NULL) {
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
		    (pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			(pt->pt_ev.sigev_signo <= 0 ||
			 pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}

/* Delete a POSIX realtime timer */
int
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt, *ptn;

	timerid = SCARG(uap, timerid);
	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	if (CLOCK_VIRTUAL_P(pt->pt_type)) {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
			pt->pt_active = 0;
		}
	}

	/* Free the timer and release the lock. */
	itimerfree(pts, timerid);

	return (0);
}
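
/*
 * Note on the ERESTART contract of timer_settime() below: the timer
 * may be freed while timer_lock is dropped inside callout_halt(), so
 * callers redo the lookup under the lock, in the style of
 * dotimer_settime() and dosetitimer():
 *
 *	mutex_spin_enter(&timer_lock);
 * restart:
 *	if ((pt = pts->pts_timers[timerid]) == NULL)
 *		(bail out with EINVAL)
 *	...
 *	if (timer_settime(pt) == ERESTART)
 *		goto restart;
 */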

/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 *
 * If the callout had already fired but not yet run, fails with
 * ERESTART -- the caller must restart from the top to look up a timer.
 */
int
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Try to stop the callout.  However, if it had already
		 * fired, we have to drop the lock to wait for it, so
		 * the world may have changed and pt may not be there
		 * any more.  In that case, tell the caller to start
		 * over from the top.
		 */
		if (callout_halt(&pt->pt_ch, &timer_lock))
			return ERESTART;

		/* Now we can touch pt and start it up again. */
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			     ptn && timespeccmp(&pt->pt_time.it_value,
				 &ptn->pt_time.it_value, >);
			     pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}

	/* Success! */
	return 0;
}

void
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
{
	struct timespec now;
	struct ptimer *ptn;

	KASSERT(mutex_owned(&timer_lock));

	*aits = pt->pt_time;
	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If the time for the real
		 * time timer has passed, return 0; else return the
		 * difference between the current time and the time
		 * the timer is set to go off.
		 */
		if (timespecisset(&aits->it_value)) {
			if (pt->pt_type == CLOCK_REALTIME) {
				getnanotime(&now);
			} else { /* CLOCK_MONOTONIC */
				getnanouptime(&now);
			}
			if (timespeccmp(&aits->it_value, &now, <))
				timespecclear(&aits->it_value);
			else
				timespecsub(&aits->it_value, &now,
				    &aits->it_value);
		}
	} else if (pt->pt_active) {
		if (pt->pt_type == CLOCK_VIRTUAL)
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
		else
			ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
		for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
			timespecadd(&aits->it_value,
			    &ptn->pt_time.it_value, &aits->it_value);
		KASSERT(ptn != NULL); /* pt should be findable on the list */
	} else
		timespecclear(&aits->it_value);
}
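
/*
 * Worked example of the delta list maintained above (illustrative
 * numbers): three virtual timers due in 3s, 5s and 9s of process
 * virtual time are stored as the chain 3 -> 2 -> 4, each entry holding
 * its offset from the previous one.  Inserting a timer due in 6s walks
 * past 3 and 2 (6 - 3 - 2 = 1), links it in with value 1, and reduces
 * the old 4 to 3, giving 3 -> 2 -> 1 -> 3.  timer_gettime() reverses
 * this by summing the deltas up to the timer of interest.
 */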


/* Set and arm a POSIX realtime timer */
int
sys___timer_settime50(struct lwp *l,
    const struct sys___timer_settime50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(int) flags;
		syscallarg(const struct itimerspec *) value;
		syscallarg(struct itimerspec *) ovalue;
	} */
	int error;
	struct itimerspec value, ovalue, *ovp = NULL;

	if ((error = copyin(SCARG(uap, value), &value,
	    sizeof(struct itimerspec))) != 0)
		return (error);

	if (SCARG(uap, ovalue))
		ovp = &ovalue;

	if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
	    SCARG(uap, flags), l->l_proc)) != 0)
		return error;

	if (ovp)
		return copyout(&ovalue, SCARG(uap, ovalue),
		    sizeof(struct itimerspec));
	return 0;
}

int
dotimer_settime(int timerid, struct itimerspec *value,
    struct itimerspec *ovalue, int flags, struct proc *p)
{
	struct timespec now;
	struct itimerspec val, oval;
	struct ptimers *pts;
	struct ptimer *pt;
	int error;

	pts = p->p_timers;

	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return EINVAL;
	val = *value;
	if ((error = itimespecfix(&val.it_value)) != 0 ||
	    (error = itimespecfix(&val.it_interval)) != 0)
		return error;

	mutex_spin_enter(&timer_lock);
restart:
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return EINVAL;
	}

	oval = pt->pt_time;
	pt->pt_time = val;

	/*
	 * If we've been passed a relative time for a realtime timer,
	 * convert it to absolute; if an absolute time for a virtual
	 * timer, convert it to relative and make sure we don't set it
	 * to zero, which would cancel the timer, or let it go
	 * negative, which would confuse the comparison tests.
	 */
	if (timespecisset(&pt->pt_time.it_value)) {
		if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
			if ((flags & TIMER_ABSTIME) == 0) {
				if (pt->pt_type == CLOCK_REALTIME) {
					getnanotime(&now);
				} else { /* CLOCK_MONOTONIC */
					getnanouptime(&now);
				}
				timespecadd(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
			}
		} else {
			if ((flags & TIMER_ABSTIME) != 0) {
				getnanotime(&now);
				timespecsub(&pt->pt_time.it_value, &now,
				    &pt->pt_time.it_value);
				if (!timespecisset(&pt->pt_time.it_value) ||
				    pt->pt_time.it_value.tv_sec < 0) {
					pt->pt_time.it_value.tv_sec = 0;
					pt->pt_time.it_value.tv_nsec = 1;
				}
			}
		}
	}

	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);

	if (ovalue)
		*ovalue = oval;

	return (0);
}
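
/*
 * Worked example for the conversion above (illustrative numbers): a
 * relative CLOCK_MONOTONIC request of 2.0 s arriving when the uptime
 * clock reads 98.5 s is stored as the absolute deadline 100.5 s.
 * Conversely, an absolute deadline for a virtual timer that already
 * lies in the past degenerates to 0 s + 1 ns rather than 0/0, since a
 * zero it_value would be taken as "disarm the timer".
 */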

/* Return the time remaining until a POSIX timer fires. */
int
sys___timer_gettime50(struct lwp *l,
    const struct sys___timer_gettime50_args *uap, register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
		syscallarg(struct itimerspec *) value;
	} */
	struct itimerspec its;
	int error;

	if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
	    &its)) != 0)
		return error;

	return copyout(&its, SCARG(uap, value), sizeof(its));
}

int
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
{
	struct ptimer *pt;
	struct ptimers *pts;

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	timer_gettime(pt, its);
	mutex_spin_exit(&timer_lock);

	return 0;
}

/*
 * Return the count of the number of times a periodic timer expired
 * while a notification was already pending.  The counter is reset when
 * a timer expires and a notification can be posted.
 */
int
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(timer_t) timerid;
	} */
	struct proc *p = l->l_proc;
	struct ptimers *pts;
	int timerid;
	struct ptimer *pt;

	timerid = SCARG(uap, timerid);

	pts = p->p_timers;
	if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
		return (EINVAL);
	mutex_spin_enter(&timer_lock);
	if ((pt = pts->pts_timers[timerid]) == NULL) {
		mutex_spin_exit(&timer_lock);
		return (EINVAL);
	}
	*retval = pt->pt_poverruns;
	if (*retval >= DELAYTIMER_MAX)
		*retval = DELAYTIMER_MAX;
	mutex_spin_exit(&timer_lock);

	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realtimerexpire(void *arg)
{
	uint64_t last_val, next_val, interval, now_ns;
	struct timespec now, next;
	struct ptimer *pt;
	int backwards;

	pt = arg;

	mutex_spin_enter(&timer_lock);
	itimerfire(pt);

	if (!timespecisset(&pt->pt_time.it_interval)) {
		timespecclear(&pt->pt_time.it_value);
		mutex_spin_exit(&timer_lock);
		return;
	}

	if (pt->pt_type == CLOCK_MONOTONIC) {
		getnanouptime(&now);
	} else {
		getnanotime(&now);
	}
	backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
	timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
	/* Handle the easy case of non-overflown timers first. */
	if (!backwards && timespeccmp(&next, &now, >)) {
		pt->pt_time.it_value = next;
	} else {
		now_ns = timespec2ns(&now);
		last_val = timespec2ns(&pt->pt_time.it_value);
		interval = timespec2ns(&pt->pt_time.it_interval);

		next_val = now_ns +
		    (now_ns - last_val + interval - 1) % interval;

		if (backwards)
			next_val += interval;
		else
			pt->pt_overruns += (now_ns - last_val) / interval;

		pt->pt_time.it_value.tv_sec = next_val / 1000000000;
		pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
	}

	/*
	 * Reset the callout, if it's not going away.
	 *
	 * Don't need to check tshzto() return value here.
	 * callout_reset() does it for us.
	 */
	if (!pt->pt_dying)
		callout_reset(&pt->pt_ch,
		    (pt->pt_type == CLOCK_MONOTONIC
			? tshztoup(&pt->pt_time.it_value)
			: tshzto(&pt->pt_time.it_value)),
		    realtimerexpire, pt);
	mutex_spin_exit(&timer_lock);
}
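
/*
 * Worked example of the catch-up arithmetic above (in seconds for
 * readability): with last_val = 10, interval = 3 and now_ns = 18, the
 * timer is 8 s late.  (8 + 3 - 1) % 3 = 1, so next_val = 19, the first
 * point past "now" on the original 10, 13, 16, 19, ... schedule, and
 * 8 / 3 = 2 missed expirations are added to pt_overruns.
 */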

/* BSD routine to get the value of an interval timer. */
/* ARGSUSED */
int
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */
	struct proc *p = l->l_proc;
	struct itimerval aitv;
	int error;

	memset(&aitv, 0, sizeof(aitv));
	error = dogetitimer(p, SCARG(uap, which), &aitv);
	if (error)
		return error;
	return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
}

int
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct ptimers *pts;
	struct ptimer *pt;
	struct itimerspec its;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);

	mutex_spin_enter(&timer_lock);
	pts = p->p_timers;
	if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
		timerclear(&itvp->it_value);
		timerclear(&itvp->it_interval);
	} else {
		timer_gettime(pt, &its);
		TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
		TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
	}
	mutex_spin_exit(&timer_lock);

	return 0;
}

/* BSD routine to set/arm an interval timer. */
/* ARGSUSED */
int
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct sys___getitimer50_args getargs;
	const struct itimerval *itvp;
	struct itimerval aitv;
	int error;

	if ((u_int)which > ITIMER_MONOTONIC)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp &&
	    (error = copyin(itvp, &aitv, sizeof(struct itimerval))) != 0)
		return (error);
	if (SCARG(uap, oitv) != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = SCARG(uap, oitv);
		if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
			return (error);
	}
	if (itvp == NULL)
		return (0);

	return dosetitimer(p, which, &aitv);
}
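
/*
 * Userland sketch (illustrative) of the classic BSD interface handled
 * above; the semantics mirror the POSIX case, with it_value as the
 * first expiry and it_interval as the reload:
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 5;	(SIGALRM in 5s...)
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 1;	(...then every second)
 *	itv.it_interval.tv_usec = 0;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */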

int
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
{
	struct timespec now;
	struct ptimers *pts;
	struct ptimer *pt, *spare;
	int error;

	KASSERT((u_int)which <= CLOCK_MONOTONIC);
	if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
		return (EINVAL);

	/*
	 * Don't bother allocating data structures if the process just
	 * wants to clear the timer.
	 */
	spare = NULL;
	pts = p->p_timers;
retry:
	if (!timerisset(&itvp->it_value) && (pts == NULL ||
	    pts->pts_timers[which] == NULL))
		return (0);
	if (pts == NULL)
		pts = timers_alloc(p);
	mutex_spin_enter(&timer_lock);
restart:
	pt = pts->pts_timers[which];
	if (pt == NULL) {
		if (spare == NULL) {
			mutex_spin_exit(&timer_lock);
			spare = pool_get(&ptimer_pool, PR_WAITOK | PR_ZERO);
			goto retry;
		}
		pt = spare;
		spare = NULL;
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		pt->pt_ev.sigev_value.sival_int = which;
		pt->pt_overruns = 0;
		pt->pt_proc = p;
		pt->pt_type = which;
		pt->pt_entry = which;
		pt->pt_queued = false;
		if (!CLOCK_VIRTUAL_P(which))
			callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
		else
			pt->pt_active = 0;

		switch (which) {
		case ITIMER_REAL:
		case ITIMER_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case ITIMER_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case ITIMER_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pts->pts_timers[which] = pt;
	}

	TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
	TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);

	if (timespecisset(&pt->pt_time.it_value)) {
		/* Convert to absolute time */
		/* XXX need to wrap in splclock for timecounters case? */
		switch (which) {
		case ITIMER_REAL:
			getnanotime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		case ITIMER_MONOTONIC:
			getnanouptime(&now);
			timespecadd(&pt->pt_time.it_value, &now,
			    &pt->pt_time.it_value);
			break;
		default:
			break;
		}
	}
	error = timer_settime(pt);
	if (error == ERESTART) {
		KASSERT(!CLOCK_VIRTUAL_P(pt->pt_type));
		goto restart;
	}
	KASSERT(error == 0);
	mutex_spin_exit(&timer_lock);
	if (spare != NULL)
		pool_put(&ptimer_pool, spare);

	return (0);
}

/* Utility routines to manage the array of pointers to timers. */
struct ptimers *
timers_alloc(struct proc *p)
{
	struct ptimers *pts;
	int i;

	pts = pool_get(&ptimers_pool, PR_WAITOK);
	LIST_INIT(&pts->pts_virtual);
	LIST_INIT(&pts->pts_prof);
	for (i = 0; i < TIMER_MAX; i++)
		pts->pts_timers[i] = NULL;
	mutex_spin_enter(&timer_lock);
	if (p->p_timers == NULL) {
		p->p_timers = pts;
		mutex_spin_exit(&timer_lock);
		return pts;
	}
	mutex_spin_exit(&timer_lock);
	pool_put(&ptimers_pool, pts);
	return p->p_timers;
}
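
/*
 * Note the allocate-then-check pattern above: pool_get() may sleep, so
 * timers_alloc() builds the structure first, then takes timer_lock and
 * installs it only if no other thread won the race; a losing thread
 * returns its freshly built copy to the pool and uses the winner's.
 * dosetitimer() plays the same game with its "spare" ptimer.
 */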

/*
 * Clean up the per-process timers.  If "which" is set to TIMERS_ALL,
 * then clean up all timers and free all the data structures.  If
 * "which" is set to TIMERS_POSIX, only clean up the timers allocated
 * by timer_create(), not the BSD setitimer() timers, and only free the
 * structure if none of those remain.
 */
void
timers_free(struct proc *p, int which)
{
	struct ptimers *pts;
	struct ptimer *ptn;
	struct timespec ts;
	int i;

	if (p->p_timers == NULL)
		return;

	pts = p->p_timers;
	mutex_spin_enter(&timer_lock);
	if (which == TIMERS_ALL) {
		p->p_timers = NULL;
		i = 0;
	} else {
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_virtual);
		     ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_virtual) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
		}
		timespecclear(&ts);
		for (ptn = LIST_FIRST(&pts->pts_prof);
		     ptn && ptn != pts->pts_timers[ITIMER_PROF];
		     ptn = LIST_NEXT(ptn, pt_list)) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value, &ts);
		}
		LIST_FIRST(&pts->pts_prof) = NULL;
		if (ptn) {
			KASSERT(ptn->pt_type == CLOCK_PROF);
			timespecadd(&ts, &ptn->pt_time.it_value,
			    &ptn->pt_time.it_value);
			LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
		}
		i = TIMER_MIN;
	}
	for ( ; i < TIMER_MAX; i++) {
		if (pts->pts_timers[i] != NULL) {
			/* Free the timer and release the lock. */
			itimerfree(pts, i);
			/* Reacquire the lock for the next one. */
			mutex_spin_enter(&timer_lock);
		}
	}
	if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
	    pts->pts_timers[2] == NULL && pts->pts_timers[3] == NULL) {
		p->p_timers = NULL;
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimers_pool, pts);
	} else
		mutex_spin_exit(&timer_lock);
}

static void
itimerfree(struct ptimers *pts, int index)
{
	struct ptimer *pt;

	KASSERT(mutex_owned(&timer_lock));

	pt = pts->pts_timers[index];

	/*
	 * Prevent new references, and notify the callout not to
	 * restart itself.
	 */
	pts->pts_timers[index] = NULL;
	pt->pt_dying = true;

	/*
	 * For non-virtual timers, stop the callout, or wait for it to
	 * run if it has already fired.  It cannot restart again after
	 * this point: the callout won't restart itself when dying, no
	 * other users holding the lock can restart it, and any other
	 * users waiting for callout_halt concurrently (timer_settime)
	 * will restart from the top.
	 */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_halt(&pt->pt_ch, &timer_lock);

	/* Remove it from the queue to be signalled. */
	if (pt->pt_queued)
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);

	/* All done with the global state. */
	mutex_spin_exit(&timer_lock);

	/* Destroy the callout, if needed, and free the ptimer. */
	if (!CLOCK_VIRTUAL_P(pt->pt_type))
		callout_destroy(&pt->pt_ch);
	pool_put(&ptimer_pool, pt);
}

/*
 * Decrement an interval timer by a specified number of nanoseconds,
 * which must be less than a second, i.e. < 1000000000.  If the timer
 * expires, then reload it.  In this case, carry over (nsec - old value)
 * to reduce the value reloaded into the timer so that the timer does
 * not drift.  This routine assumes that it is called in a context where
 * the timers on which it is operating cannot change in value.
 */
static int
itimerdecr(struct ptimer *pt, int nsec)
{
	struct itimerspec *itp;
	int error __diagused;

	KASSERT(mutex_owned(&timer_lock));
	KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));

	itp = &pt->pt_time;
	if (itp->it_value.tv_nsec < nsec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			nsec -= itp->it_value.tv_nsec;
			goto expire;
		}
		itp->it_value.tv_nsec += 1000000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_nsec -= nsec;
	nsec = 0;
	if (timespecisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timespecisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_nsec -= nsec;
		if (itp->it_value.tv_nsec < 0) {
			itp->it_value.tv_nsec += 1000000000;
			itp->it_value.tv_sec--;
		}
		error = timer_settime(pt);
		KASSERT(error == 0); /* virtual, never fails */
	} else
		itp->it_value.tv_nsec = 0;	/* sec is already 0 */
	return (0);
}
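
/*
 * Worked example (illustrative numbers): with hz = 100, each clock
 * tick decrements a virtual timer by tick * 1000 = 10000000 ns.  If
 * it_value is 0.000000500 s when such a decrement arrives, the timer
 * has overshot by 9999500 ns; with a 1 s it_interval it is reloaded to
 * 0.990000500 s, so the long-run rate stays at one expiry per second
 * instead of drifting by the overshoot each period.
 */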

static void
itimerfire(struct ptimer *pt)
{

	KASSERT(mutex_owned(&timer_lock));

	/*
	 * XXX Can overrun, but we don't do signal queueing yet, anyway.
	 * XXX Relying on the clock interrupt is stupid.
	 */
	if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL || pt->pt_queued) {
		return;
	}
	TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
	pt->pt_queued = true;
	softint_schedule(timer_sih);
}

void
timer_tick(lwp_t *l, bool user)
{
	struct ptimers *pts;
	struct ptimer *pt;
	proc_t *p;

	p = l->l_proc;
	if (p->p_timers == NULL)
		return;

	mutex_spin_enter(&timer_lock);
	if ((pts = l->l_proc->p_timers) != NULL) {
		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
		if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
			if (itimerdecr(pt, tick * 1000) == 0)
				itimerfire(pt);
	}
	mutex_spin_exit(&timer_lock);
}

static void
timer_intr(void *cookie)
{
	ksiginfo_t ksi;
	struct ptimer *pt;
	proc_t *p;

	mutex_enter(proc_lock);
	mutex_spin_enter(&timer_lock);
	while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
		TAILQ_REMOVE(&timer_queue, pt, pt_chain);
		KASSERT(pt->pt_queued);
		pt->pt_queued = false;

		if (pt->pt_proc->p_timers == NULL) {
			/* Process is dying. */
			continue;
		}
		p = pt->pt_proc;
		if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL) {
			continue;
		}
		if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
			pt->pt_overruns++;
			continue;
		}

		KSI_INIT(&ksi);
		ksi.ksi_signo = pt->pt_ev.sigev_signo;
		ksi.ksi_code = SI_TIMER;
		ksi.ksi_value = pt->pt_ev.sigev_value;
		pt->pt_poverruns = pt->pt_overruns;
		pt->pt_overruns = 0;
		mutex_spin_exit(&timer_lock);
		kpsignal(p, &ksi, NULL);
		mutex_spin_enter(&timer_lock);
	}
	mutex_spin_exit(&timer_lock);
	mutex_exit(proc_lock);
}

/*
 * Check if the time will wrap if set to ts.
 *
 * ts - timespec describing the new time
 * delta - the delta between the current time and ts
 */
bool
time_wraps(struct timespec *ts, struct timespec *delta)
{

	/*
	 * Don't allow the time to be set forward so far it
	 * will wrap and become negative, thus allowing an
	 * attacker to bypass the next check below.  The
	 * cutoff is 1 year before rollover occurs, so even
	 * if the attacker uses adjtime(2) to move the time
	 * past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 */
	if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
	    (delta->tv_sec < 0 || delta->tv_nsec < 0))
		return true;

	return false;
}